Bug Summary

File: tools/lldb/source/Target/Thread.cpp
Warning: line 67, column 29
Use of memory after it is freed

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name Thread.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D HAVE_ROUND -D LLDB_CONFIGURATION_RELEASE -D LLDB_USE_BUILTIN_DEMANGLER -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/source/Target -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Target -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/include -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/include -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn326246/include -I /usr/include/python2.7 -I /build/llvm-toolchain-snapshot-7~svn326246/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/../clang/include -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/. -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Target/../Plugins/Process/Utility -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-deprecated-declarations -Wno-unknown-pragmas -Wno-strict-aliasing -Wno-deprecated-register -Wno-vla-extension -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/source/Target -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-28-041547-14988-1 -x c++ /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Target/Thread.cpp

/build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Target/Thread.cpp

1//===-- Thread.cpp ----------------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10// C Includes
11// C++ Includes
12// Other libraries and framework includes
13// Project includes
14#include "lldb/Target/Thread.h"
15#include "Plugins/Process/Utility/UnwindLLDB.h"
16#include "Plugins/Process/Utility/UnwindMacOSXFrameBackchain.h"
17#include "lldb/Breakpoint/BreakpointLocation.h"
18#include "lldb/Core/Debugger.h"
19#include "lldb/Core/FormatEntity.h"
20#include "lldb/Core/Module.h"
21#include "lldb/Core/State.h"
22#include "lldb/Core/ValueObject.h"
23#include "lldb/Host/Host.h"
24#include "lldb/Interpreter/OptionValueFileSpecList.h"
25#include "lldb/Interpreter/OptionValueProperties.h"
26#include "lldb/Interpreter/Property.h"
27#include "lldb/Symbol/Function.h"
28#include "lldb/Target/ABI.h"
29#include "lldb/Target/DynamicLoader.h"
30#include "lldb/Target/ExecutionContext.h"
31#include "lldb/Target/Process.h"
32#include "lldb/Target/RegisterContext.h"
33#include "lldb/Target/StopInfo.h"
34#include "lldb/Target/SystemRuntime.h"
35#include "lldb/Target/Target.h"
36#include "lldb/Target/ThreadPlan.h"
37#include "lldb/Target/ThreadPlanBase.h"
38#include "lldb/Target/ThreadPlanCallFunction.h"
39#include "lldb/Target/ThreadPlanPython.h"
40#include "lldb/Target/ThreadPlanRunToAddress.h"
41#include "lldb/Target/ThreadPlanStepInRange.h"
42#include "lldb/Target/ThreadPlanStepInstruction.h"
43#include "lldb/Target/ThreadPlanStepOut.h"
44#include "lldb/Target/ThreadPlanStepOverBreakpoint.h"
45#include "lldb/Target/ThreadPlanStepOverRange.h"
46#include "lldb/Target/ThreadPlanStepThrough.h"
47#include "lldb/Target/ThreadPlanStepUntil.h"
48#include "lldb/Target/ThreadSpec.h"
49#include "lldb/Target/Unwind.h"
50#include "lldb/Utility/Log.h"
51#include "lldb/Utility/RegularExpression.h"
52#include "lldb/Utility/Stream.h"
53#include "lldb/Utility/StreamString.h"
54#include "lldb/lldb-enumerations.h"
55
56using namespace lldb;
57using namespace lldb_private;
58
59const ThreadPropertiesSP &Thread::GetGlobalProperties() {
60 // NOTE: intentional leak so we don't crash if global destructor chain gets
61 // called as other threads still use the result of this function
62 static ThreadPropertiesSP *g_settings_sp_ptr =
63 new ThreadPropertiesSP(new ThreadProperties(true));
64 return *g_settings_sp_ptr;
65}
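
For reference, the function above uses the deliberate-leak singleton idiom its NOTE comment describes. A minimal sketch of the same idiom, using hypothetical names (Settings, GetGlobalSettings) rather than LLDB types:

#include <memory>

struct Settings { bool verbose = false; };

// The shared_ptr itself is heap-allocated and never deleted, so the
// returned reference stays valid even if other threads are still running
// while static destructors execute; the leak is deliberate, as in
// Thread::GetGlobalProperties() above.
const std::shared_ptr<Settings> &GetGlobalSettings() {
  static std::shared_ptr<Settings> *g_settings =
      new std::shared_ptr<Settings>(std::make_shared<Settings>());
  return *g_settings;
}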
66
67static PropertyDefinition g_properties[] = {
68 {"step-in-avoid-nodebug", OptionValue::eTypeBoolean, true, true, nullptr,
69 nullptr,
70 "If true, step-in will not stop in functions with no debug information."},
71 {"step-out-avoid-nodebug", OptionValue::eTypeBoolean, true, false, nullptr,
72 nullptr, "If true, when step-in/step-out/step-over leave the current "
73 "frame, they will continue to step out till they come to a "
74 "function with "
75 "debug information. Passing a frame argument to step-out will "
76 "override this option."},
77 {"step-avoid-regexp", OptionValue::eTypeRegex, true, 0, "^std::", nullptr,
78 "A regular expression defining functions step-in won't stop in."},
79 {"step-avoid-libraries", OptionValue::eTypeFileSpecList, true, 0, nullptr,
80 nullptr, "A list of libraries that source stepping won't stop in."},
81 {"trace-thread", OptionValue::eTypeBoolean, false, false, nullptr, nullptr,
82 "If true, this thread will single-step and log execution."},
83 {nullptr, OptionValue::eTypeInvalid, false, 0, nullptr, nullptr, nullptr}};
84
85enum {
86 ePropertyStepInAvoidsNoDebug,
87 ePropertyStepOutAvoidsNoDebug,
88 ePropertyStepAvoidRegex,
89 ePropertyStepAvoidLibraries,
90 ePropertyEnableThreadTrace
91};
92
93class ThreadOptionValueProperties : public OptionValueProperties {
94public:
95 ThreadOptionValueProperties(const ConstString &name)
96 : OptionValueProperties(name) {}
97
98 // This constructor is used when creating ThreadOptionValueProperties when it
99 // is part of a new lldb_private::Thread instance. It will copy all current
100 // global property values as needed
101 ThreadOptionValueProperties(ThreadProperties *global_properties)
102 : OptionValueProperties(*global_properties->GetValueProperties()) {}
103
104 const Property *GetPropertyAtIndex(const ExecutionContext *exe_ctx,
105 bool will_modify,
106 uint32_t idx) const override {
107 // When getting the value for a key from the thread options, we will always
108 // try and grab the setting from the current thread if there is one. Else we
109 // just
110 // use the one from this instance.
111 if (exe_ctx) {
112 Thread *thread = exe_ctx->GetThreadPtr();
113 if (thread) {
114 ThreadOptionValueProperties *instance_properties =
115 static_cast<ThreadOptionValueProperties *>(
116 thread->GetValueProperties().get());
117 if (this != instance_properties)
118 return instance_properties->ProtectedGetPropertyAtIndex(idx);
119 }
120 }
121 return ProtectedGetPropertyAtIndex(idx);
122 }
123};
124
125ThreadProperties::ThreadProperties(bool is_global) : Properties() {
126 if (is_global) {
127 m_collection_sp.reset(
128 new ThreadOptionValueProperties(ConstString("thread")));
129 m_collection_sp->Initialize(g_properties);
130 } else
131 m_collection_sp.reset(
132 new ThreadOptionValueProperties(Thread::GetGlobalProperties().get()));
133}
134
135ThreadProperties::~ThreadProperties() = default;
136
137const RegularExpression *ThreadProperties::GetSymbolsToAvoidRegexp() {
138 const uint32_t idx = ePropertyStepAvoidRegex;
139 return m_collection_sp->GetPropertyAtIndexAsOptionValueRegex(nullptr, idx);
140}
141
142FileSpecList &ThreadProperties::GetLibrariesToAvoid() const {
143 const uint32_t idx = ePropertyStepAvoidLibraries;
144 OptionValueFileSpecList *option_value =
145 m_collection_sp->GetPropertyAtIndexAsOptionValueFileSpecList(nullptr,
146 false, idx);
147 assert(option_value);
148 return option_value->GetCurrentValue();
149}
150
151bool ThreadProperties::GetTraceEnabledState() const {
152 const uint32_t idx = ePropertyEnableThreadTrace;
153 return m_collection_sp->GetPropertyAtIndexAsBoolean(
154 nullptr, idx, g_properties[idx].default_uint_value != 0);
155}
156
157bool ThreadProperties::GetStepInAvoidsNoDebug() const {
158 const uint32_t idx = ePropertyStepInAvoidsNoDebug;
159 return m_collection_sp->GetPropertyAtIndexAsBoolean(
160 nullptr, idx, g_properties[idx].default_uint_value != 0);
161}
162
163bool ThreadProperties::GetStepOutAvoidsNoDebug() const {
164 const uint32_t idx = ePropertyStepOutAvoidsNoDebug;
165 return m_collection_sp->GetPropertyAtIndexAsBoolean(
166 nullptr, idx, g_properties[idx].default_uint_value != 0);
167}
168
169//------------------------------------------------------------------
170// Thread Event Data
171//------------------------------------------------------------------
172
173const ConstString &Thread::ThreadEventData::GetFlavorString() {
174 static ConstString g_flavor("Thread::ThreadEventData");
175 return g_flavor;
176}
177
178Thread::ThreadEventData::ThreadEventData(const lldb::ThreadSP thread_sp)
179 : m_thread_sp(thread_sp), m_stack_id() {}
180
181Thread::ThreadEventData::ThreadEventData(const lldb::ThreadSP thread_sp,
182 const StackID &stack_id)
183 : m_thread_sp(thread_sp), m_stack_id(stack_id) {}
184
185Thread::ThreadEventData::ThreadEventData() : m_thread_sp(), m_stack_id() {}
186
187Thread::ThreadEventData::~ThreadEventData() = default;
188
189void Thread::ThreadEventData::Dump(Stream *s) const {}
190
191const Thread::ThreadEventData *
192Thread::ThreadEventData::GetEventDataFromEvent(const Event *event_ptr) {
193 if (event_ptr) {
194 const EventData *event_data = event_ptr->GetData();
195 if (event_data &&
196 event_data->GetFlavor() == ThreadEventData::GetFlavorString())
197 return static_cast<const ThreadEventData *>(event_ptr->GetData());
198 }
199 return nullptr;
200}
201
202ThreadSP Thread::ThreadEventData::GetThreadFromEvent(const Event *event_ptr) {
203 ThreadSP thread_sp;
204 const ThreadEventData *event_data = GetEventDataFromEvent(event_ptr);
205 if (event_data)
206 thread_sp = event_data->GetThread();
207 return thread_sp;
208}
209
210StackID Thread::ThreadEventData::GetStackIDFromEvent(const Event *event_ptr) {
211 StackID stack_id;
212 const ThreadEventData *event_data = GetEventDataFromEvent(event_ptr);
213 if (event_data)
214 stack_id = event_data->GetStackID();
215 return stack_id;
216}
217
218StackFrameSP
219Thread::ThreadEventData::GetStackFrameFromEvent(const Event *event_ptr) {
220 const ThreadEventData *event_data = GetEventDataFromEvent(event_ptr);
221 StackFrameSP frame_sp;
222 if (event_data) {
223 ThreadSP thread_sp = event_data->GetThread();
224 if (thread_sp) {
225 frame_sp = thread_sp->GetStackFrameList()->GetFrameWithStackID(
226 event_data->GetStackID());
227 }
228 }
229 return frame_sp;
230}
231
232//------------------------------------------------------------------
233// Thread class
234//------------------------------------------------------------------
235
236ConstString &Thread::GetStaticBroadcasterClass() {
237 static ConstString class_name("lldb.thread");
238 return class_name;
239}
240
241Thread::Thread(Process &process, lldb::tid_t tid, bool use_invalid_index_id)
242 : ThreadProperties(false), UserID(tid),
243 Broadcaster(process.GetTarget().GetDebugger().GetBroadcasterManager(),
244 Thread::GetStaticBroadcasterClass().AsCString()),
245 m_process_wp(process.shared_from_this()), m_stop_info_sp(),
246 m_stop_info_stop_id(0), m_stop_info_override_stop_id(0),
247 m_index_id(use_invalid_index_id ? LLDB_INVALID_INDEX32
248 : process.GetNextThreadIndexID(tid)),
249 m_reg_context_sp(), m_state(eStateUnloaded), m_state_mutex(),
250 m_plan_stack(), m_completed_plan_stack(), m_frame_mutex(),
251 m_curr_frames_sp(), m_prev_frames_sp(),
252 m_resume_signal(LLDB_INVALID_SIGNAL_NUMBER),
253 m_resume_state(eStateRunning), m_temporary_resume_state(eStateRunning),
254 m_unwinder_ap(), m_destroy_called(false),
255 m_override_should_notify(eLazyBoolCalculate),
256 m_extended_info_fetched(false), m_extended_info() {
257 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_OBJECT));
258 if (log)
259 log->Printf("%p Thread::Thread(tid = 0x%4.4" PRIx64 ")",
260 static_cast<void *>(this), GetID());
261
262 CheckInWithManager();
263 QueueFundamentalPlan(true);
264}
265
266Thread::~Thread() {
267 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_OBJECT));
268 if (log)
269 log->Printf("%p Thread::~Thread(tid = 0x%4.4" PRIx64 ")",
270 static_cast<void *>(this), GetID());
271 /// If you hit this assert, it means your derived class forgot to call
272 /// DoDestroy in its destructor.
273 assert(m_destroy_called);
274}
275
276void Thread::DestroyThread() {
277 // Tell any plans on the plan stacks that the thread is being destroyed,
278 // since any plan whose thread goes away in the middle of its work might
279 // need to do cleanup, or in some cases NOT do cleanup...
280 for (auto plan : m_plan_stack)
281 plan->ThreadDestroyed();
282
283 for (auto plan : m_discarded_plan_stack)
284 plan->ThreadDestroyed();
285
286 for (auto plan : m_completed_plan_stack)
287 plan->ThreadDestroyed();
288
289 m_destroy_called = true;
290 m_plan_stack.clear();
291 m_discarded_plan_stack.clear();
292 m_completed_plan_stack.clear();
293
294 // Push a ThreadPlanNull on the plan stack. That way we can continue assuming
295 // that the
296 // plan stack is never empty, but if somebody errantly asks questions of a
297 // destroyed thread
298 // without checking first whether it is destroyed, they won't crash.
299 ThreadPlanSP null_plan_sp(new ThreadPlanNull(*this));
300 m_plan_stack.push_back(null_plan_sp);
301
302 m_stop_info_sp.reset();
303 m_reg_context_sp.reset();
304 m_unwinder_ap.reset();
305 std::lock_guard<std::recursive_mutex> guard(m_frame_mutex);
306 m_curr_frames_sp.reset();
307 m_prev_frames_sp.reset();
308}
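
DestroyThread() above keeps the plan stack non-empty by pushing a do-nothing sentinel. A minimal sketch of that null-object idiom, using hypothetical names (Plan, NullPlan, PlanStack) rather than LLDB types:

#include <memory>
#include <vector>

struct Plan {
  virtual ~Plan() = default;
  virtual bool ShouldStop() { return true; }
};

// Do-nothing sentinel, playing the role the ThreadPlanNull comment
// describes: stale queries get a safe default instead of a crash.
struct NullPlan : Plan {
  bool ShouldStop() override { return false; }
};

struct PlanStack {
  std::vector<std::shared_ptr<Plan>> plans;

  void Destroy() {
    plans.clear();
    plans.push_back(std::make_shared<NullPlan>()); // never left empty
  }

  // Safe to call even after Destroy(), since the stack is never empty.
  Plan &Current() { return *plans.back(); }
};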
309
310void Thread::BroadcastSelectedFrameChange(StackID &new_frame_id) {
311 if (EventTypeHasListeners(eBroadcastBitSelectedFrameChanged))
312 BroadcastEvent(eBroadcastBitSelectedFrameChanged,
313 new ThreadEventData(this->shared_from_this(), new_frame_id));
314}
315
316lldb::StackFrameSP Thread::GetSelectedFrame() {
317 StackFrameListSP stack_frame_list_sp(GetStackFrameList());
318 StackFrameSP frame_sp = stack_frame_list_sp->GetFrameAtIndex(
319 stack_frame_list_sp->GetSelectedFrameIndex());
320 FunctionOptimizationWarning(frame_sp.get());
321 return frame_sp;
322}
323
324uint32_t Thread::SetSelectedFrame(lldb_private::StackFrame *frame,
325 bool broadcast) {
326 uint32_t ret_value = GetStackFrameList()->SetSelectedFrame(frame);
327 if (broadcast)
328 BroadcastSelectedFrameChange(frame->GetStackID());
329 FunctionOptimizationWarning(frame);
330 return ret_value;
331}
332
333bool Thread::SetSelectedFrameByIndex(uint32_t frame_idx, bool broadcast) {
334 StackFrameSP frame_sp(GetStackFrameList()->GetFrameAtIndex(frame_idx));
335 if (frame_sp) {
336 GetStackFrameList()->SetSelectedFrame(frame_sp.get());
337 if (broadcast)
338 BroadcastSelectedFrameChange(frame_sp->GetStackID());
339 FunctionOptimizationWarning(frame_sp.get());
340 return true;
341 } else
342 return false;
343}
344
345bool Thread::SetSelectedFrameByIndexNoisily(uint32_t frame_idx,
346 Stream &output_stream) {
347 const bool broadcast = true;
348 bool success = SetSelectedFrameByIndex(frame_idx, broadcast);
349 if (success) {
350 StackFrameSP frame_sp = GetSelectedFrame();
351 if (frame_sp) {
352 bool already_shown = false;
353 SymbolContext frame_sc(
354 frame_sp->GetSymbolContext(eSymbolContextLineEntry));
355 if (GetProcess()->GetTarget().GetDebugger().GetUseExternalEditor() &&
356 frame_sc.line_entry.file && frame_sc.line_entry.line != 0) {
357 already_shown = Host::OpenFileInExternalEditor(
358 frame_sc.line_entry.file, frame_sc.line_entry.line);
359 }
360
361 bool show_frame_info = true;
362 bool show_source = !already_shown;
363 FunctionOptimizationWarning(frame_sp.get());
364 return frame_sp->GetStatus(output_stream, show_frame_info, show_source);
365 }
366 return false;
367 } else
368 return false;
369}
370
371void Thread::FunctionOptimizationWarning(StackFrame *frame) {
372 if (frame && frame->HasDebugInformation() &&
373 GetProcess()->GetWarningsOptimization()) {
374 SymbolContext sc =
375 frame->GetSymbolContext(eSymbolContextFunction | eSymbolContextModule);
376 GetProcess()->PrintWarningOptimization(sc);
377 }
378}
379
380lldb::StopInfoSP Thread::GetStopInfo() {
381 if (m_destroy_called)
382 return m_stop_info_sp;
383
384 ThreadPlanSP completed_plan_sp(GetCompletedPlan());
385 ProcessSP process_sp(GetProcess());
386 const uint32_t stop_id = process_sp ? process_sp->GetStopID() : UINT32_MAX;
387
388 // Here we select the stop info according to priority:
389 // - m_stop_info_sp (if not trace) - preset value
390 // - completed plan stop info - new value with plan from completed plan stack
391 // - m_stop_info_sp (trace stop reason is OK now)
392 // - ask GetPrivateStopInfo to set stop info
393
394 bool have_valid_stop_info = m_stop_info_sp &&
395 m_stop_info_sp->IsValid() &&
396 m_stop_info_stop_id == stop_id;
397 bool have_valid_completed_plan = completed_plan_sp && completed_plan_sp->PlanSucceeded();
398 bool plan_overrides_trace =
399 have_valid_stop_info && have_valid_completed_plan
400 && (m_stop_info_sp->GetStopReason() == eStopReasonTrace);
401
402 if (have_valid_stop_info && !plan_overrides_trace) {
403 return m_stop_info_sp;
404 } else if (have_valid_completed_plan) {
405 return StopInfo::CreateStopReasonWithPlan(
406 completed_plan_sp, GetReturnValueObject(), GetExpressionVariable());
407 } else {
408 GetPrivateStopInfo();
409 return m_stop_info_sp;
410 }
411}
412
413lldb::StopInfoSP Thread::GetPrivateStopInfo() {
414 if (m_destroy_called)
415 return m_stop_info_sp;
416
417 ProcessSP process_sp(GetProcess());
418 if (process_sp) {
419 const uint32_t process_stop_id = process_sp->GetStopID();
420 if (m_stop_info_stop_id != process_stop_id) {
421 if (m_stop_info_sp) {
422 if (m_stop_info_sp->IsValid() || IsStillAtLastBreakpointHit() ||
423 GetCurrentPlan()->IsVirtualStep())
424 SetStopInfo(m_stop_info_sp);
425 else
426 m_stop_info_sp.reset();
427 }
428
429 if (!m_stop_info_sp) {
430 if (!CalculateStopInfo())
431 SetStopInfo(StopInfoSP());
432 }
433 }
434
435 // The stop info can be manually set by calling Thread::SetStopInfo()
436 // prior to this function ever getting called, so we can't rely on
437 // "m_stop_info_stop_id != process_stop_id" as the condition for
438 // the if statement below, we must also check the stop info to see
439 // if we need to override it. See the header documentation in
440 // Process::GetStopInfoOverrideCallback() for more information on
441 // the stop info override callback.
442 if (m_stop_info_override_stop_id != process_stop_id) {
443 m_stop_info_override_stop_id = process_stop_id;
444 if (m_stop_info_sp) {
445 if (Architecture *arch =
446 process_sp->GetTarget().GetArchitecturePlugin())
447 arch->OverrideStopInfo(*this);
448 }
449 }
450 }
451 return m_stop_info_sp;
452}
453
454lldb::StopReason Thread::GetStopReason() {
455 lldb::StopInfoSP stop_info_sp(GetStopInfo());
456 if (stop_info_sp)
457 return stop_info_sp->GetStopReason();
458 return eStopReasonNone;
459}
460
461bool Thread::StopInfoIsUpToDate() const {
462 ProcessSP process_sp(GetProcess());
463 if (process_sp)
464 return m_stop_info_stop_id == process_sp->GetStopID();
465 else
466 return true; // Process is no longer around so stop info is always up to
467 // date...
468}
469
470void Thread::ResetStopInfo() {
471 if (m_stop_info_sp) {
472 m_stop_info_sp.reset();
473 }
474}
475
476void Thread::SetStopInfo(const lldb::StopInfoSP &stop_info_sp) {
477 m_stop_info_sp = stop_info_sp;
478 if (m_stop_info_sp) {
479 m_stop_info_sp->MakeStopInfoValid();
480 // If we are overriding the ShouldReportStop, do that here:
481 if (m_override_should_notify != eLazyBoolCalculate)
482 m_stop_info_sp->OverrideShouldNotify(m_override_should_notify ==
483 eLazyBoolYes);
484 }
485
486 ProcessSP process_sp(GetProcess());
487 if (process_sp)
488 m_stop_info_stop_id = process_sp->GetStopID();
489 else
490 m_stop_info_stop_id = UINT32_MAX;
491 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_THREAD));
492 if (log)
493 log->Printf("%p: tid = 0x%" PRIx64 ": stop info = %s (stop_id = %u)",
494 static_cast<void *>(this), GetID(),
495 stop_info_sp ? stop_info_sp->GetDescription() : "<NULL>",
496 m_stop_info_stop_id);
497}
498
499void Thread::SetShouldReportStop(Vote vote) {
500 if (vote == eVoteNoOpinion)
501 return;
502 else {
503 m_override_should_notify = (vote == eVoteYes ? eLazyBoolYes : eLazyBoolNo);
504 if (m_stop_info_sp)
505 m_stop_info_sp->OverrideShouldNotify(m_override_should_notify ==
506 eLazyBoolYes);
507 }
508}
509
510void Thread::SetStopInfoToNothing() {
511 // Note, we can't just NULL out the private reason, or the native thread
512 // implementation will try to
513 // go calculate it again. For now, just set it to a Unix Signal with an
514 // invalid signal number.
515 SetStopInfo(
516 StopInfo::CreateStopReasonWithSignal(*this, LLDB_INVALID_SIGNAL_NUMBER));
517}
518
519bool Thread::ThreadStoppedForAReason(void) {
520 return (bool)GetPrivateStopInfo();
521}
522
523bool Thread::CheckpointThreadState(ThreadStateCheckpoint &saved_state) {
524 saved_state.register_backup_sp.reset();
525 lldb::StackFrameSP frame_sp(GetStackFrameAtIndex(0));
526 if (frame_sp) {
527 lldb::RegisterCheckpointSP reg_checkpoint_sp(
528 new RegisterCheckpoint(RegisterCheckpoint::Reason::eExpression));
529 if (reg_checkpoint_sp) {
530 lldb::RegisterContextSP reg_ctx_sp(frame_sp->GetRegisterContext());
531 if (reg_ctx_sp && reg_ctx_sp->ReadAllRegisterValues(*reg_checkpoint_sp))
532 saved_state.register_backup_sp = reg_checkpoint_sp;
533 }
534 }
535 if (!saved_state.register_backup_sp)
536 return false;
537
538 saved_state.stop_info_sp = GetStopInfo();
539 ProcessSP process_sp(GetProcess());
540 if (process_sp)
541 saved_state.orig_stop_id = process_sp->GetStopID();
542 saved_state.current_inlined_depth = GetCurrentInlinedDepth();
543 saved_state.m_completed_plan_stack = m_completed_plan_stack;
544
545 return true;
546}
547
548bool Thread::RestoreRegisterStateFromCheckpoint(
549 ThreadStateCheckpoint &saved_state) {
550 if (saved_state.register_backup_sp) {
551 lldb::StackFrameSP frame_sp(GetStackFrameAtIndex(0));
552 if (frame_sp) {
553 lldb::RegisterContextSP reg_ctx_sp(frame_sp->GetRegisterContext());
554 if (reg_ctx_sp) {
555 bool ret =
556 reg_ctx_sp->WriteAllRegisterValues(*saved_state.register_backup_sp);
557
558 // Clear out all stack frames as our world just changed.
559 ClearStackFrames();
560 reg_ctx_sp->InvalidateIfNeeded(true);
561 if (m_unwinder_ap.get())
562 m_unwinder_ap->Clear();
563 return ret;
564 }
565 }
566 }
567 return false;
568}
569
570bool Thread::RestoreThreadStateFromCheckpoint(
571 ThreadStateCheckpoint &saved_state) {
572 if (saved_state.stop_info_sp)
573 saved_state.stop_info_sp->MakeStopInfoValid();
574 SetStopInfo(saved_state.stop_info_sp);
575 GetStackFrameList()->SetCurrentInlinedDepth(
576 saved_state.current_inlined_depth);
577 m_completed_plan_stack = saved_state.m_completed_plan_stack;
578 return true;
579}
580
581StateType Thread::GetState() const {
582 // If any other threads access this we will need a mutex for it
583 std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
584 return m_state;
585}
586
587void Thread::SetState(StateType state) {
588 std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
589 m_state = state;
590}
591
592void Thread::WillStop() {
593 ThreadPlan *current_plan = GetCurrentPlan();
594
595 // FIXME: I may decide to disallow threads with no plans. In which
596 // case this should go to an assert.
597
598 if (!current_plan)
599 return;
600
601 current_plan->WillStop();
602}
603
604void Thread::SetupForResume() {
605 if (GetResumeState() != eStateSuspended) {
606 // If we're at a breakpoint push the step-over breakpoint plan. Do this
607 // before
608 // telling the current plan it will resume, since we might change what the
609 // current
610 // plan is.
611
612 lldb::RegisterContextSP reg_ctx_sp(GetRegisterContext());
613 if (reg_ctx_sp) {
614 const addr_t thread_pc = reg_ctx_sp->GetPC();
615 BreakpointSiteSP bp_site_sp =
616 GetProcess()->GetBreakpointSiteList().FindByAddress(thread_pc);
617 if (bp_site_sp) {
618 // Note, don't assume there's a ThreadPlanStepOverBreakpoint, the target
619 // may not require anything
620 // special to step over a breakpoint.
621
622 ThreadPlan *cur_plan = GetCurrentPlan();
623
624 bool push_step_over_bp_plan = false;
625 if (cur_plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) {
626 ThreadPlanStepOverBreakpoint *bp_plan =
627 (ThreadPlanStepOverBreakpoint *)cur_plan;
628 if (bp_plan->GetBreakpointLoadAddress() != thread_pc)
629 push_step_over_bp_plan = true;
630 } else
631 push_step_over_bp_plan = true;
632
633 if (push_step_over_bp_plan) {
634 ThreadPlanSP step_bp_plan_sp(new ThreadPlanStepOverBreakpoint(*this));
635 if (step_bp_plan_sp) {
636 step_bp_plan_sp->SetPrivate(true);
637
638 if (GetCurrentPlan()->RunState() != eStateStepping) {
639 ThreadPlanStepOverBreakpoint *step_bp_plan =
640 static_cast<ThreadPlanStepOverBreakpoint *>(
641 step_bp_plan_sp.get());
642 step_bp_plan->SetAutoContinue(true);
643 }
644 QueueThreadPlan(step_bp_plan_sp, false);
645 }
646 }
647 }
648 }
649 }
650}
651
652bool Thread::ShouldResume(StateType resume_state) {
653 // At this point clear the completed plan stack.
654 m_completed_plan_stack.clear();
655 m_discarded_plan_stack.clear();
656 m_override_should_notify = eLazyBoolCalculate;
657
658 StateType prev_resume_state = GetTemporaryResumeState();
659
660 SetTemporaryResumeState(resume_state);
661
662 lldb::ThreadSP backing_thread_sp(GetBackingThread());
663 if (backing_thread_sp)
664 backing_thread_sp->SetTemporaryResumeState(resume_state);
665
666 // Make sure m_stop_info_sp is valid. Don't do this for threads we suspended
667 // in the previous run.
668 if (prev_resume_state != eStateSuspended)
669 GetPrivateStopInfo();
670
671 // This is a little dubious, but we are trying to limit how often we actually
672 // fetch stop info from
673 // the target, 'cause that slows down single stepping. So assume that if we
674 // got to the point where
675 // we're about to resume, and we haven't yet had to fetch the stop reason,
676 // then it doesn't need to know
677 // about the fact that we are resuming...
678 const uint32_t process_stop_id = GetProcess()->GetStopID();
679 if (m_stop_info_stop_id == process_stop_id &&
680 (m_stop_info_sp && m_stop_info_sp->IsValid())) {
681 StopInfo *stop_info = GetPrivateStopInfo().get();
682 if (stop_info)
683 stop_info->WillResume(resume_state);
684 }
685
686 // Tell all the plans that we are about to resume in case they need to clear
687 // any state.
688 // We distinguish between the plan on the top of the stack and the lower
689 // plans in case a plan needs to do any special business before it runs.
690
691 bool need_to_resume = false;
692 ThreadPlan *plan_ptr = GetCurrentPlan();
693 if (plan_ptr) {
694 need_to_resume = plan_ptr->WillResume(resume_state, true);
695
696 while ((plan_ptr = GetPreviousPlan(plan_ptr)) != nullptr) {
697 plan_ptr->WillResume(resume_state, false);
698 }
699
700 // If the WillResume for the plan says we are faking a resume, then it will
701 // have set an appropriate stop info.
702 // In that case, don't reset it here.
703
704 if (need_to_resume && resume_state != eStateSuspended) {
705 m_stop_info_sp.reset();
706 }
707 }
708
709 if (need_to_resume) {
710 ClearStackFrames();
711 // Let Thread subclasses do any special work they need to prior to resuming
712 WillResume(resume_state);
713 }
714
715 return need_to_resume;
716}
717
718void Thread::DidResume() { SetResumeSignal(LLDB_INVALID_SIGNAL_NUMBER); }
719
720void Thread::DidStop() { SetState(eStateStopped); }
721
722bool Thread::ShouldStop(Event *event_ptr) {
723 ThreadPlan *current_plan = GetCurrentPlan();
724
725 bool should_stop = true;
726
727 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
728
729 if (GetResumeState() == eStateSuspended) {
730 if (log)
731 log->Printf("Thread::%s for tid = 0x%4.4" PRIx64 " 0x%4.4" PRIx64
732 ", should_stop = 0 (ignore since thread was suspended)",
733 __FUNCTION__, GetID(), GetProtocolID());
734 return false;
735 }
736
737 if (GetTemporaryResumeState() == eStateSuspended) {
738 if (log)
739 log->Printf("Thread::%s for tid = 0x%4.4" PRIx64 " 0x%4.4" PRIx64
740 ", should_stop = 0 (ignore since thread was suspended)",
741 __FUNCTION__, GetID(), GetProtocolID());
742 return false;
743 }
744
745 // Based on the current thread plan and process stop info, check if this
746 // thread caused the process to stop. NOTE: this must take place before
747 // the plan is moved from the current plan stack to the completed plan
748 // stack.
749 if (!ThreadStoppedForAReason()) {
750 if (log)
751 log->Printf("Thread::%s for tid = 0x%4.4" PRIx64 " 0x%4.4" PRIx64
752 ", pc = 0x%16.16" PRIx64
753 ", should_stop = 0 (ignore since no stop reason)",
754 __FUNCTION__, GetID(), GetProtocolID(),
755 GetRegisterContext() ? GetRegisterContext()->GetPC()
756 : LLDB_INVALID_ADDRESS);
757 return false;
758 }
759
760 if (log) {
761 log->Printf("Thread::%s(%p) for tid = 0x%4.4" PRIx64 " 0x%4.4" PRIx64
762 ", pc = 0x%16.16" PRIx64,
763 __FUNCTION__, static_cast<void *>(this), GetID(),
764 GetProtocolID(),
765 GetRegisterContext() ? GetRegisterContext()->GetPC()
766 : LLDB_INVALID_ADDRESS);
767 log->Printf("^^^^^^^^ Thread::ShouldStop Begin ^^^^^^^^");
768 StreamString s;
769 s.IndentMore();
770 DumpThreadPlans(&s);
771 log->Printf("Plan stack initial state:\n%s", s.GetData());
772 }
773
774 // The top most plan always gets to do the trace log...
775 current_plan->DoTraceLog();
776
777 // First query the stop info's ShouldStopSynchronous. This handles
778 // "synchronous" stop reasons, for example the breakpoint
779 // command on internal breakpoints. If a synchronous stop reason says we
780 // should not stop, then we don't have to
781 // do any more work on this stop.
782 StopInfoSP private_stop_info(GetPrivateStopInfo());
783 if (private_stop_info &&
784 !private_stop_info->ShouldStopSynchronous(event_ptr)) {
785 if (log)
786 log->Printf("StopInfo::ShouldStop async callback says we should not "
787 "stop, returning ShouldStop of false.");
788 return false;
789 }
790
791 // If we've already been restarted, don't query the plans since the state they
792 // would examine is not current.
793 if (Process::ProcessEventData::GetRestartedFromEvent(event_ptr))
794 return false;
795
796 // Before the plans see the state of the world, calculate the current inlined
797 // depth.
798 GetStackFrameList()->CalculateCurrentInlinedDepth();
799
800 // If the base plan doesn't understand why we stopped, then we have to find a
801 // plan that does.
802 // If that plan is still working, then we don't need to do any more work. If
803 // the plan that explains
804 // the stop is done, then we should pop all the plans below it, and pop it,
805 // and then let the plans above it decide
806 // whether they still need to do more work.
807
808 bool done_processing_current_plan = false;
809
810 if (!current_plan->PlanExplainsStop(event_ptr)) {
811 if (current_plan->TracerExplainsStop()) {
812 done_processing_current_plan = true;
813 should_stop = false;
814 } else {
815 // If the current plan doesn't explain the stop, then find one that
816 // does and let it handle the situation.
817 ThreadPlan *plan_ptr = current_plan;
818 while ((plan_ptr = GetPreviousPlan(plan_ptr)) != nullptr) {
819 if (plan_ptr->PlanExplainsStop(event_ptr)) {
820 should_stop = plan_ptr->ShouldStop(event_ptr);
821
822 // plan_ptr explains the stop, next check whether plan_ptr is done, if
823 // so, then we should take it
824 // and all the plans below it off the stack.
825
826 if (plan_ptr->MischiefManaged()) {
827 // We're going to pop the plans up to and including the plan that
828 // explains the stop.
829 ThreadPlan *prev_plan_ptr = GetPreviousPlan(plan_ptr);
830
831 do {
832 if (should_stop)
833 current_plan->WillStop();
834 PopPlan();
835 } while ((current_plan = GetCurrentPlan()) != prev_plan_ptr);
836 // Now, if the responsible plan was not "Okay to discard" then we're
837 // done,
838 // otherwise we forward this to the next plan in the stack below.
839 done_processing_current_plan =
840 (plan_ptr->IsMasterPlan() && !plan_ptr->OkayToDiscard());
841 } else
842 done_processing_current_plan = true;
843
844 break;
845 }
846 }
847 }
848 }
849
850 if (!done_processing_current_plan) {
851 bool over_ride_stop = current_plan->ShouldAutoContinue(event_ptr);
852
853 if (log)
854 log->Printf("Plan %s explains stop, auto-continue %i.",
855 current_plan->GetName(), over_ride_stop);
856
857 // We're starting from the base plan, so just let it decide;
858 if (PlanIsBasePlan(current_plan)) {
859 should_stop = current_plan->ShouldStop(event_ptr);
860 if (log)
861 log->Printf("Base plan says should stop: %i.", should_stop);
862 } else {
863 // Otherwise, don't let the base plan override what the other plans say to
864 // do, since
865 // presumably if there were other plans they would know what to do...
866 while (1) {
867 if (PlanIsBasePlan(current_plan))
868 break;
869
870 should_stop = current_plan->ShouldStop(event_ptr);
871 if (log)
872 log->Printf("Plan %s should stop: %d.", current_plan->GetName(),
873 should_stop);
874 if (current_plan->MischiefManaged()) {
875 if (should_stop)
876 current_plan->WillStop();
877
878 // If a Master Plan wants to stop, and wants to stick on the stack, we
879 // let it.
880 // Otherwise, see if the plan's parent wants to stop.
881
882 if (should_stop && current_plan->IsMasterPlan() &&
883 !current_plan->OkayToDiscard()) {
884 PopPlan();
885 break;
886 } else {
887 PopPlan();
888
889 current_plan = GetCurrentPlan();
890 if (current_plan == nullptr) {
891 break;
892 }
893 }
894 } else {
895 break;
896 }
897 }
898 }
899
900 if (over_ride_stop)
901 should_stop = false;
902 }
903
904 // One other potential problem is that we set up a master plan, then stop in
905 // before it is complete - for instance
906 // by hitting a breakpoint during a step-over - then do some step/finish/etc
907 // operations that wind up
908 // past the end point condition of the initial plan. We don't want to strand
909 // the original plan on the stack,
910 // This code clears stale plans off the stack.
911
912 if (should_stop) {
913 ThreadPlan *plan_ptr = GetCurrentPlan();
914
915 // Discard the stale plans and all plans below them in the stack,
916 // plus move the completed plans to the completed plan stack
917 while (!PlanIsBasePlan(plan_ptr)) {
918 bool stale = plan_ptr->IsPlanStale();
919 ThreadPlan *examined_plan = plan_ptr;
920 plan_ptr = GetPreviousPlan(examined_plan);
921
922 if (stale) {
923 if (log)
924 log->Printf(
925 "Plan %s being discarded in cleanup, it says it is already done.",
926 examined_plan->GetName());
927 while (GetCurrentPlan() != examined_plan) {
928 DiscardPlan();
929 }
930 if (examined_plan->IsPlanComplete()) {
931 // plan is complete but does not explain the stop (example: step to a line
932 // with breakpoint), let us move the plan to completed_plan_stack anyway
933 PopPlan();
934 } else
935 DiscardPlan();
936 }
937 }
938 }
939
940 if (log) {
941 StreamString s;
942 s.IndentMore();
943 DumpThreadPlans(&s);
944 log->Printf("Plan stack final state:\n%s", s.GetData());
945 log->Printf("vvvvvvvv Thread::ShouldStop End (returning %i) vvvvvvvv",
946 should_stop);
947 }
948 return should_stop;
949}
950
951Vote Thread::ShouldReportStop(Event *event_ptr) {
952 StateType thread_state = GetResumeState();
953 StateType temp_thread_state = GetTemporaryResumeState();
954
955 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
956
957 if (thread_state == eStateSuspended || thread_state == eStateInvalid) {
958 if (log)
959 log->Printf("Thread::ShouldReportStop() tid = 0x%4.4" PRIx64
960 ": returning vote %i (state was suspended or invalid)",
961 GetID(), eVoteNoOpinion);
962 return eVoteNoOpinion;
963 }
964
965 if (temp_thread_state == eStateSuspended ||
966 temp_thread_state == eStateInvalid) {
967 if (log)
968 log->Printf(
969 "Thread::ShouldReportStop() tid = 0x%4.4" PRIx64"l" "x"
970 ": returning vote %i (temporary state was suspended or invalid)",
971 GetID(), eVoteNoOpinion);
972 return eVoteNoOpinion;
973 }
974
975 if (!ThreadStoppedForAReason()) {
976 if (log)
977 log->Printf("Thread::ShouldReportStop() tid = 0x%4.4" PRIx64
978 ": returning vote %i (thread didn't stop for a reason.)",
979 GetID(), eVoteNoOpinion);
980 return eVoteNoOpinion;
981 }
982
983 if (m_completed_plan_stack.size() > 0) {
984 // Don't use GetCompletedPlan here, since that suppresses private plans.
985 if (log)
986 log->Printf("Thread::ShouldReportStop() tid = 0x%4.4" PRIx64
987 ": returning vote for complete stack's back plan",
988 GetID());
989 return m_completed_plan_stack.back()->ShouldReportStop(event_ptr);
990 } else {
991 Vote thread_vote = eVoteNoOpinion;
992 ThreadPlan *plan_ptr = GetCurrentPlan();
993 while (1) {
994 if (plan_ptr->PlanExplainsStop(event_ptr)) {
995 thread_vote = plan_ptr->ShouldReportStop(event_ptr);
996 break;
997 }
998 if (PlanIsBasePlan(plan_ptr))
999 break;
1000 else
1001 plan_ptr = GetPreviousPlan(plan_ptr);
1002 }
1003 if (log)
1004 log->Printf("Thread::ShouldReportStop() tid = 0x%4.4" PRIx64
1005 ": returning vote %i for current plan",
1006 GetID(), thread_vote);
1007
1008 return thread_vote;
1009 }
1010}
1011
1012Vote Thread::ShouldReportRun(Event *event_ptr) {
1013 StateType thread_state = GetResumeState();
1014
1015 if (thread_state == eStateSuspended || thread_state == eStateInvalid) {
1016 return eVoteNoOpinion;
1017 }
1018
1019 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
1020 if (m_completed_plan_stack.size() > 0) {
1021 // Don't use GetCompletedPlan here, since that suppresses private plans.
1022 if (log)
1023 log->Printf("Current Plan for thread %d(%p) (0x%4.4" PRIx64
1024 ", %s): %s being asked whether we should report run.",
1025 GetIndexID(), static_cast<void *>(this), GetID(),
1026 StateAsCString(GetTemporaryResumeState()),
1027 m_completed_plan_stack.back()->GetName());
1028
1029 return m_completed_plan_stack.back()->ShouldReportRun(event_ptr);
1030 } else {
1031 if (log)
1032 log->Printf("Current Plan for thread %d(%p) (0x%4.4" PRIx64
1033 ", %s): %s being asked whether we should report run.",
1034 GetIndexID(), static_cast<void *>(this), GetID(),
1035 StateAsCString(GetTemporaryResumeState()),
1036 GetCurrentPlan()->GetName());
1037
1038 return GetCurrentPlan()->ShouldReportRun(event_ptr);
1039 }
1040}
1041
1042bool Thread::MatchesSpec(const ThreadSpec *spec) {
1043 return (spec == nullptr) ? true : spec->ThreadPassesBasicTests(*this);
1044}
1045
1046void Thread::PushPlan(ThreadPlanSP &thread_plan_sp) {
1047 if (thread_plan_sp) {
1048 // If the thread plan doesn't already have a tracer, give it its parent's
1049 // tracer:
1050 if (!thread_plan_sp->GetThreadPlanTracer())
1051 thread_plan_sp->SetThreadPlanTracer(
1052 m_plan_stack.back()->GetThreadPlanTracer());
1053 m_plan_stack.push_back(thread_plan_sp);
1054
1055 thread_plan_sp->DidPush();
1056
1057 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
1058 if (log) {
1059 StreamString s;
1060 thread_plan_sp->GetDescription(&s, lldb::eDescriptionLevelFull);
1061 log->Printf("Thread::PushPlan(0x%p): \"%s\", tid = 0x%4.4" PRIx64 ".",
1062 static_cast<void *>(this), s.GetData(),
1063 thread_plan_sp->GetThread().GetID());
1064 }
1065 }
1066}
1067
1068void Thread::PopPlan() {
1069 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
1070
1071 if (m_plan_stack.size() <= 1)
1072 return;
1073 else {
1074 ThreadPlanSP &plan = m_plan_stack.back();
1075 if (log) {
1076 log->Printf("Popping plan: \"%s\", tid = 0x%4.4" PRIx64 ".",
1077 plan->GetName(), plan->GetThread().GetID());
1078 }
1079 m_completed_plan_stack.push_back(plan);
1080 plan->WillPop();
1081 m_plan_stack.pop_back();
1082 }
1083}
1084
1085void Thread::DiscardPlan() {
1086 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
1087 if (m_plan_stack.size() > 1) {
1088 ThreadPlanSP &plan = m_plan_stack.back();
1089 if (log)
1090 log->Printf("Discarding plan: \"%s\", tid = 0x%4.4" PRIx64 ".",
1091 plan->GetName(), plan->GetThread().GetID());
1092
1093 m_discarded_plan_stack.push_back(plan);
1094 plan->WillPop();
1095 m_plan_stack.pop_back();
1096 }
1097}
1098
1099ThreadPlan *Thread::GetCurrentPlan() {
1100 // There will always be at least the base plan. If somebody is mucking with a
1101 // thread with an empty plan stack, we should assert right away.
1102 return m_plan_stack.empty() ? nullptr : m_plan_stack.back().get();
1103}
1104
1105ThreadPlanSP Thread::GetCompletedPlan() {
1106 ThreadPlanSP empty_plan_sp;
1107 if (!m_completed_plan_stack.empty()) {
1108 for (int i = m_completed_plan_stack.size() - 1; i >= 0; i--) {
1109 ThreadPlanSP completed_plan_sp;
1110 completed_plan_sp = m_completed_plan_stack[i];
1111 if (!completed_plan_sp->GetPrivate())
1112 return completed_plan_sp;
1113 }
1114 }
1115 return empty_plan_sp;
1116}
1117
1118ValueObjectSP Thread::GetReturnValueObject() {
1119 if (!m_completed_plan_stack.empty()) {
1120 for (int i = m_completed_plan_stack.size() - 1; i >= 0; i--) {
1121 ValueObjectSP return_valobj_sp;
1122 return_valobj_sp = m_completed_plan_stack[i]->GetReturnValueObject();
1123 if (return_valobj_sp)
1124 return return_valobj_sp;
1125 }
1126 }
1127 return ValueObjectSP();
1128}
1129
1130ExpressionVariableSP Thread::GetExpressionVariable() {
1131 if (!m_completed_plan_stack.empty()) {
1132 for (int i = m_completed_plan_stack.size() - 1; i >= 0; i--) {
1133 ExpressionVariableSP expression_variable_sp;
1134 expression_variable_sp =
1135 m_completed_plan_stack[i]->GetExpressionVariable();
1136 if (expression_variable_sp)
1137 return expression_variable_sp;
1138 }
1139 }
1140 return ExpressionVariableSP();
1141}
1142
1143bool Thread::IsThreadPlanDone(ThreadPlan *plan) {
1144 if (!m_completed_plan_stack.empty()) {
1145 for (int i = m_completed_plan_stack.size() - 1; i >= 0; i--) {
1146 if (m_completed_plan_stack[i].get() == plan)
1147 return true;
1148 }
1149 }
1150 return false;
1151}
1152
1153bool Thread::WasThreadPlanDiscarded(ThreadPlan *plan) {
1154 if (!m_discarded_plan_stack.empty()) {
1155 for (int i = m_discarded_plan_stack.size() - 1; i >= 0; i--) {
1156 if (m_discarded_plan_stack[i].get() == plan)
1157 return true;
1158 }
1159 }
1160 return false;
1161}
1162
1163bool Thread::CompletedPlanOverridesBreakpoint() {
1164 return (!m_completed_plan_stack.empty());
1165}
1166
1167ThreadPlan *Thread::GetPreviousPlan(ThreadPlan *current_plan) {
1168 if (current_plan == nullptr)
1169 return nullptr;
1170
1171 int stack_size = m_completed_plan_stack.size();
1172 for (int i = stack_size - 1; i > 0; i--) {
1173 if (current_plan == m_completed_plan_stack[i].get())
1174 return m_completed_plan_stack[i - 1].get();
1175 }
1176
1177 if (stack_size > 0 && m_completed_plan_stack[0].get() == current_plan) {
1178 return GetCurrentPlan();
1179 }
1180
1181 stack_size = m_plan_stack.size();
1182 for (int i = stack_size - 1; i > 0; i--) {
1183 if (current_plan == m_plan_stack[i].get())
1184 return m_plan_stack[i - 1].get();
1185 }
1186 return nullptr;
1187}
1188
1189void Thread::QueueThreadPlan(ThreadPlanSP &thread_plan_sp,
1190 bool abort_other_plans) {
1191 if (abort_other_plans)
1192 DiscardThreadPlans(true);
1193
1194 PushPlan(thread_plan_sp);
1195}
1196
1197void Thread::EnableTracer(bool value, bool single_stepping) {
1198 int stack_size = m_plan_stack.size();
1199 for (int i = 0; i < stack_size; i++) {
1200 if (m_plan_stack[i]->GetThreadPlanTracer()) {
1201 m_plan_stack[i]->GetThreadPlanTracer()->EnableTracing(value);
1202 m_plan_stack[i]->GetThreadPlanTracer()->EnableSingleStep(single_stepping);
1203 }
1204 }
1205}
1206
1207void Thread::SetTracer(lldb::ThreadPlanTracerSP &tracer_sp) {
1208 int stack_size = m_plan_stack.size();
1209 for (int i = 0; i < stack_size; i++)
1. Assuming 'i' is < 'stack_size'
2. Loop condition is true. Entering loop body
25. Assuming 'i' is < 'stack_size'
26. Loop condition is true. Entering loop body
1210 m_plan_stack[i]->SetThreadPlanTracer(tracer_sp);
3. Calling 'ThreadPlan::SetThreadPlanTracer'
24. Returning; memory was released
27. Calling 'ThreadPlan::SetThreadPlanTracer'
1211}
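
The path notes above end with "Returning; memory was released" followed by a second call on the same object, the classic use-after-free shape: the loop keeps operating on storage that, according to the notes, was released inside ThreadPlan::SetThreadPlanTracer (code not shown in this file). A minimal self-contained sketch of that defect class, using hypothetical names (Tracer, Plan) rather than the actual LLDB path:

#include <cstdio>
#include <memory>
#include <vector>

struct Tracer { int id; };

struct Plan {
  std::shared_ptr<Tracer> tracer;
  void SetTracer(const std::shared_ptr<Tracer> &t) { tracer = t; }
};

int main() {
  std::vector<Plan> plans(2);
  plans[0].tracer = std::make_shared<Tracer>(Tracer{1});

  // A raw pointer carries no ownership.
  Tracer *raw = plans[0].tracer.get();

  // Replacing the only shared_ptr releases the first Tracer...
  plans[0].SetTracer(std::make_shared<Tracer>(Tracer{2}));

  // ...so this read is a use of memory after it is freed, the defect
  // class this report flags for the SetThreadPlanTracer loop above.
  std::printf("%d\n", raw->id);
  return 0;
}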
1212
1213bool Thread::DiscardUserThreadPlansUpToIndex(uint32_t thread_index) {
1214 // Count the user thread plans from the back end to get the number of the one
1215 // we want
1216 // to discard:
1217
1218 uint32_t idx = 0;
1219 ThreadPlan *up_to_plan_ptr = nullptr;
1220
1221 for (ThreadPlanSP plan_sp : m_plan_stack) {
1222 if (plan_sp->GetPrivate())
1223 continue;
1224 if (idx == thread_index) {
1225 up_to_plan_ptr = plan_sp.get();
1226 break;
1227 } else
1228 idx++;
1229 }
1230
1231 if (up_to_plan_ptr == nullptr)
1232 return false;
1233
1234 DiscardThreadPlansUpToPlan(up_to_plan_ptr);
1235 return true;
1236}
1237
1238void Thread::DiscardThreadPlansUpToPlan(lldb::ThreadPlanSP &up_to_plan_sp) {
1239 DiscardThreadPlansUpToPlan(up_to_plan_sp.get());
1240}
1241
1242void Thread::DiscardThreadPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
1243 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
1244 if (log)
1245 log->Printf("Discarding thread plans for thread tid = 0x%4.4" PRIx64
1246 ", up to %p",
1247 GetID(), static_cast<void *>(up_to_plan_ptr));
1248
1249 int stack_size = m_plan_stack.size();
1250
1251 // If the input plan is nullptr, discard all plans. Otherwise make sure this
1252 // plan is in the
1253 // stack, and if so discard up to and including it.
1254
1255 if (up_to_plan_ptr == nullptr) {
1256 for (int i = stack_size - 1; i > 0; i--)
1257 DiscardPlan();
1258 } else {
1259 bool found_it = false;
1260 for (int i = stack_size - 1; i > 0; i--) {
1261 if (m_plan_stack[i].get() == up_to_plan_ptr)
1262 found_it = true;
1263 }
1264 if (found_it) {
1265 bool last_one = false;
1266 for (int i = stack_size - 1; i > 0 && !last_one; i--) {
1267 if (GetCurrentPlan() == up_to_plan_ptr)
1268 last_one = true;
1269 DiscardPlan();
1270 }
1271 }
1272 }
1273}
1274
1275void Thread::DiscardThreadPlans(bool force) {
1276 Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
1277 if (log) {
1278 log->Printf("Discarding thread plans for thread (tid = 0x%4.4" PRIx64
1279 ", force %d)",
1280 GetID(), force);
1281 }
1282
1283 if (force) {
1284 int stack_size = m_plan_stack.size();
1285 for (int i = stack_size - 1; i > 0; i--) {
1286 DiscardPlan();
1287 }
1288 return;
1289 }
1290
1291 while (1) {
1292 int master_plan_idx;
1293 bool discard = true;
1294
1295 // Find the first master plan, see if it wants discarding, and if yes
1296 // discard up to it.
1297 for (master_plan_idx = m_plan_stack.size() - 1; master_plan_idx >= 0;
1298 master_plan_idx--) {
1299 if (m_plan_stack[master_plan_idx]->IsMasterPlan()) {
1300 discard = m_plan_stack[master_plan_idx]->OkayToDiscard();
1301 break;
1302 }
1303 }
1304
1305 if (discard) {
1306 // First pop all the dependent plans:
1307 for (int i = m_plan_stack.size() - 1; i > master_plan_idx; i--) {
1308 // FIXME: Do we need a finalize here, or is the rule that
1309 // "PrepareForStop"
1310 // for the plan leaves it in a state that it is safe to pop the plan
1311 // with no more notice?
1312 DiscardPlan();
1313 }
1314
1315 // Now discard the master plan itself.
1316 // The bottom-most plan never gets discarded. "OkayToDiscard" for it
1317 // means
1318 // discard its dependent plans, but not it...
1319 if (master_plan_idx > 0) {
1320 DiscardPlan();
1321 }
1322 } else {
1323 // If the master plan doesn't want to get discarded, then we're done.
1324 break;
1325 }
1326 }
1327}
1328
1329bool Thread::PlanIsBasePlan(ThreadPlan *plan_ptr) {
1330 if (plan_ptr->IsBasePlan())
1331 return true;
1332 else if (m_plan_stack.size() == 0)
1333 return false;
1334 else
1335 return m_plan_stack[0].get() == plan_ptr;
1336}
1337
1338Status Thread::UnwindInnermostExpression() {
1339 Status error;
1340 int stack_size = m_plan_stack.size();
1341
1342 // If the input plan is nullptr, discard all plans. Otherwise make sure this
1343 // plan is in the
1344 // stack, and if so discard up to and including it.
1345
1346 for (int i = stack_size - 1; i > 0; i--) {
1347 if (m_plan_stack[i]->GetKind() == ThreadPlan::eKindCallFunction) {
1348 DiscardThreadPlansUpToPlan(m_plan_stack[i].get());
1349 return error;
1350 }
1351 }
1352 error.SetErrorString("No expressions currently active on this thread");
1353 return error;
1354}
1355
1356ThreadPlanSP Thread::QueueFundamentalPlan(bool abort_other_plans) {
1357 ThreadPlanSP thread_plan_sp(new ThreadPlanBase(*this));
1358 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1359 return thread_plan_sp;
1360}
1361
1362ThreadPlanSP Thread::QueueThreadPlanForStepSingleInstruction(
1363 bool step_over, bool abort_other_plans, bool stop_other_threads) {
1364 ThreadPlanSP thread_plan_sp(new ThreadPlanStepInstruction(
1365 *this, step_over, stop_other_threads, eVoteNoOpinion, eVoteNoOpinion));
1366 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1367 return thread_plan_sp;
1368}
1369
1370ThreadPlanSP Thread::QueueThreadPlanForStepOverRange(
1371 bool abort_other_plans, const AddressRange &range,
1372 const SymbolContext &addr_context, lldb::RunMode stop_other_threads,
1373 LazyBool step_out_avoids_code_withoug_debug_info) {
1374 ThreadPlanSP thread_plan_sp;
1375 thread_plan_sp.reset(new ThreadPlanStepOverRange(
1376 *this, range, addr_context, stop_other_threads,
1377 step_out_avoids_code_withoug_debug_info));
1378
1379 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1380 return thread_plan_sp;
1381}
1382
1383// Call the QueueThreadPlanForStepOverRange method which takes an address range.
1384ThreadPlanSP Thread::QueueThreadPlanForStepOverRange(
1385 bool abort_other_plans, const LineEntry &line_entry,
1386 const SymbolContext &addr_context, lldb::RunMode stop_other_threads,
1387 LazyBool step_out_avoids_code_withoug_debug_info) {
1388 return QueueThreadPlanForStepOverRange(
1389 abort_other_plans, line_entry.GetSameLineContiguousAddressRange(),
1390 addr_context, stop_other_threads,
1391 step_out_avoids_code_withoug_debug_info);
1392}
1393
1394ThreadPlanSP Thread::QueueThreadPlanForStepInRange(
1395 bool abort_other_plans, const AddressRange &range,
1396 const SymbolContext &addr_context, const char *step_in_target,
1397 lldb::RunMode stop_other_threads,
1398 LazyBool step_in_avoids_code_without_debug_info,
1399 LazyBool step_out_avoids_code_without_debug_info) {
1400 ThreadPlanSP thread_plan_sp(
1401 new ThreadPlanStepInRange(*this, range, addr_context, stop_other_threads,
1402 step_in_avoids_code_without_debug_info,
1403 step_out_avoids_code_without_debug_info));
1404 ThreadPlanStepInRange *plan =
1405 static_cast<ThreadPlanStepInRange *>(thread_plan_sp.get());
1406
1407 if (step_in_target)
1408 plan->SetStepInTarget(step_in_target);
1409
1410 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1411 return thread_plan_sp;
1412}
1413
1414// Call the QueueThreadPlanForStepInRange method which takes an address range.
1415ThreadPlanSP Thread::QueueThreadPlanForStepInRange(
1416 bool abort_other_plans, const LineEntry &line_entry,
1417 const SymbolContext &addr_context, const char *step_in_target,
1418 lldb::RunMode stop_other_threads,
1419 LazyBool step_in_avoids_code_without_debug_info,
1420 LazyBool step_out_avoids_code_without_debug_info) {
1421 return QueueThreadPlanForStepInRange(
1422 abort_other_plans, line_entry.GetSameLineContiguousAddressRange(),
1423 addr_context, step_in_target, stop_other_threads,
1424 step_in_avoids_code_without_debug_info,
1425 step_out_avoids_code_without_debug_info);
1426}
1427
1428ThreadPlanSP Thread::QueueThreadPlanForStepOut(
1429 bool abort_other_plans, SymbolContext *addr_context, bool first_insn,
1430 bool stop_other_threads, Vote stop_vote, Vote run_vote, uint32_t frame_idx,
1431 LazyBool step_out_avoids_code_without_debug_info) {
1432 ThreadPlanSP thread_plan_sp(new ThreadPlanStepOut(
1433 *this, addr_context, first_insn, stop_other_threads, stop_vote, run_vote,
1434 frame_idx, step_out_avoids_code_without_debug_info));
1435
1436 if (thread_plan_sp->ValidatePlan(nullptr)) {
1437 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1438 return thread_plan_sp;
1439 } else {
1440 return ThreadPlanSP();
1441 }
1442}
1443
1444ThreadPlanSP Thread::QueueThreadPlanForStepOutNoShouldStop(
1445 bool abort_other_plans, SymbolContext *addr_context, bool first_insn,
1446 bool stop_other_threads, Vote stop_vote, Vote run_vote, uint32_t frame_idx,
1447 bool continue_to_next_branch) {
1448 const bool calculate_return_value =
1449 false; // No need to calculate the return value here.
1450 ThreadPlanSP thread_plan_sp(new ThreadPlanStepOut(
1451 *this, addr_context, first_insn, stop_other_threads, stop_vote, run_vote,
1452 frame_idx, eLazyBoolNo, continue_to_next_branch, calculate_return_value));
1453
1454 ThreadPlanStepOut *new_plan =
1455 static_cast<ThreadPlanStepOut *>(thread_plan_sp.get());
1456 new_plan->ClearShouldStopHereCallbacks();
1457
1458 if (thread_plan_sp->ValidatePlan(nullptr)) {
1459 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1460 return thread_plan_sp;
1461 } else {
1462 return ThreadPlanSP();
1463 }
1464}
1465
1466ThreadPlanSP Thread::QueueThreadPlanForStepThrough(StackID &return_stack_id,
1467 bool abort_other_plans,
1468 bool stop_other_threads) {
1469 ThreadPlanSP thread_plan_sp(
1470 new ThreadPlanStepThrough(*this, return_stack_id, stop_other_threads));
1471 if (!thread_plan_sp || !thread_plan_sp->ValidatePlan(nullptr))
1472 return ThreadPlanSP();
1473
1474 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1475 return thread_plan_sp;
1476}
1477
1478ThreadPlanSP Thread::QueueThreadPlanForRunToAddress(bool abort_other_plans,
1479 Address &target_addr,
1480 bool stop_other_threads) {
1481 ThreadPlanSP thread_plan_sp(
1482 new ThreadPlanRunToAddress(*this, target_addr, stop_other_threads));
1483 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1484 return thread_plan_sp;
1485}
1486
1487ThreadPlanSP Thread::QueueThreadPlanForStepUntil(bool abort_other_plans,
1488 lldb::addr_t *address_list,
1489 size_t num_addresses,
1490 bool stop_other_threads,
1491 uint32_t frame_idx) {
1492 ThreadPlanSP thread_plan_sp(new ThreadPlanStepUntil(
1493 *this, address_list, num_addresses, stop_other_threads, frame_idx));
1494 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1495 return thread_plan_sp;
1496}
1497
1498lldb::ThreadPlanSP Thread::QueueThreadPlanForStepScripted(
1499 bool abort_other_plans, const char *class_name, bool stop_other_threads) {
1500 ThreadPlanSP thread_plan_sp(new ThreadPlanPython(*this, class_name));
1501 QueueThreadPlan(thread_plan_sp, abort_other_plans);
1502  // This seems a little funny, but I don't want to have to split up the
1503  // constructor and the DidPush in the scripted plan; that seems
1504  // annoying. That means the constructor work has to happen in DidPush,
1505  // so I have to validate the plan AFTER pushing it, and then take it
1506  // off again if validation fails...
1507
1508 if (!thread_plan_sp->ValidatePlan(nullptr)) {
1509 DiscardThreadPlansUpToPlan(thread_plan_sp);
1510 return ThreadPlanSP();
1511 } else
1512 return thread_plan_sp;
1513}
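
A hedged caller-side sketch of the contract above: the plan is only usable if it survives the validate-after-push dance. The class name "MyModule.MyStepPlan" is a hypothetical placeholder for a user-supplied scripted plan class.

// Minimal sketch, assuming `thread` is a valid lldb_private::Thread.
lldb::ThreadPlanSP plan_sp = thread.QueueThreadPlanForStepScripted(
    /*abort_other_plans=*/false, "MyModule.MyStepPlan",
    /*stop_other_threads=*/true);
if (!plan_sp) {
  // ValidatePlan() failed after the push; the plan was already
  // discarded above, so there is nothing to clean up here.
}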
1514
1515uint32_t Thread::GetIndexID() const { return m_index_id; }
1516
1517static void PrintPlanElement(Stream *s, const ThreadPlanSP &plan,
1518 lldb::DescriptionLevel desc_level,
1519 int32_t elem_idx) {
1520 s->IndentMore();
1521 s->Indent();
1522 s->Printf("Element %d: ", elem_idx);
1523 plan->GetDescription(s, desc_level);
1524 s->EOL();
1525 s->IndentLess();
1526}
1527
1528static void PrintPlanStack(Stream *s,
1529 const std::vector<lldb::ThreadPlanSP> &plan_stack,
1530 lldb::DescriptionLevel desc_level,
1531 bool include_internal) {
1532 int32_t print_idx = 0;
1533 for (ThreadPlanSP plan_sp : plan_stack) {
1534 if (include_internal || !plan_sp->GetPrivate()) {
1535 PrintPlanElement(s, plan_sp, desc_level, print_idx++);
1536 }
1537 }
1538}
1539
1540void Thread::DumpThreadPlans(Stream *s, lldb::DescriptionLevel desc_level,
1541 bool include_internal,
1542 bool ignore_boring_threads) const {
1543 uint32_t stack_size;
1544
1545 if (ignore_boring_threads) {
1546    stack_size = m_plan_stack.size();
1547 uint32_t completed_stack_size = m_completed_plan_stack.size();
1548 uint32_t discarded_stack_size = m_discarded_plan_stack.size();
1549 if (stack_size == 1 && completed_stack_size == 0 &&
1550 discarded_stack_size == 0) {
1551 s->Printf("thread #%u: tid = 0x%4.4" PRIx64"l" "x" "\n", GetIndexID(), GetID());
1552 s->IndentMore();
1553 s->Indent();
1554 s->Printf("No active thread plans\n");
1555 s->IndentLess();
1556 return;
1557 }
1558 }
1559
1560 s->Indent();
1561 s->Printf("thread #%u: tid = 0x%4.4" PRIx64"l" "x" ":\n", GetIndexID(), GetID());
1562 s->IndentMore();
1563 s->Indent();
1564 s->Printf("Active plan stack:\n");
1565 PrintPlanStack(s, m_plan_stack, desc_level, include_internal);
1566
1567 stack_size = m_completed_plan_stack.size();
1568 if (stack_size > 0) {
1569 s->Indent();
1570 s->Printf("Completed Plan Stack:\n");
1571 PrintPlanStack(s, m_completed_plan_stack, desc_level, include_internal);
1572 }
1573
1574 stack_size = m_discarded_plan_stack.size();
1575 if (stack_size > 0) {
1576 s->Indent();
1577 s->Printf("Discarded Plan Stack:\n");
1578 PrintPlanStack(s, m_discarded_plan_stack, desc_level, include_internal);
1579 }
1580
1581 s->IndentLess();
1582}
1583
1584TargetSP Thread::CalculateTarget() {
1585 TargetSP target_sp;
1586 ProcessSP process_sp(GetProcess());
1587 if (process_sp)
1588 target_sp = process_sp->CalculateTarget();
1589 return target_sp;
1590}
1591
1592ProcessSP Thread::CalculateProcess() { return GetProcess(); }
1593
1594ThreadSP Thread::CalculateThread() { return shared_from_this(); }
1595
1596StackFrameSP Thread::CalculateStackFrame() { return StackFrameSP(); }
1597
1598void Thread::CalculateExecutionContext(ExecutionContext &exe_ctx) {
1599 exe_ctx.SetContext(shared_from_this());
1600}
1601
1602StackFrameListSP Thread::GetStackFrameList() {
1603 StackFrameListSP frame_list_sp;
1604 std::lock_guard<std::recursive_mutex> guard(m_frame_mutex);
1605 if (m_curr_frames_sp) {
1606 frame_list_sp = m_curr_frames_sp;
1607 } else {
1608 frame_list_sp.reset(new StackFrameList(*this, m_prev_frames_sp, true));
1609 m_curr_frames_sp = frame_list_sp;
1610 }
1611 return frame_list_sp;
1612}
1613
1614void Thread::ClearStackFrames() {
1615 std::lock_guard<std::recursive_mutex> guard(m_frame_mutex);
1616
1617 Unwind *unwinder = GetUnwinder();
1618 if (unwinder)
1619 unwinder->Clear();
1620
1621  // Only store away the old "reference" StackFrameList if we got all its
1622  // frames:
1623  // FIXME: At some point we can try to splice the frames we have already
1624  // fetched into the new frame list as we build it, but let's not try
1625  // that now.
1626 if (m_curr_frames_sp && m_curr_frames_sp->GetAllFramesFetched())
1627 m_prev_frames_sp.swap(m_curr_frames_sp);
1628 m_curr_frames_sp.reset();
1629
1630 m_extended_info.reset();
1631 m_extended_info_fetched = false;
1632}
1633
1634lldb::StackFrameSP Thread::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
1635 return GetStackFrameList()->GetFrameWithConcreteFrameIndex(unwind_idx);
1636}
1637
1638Status Thread::ReturnFromFrameWithIndex(uint32_t frame_idx,
1639 lldb::ValueObjectSP return_value_sp,
1640 bool broadcast) {
1641 StackFrameSP frame_sp = GetStackFrameAtIndex(frame_idx);
1642 Status return_error;
1643
1644  if (!frame_sp) {
1645    return_error.SetErrorStringWithFormat(
1646        "Could not find frame with index %d in thread 0x%" PRIx64 ".",
1647        frame_idx, GetID());
1648    return return_error; // Don't drop the error we just set.
1649  }
1650  return ReturnFromFrame(frame_sp, return_value_sp, broadcast);
1651}
1652
1653Status Thread::ReturnFromFrame(lldb::StackFrameSP frame_sp,
1654 lldb::ValueObjectSP return_value_sp,
1655 bool broadcast) {
1656 Status return_error;
1657
1658 if (!frame_sp) {
1659 return_error.SetErrorString("Can't return to a null frame.");
1660 return return_error;
1661 }
1662
1663 Thread *thread = frame_sp->GetThread().get();
1664 uint32_t older_frame_idx = frame_sp->GetFrameIndex() + 1;
1665 StackFrameSP older_frame_sp = thread->GetStackFrameAtIndex(older_frame_idx);
1666 if (!older_frame_sp) {
1667 return_error.SetErrorString("No older frame to return to.");
1668 return return_error;
1669 }
1670
1671 if (return_value_sp) {
1672 lldb::ABISP abi = thread->GetProcess()->GetABI();
1673 if (!abi) {
1674 return_error.SetErrorString("Could not find ABI to set return value.");
1675 return return_error;
1676 }
1677 SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextFunction);
1678
1679 // FIXME: ValueObject::Cast doesn't currently work correctly, at least not
1680 // for scalars.
1681 // Turn that back on when that works.
1682 if (/* DISABLES CODE */ (0) && sc.function != nullptr) {
1683 Type *function_type = sc.function->GetType();
1684 if (function_type) {
1685 CompilerType return_type =
1686 sc.function->GetCompilerType().GetFunctionReturnType();
1687 if (return_type) {
1688 StreamString s;
1689 return_type.DumpTypeDescription(&s);
1690 ValueObjectSP cast_value_sp = return_value_sp->Cast(return_type);
1691 if (cast_value_sp) {
1692 cast_value_sp->SetFormat(eFormatHex);
1693 return_value_sp = cast_value_sp;
1694 }
1695 }
1696 }
1697 }
1698
1699 return_error = abi->SetReturnValueObject(older_frame_sp, return_value_sp);
1700 if (!return_error.Success())
1701 return return_error;
1702 }
1703
1704 // Now write the return registers for the chosen frame:
1705  // Note, we can't use ReadAllRegisterValues->WriteAllRegisterValues
1706  // here, since the read & write cook their data.
1707
1708
1709 StackFrameSP youngest_frame_sp = thread->GetStackFrameAtIndex(0);
1710 if (youngest_frame_sp) {
1711 lldb::RegisterContextSP reg_ctx_sp(youngest_frame_sp->GetRegisterContext());
1712 if (reg_ctx_sp) {
1713 bool copy_success = reg_ctx_sp->CopyFromRegisterContext(
1714 older_frame_sp->GetRegisterContext());
1715 if (copy_success) {
1716 thread->DiscardThreadPlans(true);
1717 thread->ClearStackFrames();
1718 if (broadcast && EventTypeHasListeners(eBroadcastBitStackChanged))
1719 BroadcastEvent(eBroadcastBitStackChanged,
1720 new ThreadEventData(this->shared_from_this()));
1721 } else {
1722 return_error.SetErrorString("Could not reset register values.");
1723 }
1724 } else {
1725 return_error.SetErrorString("Frame has no register context.");
1726 }
1727 } else {
1728 return_error.SetErrorString("Returned past top frame.");
1729 }
1730 return return_error;
1731}
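
A hedged usage sketch of the forced-return path above. The frame index is illustrative, and the empty ValueObjectSP means no return value is written; a real caller would obtain one from an expression or variable.

// Minimal sketch, assuming `thread` is a stopped lldb_private::Thread.
lldb::ValueObjectSP return_value_sp; // placeholder: no return value set
Status err = thread.ReturnFromFrameWithIndex(/*frame_idx=*/0,
                                             return_value_sp,
                                             /*broadcast=*/true);
if (err.Fail())
  printf("return failed: %s\n", err.AsCString());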
1732
1733static void DumpAddressList(Stream &s, const std::vector<Address> &list,
1734 ExecutionContextScope *exe_scope) {
1735 for (size_t n = 0; n < list.size(); n++) {
1736 s << "\t";
1737 list[n].Dump(&s, exe_scope, Address::DumpStyleResolvedDescription,
1738 Address::DumpStyleSectionNameOffset);
1739 s << "\n";
1740 }
1741}
1742
1743Status Thread::JumpToLine(const FileSpec &file, uint32_t line,
1744 bool can_leave_function, std::string *warnings) {
1745 ExecutionContext exe_ctx(GetStackFrameAtIndex(0));
1746 Target *target = exe_ctx.GetTargetPtr();
1747 TargetSP target_sp = exe_ctx.GetTargetSP();
1748 RegisterContext *reg_ctx = exe_ctx.GetRegisterContext();
1749 StackFrame *frame = exe_ctx.GetFramePtr();
1750 const SymbolContext &sc = frame->GetSymbolContext(eSymbolContextFunction);
1751
1752 // Find candidate locations.
1753 std::vector<Address> candidates, within_function, outside_function;
1754 target->GetImages().FindAddressesForLine(target_sp, file, line, sc.function,
1755 within_function, outside_function);
1756
1757  // If possible, we try to stay within the current function.
1758  // Within a function, we accept multiple locations (optimized code may
1759  // produce them; there's no perfect answer here, so we do the best we
1760  // can).
1761  // However, if we're trying to leave the function, we don't know how to
1762  // pick the right location, so if there's more than one candidate we
1763  // bail.
1764 if (!within_function.empty())
1765 candidates = within_function;
1766 else if (outside_function.size() == 1 && can_leave_function)
1767 candidates = outside_function;
1768
1769 // Check if we got anything.
1770 if (candidates.empty()) {
1771 if (outside_function.empty()) {
1772 return Status("Cannot locate an address for %s:%i.",
1773 file.GetFilename().AsCString(), line);
1774 } else if (outside_function.size() == 1) {
1775 return Status("%s:%i is outside the current function.",
1776 file.GetFilename().AsCString(), line);
1777 } else {
1778 StreamString sstr;
1779 DumpAddressList(sstr, outside_function, target);
1780 return Status("%s:%i has multiple candidate locations:\n%s",
1781 file.GetFilename().AsCString(), line, sstr.GetData());
1782 }
1783 }
1784
1785 // Accept the first location, warn about any others.
1786 Address dest = candidates[0];
1787 if (warnings && candidates.size() > 1) {
1788 StreamString sstr;
1789 sstr.Printf("%s:%i appears multiple times in this function, selecting the "
1790 "first location:\n",
1791 file.GetFilename().AsCString(), line);
1792 DumpAddressList(sstr, candidates, target);
1793 *warnings = sstr.GetString();
1794 }
1795
1796 if (!reg_ctx->SetPC(dest))
1797 return Status("Cannot change PC to target address.");
1798
1799 return Status();
1800}
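
A hedged caller-side sketch of JumpToLine; the file name and line number are illustrative, and the two-argument FileSpec constructor (path, resolve_path) is assumed from this era of the code base.

// Minimal sketch, assuming `thread` is stopped and "main.cpp" is known
// to the target's debug info.
std::string warnings;
Status err = thread.JumpToLine(FileSpec("main.cpp", false), /*line=*/42,
                               /*can_leave_function=*/false, &warnings);
if (!warnings.empty())
  printf("%s", warnings.c_str());
if (err.Fail())
  printf("jump failed: %s\n", err.AsCString());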
1801
1802void Thread::DumpUsingSettingsFormat(Stream &strm, uint32_t frame_idx,
1803 bool stop_format) {
1804 ExecutionContext exe_ctx(shared_from_this());
1805 Process *process = exe_ctx.GetProcessPtr();
1806 if (process == nullptr)
1807 return;
1808
1809 StackFrameSP frame_sp;
1810 SymbolContext frame_sc;
1811  if (frame_idx != LLDB_INVALID_FRAME_ID) {
1812 frame_sp = GetStackFrameAtIndex(frame_idx);
1813 if (frame_sp) {
1814 exe_ctx.SetFrameSP(frame_sp);
1815 frame_sc = frame_sp->GetSymbolContext(eSymbolContextEverything);
1816 }
1817 }
1818
1819 const FormatEntity::Entry *thread_format;
1820 if (stop_format)
1821 thread_format = exe_ctx.GetTargetRef().GetDebugger().GetThreadStopFormat();
1822 else
1823 thread_format = exe_ctx.GetTargetRef().GetDebugger().GetThreadFormat();
1824
1825  assert(thread_format);
1826
1827 FormatEntity::Format(*thread_format, strm, frame_sp ? &frame_sc : nullptr,
1828 &exe_ctx, nullptr, nullptr, false, false);
1829}
1830
1831void Thread::SettingsInitialize() {}
1832
1833void Thread::SettingsTerminate() {}
1834
1835lldb::addr_t Thread::GetThreadPointer() { return LLDB_INVALID_ADDRESS; }
1836
1837addr_t Thread::GetThreadLocalData(const ModuleSP module,
1838 lldb::addr_t tls_file_addr) {
1839 // The default implementation is to ask the dynamic loader for it.
1840 // This can be overridden for specific platforms.
1841 DynamicLoader *loader = GetProcess()->GetDynamicLoader();
1842 if (loader)
1843 return loader->GetThreadLocalData(module, shared_from_this(),
1844 tls_file_addr);
1845 else
1846    return LLDB_INVALID_ADDRESS;
1847}
1848
1849bool Thread::SafeToCallFunctions() {
1850 Process *process = GetProcess().get();
1851 if (process) {
1852 SystemRuntime *runtime = process->GetSystemRuntime();
1853 if (runtime) {
1854 return runtime->SafeToCallFunctionsOnThisThread(shared_from_this());
1855 }
1856 }
1857 return true;
1858}
1859
1860lldb::StackFrameSP
1861Thread::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
1862 return GetStackFrameList()->GetStackFrameSPForStackFramePtr(stack_frame_ptr);
1863}
1864
1865const char *Thread::StopReasonAsCString(lldb::StopReason reason) {
1866 switch (reason) {
1867 case eStopReasonInvalid:
1868 return "invalid";
1869 case eStopReasonNone:
1870 return "none";
1871 case eStopReasonTrace:
1872 return "trace";
1873 case eStopReasonBreakpoint:
1874 return "breakpoint";
1875 case eStopReasonWatchpoint:
1876 return "watchpoint";
1877 case eStopReasonSignal:
1878 return "signal";
1879 case eStopReasonException:
1880 return "exception";
1881 case eStopReasonExec:
1882 return "exec";
1883 case eStopReasonPlanComplete:
1884 return "plan complete";
1885 case eStopReasonThreadExiting:
1886 return "thread exiting";
1887 case eStopReasonInstrumentation:
1888 return "instrumentation break";
1889 }
1890
1891 static char unknown_state_string[64];
1892 snprintf(unknown_state_string, sizeof(unknown_state_string),
1893 "StopReason = %i", reason);
1894 return unknown_state_string;
1895}
1896
1897const char *Thread::RunModeAsCString(lldb::RunMode mode) {
1898 switch (mode) {
1899 case eOnlyThisThread:
1900 return "only this thread";
1901 case eAllThreads:
1902 return "all threads";
1903 case eOnlyDuringStepping:
1904 return "only during stepping";
1905 }
1906
1907 static char unknown_state_string[64];
1908 snprintf(unknown_state_string, sizeof(unknown_state_string), "RunMode = %i",
1909 mode);
1910 return unknown_state_string;
1911}
1912
1913size_t Thread::GetStatus(Stream &strm, uint32_t start_frame,
1914 uint32_t num_frames, uint32_t num_frames_with_source,
1915 bool stop_format, bool only_stacks) {
1916
1917 if (!only_stacks) {
1918 ExecutionContext exe_ctx(shared_from_this());
1919 Target *target = exe_ctx.GetTargetPtr();
1920 Process *process = exe_ctx.GetProcessPtr();
1921 strm.Indent();
1922 bool is_selected = false;
1923 if (process) {
1924 if (process->GetThreadList().GetSelectedThread().get() == this)
1925 is_selected = true;
1926 }
1927 strm.Printf("%c ", is_selected ? '*' : ' ');
1928 if (target && target->GetDebugger().GetUseExternalEditor()) {
1929 StackFrameSP frame_sp = GetStackFrameAtIndex(start_frame);
1930 if (frame_sp) {
1931 SymbolContext frame_sc(
1932 frame_sp->GetSymbolContext(eSymbolContextLineEntry));
1933 if (frame_sc.line_entry.line != 0 && frame_sc.line_entry.file) {
1934 Host::OpenFileInExternalEditor(frame_sc.line_entry.file,
1935 frame_sc.line_entry.line);
1936 }
1937 }
1938 }
1939
1940 DumpUsingSettingsFormat(strm, start_frame, stop_format);
1941 }
1942
1943 size_t num_frames_shown = 0;
1944 if (num_frames > 0) {
1945 strm.IndentMore();
1946
1947 const bool show_frame_info = true;
1948 const bool show_frame_unique = only_stacks;
1949 const char *selected_frame_marker = nullptr;
1950 if (num_frames == 1 || only_stacks ||
1951 (GetID() != GetProcess()->GetThreadList().GetSelectedThread()->GetID()))
1952 strm.IndentMore();
1953 else
1954 selected_frame_marker = "* ";
1955
1956 num_frames_shown = GetStackFrameList()->GetStatus(
1957 strm, start_frame, num_frames, show_frame_info, num_frames_with_source,
1958 show_frame_unique, selected_frame_marker);
1959 if (num_frames == 1)
1960 strm.IndentLess();
1961 strm.IndentLess();
1962 }
1963 return num_frames_shown;
1964}
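
A hedged sketch of driving GetStatus to render a thread summary into an in-memory stream (StreamString is the Stream subclass used elsewhere in this file; the frame counts are illustrative):

// Minimal sketch: print the thread's status with up to 5 frames,
// showing source context for the first 3.
StreamString strm;
const size_t shown = thread.GetStatus(strm, /*start_frame=*/0,
                                      /*num_frames=*/5,
                                      /*num_frames_with_source=*/3,
                                      /*stop_format=*/true,
                                      /*only_stacks=*/false);
printf("%zu frames shown:\n%s", shown, strm.GetData());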
1965
1966bool Thread::GetDescription(Stream &strm, lldb::DescriptionLevel level,
1967 bool print_json_thread, bool print_json_stopinfo) {
1968 const bool stop_format = false;
1969 DumpUsingSettingsFormat(strm, 0, stop_format);
1970 strm.Printf("\n");
1971
1972 StructuredData::ObjectSP thread_info = GetExtendedInfo();
1973
1974 if (print_json_thread || print_json_stopinfo) {
1975 if (thread_info && print_json_thread) {
1976 thread_info->Dump(strm);
1977 strm.Printf("\n");
1978 }
1979
1980 if (print_json_stopinfo && m_stop_info_sp) {
1981 StructuredData::ObjectSP stop_info = m_stop_info_sp->GetExtendedInfo();
1982 if (stop_info) {
1983 stop_info->Dump(strm);
1984 strm.Printf("\n");
1985 }
1986 }
1987
1988 return true;
1989 }
1990
1991 if (thread_info) {
1992 StructuredData::ObjectSP activity =
1993 thread_info->GetObjectForDotSeparatedPath("activity");
1994 StructuredData::ObjectSP breadcrumb =
1995 thread_info->GetObjectForDotSeparatedPath("breadcrumb");
1996 StructuredData::ObjectSP messages =
1997 thread_info->GetObjectForDotSeparatedPath("trace_messages");
1998
1999 bool printed_activity = false;
2000 if (activity && activity->GetType() == eStructuredDataTypeDictionary) {
2001 StructuredData::Dictionary *activity_dict = activity->GetAsDictionary();
2002 StructuredData::ObjectSP id = activity_dict->GetValueForKey("id");
2003 StructuredData::ObjectSP name = activity_dict->GetValueForKey("name");
2004 if (name && name->GetType() == eStructuredDataTypeString && id &&
2005 id->GetType() == eStructuredDataTypeInteger) {
2006 strm.Format(" Activity '{0}', {1:x}\n",
2007 name->GetAsString()->GetValue(),
2008 id->GetAsInteger()->GetValue());
2009 }
2010 printed_activity = true;
2011 }
2012 bool printed_breadcrumb = false;
2013 if (breadcrumb && breadcrumb->GetType() == eStructuredDataTypeDictionary) {
2014 if (printed_activity)
2015 strm.Printf("\n");
2016 StructuredData::Dictionary *breadcrumb_dict =
2017 breadcrumb->GetAsDictionary();
2018 StructuredData::ObjectSP breadcrumb_text =
2019 breadcrumb_dict->GetValueForKey("name");
2020 if (breadcrumb_text &&
2021 breadcrumb_text->GetType() == eStructuredDataTypeString) {
2022 strm.Format(" Current Breadcrumb: {0}\n",
2023 breadcrumb_text->GetAsString()->GetValue());
2024 }
2025 printed_breadcrumb = true;
2026 }
2027 if (messages && messages->GetType() == eStructuredDataTypeArray) {
2028 if (printed_breadcrumb)
2029 strm.Printf("\n");
2030 StructuredData::Array *messages_array = messages->GetAsArray();
2031 const size_t msg_count = messages_array->GetSize();
2032 if (msg_count > 0) {
2033 strm.Printf(" %zu trace messages:\n", msg_count);
2034 for (size_t i = 0; i < msg_count; i++) {
2035 StructuredData::ObjectSP message = messages_array->GetItemAtIndex(i);
2036 if (message && message->GetType() == eStructuredDataTypeDictionary) {
2037 StructuredData::Dictionary *message_dict =
2038 message->GetAsDictionary();
2039 StructuredData::ObjectSP message_text =
2040 message_dict->GetValueForKey("message");
2041 if (message_text &&
2042 message_text->GetType() == eStructuredDataTypeString) {
2043 strm.Format(" {0}\n", message_text->GetAsString()->GetValue());
2044 }
2045 }
2046 }
2047 }
2048 }
2049 }
2050
2051 return true;
2052}
2053
2054size_t Thread::GetStackFrameStatus(Stream &strm, uint32_t first_frame,
2055 uint32_t num_frames, bool show_frame_info,
2056 uint32_t num_frames_with_source) {
2057 return GetStackFrameList()->GetStatus(
2058 strm, first_frame, num_frames, show_frame_info, num_frames_with_source);
2059}
2060
2061Unwind *Thread::GetUnwinder() {
2062 if (!m_unwinder_ap) {
2063 const ArchSpec target_arch(CalculateTarget()->GetArchitecture());
2064 const llvm::Triple::ArchType machine = target_arch.GetMachine();
2065 switch (machine) {
2066 case llvm::Triple::x86_64:
2067 case llvm::Triple::x86:
2068 case llvm::Triple::arm:
2069 case llvm::Triple::aarch64:
2070 case llvm::Triple::thumb:
2071 case llvm::Triple::mips:
2072 case llvm::Triple::mipsel:
2073 case llvm::Triple::mips64:
2074 case llvm::Triple::mips64el:
2075 case llvm::Triple::ppc:
2076 case llvm::Triple::ppc64:
2077 case llvm::Triple::ppc64le:
2078 case llvm::Triple::systemz:
2079 case llvm::Triple::hexagon:
2080 m_unwinder_ap.reset(new UnwindLLDB(*this));
2081 break;
2082
2083 default:
2084 if (target_arch.GetTriple().getVendor() == llvm::Triple::Apple)
2085 m_unwinder_ap.reset(new UnwindMacOSXFrameBackchain(*this));
2086 break;
2087 }
2088 }
2089 return m_unwinder_ap.get();
2090}
2091
2092void Thread::Flush() {
2093 ClearStackFrames();
2094 m_reg_context_sp.reset();
2095}
2096
2097bool Thread::IsStillAtLastBreakpointHit() {
2098  // If we are currently stopped at a breakpoint, always return that
2099  // stopinfo and don't reset it. This allows threads to maintain their
2100  // breakpoint stopinfo, such as when thread-stepping in multithreaded
2101  // programs.
2102
2103 if (m_stop_info_sp) {
2104 StopReason stop_reason = m_stop_info_sp->GetStopReason();
2105 if (stop_reason == lldb::eStopReasonBreakpoint) {
2106 uint64_t value = m_stop_info_sp->GetValue();
2107 lldb::RegisterContextSP reg_ctx_sp(GetRegisterContext());
2108 if (reg_ctx_sp) {
2109 lldb::addr_t pc = reg_ctx_sp->GetPC();
2110 BreakpointSiteSP bp_site_sp =
2111 GetProcess()->GetBreakpointSiteList().FindByAddress(pc);
2112 if (bp_site_sp && static_cast<break_id_t>(value) == bp_site_sp->GetID())
2113 return true;
2114 }
2115 }
2116 }
2117 return false;
2118}
2119
2120Status Thread::StepIn(bool source_step,
2121 LazyBool step_in_avoids_code_without_debug_info,
2122 LazyBool step_out_avoids_code_without_debug_info)
2123
2124{
2125 Status error;
2126 Process *process = GetProcess().get();
2127 if (StateIsStoppedState(process->GetState(), true)) {
2128 StackFrameSP frame_sp = GetStackFrameAtIndex(0);
2129 ThreadPlanSP new_plan_sp;
2130 const lldb::RunMode run_mode = eOnlyThisThread;
2131 const bool abort_other_plans = false;
2132
2133 if (source_step && frame_sp && frame_sp->HasDebugInformation()) {
2134 SymbolContext sc(frame_sp->GetSymbolContext(eSymbolContextEverything));
2135 new_plan_sp = QueueThreadPlanForStepInRange(
2136 abort_other_plans, sc.line_entry, sc, nullptr, run_mode,
2137 step_in_avoids_code_without_debug_info,
2138 step_out_avoids_code_without_debug_info);
2139 } else {
2140 new_plan_sp = QueueThreadPlanForStepSingleInstruction(
2141 false, abort_other_plans, run_mode);
2142 }
2143
2144 new_plan_sp->SetIsMasterPlan(true);
2145 new_plan_sp->SetOkayToDiscard(false);
2146
2147 // Why do we need to set the current thread by ID here???
2148 process->GetThreadList().SetSelectedThreadByID(GetID());
2149 error = process->Resume();
2150 } else {
2151 error.SetErrorString("process not stopped");
2152 }
2153 return error;
2154}
2155
2156Status Thread::StepOver(bool source_step,
2157 LazyBool step_out_avoids_code_without_debug_info) {
2158 Status error;
2159 Process *process = GetProcess().get();
2160 if (StateIsStoppedState(process->GetState(), true)) {
2161 StackFrameSP frame_sp = GetStackFrameAtIndex(0);
2162 ThreadPlanSP new_plan_sp;
2163
2164 const lldb::RunMode run_mode = eOnlyThisThread;
2165 const bool abort_other_plans = false;
2166
2167 if (source_step && frame_sp && frame_sp->HasDebugInformation()) {
2168 SymbolContext sc(frame_sp->GetSymbolContext(eSymbolContextEverything));
2169 new_plan_sp = QueueThreadPlanForStepOverRange(
2170 abort_other_plans, sc.line_entry, sc, run_mode,
2171 step_out_avoids_code_without_debug_info);
2172 } else {
2173 new_plan_sp = QueueThreadPlanForStepSingleInstruction(
2174 true, abort_other_plans, run_mode);
2175 }
2176
2177 new_plan_sp->SetIsMasterPlan(true);
2178 new_plan_sp->SetOkayToDiscard(false);
2179
2180 // Why do we need to set the current thread by ID here???
2181 process->GetThreadList().SetSelectedThreadByID(GetID());
2182 error = process->Resume();
2183 } else {
2184 error.SetErrorString("process not stopped");
2185 }
2186 return error;
2187}
2188
2189Status Thread::StepOut() {
2190 Status error;
2191 Process *process = GetProcess().get();
2192 if (StateIsStoppedState(process->GetState(), true)) {
2193 const bool first_instruction = false;
2194 const bool stop_other_threads = false;
2195 const bool abort_other_plans = false;
2196
2197 ThreadPlanSP new_plan_sp(QueueThreadPlanForStepOut(
2198 abort_other_plans, nullptr, first_instruction, stop_other_threads,
2199 eVoteYes, eVoteNoOpinion, 0));
2200
2201 new_plan_sp->SetIsMasterPlan(true);
2202 new_plan_sp->SetOkayToDiscard(false);
2203
2204 // Why do we need to set the current thread by ID here???
2205 process->GetThreadList().SetSelectedThreadByID(GetID());
2206 error = process->Resume();
2207 } else {
2208 error.SetErrorString("process not stopped");
2209 }
2210 return error;
2211}
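
Taken together, StepIn, StepOver and StepOut are the public source-stepping entry points: each queues a master plan, pins the selected thread, and resumes the process. A hedged sketch (eLazyBoolCalculate is assumed to mean "defer to the thread/target settings" for the avoid-no-debug options):

// Minimal sketch: source-level step-in on `thread`, deferring the
// "avoid code without debug info" decision to the settings.
Status err = thread.StepIn(/*source_step=*/true, eLazyBoolCalculate,
                           eLazyBoolCalculate);
if (err.Fail())
  printf("step-in failed: %s\n", err.AsCString());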

/build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/include/lldb/Target/ThreadPlan.h

1//===-- ThreadPlan.h --------------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#ifndef liblldb_ThreadPlan_h_
11#define liblldb_ThreadPlan_h_
12
13// C Includes
14// C++ Includes
15#include <mutex>
16#include <string>
17
18// Other libraries and framework includes
19// Project includes
20#include "lldb/Target/Process.h"
21#include "lldb/Target/StopInfo.h"
22#include "lldb/Target/Target.h"
23#include "lldb/Target/Thread.h"
24#include "lldb/Target/ThreadPlanTracer.h"
25#include "lldb/Utility/UserID.h"
26#include "lldb/lldb-private.h"
27
28namespace lldb_private {
29
30//------------------------------------------------------------------
31// ThreadPlan:
32// This is the pure virtual base class for thread plans.
33//
34// The thread plans provide the "atoms" of behavior from which all the
35// logical process control is built, whether driven directly from
36// commands or through more complex composite plans.
37//
38// Plan Stack:
39//
40// The thread maintains a thread plan stack, and you program the actions
41// of a particular thread
42// by pushing plans onto the plan stack.
43// There is always a "Current" plan, which is the top of the plan stack,
44// though in some cases a plan may defer to plans higher in the stack
45// for some piece of information
46// (let us define that the plan stack grows downwards).
47//
48// The plan stack is never empty; there is always a Base Plan which
49// persists through the life
50// of the running process.
51//
52//
53// Creating Plans:
54//
55// The thread plan is generally created and added to the plan stack
56// through the QueueThreadPlanFor... APIs in lldb::Thread. Those APIs
57// will return the plan that performs the named operation in a manner
58// appropriate for the current process.
59// The plans in lldb/source/Target are generic
60// implementations, but a Process plugin can override them.
61// (A minimal subclass sketch appears after this comment block.)
62//
63// ValidatePlan is then called. If it returns false, the plan is unshipped.
64// This is a little convenience which keeps us from having to error
65// out of the constructor.
66//
67// Then the plan is added to the plan stack. When the plan is added to
68// the plan stack, its DidPush will get called. This is useful if a plan
69// wants to push any additional plans as it is constructed, since you
70// need to make sure you're already on the stack before you push
71// additional plans.
72//
73//
74// Completed Plans:
75//
76// When the target process stops, the plans are queried, among other
77// things, for whether their job is done. If it is, they are moved from
78// the plan stack to the Completed Plan stack in reverse order from
79// their position on the plan stack (since multiple plans may be done
80// at a given stop.) This is used primarily so that the
81// lldb::Thread::StopInfo for the thread can be set properly.
82// If one plan pushes another to achieve part of its job, but it
83// doesn't want that sub-plan to be the one that sets the StopInfo,
84// then call SetPrivate on the sub-plan when you create it, and the
85// Thread will pass over that plan in reporting the reason for the
86// stop.
87//
88//
89// Discarded plans:
90//
91// Your plan may also get discarded, i.e. moved from the plan stack to
92// the "discarded plan stack". This can happen, for instance, if the
93// plan is calling a function and the function call crashes and you
94// want to unwind the attempt to call. So don't assume that your plan
95// will always successfully stop.
96// Which leads to:
97//
98// Cleaning up after your plans:
99//
100// When the plan is moved from the plan stack its WillPop method is
101// always called, no matter why. Once it is moved off the plan stack
102// it is done, and won't get a chance to run again, so you should
103// undo anything that affects target state in this method.
104// Be sure, however, to leave the plan still able to correctly fill
105// in the StopInfo.
106// N.B. Don't wait to clean up target state till the destructor,
107// since that will usually get called when the target resumes, and
108// you want to leave the target state correct for new plans in the
109// time between when your plan gets unshipped and
110// the next resume.
111//
112//
113// Thread State Checkpoint:
114//
115// Note that calling functions on the target process
116// (ThreadPlanCallFunction) changes the current thread state, whether on
117// direct user demand or internally, e.g. when lldb allocates memory on
118// the device to evaluate a breakpoint condition (on Linux, via mmap).
119// ThreadStateCheckpoint saves the Thread state (stop info and completed
120// plan stack) to restore it after the function call completes.
121//
122// Over the lifetime of the plan, various methods of the ThreadPlan are
123// then called in response to changes of state in the process we are
124// debugging, as follows:
125//
126// Resuming:
127//
128// When the target process is about to be restarted, the plan's
129// WillResume method is called, giving the plan a chance to prepare
130// for the run. If WillResume returns false, then the process is not
131// restarted. Be sure to set an appropriate error value in the
132// Process if you have to do this.
133// Note, ThreadPlans actually implement DoWillResume; WillResume
134// wraps that call.
135//
136//
137// Next the "StopOthers" method of all the threads are polled, and if one
138// thread's Current plan
139// returns "true" then only that thread gets to run. If more than one returns
140// "true" the threads that want to run solo
141// get run one by one round robin fashion. Otherwise all are let to run.
142//
143// Note, the way StopOthers is implemented, the base class implementation just
144// asks the previous plan. So if your plan
145// has no opinion about whether it should run stopping others or not, just
146// don't implement StopOthers, and the parent
147// will be asked.
148//
149// Finally, for each thread that is running, its run state is set to
150// the value returned by RunState from the
151// thread's Current plan.
152//
153// Responding to a stop:
154//
155// When the target process stops, the plan is called in the following stages:
156//
157// First the thread asks the Current Plan if it can handle this stop by calling
158// PlanExplainsStop.
159// If the Current plan answers "true" then it is asked if the stop should
160// percolate all the way to the
161// user by calling the ShouldStop method. If the current plan doesn't explain
162// the stop, then we query up
163// the plan stack for a plan that does explain the stop. The plan that does
164// explain the stop then needs to
165// figure out what to do about the plans below it in the stack. If the stop is
166// recoverable, then the plan that
167// understands it can just do what it needs to set up to restart, and then
168// continue.
169// Otherwise, the plan that understood the stop should call DiscardPlanStack to
170// clean up the stack below it.
171// Note, plans actually implement DoPlanExplainsStop; the result is
172// cached in PlanExplainsStop, so DoPlanExplainsStop itself will only
173// get called once per stop.
174//
175// Master plans:
176//
177// In the normal case, when we decide to stop, we will collapse the
178// plan stack up to the point of the plan that understood the stop
179// reason.
180// However, if a plan wishes to stay on the stack after an event it
181// didn't directly handle, it can designate itself a "Master" plan by
182// responding true to IsMasterPlan. Then, if it wants not to be
183// discarded, it can return false to OkayToDiscard, and it and all
184// its dependent plans will be preserved when we resume execution.
185//
186//
187// The other effect of being a master plan is that when the Master plan
188// is done, if it has set "OkayToDiscard" to false, then it will be
189// popped & execution will stop and return to the user.
190// Remember that if OkayToDiscard is false, the plan will be popped and
191// control will be given to the next plan above it on the stack. So
192// setting OkayToDiscard to false means the user will regain control
193// when the MasterPlan is completed.
194//
195// Between them, these two controls allow things like: a
196// MasterPlan/DontDiscard Step Over can hit a breakpoint, stop, and
197// return control to the user, and then when the user continues, the
198// step over succeeds.
199// Even more tricky, when the breakpoint is hit, the user can continue
200// to step in/step over/etc., and finally when they continue,
201// they will finish up the Step Over.
202//
203// FIXME: MasterPlan & OkayToDiscard aren't really orthogonal. MasterPlan
204// designation means that this plan controls its fate and the fate of
205// plans below it. OkayToDiscard tells whether the MasterPlan wants to
206// stay on the stack. I originally thought "MasterPlan-ness" would need
207// to be a fixed characteristic of a ThreadPlan, in which case you
208// needed the extra control. But that doesn't seem to be true. So we
209// should be able to convert MasterPlan status alone to mean the current
210// "MasterPlan/DontDiscard". Then no plans would be MasterPlans by
211// default, and you would set the ones you wanted to be "user level"
212// in this way.
213//
214//
215//
216// Actually Stopping:
217//
218// If a plan responds "true" to ShouldStop, then it is asked if its
219// job is complete by calling MischiefManaged. If that returns true,
220// the plan is popped from the plan stack and added to the Completed
221// Plan Stack. Then the next plan in the stack is asked if it
222// ShouldStop, and if it returns "true", it is asked if it is done,
223// and if yes popped, and so on till we reach a plan that is not
224// done.
225//
226//
227// Since you often know in the ShouldStop method whether your plan is
228// complete, as a convenience you can call SetPlanComplete, and the
229// ThreadPlan implementation of MischiefManaged will return "true"
230// without your having to redo the calculation when your sub-class's
231// MischiefManaged is called. If you call SetPlanComplete, you can
232// later use IsPlanComplete to determine whether the plan is
233// complete. This is only a convenience for sub-classes; the logic
234// in lldb::Thread will only call MischiefManaged.
235//
236//
237// One slightly tricky point is you have to be careful using SetPlanComplete in
238// PlanExplainsStop because you
239// are not guaranteed that PlanExplainsStop for a plan will get called before
240// ShouldStop gets called. If your sub-plan
241// explained the stop and then popped itself, only your ShouldStop will get
242// called.
243//
244// If ShouldStop for any thread returns "true", then the WillStop method of the
245// Current plan of
246// all threads will be called, the stop event is placed on the Process's public
247// broadcaster, and
248// control returns to the upper layers of the debugger.
249//
250// Reporting the stop:
251//
252// When the process stops, the thread is given a StopReason, in the form of a
253// StopInfo object. If there is a completed
254// plan corresponding to the stop, then the "actual" stop reason can be
255// suppressed, and instead a StopInfoThreadPlan
256// object will be cons'ed up from the top completed plan in the stack.
257// However, if the plan doesn't want to be
258// the stop reason, then it can call SetPlanComplete and pass in "false" for
259// the "success" parameter. In that case,
260// the real stop reason will be used instead. One example of this is the
261// "StepRangeStepIn" thread plan. If it stops
262// because of a crash or breakpoint hit, it wants to unship itself, because it
263// isn't so useful to have step in keep going
264// after a breakpoint hit. But it can't be the reason for the stop or no-one
265// would see that they had hit a breakpoint.
266//
267// Cleaning up the plan stack:
268//
269// One of the complications of MasterPlans is that you may get past the limits
270// of a plan without triggering it to clean
271// itself up. For instance, if you are doing a MasterPlan StepOver, and hit a
272// breakpoint in a called function, then
273// step over enough times to step out of the initial StepOver range, each of
274// the step overs will explain the stop &
275// take themselves off the stack, but control would never be returned to the
276// original StepOver. Eventually, the user
277// will continue, and when that continue stops, the old stale StepOver plan
278// that was left on the stack will get woken
279// up and notice it is done. But that can leave junk on the stack for a while.
280// To avoid that, the plans implement a
281// "IsPlanStale" method, that can check whether it is relevant anymore. On
282// stop, after the regular plan negotiation,
283// the remaining plan stack is consulted and if any plan says it is stale, it
284// and the plans below it are discarded from
285// the stack.
286//
287// Automatically Resuming:
288//
289// If ShouldStop for all threads returns "false", then the target process will
290// resume. This then cycles back to
291// Resuming above.
292//
293// Reporting eStateStopped events when the target is restarted:
294//
295// If a plan decides to auto-continue the target by returning "false" from
296// ShouldStop, then it will be asked
297// whether the Stopped event should still be reported. For instance, if you
298// hit a breakpoint that is a User set
299// breakpoint, but the breakpoint callback said to continue the target process,
300// you might still want to inform
301// the upper layers of lldb that the stop had happened.
302// The way this works is every thread gets to vote on whether to report the
303// stop. If all votes are eVoteNoOpinion,
304// then the thread list will decide what to do (at present it will pretty much
305// always suppress these stopped events.)
306// If there is an eVoteYes, then the event will be reported regardless of the
307// other votes. If there is an eVoteNo
308// and no eVoteYes's, then the event won't be reported.
309//
310// One other little detail here, sometimes a plan will push another plan onto
311// the plan stack to do some part of
312// the first plan's job, and it would be convenient to tell that plan how it
313// should respond to ShouldReportStop.
314// You can do that by setting the stop_vote in the child plan when you create
315// it.
316//
317// Suppressing the initial eStateRunning event:
318//
319// The private process running thread will take care of ensuring that only one
320// "eStateRunning" event will be
321// delivered to the public Process broadcaster per public eStateStopped event.
322// However there are some cases
323// where the public state of this process is eStateStopped, but a thread plan
324// needs to restart the target, but
325// doesn't want the running event to be publicly broadcast. The obvious
326// example of this is running functions
327// by hand as part of expression evaluation. To suppress the running event
328// return eVoteNo from ShouldReportStop,
329// to force a running event to be reported return eVoteYes, in general though
330// you should return eVoteNoOpinion
331// which will allow the ThreadList to figure out the right thing to do.
332// The run_vote argument to the constructor works like stop_vote, and is a way
333// for a plan to instruct a sub-plan
334// on how to respond to ShouldReportStop.
335//
336//------------------------------------------------------------------
337
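
Before the class itself, an editorial sketch of the protocol the comment above describes: a minimal concrete plan that claims the first stop it sees and declares itself done. It implements exactly the pure virtuals declared below; the eKindGeneric kind and the vote choices are illustrative, not requirements.

// Hedged sketch of a minimal ThreadPlan subclass (not part of this file).
class ThreadPlanStopOnce : public ThreadPlan {
public:
  ThreadPlanStopOnce(Thread &thread)
      : ThreadPlan(eKindGeneric, "Stop once", thread, eVoteYes,
                   eVoteNoOpinion) {}

  void GetDescription(Stream *s, lldb::DescriptionLevel level) override {
    s->PutCString("stop-once plan");
  }

  // Nothing can go wrong building this plan, so it always validates
  // (and therefore always gets queued).
  bool ValidatePlan(Stream *error) override { return true; }

  bool ShouldStop(Event *event_ptr) override {
    SetPlanComplete(); // The base MischiefManaged() now returns true.
    return true;       // Percolate the stop all the way to the user.
  }

  bool WillStop() override { return true; }

protected:
  // Claim every stop; a real plan would inspect GetPrivateStopInfo().
  bool DoPlanExplainsStop(Event *event_ptr) override { return true; }

  lldb::StateType GetPlanRunState() override { return lldb::eStateRunning; }
};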
338class ThreadPlan : public std::enable_shared_from_this<ThreadPlan>,
339 public UserID {
340public:
341 typedef enum { eAllThreads, eSomeThreads, eThisThread } ThreadScope;
342
343  // We use these enums so that we can cast a base thread plan to its
344  // real type without having to resort
345  // to dynamic casting.
346 typedef enum {
347 eKindGeneric,
348 eKindNull,
349 eKindBase,
350 eKindCallFunction,
351 eKindPython,
352 eKindStepInstruction,
353 eKindStepOut,
354 eKindStepOverBreakpoint,
355 eKindStepOverRange,
356 eKindStepInRange,
357 eKindRunToAddress,
358 eKindStepThrough,
359 eKindStepUntil,
360 eKindTestCondition
361
362 } ThreadPlanKind;
363
364 //------------------------------------------------------------------
365 // Constructors and Destructors
366 //------------------------------------------------------------------
367 ThreadPlan(ThreadPlanKind kind, const char *name, Thread &thread,
368 Vote stop_vote, Vote run_vote);
369
370 virtual ~ThreadPlan();
371
372 //------------------------------------------------------------------
373 /// Returns the name of this thread plan.
374 ///
375 /// @return
376 /// A const char * pointer to the thread plan's name.
377 //------------------------------------------------------------------
378 const char *GetName() const { return m_name.c_str(); }
379
380 //------------------------------------------------------------------
381 /// Returns the Thread that is using this thread plan.
382 ///
383 /// @return
384 /// A pointer to the thread plan's owning thread.
385 //------------------------------------------------------------------
386 Thread &GetThread() { return m_thread; }
387
388 const Thread &GetThread() const { return m_thread; }
389
390 Target &GetTarget() { return m_thread.GetProcess()->GetTarget(); }
391
392 const Target &GetTarget() const { return m_thread.GetProcess()->GetTarget(); }
393
394 //------------------------------------------------------------------
395  /// Print a description of this thread plan to the stream \a s.
396  ///
397 ///
398 /// @param[in] s
399 /// The stream to which to print the description.
400 ///
401 /// @param[in] level
402 /// The level of description desired. Note that eDescriptionLevelBrief
403 /// will be used in the stop message printed when the plan is complete.
404 //------------------------------------------------------------------
405 virtual void GetDescription(Stream *s, lldb::DescriptionLevel level) = 0;
406
407 //------------------------------------------------------------------
408 /// Returns whether this plan could be successfully created.
409 ///
410 /// @param[in] error
411 /// A stream to which to print some reason why the plan could not be
412 /// created.
413 /// Can be NULL.
414 ///
415 /// @return
416 /// \b true if the plan should be queued, \b false otherwise.
417 //------------------------------------------------------------------
418 virtual bool ValidatePlan(Stream *error) = 0;
419
420 bool TracerExplainsStop() {
421 if (!m_tracer_sp)
422 return false;
423 else
424 return m_tracer_sp->TracerExplainsStop();
425 }
426
427 lldb::StateType RunState();
428
429 bool PlanExplainsStop(Event *event_ptr);
430
431 virtual bool ShouldStop(Event *event_ptr) = 0;
432
433 virtual bool ShouldAutoContinue(Event *event_ptr) { return false; }
434
435 // Whether a "stop class" event should be reported to the "outside world". In
436 // general
437 // if a thread plan is active, events should not be reported.
438
439 virtual Vote ShouldReportStop(Event *event_ptr);
440
441 virtual Vote ShouldReportRun(Event *event_ptr);
442
443 virtual void SetStopOthers(bool new_value);
444
445 virtual bool StopOthers();
446
447  // This is the wrapper for DoWillResume that does generic ThreadPlan
448  // logic, and then
449  // calls DoWillResume.
450 bool WillResume(lldb::StateType resume_state, bool current_plan);
451
452 virtual bool WillStop() = 0;
453
454 bool IsMasterPlan() { return m_is_master_plan; }
455
456 bool SetIsMasterPlan(bool value) {
457 bool old_value = m_is_master_plan;
458 m_is_master_plan = value;
459 return old_value;
460 }
461
462 virtual bool OkayToDiscard();
463
464 void SetOkayToDiscard(bool value) { m_okay_to_discard = value; }
465
466  // The base class MischiefManaged does some cleanup - so you have to
467  // call it from your derived class's MischiefManaged.
468 virtual bool MischiefManaged();
469
470 virtual void ThreadDestroyed() {
471 // Any cleanup that a plan might want to do in case the thread goes away
472 // in the middle of the plan being queued on a thread can be done here.
473 }
474
475 bool GetPrivate() { return m_plan_private; }
476
477 void SetPrivate(bool input) { m_plan_private = input; }
478
479 virtual void DidPush();
480
481 virtual void WillPop();
482
483 // This pushes a plan onto the plan stack of the current plan's thread.
484 void PushPlan(lldb::ThreadPlanSP &thread_plan_sp) {
485 m_thread.PushPlan(thread_plan_sp);
486 }
487
488 ThreadPlanKind GetKind() const { return m_kind; }
489
490 bool IsPlanComplete();
491
492 void SetPlanComplete(bool success = true);
493
494 virtual bool IsPlanStale() { return false; }
495
496 bool PlanSucceeded() { return m_plan_succeeded; }
497
498 virtual bool IsBasePlan() { return false; }
499
500 lldb::ThreadPlanTracerSP &GetThreadPlanTracer() { return m_tracer_sp; }
501
502 void SetThreadPlanTracer(lldb::ThreadPlanTracerSP new_tracer_sp) {
503 m_tracer_sp = new_tracer_sp;
Event 4: Calling defaulted copy assignment operator for 'shared_ptr'
Event 23: Returning; memory was released
Event 28: Calling defaulted copy assignment operator for 'shared_ptr'
504 }
505
506 void DoTraceLog() {
507 if (m_tracer_sp && m_tracer_sp->TracingEnabled())
508 m_tracer_sp->Log();
509 }
510
511  // Some thread plans hide away the actual stop info which caused any
512  // particular stop. For instance, the ThreadPlanCallFunction restores
513  // the original stop reason so that stopping and calling a few
514  // functions won't lose the history of the run.
515  // This call can be implemented to get you back to the real
516  // stop info.
517 virtual lldb::StopInfoSP GetRealStopInfo() { return m_thread.GetStopInfo(); }
518
519  // If the completion of the thread plan stepped out of a function, the
520  // return value of the function might have been captured by the thread
521  // plan (currently only ThreadPlanStepOut does this).
522  // If so, the ReturnValueObject
523  // can be retrieved from here.
524
525 virtual lldb::ValueObjectSP GetReturnValueObject() {
526 return lldb::ValueObjectSP();
527 }
528
529  // If the thread plan managing the evaluation of a user expression
530  // lives longer than the command that instigated the expression
531  // (generally because the expression evaluation hit a breakpoint,
532  // and the user regained control at that point), a subsequent
533  // process control command (step/continue/etc.) might complete the
534  // expression evaluation. If so, the result of the expression
535  // evaluation will show up here.
536  //
537
538 virtual lldb::ExpressionVariableSP GetExpressionVariable() {
539 return lldb::ExpressionVariableSP();
540 }
541
542 // If a thread plan stores the state before it was run, then you might
543 // want to restore the state when it is done. This will do that job.
544 // This is mostly useful for artificial plans like CallFunction plans.
545
546 virtual bool RestoreThreadState() {
547 // Nothing to do in general.
548 return true;
549 }
550
551 virtual bool IsVirtualStep() { return false; }
552
553 virtual bool SetIterationCount(size_t count) {
554 if (m_takes_iteration_count) {
555 // Don't tell me to do something 0 times...
556 if (count == 0)
557 return false;
558 m_iteration_count = count;
559 }
560 return m_takes_iteration_count;
561 }
562
563 virtual size_t GetIterationCount() {
564 if (!m_takes_iteration_count)
565 return 0;
566 else
567 return m_iteration_count;
568 }
569
570protected:
571 //------------------------------------------------------------------
572 // Classes that inherit from ThreadPlan can see and modify these
573 //------------------------------------------------------------------
574
575 virtual bool DoWillResume(lldb::StateType resume_state, bool current_plan) {
576 return true;
577 }
578
579 virtual bool DoPlanExplainsStop(Event *event_ptr) = 0;
580
581  // This gets the previous plan to the current plan (for forwarding
582  // requests). This is mostly a formal requirement; it allows us to make
583  // Thread's GetPreviousPlan protected, and only friend ThreadPlan to Thread.
584
585 ThreadPlan *GetPreviousPlan() { return m_thread.GetPreviousPlan(this); }
586
587  // This forwards the private Thread::GetPrivateStopInfo, which is
588  // generally what
589  // ThreadPlans need to know.
590
591 lldb::StopInfoSP GetPrivateStopInfo() {
592 return m_thread.GetPrivateStopInfo();
593 }
594
595 void SetStopInfo(lldb::StopInfoSP stop_reason_sp) {
596 m_thread.SetStopInfo(stop_reason_sp);
597 }
598
599 void CachePlanExplainsStop(bool does_explain) {
600 m_cached_plan_explains_stop = does_explain ? eLazyBoolYes : eLazyBoolNo;
601 }
602
603 LazyBool GetCachedPlanExplainsStop() const {
604 return m_cached_plan_explains_stop;
605 }
606
607 virtual lldb::StateType GetPlanRunState() = 0;
608
609 bool IsUsuallyUnexplainedStopReason(lldb::StopReason);
610
611 Thread &m_thread;
612 Vote m_stop_vote;
613 Vote m_run_vote;
614 bool m_takes_iteration_count = false;
615 int32_t m_iteration_count = 1;
616
617private:
618 //------------------------------------------------------------------
619 // For ThreadPlan only
620 //------------------------------------------------------------------
621 static lldb::user_id_t GetNextID();
622
623 ThreadPlanKind m_kind;
624 std::string m_name;
625 std::recursive_mutex m_plan_complete_mutex;
626 LazyBool m_cached_plan_explains_stop;
627 bool m_plan_complete;
628 bool m_plan_private;
629 bool m_okay_to_discard;
630 bool m_is_master_plan;
631 bool m_plan_succeeded;
632
633 lldb::ThreadPlanTracerSP m_tracer_sp;
634
635private:
636  DISALLOW_COPY_AND_ASSIGN(ThreadPlan);
637};
638
639//----------------------------------------------------------------------
640// ThreadPlanNull:
641// Threads are assumed to always have at least one plan on the plan stack.
642// This is put on the plan stack when a thread is destroyed so that if you
643// accidentally access a thread after it is destroyed you won't crash.
644// But asking questions of the ThreadPlanNull is definitely an error.
645//----------------------------------------------------------------------
646
647class ThreadPlanNull : public ThreadPlan {
648public:
649 ThreadPlanNull(Thread &thread);
650 ~ThreadPlanNull() override;
651
652 void GetDescription(Stream *s, lldb::DescriptionLevel level) override;
653
654 bool ValidatePlan(Stream *error) override;
655
656 bool ShouldStop(Event *event_ptr) override;
657
658 bool MischiefManaged() override;
659
660 bool WillStop() override;
661
662 bool IsBasePlan() override { return true; }
663
664 bool OkayToDiscard() override { return false; }
665
666protected:
667 bool DoPlanExplainsStop(Event *event_ptr) override;
668
669 lldb::StateType GetPlanRunState() override;
670
671  DISALLOW_COPY_AND_ASSIGN(ThreadPlanNull);
672};
673
674} // namespace lldb_private
675
676#endif // liblldb_ThreadPlan_h_

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/bits/shared_ptr.h

1// shared_ptr and weak_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_H
50#define _SHARED_PTR_H 1
51
52#include <bits/shared_ptr_base.h>
53
54namespace std _GLIBCXX_VISIBILITY(default)
55{
56_GLIBCXX_BEGIN_NAMESPACE_VERSION
57
58 /**
59 * @addtogroup pointer_abstractions
60 * @{
61 */
62
63 /// 20.7.2.2.11 shared_ptr I/O
64 template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy _Lp>
65 inline std::basic_ostream<_Ch, _Tr>&
66 operator<<(std::basic_ostream<_Ch, _Tr>& __os,
67 const __shared_ptr<_Tp, _Lp>& __p)
68 {
69 __os << __p.get();
70 return __os;
71 }
72
73 /// 20.7.2.2.10 shared_ptr get_deleter
74 template<typename _Del, typename _Tp, _Lock_policy _Lp>
75 inline _Del*
76 get_deleter(const __shared_ptr<_Tp, _Lp>& __p) noexcept
77 {
78#if __cpp_rtti
79 return static_cast<_Del*>(__p._M_get_deleter(typeid(_Del)));
80#else
81 return 0;
82#endif
83 }
84
85
86 /**
87 * @brief A smart pointer with reference-counted copy semantics.
88 *
89 * The object pointed to is deleted when the last shared_ptr pointing to
90 * it is destroyed or reset.
91 */
92 template<typename _Tp>
93 class shared_ptr : public __shared_ptr<_Tp>
94 {
95 template<typename... _Args>
96 using _Constructible = typename enable_if<
97 is_constructible<__shared_ptr<_Tp>, _Args...>::value
98 >::type;
99
100 template<typename _Arg>
101 using _Assignable = typename enable_if<
102 is_assignable<__shared_ptr<_Tp>&, _Arg>::value, shared_ptr&
103 >::type;
104
105 public:
106
107 using element_type = typename __shared_ptr<_Tp>::element_type;
108
109#if __cplusplus > 201402L
110# define __cpp_lib_shared_ptr_weak_type 201606
111 using weak_type = weak_ptr<_Tp>;
112#endif
113 /**
114 * @brief Construct an empty %shared_ptr.
115 * @post use_count()==0 && get()==0
116 */
117 constexpr shared_ptr() noexcept : __shared_ptr<_Tp>() { }
118
119 shared_ptr(const shared_ptr&) noexcept = default;
120
121 /**
122 * @brief Construct a %shared_ptr that owns the pointer @a __p.
123 * @param __p A pointer that is convertible to element_type*.
124 * @post use_count() == 1 && get() == __p
125 * @throw std::bad_alloc, in which case @c delete @a __p is called.
126 */
127 template<typename _Yp, typename = _Constructible<_Yp*>>
128 explicit
129 shared_ptr(_Yp* __p) : __shared_ptr<_Tp>(__p) { }
130
131 /**
132 * @brief Construct a %shared_ptr that owns the pointer @a __p
133 * and the deleter @a __d.
134 * @param __p A pointer.
135 * @param __d A deleter.
136 * @post use_count() == 1 && get() == __p
137 * @throw std::bad_alloc, in which case @a __d(__p) is called.
138 *
139 * Requirements: _Deleter's copy constructor and destructor must
140 * not throw
141 *
142 * __shared_ptr will release __p by calling __d(__p)
143 */
144 template<typename _Yp, typename _Deleter,
145 typename = _Constructible<_Yp*, _Deleter>>
146 shared_ptr(_Yp* __p, _Deleter __d)
147 : __shared_ptr<_Tp>(__p, std::move(__d)) { }
148
149 /**
150 * @brief Construct a %shared_ptr that owns a null pointer
151 * and the deleter @a __d.
152 * @param __p A null pointer constant.
153 * @param __d A deleter.
154 * @post use_count() == 1 && get() == __p
155 * @throw std::bad_alloc, in which case @a __d(__p) is called.
156 *
157 * Requirements: _Deleter's copy constructor and destructor must
158 * not throw
159 *
160 * The last owner will call __d(__p)
161 */
162 template<typename _Deleter>
163 shared_ptr(nullptr_t __p, _Deleter __d)
164 : __shared_ptr<_Tp>(__p, std::move(__d)) { }
165
166 /**
167 * @brief Construct a %shared_ptr that owns the pointer @a __p
168 * and the deleter @a __d.
169 * @param __p A pointer.
170 * @param __d A deleter.
171 * @param __a An allocator.
172 * @post use_count() == 1 && get() == __p
173 * @throw std::bad_alloc, in which case @a __d(__p) is called.
174 *
175 * Requirements: _Deleter's copy constructor and destructor must
176 * not throw. _Alloc's copy constructor and destructor must not
177 * throw.
178 *
179 * __shared_ptr will release __p by calling __d(__p)
180 */
181 template<typename _Yp, typename _Deleter, typename _Alloc,
182 typename = _Constructible<_Yp*, _Deleter, _Alloc>>
183 shared_ptr(_Yp* __p, _Deleter __d, _Alloc __a)
184 : __shared_ptr<_Tp>(__p, std::move(__d), std::move(__a)) { }
185
186 /**
187 * @brief Construct a %shared_ptr that owns a null pointer
188 * and the deleter @a __d.
189 * @param __p A null pointer constant.
190 * @param __d A deleter.
191 * @param __a An allocator.
192 * @post use_count() == 1 && get() == __p
193 * @throw std::bad_alloc, in which case @a __d(__p) is called.
194 *
195 * Requirements: _Deleter's copy constructor and destructor must
196 * not throw. _Alloc's copy constructor and destructor must not
197 * throw.
198 *
199 * The last owner will call __d(__p)
200 */
201 template<typename _Deleter, typename _Alloc>
202 shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
203 : __shared_ptr<_Tp>(__p, std::move(__d), std::move(__a)) { }
204
205 // Aliasing constructor
206
207 /**
208 * @brief Constructs a %shared_ptr instance that stores @a __p
209 * and shares ownership with @a __r.
210 * @param __r A %shared_ptr.
211 * @param __p A pointer that will remain valid while @a *__r is valid.
212 * @post get() == __p && use_count() == __r.use_count()
213 *
214 * This can be used to construct a @c shared_ptr to a sub-object
215 * of an object managed by an existing @c shared_ptr.
216 *
217 * @code
218 * shared_ptr< pair<int,int> > pii(new pair<int,int>());
219 * shared_ptr<int> pi(pii, &pii->first);
220 * assert(pii.use_count() == 2);
221 * @endcode
222 */
223 template<typename _Yp>
224 shared_ptr(const shared_ptr<_Yp>& __r, element_type* __p) noexcept
225 : __shared_ptr<_Tp>(__r, __p) { }
226
227 /**
228 * @brief If @a __r is empty, constructs an empty %shared_ptr;
229 * otherwise construct a %shared_ptr that shares ownership
230 * with @a __r.
231 * @param __r A %shared_ptr.
232 * @post get() == __r.get() && use_count() == __r.use_count()
233 */
234 template<typename _Yp,
235 typename = _Constructible<const shared_ptr<_Yp>&>>
236 shared_ptr(const shared_ptr<_Yp>& __r) noexcept
237 : __shared_ptr<_Tp>(__r) { }
238
239 /**
240 * @brief Move-constructs a %shared_ptr instance from @a __r.
241 * @param __r A %shared_ptr rvalue.
242 * @post *this contains the old value of @a __r, @a __r is empty.
243 */
244 shared_ptr(shared_ptr&& __r) noexcept
245 : __shared_ptr<_Tp>(std::move(__r)) { }
246
247 /**
248 * @brief Move-constructs a %shared_ptr instance from @a __r.
249 * @param __r A %shared_ptr rvalue.
250 * @post *this contains the old value of @a __r, @a __r is empty.
251 */
252 template<typename _Yp, typename = _Constructible<shared_ptr<_Yp>>>
253 shared_ptr(shared_ptr<_Yp>&& __r) noexcept
254 : __shared_ptr<_Tp>(std::move(__r)) { }
255
256 /**
257 * @brief Constructs a %shared_ptr that shares ownership with @a __r
258 * and stores a copy of the pointer stored in @a __r.
259 * @param __r A weak_ptr.
260 * @post use_count() == __r.use_count()
261 * @throw bad_weak_ptr when __r.expired(),
262 * in which case the constructor has no effect.
263 */
264 template<typename _Yp, typename = _Constructible<const weak_ptr<_Yp>&>>
265 explicit shared_ptr(const weak_ptr<_Yp>& __r)
266 : __shared_ptr<_Tp>(__r) { }
267
268#if _GLIBCXX_USE_DEPRECATED
269 template<typename _Yp, typename = _Constructible<auto_ptr<_Yp>>>
270 shared_ptr(auto_ptr<_Yp>&& __r);
271#endif
272
273 // _GLIBCXX_RESOLVE_LIB_DEFECTS
274 // 2399. shared_ptr's constructor from unique_ptr should be constrained
275 template<typename _Yp, typename _Del,
276 typename = _Constructible<unique_ptr<_Yp, _Del>>>
277 shared_ptr(unique_ptr<_Yp, _Del>&& __r)
278 : __shared_ptr<_Tp>(std::move(__r)) { }
279
280#if __cplusplus <= 201402L && _GLIBCXX_USE_DEPRECATED
281 // This non-standard constructor exists to support conversions that
282 // were possible in C++11 and C++14 but are ill-formed in C++17.
283 // If an exception is thrown this constructor has no effect.
284 template<typename _Yp, typename _Del,
285 _Constructible<unique_ptr<_Yp, _Del>, __sp_array_delete>* = 0>
286 shared_ptr(unique_ptr<_Yp, _Del>&& __r)
287 : __shared_ptr<_Tp>(std::move(__r), __sp_array_delete()) { }
288#endif
289
290 /**
291 * @brief Construct an empty %shared_ptr.
292 * @post use_count() == 0 && get() == nullptr
293 */
294 constexpr shared_ptr(nullptr_t) noexcept : shared_ptr() { }
295
296 shared_ptr& operator=(const shared_ptr&) noexcept = default;
5. Calling defaulted copy assignment operator for '__shared_ptr'
22. Returning; memory was released
29. Calling defaulted copy assignment operator for '__shared_ptr'
297
298 template<typename _Yp>
299 _Assignable<const shared_ptr<_Yp>&>
300 operator=(const shared_ptr<_Yp>& __r) noexcept
301 {
302 this->__shared_ptr<_Tp>::operator=(__r);
303 return *this;
304 }
305
306#if _GLIBCXX_USE_DEPRECATED
307 template<typename _Yp>
308 _Assignable<auto_ptr<_Yp>>
309 operator=(auto_ptr<_Yp>&& __r)
310 {
311 this->__shared_ptr<_Tp>::operator=(std::move(__r));
312 return *this;
313 }
314#endif
315
316 shared_ptr&
317 operator=(shared_ptr&& __r) noexcept
318 {
319 this->__shared_ptr<_Tp>::operator=(std::move(__r));
320 return *this;
321 }
322
323 template<class _Yp>
324 _Assignable<shared_ptr<_Yp>>
325 operator=(shared_ptr<_Yp>&& __r) noexcept
326 {
327 this->__shared_ptr<_Tp>::operator=(std::move(__r));
328 return *this;
329 }
330
331 template<typename _Yp, typename _Del>
332 _Assignable<unique_ptr<_Yp, _Del>>
333 operator=(unique_ptr<_Yp, _Del>&& __r)
334 {
335 this->__shared_ptr<_Tp>::operator=(std::move(__r));
336 return *this;
337 }
338
339 private:
340 // This constructor is non-standard, it is used by allocate_shared.
341 template<typename _Alloc, typename... _Args>
342 shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
343 _Args&&... __args)
344 : __shared_ptr<_Tp>(__tag, __a, std::forward<_Args>(__args)...)
345 { }
346
347 template<typename _Yp, typename _Alloc, typename... _Args>
348 friend shared_ptr<_Yp>
349 allocate_shared(const _Alloc& __a, _Args&&... __args);
350
351 // This constructor is non-standard, it is used by weak_ptr::lock().
352 shared_ptr(const weak_ptr<_Tp>& __r, std::nothrow_t)
353 : __shared_ptr<_Tp>(__r, std::nothrow) { }
354
355 friend class weak_ptr<_Tp>;
356 };
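The defaulted copy assignment at 296 is where the analyzer's path enters this header (steps 5 and 29 above): it forwards to __shared_ptr's defaulted assignment, which in turn copy-assigns the __shared_count member. A minimal user-level sketch of what that entails (Widget is an illustrative name, not from the report):

    #include <memory>

    struct Widget { };

    int main() {
      auto a = std::make_shared<Widget>();
      auto b = std::make_shared<Widget>();
      b = a;  // drops b's old reference; as the last owner, b's original
              // Widget and its control block are freed (cf. steps 11-20)
    }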
357
358#if __cpp_deduction_guides >= 201606
359 template<typename _Tp>
360 shared_ptr(weak_ptr<_Tp>) -> shared_ptr<_Tp>;
361 template<typename _Tp, typename _Del>
362 shared_ptr(unique_ptr<_Tp, _Del>) -> shared_ptr<_Tp>;
363#endif
364
365 // 20.7.2.2.7 shared_ptr comparisons
366 template<typename _Tp, typename _Up>
367 inline bool
368 operator==(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
369 { return __a.get() == __b.get(); }
370
371 template<typename _Tp>
372 inline bool
373 operator==(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
374 { return !__a; }
375
376 template<typename _Tp>
377 inline bool
378 operator==(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
379 { return !__a; }
380
381 template<typename _Tp, typename _Up>
382 inline bool
383 operator!=(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
384 { return __a.get() != __b.get(); }
385
386 template<typename _Tp>
387 inline bool
388 operator!=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
389 { return (bool)__a; }
390
391 template<typename _Tp>
392 inline bool
393 operator!=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
394 { return (bool)__a; }
395
396 template<typename _Tp, typename _Up>
397 inline bool
398 operator<(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
399 {
400 using _Tp_elt = typename shared_ptr<_Tp>::element_type;
401 using _Up_elt = typename shared_ptr<_Up>::element_type;
402 using _Vp = typename common_type<_Tp_elt*, _Up_elt*>::type;
403 return less<_Vp>()(__a.get(), __b.get());
404 }
405
406 template<typename _Tp>
407 inline bool
408 operator<(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
409 {
410 using _Tp_elt = typename shared_ptr<_Tp>::element_type;
411 return less<_Tp_elt*>()(__a.get(), nullptr);
412 }
413
414 template<typename _Tp>
415 inline bool
416 operator<(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
417 {
418 using _Tp_elt = typename shared_ptr<_Tp>::element_type;
419 return less<_Tp_elt*>()(nullptr, __a.get());
420 }
421
422 template<typename _Tp, typename _Up>
423 inline bool
424 operator<=(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
425 { return !(__b < __a); }
426
427 template<typename _Tp>
428 inline bool
429 operator<=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
430 { return !(nullptr < __a); }
431
432 template<typename _Tp>
433 inline bool
434 operator<=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
435 { return !(__a < nullptr); }
436
437 template<typename _Tp, typename _Up>
438 inline bool
439 operator>(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
440 { return (__b < __a); }
441
442 template<typename _Tp>
443 inline bool
444 operator>(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
445 { return nullptr < __a; }
446
447 template<typename _Tp>
448 inline bool
449 operator>(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
450 { return __a < nullptr; }
451
452 template<typename _Tp, typename _Up>
453 inline bool
454 operator>=(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
455 { return !(__a < __b); }
456
457 template<typename _Tp>
458 inline bool
459 operator>=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
460 { return !(__a < nullptr); }
461
462 template<typename _Tp>
463 inline bool
464 operator>=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
465 { return !(nullptr < __a); }
466
467 template<typename _Tp>
468 struct less<shared_ptr<_Tp>> : public _Sp_less<shared_ptr<_Tp>>
469 { };
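A short usage sketch of the comparison operators and the less<> specialization above (illustrative only): all of them compare the stored pointers, never the pointed-to values.

    #include <cassert>
    #include <memory>

    int main() {
      auto a = std::make_shared<int>(1);
      auto b = std::make_shared<int>(1);
      assert(a != b);            // distinct allocations with equal values
      std::shared_ptr<int> e;
      assert(e == nullptr && a != nullptr);
    }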
470
471 // 20.7.2.2.8 shared_ptr specialized algorithms.
472 template<typename _Tp>
473 inline void
474 swap(shared_ptr<_Tp>& __a, shared_ptr<_Tp>& __b) noexcept
475 { __a.swap(__b); }
476
477 // 20.7.2.2.9 shared_ptr casts.
478 template<typename _Tp, typename _Up>
479 inline shared_ptr<_Tp>
480 static_pointer_cast(const shared_ptr<_Up>& __r) noexcept
481 {
482 using _Sp = shared_ptr<_Tp>;
483 return _Sp(__r, static_cast<typename _Sp::element_type*>(__r.get()));
484 }
485
486 template<typename _Tp, typename _Up>
487 inline shared_ptr<_Tp>
488 const_pointer_cast(const shared_ptr<_Up>& __r) noexcept
489 {
490 using _Sp = shared_ptr<_Tp>;
491 return _Sp(__r, const_cast<typename _Sp::element_type*>(__r.get()));
492 }
493
494 template<typename _Tp, typename _Up>
495 inline shared_ptr<_Tp>
496 dynamic_pointer_cast(const shared_ptr<_Up>& __r) noexcept
497 {
498 using _Sp = shared_ptr<_Tp>;
499 if (auto* __p = dynamic_cast<typename _Sp::element_type*>(__r.get()))
500 return _Sp(__r, __p);
501 return _Sp();
502 }
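A hypothetical usage sketch of the three casts above (Base and Derived are illustrative names): each returns a shared_ptr built with the aliasing constructor, so the result shares ownership with its source.

    #include <cassert>
    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { };

    int main() {
      std::shared_ptr<Base> b = std::make_shared<Derived>();
      auto d = std::dynamic_pointer_cast<Derived>(b); // runtime-checked
      auto s = std::static_pointer_cast<Derived>(b);  // no runtime check
      auto c = std::const_pointer_cast<const Base>(b);
      assert(d && b.use_count() == 4); // one control block, four owners
    }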
503
504#if __cplusplus > 201402L
505 template<typename _Tp, typename _Up>
506 inline shared_ptr<_Tp>
507 reinterpret_pointer_cast(const shared_ptr<_Up>& __r) noexcept
508 {
509 using _Sp = shared_ptr<_Tp>;
510 return _Sp(__r, reinterpret_cast<typename _Sp::element_type*>(__r.get()));
511 }
512#endif
513
514 /**
515 * @brief A smart pointer with weak semantics.
516 *
517 * With forwarding constructors and assignment operators.
518 */
519 template<typename _Tp>
520 class weak_ptr : public __weak_ptr<_Tp>
521 {
522 template<typename _Arg>
523 using _Constructible = typename enable_if<
524 is_constructible<__weak_ptr<_Tp>, _Arg>::value
525 >::type;
526
527 template<typename _Arg>
528 using _Assignable = typename enable_if<
529 is_assignable<__weak_ptr<_Tp>&, _Arg>::value, weak_ptr&
530 >::type;
531
532 public:
533 constexpr weak_ptr() noexcept = default;
534
535 template<typename _Yp,
536 typename = _Constructible<const shared_ptr<_Yp>&>>
537 weak_ptr(const shared_ptr<_Yp>& __r) noexcept
538 : __weak_ptr<_Tp>(__r) { }
539
540 weak_ptr(const weak_ptr&) noexcept = default;
541
542 template<typename _Yp, typename = _Constructible<const weak_ptr<_Yp>&>>
543 weak_ptr(const weak_ptr<_Yp>& __r) noexcept
544 : __weak_ptr<_Tp>(__r) { }
545
546 weak_ptr(weak_ptr&&) noexcept = default;
547
548 template<typename _Yp, typename = _Constructible<weak_ptr<_Yp>>>
549 weak_ptr(weak_ptr<_Yp>&& __r) noexcept
550 : __weak_ptr<_Tp>(std::move(__r)) { }
551
552 weak_ptr&
553 operator=(const weak_ptr& __r) noexcept = default;
554
555 template<typename _Yp>
556 _Assignable<const weak_ptr<_Yp>&>
557 operator=(const weak_ptr<_Yp>& __r) noexcept
558 {
559 this->__weak_ptr<_Tp>::operator=(__r);
560 return *this;
561 }
562
563 template<typename _Yp>
564 _Assignable<const shared_ptr<_Yp>&>
565 operator=(const shared_ptr<_Yp>& __r) noexcept
566 {
567 this->__weak_ptr<_Tp>::operator=(__r);
568 return *this;
569 }
570
571 weak_ptr&
572 operator=(weak_ptr&& __r) noexcept = default;
573
574 template<typename _Yp>
575 _Assignable<weak_ptr<_Yp>>
576 operator=(weak_ptr<_Yp>&& __r) noexcept
577 {
578 this->__weak_ptr<_Tp>::operator=(std::move(__r));
579 return *this;
580 }
581
582 shared_ptr<_Tp>
583 lock() const noexcept
584 { return shared_ptr<_Tp>(*this, std::nothrow); }
585 };
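Note that lock() above uses the nothrow __shared_ptr constructor, so an expired weak_ptr yields an empty shared_ptr instead of throwing bad_weak_ptr. A minimal sketch:

    #include <cassert>
    #include <memory>

    int main() {
      std::weak_ptr<int> w;
      {
        auto sp = std::make_shared<int>(7);
        w = sp;
        assert(w.lock() != nullptr);  // a strong owner still exists
      }                               // use count drops to zero here
      assert(w.lock() == nullptr);    // expired: empty result, no throw
      // std::shared_ptr<int>(w) at this point would throw bad_weak_ptr.
    }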
586
587#if __cpp_deduction_guides >= 201606
588 template<typename _Tp>
589 weak_ptr(shared_ptr<_Tp>) -> weak_ptr<_Tp>;
590#endif
591
592 // 20.7.2.3.6 weak_ptr specialized algorithms.
593 template<typename _Tp>
594 inline void
595 swap(weak_ptr<_Tp>& __a, weak_ptr<_Tp>& __b) noexcept
596 { __a.swap(__b); }
597
598
599 /// Primary template owner_less
600 template<typename _Tp = void>
601 struct owner_less;
602
603 /// Void specialization of owner_less
604 template<>
605 struct owner_less<void> : _Sp_owner_less<void, void>
606 { };
607
608 /// Partial specialization of owner_less for shared_ptr.
609 template<typename _Tp>
610 struct owner_less<shared_ptr<_Tp>>
611 : public _Sp_owner_less<shared_ptr<_Tp>, weak_ptr<_Tp>>
612 { };
613
614 /// Partial specialization of owner_less for weak_ptr.
615 template<typename _Tp>
616 struct owner_less<weak_ptr<_Tp>>
617 : public _Sp_owner_less<weak_ptr<_Tp>, shared_ptr<_Tp>>
618 { };
619
620 /**
621 * @brief Base class allowing use of member function shared_from_this.
622 */
623 template<typename _Tp>
624 class enable_shared_from_this
625 {
626 protected:
627 constexpr enable_shared_from_this() noexcept { }
628
629 enable_shared_from_this(const enable_shared_from_this&) noexcept { }
630
631 enable_shared_from_this&
632 operator=(const enable_shared_from_this&) noexcept
633 { return *this; }
634
635 ~enable_shared_from_this() { }
636
637 public:
638 shared_ptr<_Tp>
639 shared_from_this()
640 { return shared_ptr<_Tp>(this->_M_weak_this); }
641
642 shared_ptr<const _Tp>
643 shared_from_this() const
644 { return shared_ptr<const _Tp>(this->_M_weak_this); }
645
646#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
647#define __cpp_lib_enable_shared_from_this 201603
648 weak_ptr<_Tp>
649 weak_from_this() noexcept
650 { return this->_M_weak_this; }
651
652 weak_ptr<const _Tp>
653 weak_from_this() const noexcept
654 { return this->_M_weak_this; }
655#endif
656
657 private:
658 template<typename _Tp1>
659 void
660 _M_weak_assign(_Tp1* __p, const __shared_count<>& __n) const noexcept
661 { _M_weak_this._M_assign(__p, __n); }
662
663 // Found by ADL when this is an associated class.
664 friend const enable_shared_from_this*
665 __enable_shared_from_this_base(const __shared_count<>&,
666 const enable_shared_from_this* __p)
667 { return __p; }
668
669 template<typename, _Lock_policy>
670 friend class __shared_ptr;
671
672 mutable weak_ptr<_Tp> _M_weak_this;
673 };
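A minimal usage sketch of the enable_shared_from_this protocol above (Session is an illustrative name): the first shared_ptr that takes ownership finds the base class through the ADL helper __enable_shared_from_this_base and seeds _M_weak_this.

    #include <memory>

    struct Session : std::enable_shared_from_this<Session> {
      std::shared_ptr<Session> self() { return shared_from_this(); }
    };

    int main() {
      auto s = std::make_shared<Session>(); // seeds _M_weak_this
      auto t = s->self();                   // shares s's control block
      // On a Session not owned by any shared_ptr, shared_from_this()
      // goes through the throwing weak_ptr conversion (bad_weak_ptr).
    }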
674
675 /**
676 * @brief Create an object that is owned by a shared_ptr.
677 * @param __a An allocator.
678 * @param __args Arguments for the @a _Tp object's constructor.
679 * @return A shared_ptr that owns the newly created object.
680 * @throw An exception thrown from @a _Alloc::allocate or from the
681 * constructor of @a _Tp.
682 *
683 * A copy of @a __a will be used to allocate memory for the shared_ptr
684 * and the new object.
685 */
686 template<typename _Tp, typename _Alloc, typename... _Args>
687 inline shared_ptr<_Tp>
688 allocate_shared(const _Alloc& __a, _Args&&... __args)
689 {
690 return shared_ptr<_Tp>(_Sp_make_shared_tag(), __a,
691 std::forward<_Args>(__args)...);
692 }
693
694 /**
695 * @brief Create an object that is owned by a shared_ptr.
696 * @param __args Arguments for the @a _Tp object's constructor.
697 * @return A shared_ptr that owns the newly created object.
698 * @throw std::bad_alloc, or an exception thrown from the
699 * constructor of @a _Tp.
700 */
701 template<typename _Tp, typename... _Args>
702 inline shared_ptr<_Tp>
703 make_shared(_Args&&... __args)
704 {
705 typedef typename std::remove_const<_Tp>::type _Tp_nc;
706 return std::allocate_shared<_Tp>(std::allocator<_Tp_nc>(),
707 std::forward<_Args>(__args)...);
708 }
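For contrast, a small sketch of the point of routing make_shared above through allocate_shared: the object and its control block come from one allocation (via _Sp_counted_ptr_inplace in shared_ptr_base.h below), while construction from a raw new needs two.

    #include <memory>
    #include <string>

    int main() {
      auto one = std::make_shared<std::string>(5, 'x');          // one allocation
      std::shared_ptr<std::string> two(new std::string(5, 'x')); // two allocations
    }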
709
710 /// std::hash specialization for shared_ptr.
711 template<typename _Tp>
712 struct hash<shared_ptr<_Tp>>
713 : public __hash_base<size_t, shared_ptr<_Tp>>
714 {
715 size_t
716 operator()(const shared_ptr<_Tp>& __s) const noexcept
717 {
718 return std::hash<typename shared_ptr<_Tp>::element_type*>()(__s.get());
719 }
720 };
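A brief usage sketch of the hash specialization above (illustrative): hashing a shared_ptr hashes the stored pointer, so two distinct allocations holding equal values are distinct keys.

    #include <memory>
    #include <unordered_set>

    int main() {
      std::unordered_set<std::shared_ptr<int>> keys;
      auto p = std::make_shared<int>(1);
      keys.insert(p);
      keys.insert(std::make_shared<int>(1)); // different pointer: size() == 2
    }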
721
722 // @} group pointer_abstractions
723
724_GLIBCXX_END_NAMESPACE_VERSION
725} // namespace
726
727#endif // _SHARED_PTR_H

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/bits/shared_ptr_base.h

1// shared_ptr and weak_ptr implementation details -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file bits/shared_ptr_base.h
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_BASE_H
50#define _SHARED_PTR_BASE_H 1
51
52#if __cpp_rtti
53# include <typeinfo>
54#endif
55#include <bits/allocated_ptr.h>
56#include <bits/refwrap.h>
57#include <bits/stl_function.h>
58#include <ext/aligned_buffer.h>
59
60namespace std _GLIBCXX_VISIBILITY(default)
61{
62_GLIBCXX_BEGIN_NAMESPACE_VERSION
63
64#if _GLIBCXX_USE_DEPRECATED
65 template<typename> class auto_ptr;
66#endif
67
68 /**
69 * @brief Exception possibly thrown by @c shared_ptr.
70 * @ingroup exceptions
71 */
72 class bad_weak_ptr : public std::exception
73 {
74 public:
75 virtual char const* what() const noexcept;
76
77 virtual ~bad_weak_ptr() noexcept;
78 };
79
80 // Substitute for bad_weak_ptr object in the case of -fno-exceptions.
81 inline void
82 __throw_bad_weak_ptr()
83 { _GLIBCXX_THROW_OR_ABORT(bad_weak_ptr()); }
84
85 using __gnu_cxx::_Lock_policy;
86 using __gnu_cxx::__default_lock_policy;
87 using __gnu_cxx::_S_single;
88 using __gnu_cxx::_S_mutex;
89 using __gnu_cxx::_S_atomic;
90
91 // Empty helper class except when the template argument is _S_mutex.
92 template<_Lock_policy _Lp>
93 class _Mutex_base
94 {
95 protected:
96 // The atomic policy uses fully-fenced builtins, single doesn't care.
97 enum { _S_need_barriers = 0 };
98 };
99
100 template<>
101 class _Mutex_base<_S_mutex>
102 : public __gnu_cxx::__mutex
103 {
104 protected:
105 // This policy is used when atomic builtins are not available.
106 // The replacement atomic operations might not have the necessary
107 // memory barriers.
108 enum { _S_need_barriers = 1 };
109 };
110
111 template<_Lock_policy _Lp = __default_lock_policy>
112 class _Sp_counted_base
113 : public _Mutex_base<_Lp>
114 {
115 public:
116 _Sp_counted_base() noexcept
117 : _M_use_count(1), _M_weak_count(1) { }
118
119 virtual
120 ~_Sp_counted_base() noexcept
121 { }
122
123 // Called when _M_use_count drops to zero, to release the resources
124 // managed by *this.
125 virtual void
126 _M_dispose() noexcept = 0;
127
128 // Called when _M_weak_count drops to zero.
129 virtual void
130 _M_destroy() noexcept
131 { delete this; }
18. Memory is released
132
133 virtual void*
134 _M_get_deleter(const std::type_info&) noexcept = 0;
135
136 void
137 _M_add_ref_copy()
138 { __gnu_cxx::__atomic_add_dispatch(&_M_use_count, 1); }
139
140 void
141 _M_add_ref_lock();
142
143 bool
144 _M_add_ref_lock_nothrow();
145
146 void
147 _M_release() noexcept
148 {
149 // Be race-detector-friendly. For more info see bits/c++config.
150 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
151 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
12. Assuming the condition is true
13. Taking true branch
36. Calling '__exchange_and_add_dispatch'
152 {
153 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
154 _M_dispose();
155 // There must be a memory barrier between dispose() and destroy()
156 // to ensure that the effects of dispose() are observed in the
157 // thread that runs destroy().
158 // See http://gcc.gnu.org/ml/libstdc++/2005-11/msg00136.html
159 if (_Mutex_base<_Lp>::_S_need_barriers)
14. Taking false branch
160 {
161 __atomic_thread_fence (__ATOMIC_ACQ_REL);
162 }
163
164 // Be race-detector-friendly. For more info see bits/c++config.
165 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
166 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count,
15. Assuming the condition is true
16. Taking true branch
167 -1) == 1)
168 {
169 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
170 _M_destroy();
17. Calling '_Sp_counted_base::_M_destroy'
19. Returning; memory was released
171 }
172 }
173 }
174
175 void
176 _M_weak_add_ref() noexcept
177 { __gnu_cxx::__atomic_add_dispatch(&_M_weak_count, 1); }
178
179 void
180 _M_weak_release() noexcept
181 {
182 // Be race-detector-friendly. For more info see bits/c++config.
183 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
184 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count, -1) == 1)
185 {
186 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
187 if (_Mutex_base<_Lp>::_S_need_barriers)
188 {
189 // See _M_release(),
190 // destroy() must observe results of dispose()
191 __atomic_thread_fence (__ATOMIC_ACQ_REL);
192 }
193 _M_destroy();
194 }
195 }
196
197 long
198 _M_get_use_count() const noexcept
199 {
200 // No memory barrier is used here so there is no synchronization
201 // with other threads.
202 return __atomic_load_n(&_M_use_count, __ATOMIC_RELAXED);
203 }
204
205 private:
206 _Sp_counted_base(_Sp_counted_base const&) = delete;
207 _Sp_counted_base& operator=(_Sp_counted_base const&) = delete;
208
209 _Atomic_word _M_use_count; // #shared
210 _Atomic_word _M_weak_count; // #weak + (#shared != 0)
211 };
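The member comments above encode the invariant _M_weak_count == #weak + (#shared != 0). A small sketch tracing it through the public API; the counts in the comments follow that invariant:

    #include <cassert>
    #include <memory>

    int main() {
      auto sp = std::make_shared<int>(0); // use 1, weak 1
      std::weak_ptr<int> w = sp;          // use 1, weak 2
      auto sp2 = sp;                      // use 2, weak 2
      sp2.reset();                        // use 1, weak 2
      sp.reset();        // use 0: _M_dispose() runs, weak drops to 1
      assert(w.expired());
    }  // w destroyed: weak 0, _M_destroy() frees the control block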
212
213 template<>
214 inline void
215 _Sp_counted_base<_S_single>::
216 _M_add_ref_lock()
217 {
218 if (_M_use_count == 0)
219 __throw_bad_weak_ptr();
220 ++_M_use_count;
221 }
222
223 template<>
224 inline void
225 _Sp_counted_base<_S_mutex>::
226 _M_add_ref_lock()
227 {
228 __gnu_cxx::__scoped_lock sentry(*this);
229 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
230 {
231 _M_use_count = 0;
232 __throw_bad_weak_ptr();
233 }
234 }
235
236 template<>
237 inline void
238 _Sp_counted_base<_S_atomic>::
239 _M_add_ref_lock()
240 {
241 // Perform lock-free add-if-not-zero operation.
242 _Atomic_word __count = _M_get_use_count();
243 do
244 {
245 if (__count == 0)
246 __throw_bad_weak_ptr();
247 // Replace the current counter value with the old value + 1, as
248 // long as it's not changed meanwhile.
249 }
250 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
251 true, __ATOMIC_ACQ_REL,
252 __ATOMIC_RELAXED));
253 }
254
255 template<>
256 inline bool
257 _Sp_counted_base<_S_single>::
258 _M_add_ref_lock_nothrow()
259 {
260 if (_M_use_count == 0)
261 return false;
262 ++_M_use_count;
263 return true;
264 }
265
266 template<>
267 inline bool
268 _Sp_counted_base<_S_mutex>::
269 _M_add_ref_lock_nothrow()
270 {
271 __gnu_cxx::__scoped_lock sentry(*this);
272 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
273 {
274 _M_use_count = 0;
275 return false;
276 }
277 return true;
278 }
279
280 template<>
281 inline bool
282 _Sp_counted_base<_S_atomic>::
283 _M_add_ref_lock_nothrow()
284 {
285 // Perform lock-free add-if-not-zero operation.
286 _Atomic_word __count = _M_get_use_count();
287 do
288 {
289 if (__count == 0)
290 return false;
291 // Replace the current counter value with the old value + 1, as
292 // long as it's not changed meanwhile.
293 }
294 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
295 true, __ATOMIC_ACQ_REL,
296 __ATOMIC_RELAXED));
297 return true;
298 }
299
300 template<>
301 inline void
302 _Sp_counted_base<_S_single>::_M_add_ref_copy()
303 { ++_M_use_count; }
304
305 template<>
306 inline void
307 _Sp_counted_base<_S_single>::_M_release() noexcept
308 {
309 if (--_M_use_count == 0)
310 {
311 _M_dispose();
312 if (--_M_weak_count == 0)
313 _M_destroy();
314 }
315 }
316
317 template<>
318 inline void
319 _Sp_counted_base<_S_single>::_M_weak_add_ref() noexcept
320 { ++_M_weak_count; }
321
322 template<>
323 inline void
324 _Sp_counted_base<_S_single>::_M_weak_release() noexcept
325 {
326 if (--_M_weak_count == 0)
327 _M_destroy();
328 }
329
330 template<>
331 inline long
332 _Sp_counted_base<_S_single>::_M_get_use_count() const noexcept
333 { return _M_use_count; }
334
335
336 // Forward declarations.
337 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
338 class __shared_ptr;
339
340 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
341 class __weak_ptr;
342
343 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
344 class __enable_shared_from_this;
345
346 template<typename _Tp>
347 class shared_ptr;
348
349 template<typename _Tp>
350 class weak_ptr;
351
352 template<typename _Tp>
353 struct owner_less;
354
355 template<typename _Tp>
356 class enable_shared_from_this;
357
358 template<_Lock_policy _Lp = __default_lock_policy>
359 class __weak_count;
360
361 template<_Lock_policy _Lp = __default_lock_policy>
362 class __shared_count;
363
364
365 // Counted ptr with no deleter or allocator support
366 template<typename _Ptr, _Lock_policy _Lp>
367 class _Sp_counted_ptr final : public _Sp_counted_base<_Lp>
368 {
369 public:
370 explicit
371 _Sp_counted_ptr(_Ptr __p) noexcept
372 : _M_ptr(__p) { }
373
374 virtual void
375 _M_dispose() noexcept
376 { delete _M_ptr; }
377
378 virtual void
379 _M_destroy() noexcept
380 { delete this; }
381
382 virtual void*
383 _M_get_deleter(const std::type_info&) noexcept
384 { return nullptr; }
385
386 _Sp_counted_ptr(const _Sp_counted_ptr&) = delete;
387 _Sp_counted_ptr& operator=(const _Sp_counted_ptr&) = delete;
388
389 private:
390 _Ptr _M_ptr;
391 };
392
393 template<>
394 inline void
395 _Sp_counted_ptr<nullptr_t, _S_single>::_M_dispose() noexcept { }
396
397 template<>
398 inline void
399 _Sp_counted_ptr<nullptr_t, _S_mutex>::_M_dispose() noexcept { }
400
401 template<>
402 inline void
403 _Sp_counted_ptr<nullptr_t, _S_atomic>::_M_dispose() noexcept { }
404
405 template<int _Nm, typename _Tp,
406 bool __use_ebo = !__is_final(_Tp) && __is_empty(_Tp)>
407 struct _Sp_ebo_helper;
408
409 /// Specialization using EBO.
410 template<int _Nm, typename _Tp>
411 struct _Sp_ebo_helper<_Nm, _Tp, true> : private _Tp
412 {
413 explicit _Sp_ebo_helper(const _Tp& __tp) : _Tp(__tp) { }
414 explicit _Sp_ebo_helper(_Tp&& __tp) : _Tp(std::move(__tp)) { }
415
416 static _Tp&
417 _S_get(_Sp_ebo_helper& __eboh) { return static_cast<_Tp&>(__eboh); }
418 };
419
420 /// Specialization not using EBO.
421 template<int _Nm, typename _Tp>
422 struct _Sp_ebo_helper<_Nm, _Tp, false>
423 {
424 explicit _Sp_ebo_helper(const _Tp& __tp) : _M_tp(__tp) { }
425 explicit _Sp_ebo_helper(_Tp&& __tp) : _M_tp(std::move(__tp)) { }
426
427 static _Tp&
428 _S_get(_Sp_ebo_helper& __eboh)
429 { return __eboh._M_tp; }
430
431 private:
432 _Tp _M_tp;
433 };
434
435 // Support for custom deleter and/or allocator
436 template<typename _Ptr, typename _Deleter, typename _Alloc, _Lock_policy _Lp>
437 class _Sp_counted_deleter final : public _Sp_counted_base<_Lp>
438 {
439 class _Impl : _Sp_ebo_helper<0, _Deleter>, _Sp_ebo_helper<1, _Alloc>
440 {
441 typedef _Sp_ebo_helper<0, _Deleter> _Del_base;
442 typedef _Sp_ebo_helper<1, _Alloc> _Alloc_base;
443
444 public:
445 _Impl(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
446 : _M_ptr(__p), _Del_base(std::move(__d)), _Alloc_base(__a)
447 { }
448
449 _Deleter& _M_del() noexcept { return _Del_base::_S_get(*this); }
450 _Alloc& _M_alloc() noexcept { return _Alloc_base::_S_get(*this); }
451
452 _Ptr _M_ptr;
453 };
454
455 public:
456 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_deleter>;
457
458 // __d(__p) must not throw.
459 _Sp_counted_deleter(_Ptr __p, _Deleter __d) noexcept
460 : _M_impl(__p, std::move(__d), _Alloc()) { }
461
462 // __d(__p) must not throw.
463 _Sp_counted_deleter(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
464 : _M_impl(__p, std::move(__d), __a) { }
465
466 ~_Sp_counted_deleter() noexcept { }
467
468 virtual void
469 _M_dispose() noexcept
470 { _M_impl._M_del()(_M_impl._M_ptr); }
471
472 virtual void
473 _M_destroy() noexcept
474 {
475 __allocator_type __a(_M_impl._M_alloc());
476 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
477 this->~_Sp_counted_deleter();
478 }
479
480 virtual void*
481 _M_get_deleter(const std::type_info& __ti) noexcept
482 {
483#if __cpp_rtti
484 // _GLIBCXX_RESOLVE_LIB_DEFECTS
485 // 2400. shared_ptr's get_deleter() should use addressof()
486 return __ti == typeid(_Deleter)
487 ? std::__addressof(_M_impl._M_del())
488 : nullptr;
489#else
490 return nullptr;
491#endif
492 }
493
494 private:
495 _Impl _M_impl;
496 };
497
498 // helpers for make_shared / allocate_shared
499
500 struct _Sp_make_shared_tag { };
501
502 template<typename _Tp, typename _Alloc, _Lock_policy _Lp>
503 class _Sp_counted_ptr_inplace final : public _Sp_counted_base<_Lp>
504 {
505 class _Impl : _Sp_ebo_helper<0, _Alloc>
506 {
507 typedef _Sp_ebo_helper<0, _Alloc> _A_base;
508
509 public:
510 explicit _Impl(_Alloc __a) noexcept : _A_base(__a) { }
511
512 _Alloc& _M_alloc() noexcept { return _A_base::_S_get(*this); }
513
514 __gnu_cxx::__aligned_buffer<_Tp> _M_storage;
515 };
516
517 public:
518 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_ptr_inplace>;
519
520 template<typename... _Args>
521 _Sp_counted_ptr_inplace(_Alloc __a, _Args&&... __args)
522 : _M_impl(__a)
523 {
524 // _GLIBCXX_RESOLVE_LIB_DEFECTS
525 // 2070. allocate_shared should use allocator_traits<A>::construct
526 allocator_traits<_Alloc>::construct(__a, _M_ptr(),
527 std::forward<_Args>(__args)...); // might throw
528 }
529
530 ~_Sp_counted_ptr_inplace() noexcept { }
531
532 virtual void
533 _M_dispose() noexcept
534 {
535 allocator_traits<_Alloc>::destroy(_M_impl._M_alloc(), _M_ptr());
536 }
537
538 // Override because the allocator needs to know the dynamic type
539 virtual void
540 _M_destroy() noexcept
541 {
542 __allocator_type __a(_M_impl._M_alloc());
543 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
544 this->~_Sp_counted_ptr_inplace();
545 }
546
547 // Sneaky trick so __shared_ptr can get the managed pointer
548 virtual void*
549 _M_get_deleter(const std::type_info& __ti) noexcept
550 {
551#if __cpp_rtti
552 if (__ti == typeid(_Sp_make_shared_tag))
553 return const_cast<typename remove_cv<_Tp>::type*>(_M_ptr());
554#endif
555 return nullptr;
556 }
557
558 private:
559 _Tp* _M_ptr() noexcept { return _M_impl._M_storage._M_ptr(); }
560
561 _Impl _M_impl;
562 };
563
564 // The default deleter for shared_ptr<T[]> and shared_ptr<T[N]>.
565 struct __sp_array_delete
566 {
567 template<typename _Yp>
568 void operator()(_Yp* __p) const { delete[] __p; }
569 };
570
571 template<_Lock_policy _Lp>
572 class __shared_count
573 {
574 public:
575 constexpr __shared_count() noexcept : _M_pi(0)
576 { }
577
578 template<typename _Ptr>
579 explicit
580 __shared_count(_Ptr __p) : _M_pi(0)
581 {
582 __try
583 {
584 _M_pi = new _Sp_counted_ptr<_Ptr, _Lp>(__p);
585 }
586 __catch(...)
587 {
588 delete __p;
589 __throw_exception_again;
590 }
591 }
592
593 template<typename _Ptr>
594 __shared_count(_Ptr __p, /* is_array = */ false_type)
595 : __shared_count(__p)
596 { }
597
598 template<typename _Ptr>
599 __shared_count(_Ptr __p, /* is_array = */ true_type)
600 : __shared_count(__p, __sp_array_delete{}, allocator<void>())
601 { }
602
603 template<typename _Ptr, typename _Deleter>
604 __shared_count(_Ptr __p, _Deleter __d)
605 : __shared_count(__p, std::move(__d), allocator<void>())
606 { }
607
608 template<typename _Ptr, typename _Deleter, typename _Alloc>
609 __shared_count(_Ptr __p, _Deleter __d, _Alloc __a) : _M_pi(0)
610 {
611 typedef _Sp_counted_deleter<_Ptr, _Deleter, _Alloc, _Lp> _Sp_cd_type;
612 __try
613 {
614 typename _Sp_cd_type::__allocator_type __a2(__a);
615 auto __guard = std::__allocate_guarded(__a2);
616 _Sp_cd_type* __mem = __guard.get();
617 ::new (__mem) _Sp_cd_type(__p, std::move(__d), std::move(__a));
618 _M_pi = __mem;
619 __guard = nullptr;
620 }
621 __catch(...)
622 {
623 __d(__p); // Call _Deleter on __p.
624 __throw_exception_again;
625 }
626 }
627
628 template<typename _Tp, typename _Alloc, typename... _Args>
629 __shared_count(_Sp_make_shared_tag, _Tp*, const _Alloc& __a,
630 _Args&&... __args)
631 : _M_pi(0)
632 {
633 typedef _Sp_counted_ptr_inplace<_Tp, _Alloc, _Lp> _Sp_cp_type;
634 typename _Sp_cp_type::__allocator_type __a2(__a);
635 auto __guard = std::__allocate_guarded(__a2);
636 _Sp_cp_type* __mem = __guard.get();
637 ::new (__mem) _Sp_cp_type(std::move(__a),
638 std::forward<_Args>(__args)...);
639 _M_pi = __mem;
640 __guard = nullptr;
641 }
642
643#if _GLIBCXX_USE_DEPRECATED
644 // Special case for auto_ptr<_Tp> to provide the strong guarantee.
645 template<typename _Tp>
646 explicit
647 __shared_count(std::auto_ptr<_Tp>&& __r);
648#endif
649
650 // Special case for unique_ptr<_Tp,_Del> to provide the strong guarantee.
651 template<typename _Tp, typename _Del>
652 explicit
653 __shared_count(std::unique_ptr<_Tp, _Del>&& __r) : _M_pi(0)
654 {
655 // _GLIBCXX_RESOLVE_LIB_DEFECTS
656 // 2415. Inconsistency between unique_ptr and shared_ptr
657 if (__r.get() == nullptr)
658 return;
659
660 using _Ptr = typename unique_ptr<_Tp, _Del>::pointer;
661 using _Del2 = typename conditional<is_reference<_Del>::value,
662 reference_wrapper<typename remove_reference<_Del>::type>,
663 _Del>::type;
664 using _Sp_cd_type
665 = _Sp_counted_deleter<_Ptr, _Del2, allocator<void>, _Lp>;
666 using _Alloc = allocator<_Sp_cd_type>;
667 using _Alloc_traits = allocator_traits<_Alloc>;
668 _Alloc __a;
669 _Sp_cd_type* __mem = _Alloc_traits::allocate(__a, 1);
670 _Alloc_traits::construct(__a, __mem, __r.release(),
671 __r.get_deleter()); // non-throwing
672 _M_pi = __mem;
673 }
674
675 // Throw bad_weak_ptr when __r._M_get_use_count() == 0.
676 explicit __shared_count(const __weak_count<_Lp>& __r);
677
678 // Does not throw if __r._M_get_use_count() == 0, caller must check.
679 explicit __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t);
680
681 ~__shared_count() noexcept
682 {
683 if (_M_pi != nullptr)
684 _M_pi->_M_release();
685 }
686
687 __shared_count(const __shared_count& __r) noexcept
688 : _M_pi(__r._M_pi)
689 {
690 if (_M_pi != 0)
691 _M_pi->_M_add_ref_copy();
692 }
693
694 __shared_count&
695 operator=(const __shared_count& __r) noexcept
696 {
697 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
698 if (__tmp != _M_pi)
7. Taking true branch
31. Taking true branch
699 {
700 if (__tmp != 0)
8. Assuming '__tmp' is equal to null
9. Taking false branch
32. Assuming '__tmp' is equal to null
33. Taking false branch
701 __tmp->_M_add_ref_copy();
702 if (_M_pi != 0)
10. Taking true branch
34. Taking true branch
703 _M_pi->_M_release();
11. Calling '_Sp_counted_base::_M_release'
20. Returning; memory was released
35. Calling '_Sp_counted_base::_M_release'
704 _M_pi = __tmp;
705 }
706 return *this;
707 }
708
709 void
710 _M_swap(__shared_count& __r) noexcept
711 {
712 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
713 __r._M_pi = _M_pi;
714 _M_pi = __tmp;
715 }
716
717 long
718 _M_get_use_count() const noexcept
719 { return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
720
721 bool
722 _M_unique() const noexcept
723 { return this->_M_get_use_count() == 1; }
724
725 void*
726 _M_get_deleter(const std::type_info& __ti) const noexcept
727 { return _M_pi ? _M_pi->_M_get_deleter(__ti) : nullptr; }
728
729 bool
730 _M_less(const __shared_count& __rhs) const noexcept
731 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
732
733 bool
734 _M_less(const __weak_count<_Lp>& __rhs) const noexcept
735 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
736
737 // Friend function injected into enclosing namespace and found by ADL
738 friend inline bool
739 operator==(const __shared_count& __a, const __shared_count& __b) noexcept
740 { return __a._M_pi == __b._M_pi; }
741
742 private:
743 friend class __weak_count<_Lp>;
744
745 _Sp_counted_base<_Lp>* _M_pi;
746 };
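As a hypothetical simplified model (Counted and Handle are illustrative, not the library code) of the assignment discipline in operator= above: retain the source before releasing the target, so self-assignment and aliased sources stay safe. The analyzer's steps 7-20 and 31-35 walk exactly this release path.

    struct Counted {
      int refs;
      Counted() : refs(1) {}
      void retain() { ++refs; }
      void release() { if (--refs == 0) delete this; }
    };

    struct Handle {
      Counted* pi;
      explicit Handle(Counted* p = nullptr) : pi(p) {}
      Handle(const Handle& r) : pi(r.pi) { if (pi) pi->retain(); }
      ~Handle() { if (pi) pi->release(); }
      Handle& operator=(const Handle& r) {
        Counted* tmp = r.pi;
        if (tmp != pi) {
          if (tmp) tmp->retain(); // add-ref first (cf. _M_add_ref_copy)
          if (pi) pi->release();  // then drop the old count (cf. _M_release)
          pi = tmp;
        }
        return *this;
      }
    };

    int main() {
      Handle a(new Counted), b(new Counted);
      b = a; // b's original Counted is freed here, exactly once
      b = b; // self-assignment is a no-op thanks to the tmp != pi guard
    }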
747
748
749 template<_Lock_policy _Lp>
750 class __weak_count
751 {
752 public:
753 constexpr __weak_count() noexcept : _M_pi(nullptr)
754 { }
755
756 __weak_count(const __shared_count<_Lp>& __r) noexcept
757 : _M_pi(__r._M_pi)
758 {
759 if (_M_pi != nullptr)
760 _M_pi->_M_weak_add_ref();
761 }
762
763 __weak_count(const __weak_count& __r) noexcept
764 : _M_pi(__r._M_pi)
765 {
766 if (_M_pi != nullptr)
767 _M_pi->_M_weak_add_ref();
768 }
769
770 __weak_count(__weak_count&& __r) noexcept
771 : _M_pi(__r._M_pi)
772 { __r._M_pi = nullptr; }
773
774 ~__weak_count() noexcept
775 {
776 if (_M_pi != nullptr)
777 _M_pi->_M_weak_release();
778 }
779
780 __weak_count&
781 operator=(const __shared_count<_Lp>& __r) noexcept
782 {
783 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
784 if (__tmp != nullptr)
785 __tmp->_M_weak_add_ref();
786 if (_M_pi != nullptr)
787 _M_pi->_M_weak_release();
788 _M_pi = __tmp;
789 return *this;
790 }
791
792 __weak_count&
793 operator=(const __weak_count& __r) noexcept
794 {
795 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
796 if (__tmp != nullptr)
797 __tmp->_M_weak_add_ref();
798 if (_M_pi != nullptr)
799 _M_pi->_M_weak_release();
800 _M_pi = __tmp;
801 return *this;
802 }
803
804 __weak_count&
805 operator=(__weak_count&& __r) noexcept
806 {
807 if (_M_pi != nullptr)
808 _M_pi->_M_weak_release();
809 _M_pi = __r._M_pi;
810 __r._M_pi = nullptr;
811 return *this;
812 }
813
814 void
815 _M_swap(__weak_count& __r) noexcept
816 {
817 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
818 __r._M_pi = _M_pi;
819 _M_pi = __tmp;
820 }
821
822 long
823 _M_get_use_count() const noexcept
824 { return _M_pi != nullptr ? _M_pi->_M_get_use_count() : 0; }
825
826 bool
827 _M_less(const __weak_count& __rhs) const noexcept
828 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
829
830 bool
831 _M_less(const __shared_count<_Lp>& __rhs) const noexcept
832 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
833
834 // Friend function injected into enclosing namespace and found by ADL
835 friend inline bool
836 operator==(const __weak_count& __a, const __weak_count& __b) noexcept
837 { return __a._M_pi == __b._M_pi; }
838
839 private:
840 friend class __shared_count<_Lp>;
841
842 _Sp_counted_base<_Lp>* _M_pi;
843 };
844
845 // Now that __weak_count is defined we can define this constructor:
846 template<_Lock_policy _Lp>
847 inline
848 __shared_count<_Lp>::__shared_count(const __weak_count<_Lp>& __r)
849 : _M_pi(__r._M_pi)
850 {
851 if (_M_pi != nullptr)
852 _M_pi->_M_add_ref_lock();
853 else
854 __throw_bad_weak_ptr();
855 }
856
857 // Now that __weak_count is defined we can define this constructor:
858 template<_Lock_policy _Lp>
859 inline
860 __shared_count<_Lp>::
861 __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t)
862 : _M_pi(__r._M_pi)
863 {
864 if (_M_pi != nullptr)
865 if (!_M_pi->_M_add_ref_lock_nothrow())
866 _M_pi = nullptr;
867 }
868
869#define __cpp_lib_shared_ptr_arrays 201603
870
871 // Helper traits for shared_ptr of array:
872
873 // A pointer type Y* is said to be compatible with a pointer type T* when
874 // either Y* is convertible to T* or Y is U[N] and T is U cv [].
875 template<typename _Yp_ptr, typename _Tp_ptr>
876 struct __sp_compatible_with
877 : false_type
878 { };
879
880 template<typename _Yp, typename _Tp>
881 struct __sp_compatible_with<_Yp*, _Tp*>
882 : is_convertible<_Yp*, _Tp*>::type
883 { };
884
885 template<typename _Up, size_t _Nm>
886 struct __sp_compatible_with<_Up(*)[_Nm], _Up(*)[]>
887 : true_type
888 { };
889
890 template<typename _Up, size_t _Nm>
891 struct __sp_compatible_with<_Up(*)[_Nm], const _Up(*)[]>
892 : true_type
893 { };
894
895 template<typename _Up, size_t _Nm>
896 struct __sp_compatible_with<_Up(*)[_Nm], volatile _Up(*)[]>
897 : true_type
898 { };
899
900 template<typename _Up, size_t _Nm>
901 struct __sp_compatible_with<_Up(*)[_Nm], const volatile _Up(*)[]>
902 : true_type
903 { };
904
905 // Test conversion from Y(*)[N] to U(*)[N] without forming invalid type Y[N].
906 template<typename _Up, size_t _Nm, typename _Yp, typename = void>
907 struct __sp_is_constructible_arrN
908 : false_type
909 { };
910
911 template<typename _Up, size_t _Nm, typename _Yp>
912 struct __sp_is_constructible_arrN<_Up, _Nm, _Yp, __void_t<_Yp[_Nm]>>
913 : is_convertible<_Yp(*)[_Nm], _Up(*)[_Nm]>::type
914 { };
915
916 // Test conversion from Y(*)[] to U(*)[] without forming invalid type Y[].
917 template<typename _Up, typename _Yp, typename = void>
918 struct __sp_is_constructible_arr
919 : false_type
920 { };
921
922 template<typename _Up, typename _Yp>
923 struct __sp_is_constructible_arr<_Up, _Yp, __void_t<_Yp[]>>
924 : is_convertible<_Yp(*)[], _Up(*)[]>::type
925 { };
926
927 // Trait to check if shared_ptr<T> can be constructed from Y*.
928 template<typename _Tp, typename _Yp>
929 struct __sp_is_constructible;
930
931 // When T is U[N], Y(*)[N] shall be convertible to T*;
932 template<typename _Up, size_t _Nm, typename _Yp>
933 struct __sp_is_constructible<_Up[_Nm], _Yp>
934 : __sp_is_constructible_arrN<_Up, _Nm, _Yp>::type
935 { };
936
937 // when T is U[], Y(*)[] shall be convertible to T*;
938 template<typename _Up, typename _Yp>
939 struct __sp_is_constructible<_Up[], _Yp>
940 : __sp_is_constructible_arr<_Up, _Yp>::type
941 { };
942
943 // otherwise, Y* shall be convertible to T*.
944 template<typename _Tp, typename _Yp>
945 struct __sp_is_constructible
946 : is_convertible<_Yp*, _Tp*>::type
947 { };
948
949
950 // Define operator* and operator-> for shared_ptr<T>.
951 template<typename _Tp, _Lock_policy _Lp,
952 bool = is_array<_Tp>::value, bool = is_void<_Tp>::value>
953 class __shared_ptr_access
954 {
955 public:
956 using element_type = _Tp;
957
958 element_type&
959 operator*() const noexcept
960 {
961 __glibcxx_assert(_M_get() != nullptr);
962 return *_M_get();
963 }
964
965 element_type*
966 operator->() const noexcept
967 {
968 _GLIBCXX_DEBUG_PEDASSERT(_M_get() != nullptr);
969 return _M_get();
970 }
971
972 private:
973 element_type*
974 _M_get() const noexcept
975 { return static_cast<const __shared_ptr<_Tp, _Lp>*>(this)->get(); }
976 };
977
978 // Define operator-> for shared_ptr<cv void>.
979 template<typename _Tp, _Lock_policy _Lp>
980 class __shared_ptr_access<_Tp, _Lp, false, true>
981 {
982 public:
983 using element_type = _Tp;
984
985 element_type*
986 operator->() const noexcept
987 {
988 auto __ptr = static_cast<const __shared_ptr<_Tp, _Lp>*>(this)->get();
989 _GLIBCXX_DEBUG_PEDASSERT(__ptr != nullptr);
990 return __ptr;
991 }
992 };
993
994 // Define operator[] for shared_ptr<T[]> and shared_ptr<T[N]>.
995 template<typename _Tp, _Lock_policy _Lp>
996 class __shared_ptr_access<_Tp, _Lp, true, false>
997 {
998 public:
999 using element_type = typename remove_extent<_Tp>::type;
1000
1001#if __cplusplus <= 201402L
1002 [[__deprecated__("shared_ptr<T[]>::operator* is absent from C++17")]]
1003 element_type&
1004 operator*() const noexcept
1005 {
1006 __glibcxx_assert(_M_get() != nullptr);
1007 return *_M_get();
1008 }
1009
1010 [[__deprecated__("shared_ptr<T[]>::operator-> is absent from C++17")]]
1011 element_type*
1012 operator->() const noexcept
1013 {
1014 _GLIBCXX_DEBUG_PEDASSERT(_M_get() != nullptr);
1015 return _M_get();
1016 }
1017#endif
1018
1019 element_type&
1020 operator[](ptrdiff_t __i) const
1021 {
1022 __glibcxx_assert(_M_get() != nullptr);
1023 __glibcxx_assert(!extent<_Tp>::value || __i < extent<_Tp>::value);
1024 return _M_get()[__i];
1025 }
1026
1027 private:
1028 element_type*
1029 _M_get() const noexcept
1030 { return static_cast<const __shared_ptr<_Tp, _Lp>*>(this)->get(); }
1031 };
1032
1033 template<typename _Tp, _Lock_policy _Lp>
1034 class __shared_ptr
1035 : public __shared_ptr_access<_Tp, _Lp>
1036 {
1037 public:
1038 using element_type = typename remove_extent<_Tp>::type;
1039
1040 private:
1041 // Constraint for taking ownership of a pointer of type _Yp*:
1042 template<typename _Yp>
1043 using _SafeConv
1044 = typename enable_if<__sp_is_constructible<_Tp, _Yp>::value>::type;
1045
1046 // Constraint for construction from shared_ptr and weak_ptr:
1047 template<typename _Yp, typename _Res = void>
1048 using _Compatible = typename
1049 enable_if<__sp_compatible_with<_Yp*, _Tp*>::value, _Res>::type;
1050
1051 // Constraint for assignment from shared_ptr and weak_ptr:
1052 template<typename _Yp>
1053 using _Assignable = _Compatible<_Yp, __shared_ptr&>;
1054
1055 // Constraint for construction from unique_ptr:
1056 template<typename _Yp, typename _Del, typename _Res = void,
1057 typename _Ptr = typename unique_ptr<_Yp, _Del>::pointer>
1058 using _UniqCompatible = typename enable_if<__and_<
1059 __sp_compatible_with<_Yp*, _Tp*>, is_convertible<_Ptr, element_type*>
1060 >::value, _Res>::type;
1061
1062 // Constraint for assignment from unique_ptr:
1063 template<typename _Yp, typename _Del>
1064 using _UniqAssignable = _UniqCompatible<_Yp, _Del, __shared_ptr&>;
1065
1066 public:
1067
1068#if __cplusplus > 201402L
1069 using weak_type = __weak_ptr<_Tp, _Lp>;
1070#endif
1071
1072 constexpr __shared_ptr() noexcept
1073 : _M_ptr(0), _M_refcount()
1074 { }
1075
1076 template<typename _Yp, typename = _SafeConv<_Yp>>
1077 explicit
1078 __shared_ptr(_Yp* __p)
1079 : _M_ptr(__p), _M_refcount(__p, typename is_array<_Tp>::type())
1080 {
1081 static_assert( !is_void<_Yp>::value, "incomplete type" );
1082 static_assert( sizeof(_Yp) > 0, "incomplete type" );
1083 _M_enable_shared_from_this_with(__p);
1084 }
1085
1086 template<typename _Yp, typename _Deleter, typename = _SafeConv<_Yp>>
1087 __shared_ptr(_Yp* __p, _Deleter __d)
1088 : _M_ptr(__p), _M_refcount(__p, std::move(__d))
1089 {
1090 static_assert(__is_invocable<_Deleter&, _Yp*&>::value,
1091 "deleter expression d(p) is well-formed");
1092 _M_enable_shared_from_this_with(__p);
1093 }
1094
1095 template<typename _Yp, typename _Deleter, typename _Alloc,
1096 typename = _SafeConv<_Yp>>
1097 __shared_ptr(_Yp* __p, _Deleter __d, _Alloc __a)
1098 : _M_ptr(__p), _M_refcount(__p, std::move(__d), std::move(__a))
1099 {
1100 static_assert(__is_invocable<_Deleter&, _Yp*&>::value,
1101 "deleter expression d(p) is well-formed");
1102 _M_enable_shared_from_this_with(__p);
1103 }
1104
1105 template<typename _Deleter>
1106 __shared_ptr(nullptr_t __p, _Deleter __d)
1107 : _M_ptr(0), _M_refcount(__p, std::move(__d))
1108 { }
1109
1110 template<typename _Deleter, typename _Alloc>
1111 __shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
1112 : _M_ptr(0), _M_refcount(__p, std::move(__d), std::move(__a))
1113 { }
1114
1115 template<typename _Yp>
1116 __shared_ptr(const __shared_ptr<_Yp, _Lp>& __r,
1117 element_type* __p) noexcept
1118 : _M_ptr(__p), _M_refcount(__r._M_refcount) // never throws
1119 { }
1120
1121 __shared_ptr(const __shared_ptr&) noexcept = default;
1122 __shared_ptr& operator=(const __shared_ptr&) noexcept = default;
6. Calling copy assignment operator for '__shared_count'
21. Returning; memory was released
30. Calling copy assignment operator for '__shared_count'
1123 ~__shared_ptr() = default;
1124
1125 template<typename _Yp, typename = _Compatible<_Yp>>
1126 __shared_ptr(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1127 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
1128 { }
1129
1130 __shared_ptr(__shared_ptr&& __r) noexcept
1131 : _M_ptr(__r._M_ptr), _M_refcount()
1132 {
1133 _M_refcount._M_swap(__r._M_refcount);
1134 __r._M_ptr = 0;
1135 }
1136
1137 template<typename _Yp, typename = _Compatible<_Yp>>
1138 __shared_ptr(__shared_ptr<_Yp, _Lp>&& __r) noexcept
1139 : _M_ptr(__r._M_ptr), _M_refcount()
1140 {
1141 _M_refcount._M_swap(__r._M_refcount);
1142 __r._M_ptr = 0;
1143 }
1144
1145 template<typename _Yp, typename = _Compatible<_Yp>>
1146 explicit __shared_ptr(const __weak_ptr<_Yp, _Lp>& __r)
1147 : _M_refcount(__r._M_refcount) // may throw
1148 {
1149 // It is now safe to copy __r._M_ptr, as
1150 // _M_refcount(__r._M_refcount) did not throw.
1151 _M_ptr = __r._M_ptr;
1152 }
1153
1154 // If an exception is thrown this constructor has no effect.
1155 template<typename _Yp, typename _Del,
1156 typename = _UniqCompatible<_Yp, _Del>>
1157 __shared_ptr(unique_ptr<_Yp, _Del>&& __r)
1158 : _M_ptr(__r.get()), _M_refcount()
1159 {
1160 auto __raw = _S_raw_ptr(__r.get());
1161 _M_refcount = __shared_count<_Lp>(std::move(__r));
1162 _M_enable_shared_from_this_with(__raw);
1163 }
1164
1165#if __cplusplus <= 201402L && _GLIBCXX_USE_DEPRECATED
1166 protected:
1167 // If an exception is thrown this constructor has no effect.
1168 template<typename _Tp1, typename _Del,
1169 typename enable_if<__and_<
1170 __not_<is_array<_Tp>>, is_array<_Tp1>,
1171 is_convertible<typename unique_ptr<_Tp1, _Del>::pointer, _Tp*>
1172 >::value, bool>::type = true>
1173 __shared_ptr(unique_ptr<_Tp1, _Del>&& __r, __sp_array_delete)
1174 : _M_ptr(__r.get()), _M_refcount()
1175 {
1176 auto __raw = _S_raw_ptr(__r.get());
1177 _M_refcount = __shared_count<_Lp>(std::move(__r));
1178 _M_enable_shared_from_this_with(__raw);
1179 }
1180 public:
1181#endif
1182
1183#if _GLIBCXX_USE_DEPRECATED
1184 // Postcondition: use_count() == 1 and __r.get() == 0
1185 template<typename _Yp, typename = _Compatible<_Yp>>
1186 __shared_ptr(auto_ptr<_Yp>&& __r);
1187#endif
1188
1189 constexpr __shared_ptr(nullptr_t) noexcept : __shared_ptr() { }
1190
1191 template<typename _Yp>
1192 _Assignable<_Yp>
1193 operator=(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1194 {
1195 _M_ptr = __r._M_ptr;
1196 _M_refcount = __r._M_refcount; // __shared_count::op= doesn't throw
1197 return *this;
1198 }
1199
1200#if _GLIBCXX_USE_DEPRECATED
1201 template<typename _Yp>
1202 _Assignable<_Yp>
1203 operator=(auto_ptr<_Yp>&& __r)
1204 {
1205 __shared_ptr(std::move(__r)).swap(*this);
1206 return *this;
1207 }
1208#endif
1209
1210 __shared_ptr&
1211 operator=(__shared_ptr&& __r) noexcept
1212 {
1213 __shared_ptr(std::move(__r)).swap(*this);
1214 return *this;
1215 }
1216
1217 template<class _Yp>
1218 _Assignable<_Yp>
1219 operator=(__shared_ptr<_Yp, _Lp>&& __r) noexcept
1220 {
1221 __shared_ptr(std::move(__r)).swap(*this);
1222 return *this;
1223 }
1224
1225 template<typename _Yp, typename _Del>
1226 _UniqAssignable<_Yp, _Del>
1227 operator=(unique_ptr<_Yp, _Del>&& __r)
1228 {
1229 __shared_ptr(std::move(__r)).swap(*this);
1230 return *this;
1231 }
1232
1233 void
1234 reset() noexcept
1235 { __shared_ptr().swap(*this); }
1236
1237 template<typename _Yp>
1238 _SafeConv<_Yp>
1239 reset(_Yp* __p) // _Yp must be complete.
1240 {
1241 // Catch self-reset errors.
1242 __glibcxx_assert(__p == 0 || __p != _M_ptr);
1243 __shared_ptr(__p).swap(*this);
1244 }
1245
1246 template<typename _Yp, typename _Deleter>
1247 _SafeConv<_Yp>
1248 reset(_Yp* __p, _Deleter __d)
1249 { __shared_ptr(__p, std::move(__d)).swap(*this); }
1250
1251 template<typename _Yp, typename _Deleter, typename _Alloc>
1252 _SafeConv<_Yp>
1253 reset(_Yp* __p, _Deleter __d, _Alloc __a)
1254 { __shared_ptr(__p, std::move(__d), std::move(__a)).swap(*this); }
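Each reset overload above swaps *this with a freshly constructed temporary, so the previous ownership is dropped exactly once when the temporary dies. A minimal sketch:

    #include <memory>

    int main() {
      auto p = std::make_shared<int>(1);
      p.reset(new int(2)); // the old int(1) is destroyed here
      p.reset();           // p becomes empty; int(2) is destroyed
    }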
1255
1256 element_type*
1257 get() const noexcept
1258 { return _M_ptr; }
1259
1260 explicit operator bool() const // never throws
1261 { return _M_ptr == 0 ? false : true; }
1262
1263 bool
1264 unique() const noexcept
1265 { return _M_refcount._M_unique(); }
1266
1267 long
1268 use_count() const noexcept
1269 { return _M_refcount._M_get_use_count(); }
1270
1271 void
1272 swap(__shared_ptr<_Tp, _Lp>& __other) noexcept
1273 {
1274 std::swap(_M_ptr, __other._M_ptr);
1275 _M_refcount._M_swap(__other._M_refcount);
1276 }
1277
1278 template<typename _Tp1>
1279 bool
1280 owner_before(__shared_ptr<_Tp1, _Lp> const& __rhs) const noexcept
1281 { return _M_refcount._M_less(__rhs._M_refcount); }
1282
1283 template<typename _Tp1>
1284 bool
1285 owner_before(__weak_ptr<_Tp1, _Lp> const& __rhs) const noexcept
1286 { return _M_refcount._M_less(__rhs._M_refcount); }
1287
1288#if __cpp_rtti
1289 protected:
1290 // This constructor is non-standard, it is used by allocate_shared.
1291 template<typename _Alloc, typename... _Args>
1292 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1293 _Args&&... __args)
1294 : _M_ptr(), _M_refcount(__tag, (_Tp*)0, __a,
1295 std::forward<_Args>(__args)...)
1296 {
1297 // _M_ptr needs to point to the newly constructed object.
1298 // This relies on _Sp_counted_ptr_inplace::_M_get_deleter.
1299 void* __p = _M_refcount._M_get_deleter(typeid(__tag));
1300 _M_ptr = static_cast<_Tp*>(__p);
1301 _M_enable_shared_from_this_with(_M_ptr);
1302 }
1303#else
1304 template<typename _Alloc>
1305 struct _Deleter
1306 {
1307 void operator()(typename _Alloc::value_type* __ptr)
1308 {
1309 __allocated_ptr<_Alloc> __guard{ _M_alloc, __ptr };
1310 allocator_traits<_Alloc>::destroy(_M_alloc, __guard.get());
1311 }
1312 _Alloc _M_alloc;
1313 };
1314
1315 template<typename _Alloc, typename... _Args>
1316 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1317 _Args&&... __args)
1318 : _M_ptr(), _M_refcount()
1319 {
1320 typedef typename allocator_traits<_Alloc>::template
1321 rebind_traits<typename std::remove_cv<_Tp>::type> __traits;
1322 _Deleter<typename __traits::allocator_type> __del = { __a };
1323 auto __guard = std::__allocate_guarded(__del._M_alloc);
1324 auto __ptr = __guard.get();
1325 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1326 // 2070. allocate_shared should use allocator_traits<A>::construct
1327 __traits::construct(__del._M_alloc, __ptr,
1328 std::forward<_Args>(__args)...);
1329 __guard = nullptr;
1330 __shared_count<_Lp> __count(__ptr, __del, __del._M_alloc);
1331 _M_refcount._M_swap(__count);
1332 _M_ptr = __ptr;
1333 _M_enable_shared_from_this_with(_M_ptr);
1334 }
1335#endif
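Whichever branch is compiled, the contract is the same: allocate_shared places the object and its control block in a single allocation obtained from the user's allocator, then points _M_ptr at the in-place object. Observable through the public API, as a sketch (Widget is invented):

    #include <memory>

    struct Widget { int x; explicit Widget(int x) : x(x) {} };

    int main() {
        // One allocation holds both the Widget and its reference counts.
        auto p = std::allocate_shared<Widget>(std::allocator<Widget>(), 42);
        auto q = std::make_shared<Widget>(7);  // same layout, default allocator
        return p->x + q->x;
    }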
1336
1337 template<typename _Tp1, _Lock_policy _Lp1, typename _Alloc,
1338 typename... _Args>
1339 friend __shared_ptr<_Tp1, _Lp1>
1340 __allocate_shared(const _Alloc& __a, _Args&&... __args);
1341
1342 // This constructor is used by __weak_ptr::lock() and
1343 // shared_ptr::shared_ptr(const weak_ptr&, std::nothrow_t).
1344 __shared_ptr(const __weak_ptr<_Tp, _Lp>& __r, std::nothrow_t)
1345 : _M_refcount(__r._M_refcount, std::nothrow)
1346 {
1347 _M_ptr = _M_refcount._M_get_use_count() ? __r._M_ptr : nullptr;
1348 }
1349
1350 friend class __weak_ptr<_Tp, _Lp>;
1351
1352 private:
1353
1354 template<typename _Yp>
1355 using __esft_base_t = decltype(__enable_shared_from_this_base(
1356 std::declval<const __shared_count<_Lp>&>(),
1357 std::declval<_Yp*>()));
1358
1359 // Detect an accessible and unambiguous enable_shared_from_this base.
1360 template<typename _Yp, typename = void>
1361 struct __has_esft_base
1362 : false_type { };
1363
1364 template<typename _Yp>
1365 struct __has_esft_base<_Yp, __void_t<__esft_base_t<_Yp>>>
1366 : __not_<is_array<_Tp>> { }; // No enable shared_from_this for arrays
1367
1368 template<typename _Yp, typename _Yp2 = typename remove_cv<_Yp>::type>
1369 typename enable_if<__has_esft_base<_Yp2>::value>::type
1370 _M_enable_shared_from_this_with(_Yp* __p) noexcept
1371 {
1372 if (auto __base = __enable_shared_from_this_base(_M_refcount, __p))
1373 __base->_M_weak_assign(const_cast<_Yp2*>(__p), _M_refcount);
1374 }
1375
1376 template<typename _Yp, typename _Yp2 = typename remove_cv<_Yp>::type>
1377 typename enable_if<!__has_esft_base<_Yp2>::value>::type
1378 _M_enable_shared_from_this_with(_Yp*) noexcept
1379 { }
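The two overloads of _M_enable_shared_from_this_with are selected by __has_esft_base, a void_t-style detection trait: __esft_base_t<_Yp> is well-formed only when an accessible, unambiguous __enable_shared_from_this_base overload exists for _Yp*, and only then does the partial specialization fire. The same detection shape on ordinary user code, as a hedged C++11 sketch (has_hello and hello() are invented for illustration):

    #include <type_traits>
    #include <utility>

    // C++11-safe void_t (works around CWG 1558 on older compilers).
    template<typename...> struct make_void { using type = void; };
    template<typename... Ts> using void_t = typename make_void<Ts...>::type;

    // Well-formed only when T has a callable member hello().
    template<typename T>
    using hello_t = decltype(std::declval<T&>().hello());

    template<typename T, typename = void>
    struct has_hello : std::false_type {};

    template<typename T>
    struct has_hello<T, void_t<hello_t<T>>> : std::true_type {};

    struct A { void hello() {} };
    struct B {};

    static_assert(has_hello<A>::value, "A provides hello()");
    static_assert(!has_hello<B>::value, "B does not");

    int main() {}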
1380
1381 void*
1382 _M_get_deleter(const std::type_info& __ti) const noexcept
1383 { return _M_refcount._M_get_deleter(__ti); }
1384
1385 template<typename _Tp1>
1386 static _Tp1*
1387 _S_raw_ptr(_Tp1* __ptr)
1388 { return __ptr; }
1389
1390 template<typename _Tp1>
1391 static auto
1392 _S_raw_ptr(_Tp1 __ptr) -> decltype(std::__addressof(*__ptr))
1393 { return std::__addressof(*__ptr); }
1394
1395 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1396 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1397
1398 template<typename _Del, typename _Tp1, _Lock_policy _Lp1>
1399 friend _Del* get_deleter(const __shared_ptr<_Tp1, _Lp1>&) noexcept;
1400
1401 element_type* _M_ptr; // Contained pointer.
1402 __shared_count<_Lp> _M_refcount; // Reference counter.
1403 };
1404
1405
1406 // 20.7.2.2.7 shared_ptr comparisons
1407 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1408 inline bool
1409 operator==(const __shared_ptr<_Tp1, _Lp>& __a,
1410 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1411 { return __a.get() == __b.get(); }
1412
1413 template<typename _Tp, _Lock_policy _Lp>
1414 inline bool
1415 operator==(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1416 { return !__a; }
1417
1418 template<typename _Tp, _Lock_policy _Lp>
1419 inline bool
1420 operator==(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1421 { return !__a; }
1422
1423 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1424 inline bool
1425 operator!=(const __shared_ptr<_Tp1, _Lp>& __a,
1426 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1427 { return __a.get() != __b.get(); }
1428
1429 template<typename _Tp, _Lock_policy _Lp>
1430 inline bool
1431 operator!=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1432 { return (bool)__a; }
1433
1434 template<typename _Tp, _Lock_policy _Lp>
1435 inline bool
1436 operator!=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1437 { return (bool)__a; }
1438
1439 template<typename _Tp, typename _Up, _Lock_policy _Lp>
1440 inline bool
1441 operator<(const __shared_ptr<_Tp, _Lp>& __a,
1442 const __shared_ptr<_Up, _Lp>& __b) noexcept
1443 {
1444 using _Tp_elt = typename __shared_ptr<_Tp, _Lp>::element_type;
1445 using _Up_elt = typename __shared_ptr<_Up, _Lp>::element_type;
1446 using _Vp = typename common_type<_Tp_elt*, _Up_elt*>::type;
1447 return less<_Vp>()(__a.get(), __b.get());
1448 }
1449
1450 template<typename _Tp, _Lock_policy _Lp>
1451 inline bool
1452 operator<(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1453 {
1454 using _Tp_elt = typename __shared_ptr<_Tp, _Lp>::element_type;
1455 return less<_Tp_elt*>()(__a.get(), nullptr);
1456 }
1457
1458 template<typename _Tp, _Lock_policy _Lp>
1459 inline bool
1460 operator<(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1461 {
1462 using _Tp_elt = typename __shared_ptr<_Tp, _Lp>::element_type;
1463 return less<_Tp_elt*>()(nullptr, __a.get());
1464 }
1465
1466 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1467 inline bool
1468 operator<=(const __shared_ptr<_Tp1, _Lp>& __a,
1469 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1470 { return !(__b < __a); }
1471
1472 template<typename _Tp, _Lock_policy _Lp>
1473 inline bool
1474 operator<=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1475 { return !(nullptr < __a); }
1476
1477 template<typename _Tp, _Lock_policy _Lp>
1478 inline bool
1479 operator<=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1480 { return !(__a < nullptr); }
1481
1482 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1483 inline bool
1484 operator>(const __shared_ptr<_Tp1, _Lp>& __a,
1485 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1486 { return (__b < __a); }
1487
1488 template<typename _Tp, _Lock_policy _Lp>
1489 inline bool
1490 operator>(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1491 { return nullptr < __a; }
1492
1493 template<typename _Tp, _Lock_policy _Lp>
1494 inline bool
1495 operator>(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1496 { return __a < nullptr; }
1497
1498 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1499 inline bool
1500 operator>=(const __shared_ptr<_Tp1, _Lp>& __a,
1501 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1502 { return !(__a < __b); }
1503
1504 template<typename _Tp, _Lock_policy _Lp>
1505 inline bool
1506 operator>=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1507 { return !(__a < nullptr); }
1508
1509 template<typename _Tp, _Lock_policy _Lp>
1510 inline bool
1511 operator>=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1512 { return !(nullptr < __a); }
1513
1514 template<typename _Sp>
1515 struct _Sp_less : public binary_function<_Sp, _Sp, bool>
1516 {
1517 bool
1518 operator()(const _Sp& __lhs, const _Sp& __rhs) const noexcept
1519 {
1520 typedef typename _Sp::element_type element_type;
1521 return std::less<element_type*>()(__lhs.get(), __rhs.get());
1522 }
1523 };
1524
1525 template<typename _Tp, _Lock_policy _Lp>
1526 struct less<__shared_ptr<_Tp, _Lp>>
1527 : public _Sp_less<__shared_ptr<_Tp, _Lp>>
1528 { };
1529
1530 // 20.7.2.2.8 shared_ptr specialized algorithms.
1531 template<typename _Tp, _Lock_policy _Lp>
1532 inline void
1533 swap(__shared_ptr<_Tp, _Lp>& __a, __shared_ptr<_Tp, _Lp>& __b) noexcept
1534 { __a.swap(__b); }
1535
1536 // 20.7.2.2.9 shared_ptr casts
1537
1538 // The seemingly equivalent code:
1539 // shared_ptr<_Tp, _Lp>(static_cast<_Tp*>(__r.get()))
1540 // will eventually result in undefined behaviour, attempting to
1541 // delete the same object twice.
1542 /// static_pointer_cast
1543 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1544 inline __shared_ptr<_Tp, _Lp>
1545 static_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1546 {
1547 using _Sp = __shared_ptr<_Tp, _Lp>;
1548 return _Sp(__r, static_cast<typename _Sp::element_type*>(__r.get()));
1549 }
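The aliasing constructor used here, _Sp(__r, p), shares __r's control block rather than creating a second one, which is exactly what avoids the double delete the comment warns about. A minimal sketch against the public std::shared_ptr API (Base and Derived are invented stand-ins):

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    int main() {
        std::shared_ptr<Base> b = std::make_shared<Derived>();

        // Correct: shares b's control block; use_count() becomes 2.
        std::shared_ptr<Derived> d = std::static_pointer_cast<Derived>(b);

        // Broken (don't do this): a second control block owning the same
        // object, so it would be deleted twice.
        // std::shared_ptr<Derived> bad(static_cast<Derived*>(b.get()));

        return static_cast<int>(b.use_count());  // 2
    }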
1550
1551 // The seemingly equivalent code:
1552 // shared_ptr<_Tp, _Lp>(const_cast<_Tp*>(__r.get()))
1553 // will eventually result in undefined behaviour, attempting to
1554 // delete the same object twice.
1555 /// const_pointer_cast
1556 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1557 inline __shared_ptr<_Tp, _Lp>
1558 const_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1559 {
1560 using _Sp = __shared_ptr<_Tp, _Lp>;
1561 return _Sp(__r, const_cast<typename _Sp::element_type*>(__r.get()));
1562 }
1563
1564 // The seemingly equivalent code:
1565 // shared_ptr<_Tp, _Lp>(dynamic_cast<_Tp*>(__r.get()))
1566 // will eventually result in undefined behaviour, attempting to
1567 // delete the same object twice.
1568 /// dynamic_pointer_cast
1569 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1570 inline __shared_ptr<_Tp, _Lp>
1571 dynamic_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1572 {
1573 using _Sp = __shared_ptr<_Tp, _Lp>;
1574 if (auto* __p = dynamic_cast<typename _Sp::element_type*>(__r.get()))
1575 return _Sp(__r, __p);
1576 return _Sp();
1577 }
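Because a dynamic_cast can fail at run time, this overload returns an empty _Sp() instead of aliasing a null pointer into __r's control block. A sketch with invented types:

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Cat : Base {};
    struct Dog : Base {};

    int main() {
        std::shared_ptr<Base> b = std::make_shared<Cat>();
        auto cat = std::dynamic_pointer_cast<Cat>(b);  // non-null, shares b
        auto dog = std::dynamic_pointer_cast<Dog>(b);  // empty shared_ptr
        return (cat != nullptr) && (dog == nullptr);   // 1
    }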
1578
1579#if __cplusplus > 201402L
1580 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1581 inline __shared_ptr<_Tp, _Lp>
1582 reinterpret_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1583 {
1584 using _Sp = __shared_ptr<_Tp, _Lp>;
1585 return _Sp(__r, reinterpret_cast<typename _Sp::element_type*>(__r.get()));
1586 }
1587#endif
1588
1589 template<typename _Tp, _Lock_policy _Lp>
1590 class __weak_ptr
1591 {
1592 template<typename _Yp, typename _Res = void>
1593 using _Compatible = typename
1594 enable_if<__sp_compatible_with<_Yp*, _Tp*>::value, _Res>::type;
1595
1596 // Constraint for assignment from shared_ptr and weak_ptr:
1597 template<typename _Yp>
1598 using _Assignable = _Compatible<_Yp, __weak_ptr&>;
1599
1600 public:
1601 using element_type = typename remove_extent<_Tp>::type;
1602
1603 constexpr __weak_ptr() noexcept
1604 : _M_ptr(nullptr), _M_refcount()
1605 { }
1606
1607 __weak_ptr(const __weak_ptr&) noexcept = default;
1608
1609 ~__weak_ptr() = default;
1610
1611 // The "obvious" converting constructor implementation:
1612 //
1613 // template<typename _Tp1>
1614 // __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
1615 // : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws
1616 // { }
1617 //
1618 // has a serious problem.
1619 //
1620 // __r._M_ptr may already have been invalidated. The _M_ptr(__r._M_ptr)
1621 // conversion may require access to *__r._M_ptr (virtual inheritance).
1622 //
1623 // It is not possible to avoid spurious access violations since
1624 // in multithreaded programs __r._M_ptr may be invalidated at any point.
1625 template<typename _Yp, typename = _Compatible<_Yp>>
1626 __weak_ptr(const __weak_ptr<_Yp, _Lp>& __r) noexcept
1627 : _M_refcount(__r._M_refcount)
1628 { _M_ptr = __r.lock().get(); }
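As the comment above explains, the converting constructor cannot safely touch __r._M_ptr, so it copies the refcount first and then derives the pointer from lock(), which either pins the object or yields null. The user-visible effect, sketched with invented types:

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    int main() {
        std::weak_ptr<Base> wb;
        {
            auto d = std::make_shared<Derived>();
            std::weak_ptr<Derived> wd = d;
            wb = wd;         // converting copy: pointer obtained via lock()
        }                    // d (and the Derived) destroyed here
        return wb.expired(); // 1: lock() now yields an empty shared_ptr
    }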
1629
1630 template<typename _Yp, typename = _Compatible<_Yp>>
1631 __weak_ptr(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1632 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
1633 { }
1634
1635 __weak_ptr(__weak_ptr&& __r) noexcept
1636 : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
1637 { __r._M_ptr = nullptr; }
1638
1639 template<typename _Yp, typename = _Compatible<_Yp>>
1640 __weak_ptr(__weak_ptr<_Yp, _Lp>&& __r) noexcept
1641 : _M_ptr(__r.lock().get()), _M_refcount(std::move(__r._M_refcount))
1642 { __r._M_ptr = nullptr; }
1643
1644 __weak_ptr&
1645 operator=(const __weak_ptr& __r) noexcept = default;
1646
1647 template<typename _Yp>
1648 _Assignable<_Yp>
1649 operator=(const __weak_ptr<_Yp, _Lp>& __r) noexcept
1650 {
1651 _M_ptr = __r.lock().get();
1652 _M_refcount = __r._M_refcount;
1653 return *this;
1654 }
1655
1656 template<typename _Yp>
1657 _Assignable<_Yp>
1658 operator=(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1659 {
1660 _M_ptr = __r._M_ptr;
1661 _M_refcount = __r._M_refcount;
1662 return *this;
1663 }
1664
1665 __weak_ptr&
1666 operator=(__weak_ptr&& __r) noexcept
1667 {
1668 _M_ptr = __r._M_ptr;
1669 _M_refcount = std::move(__r._M_refcount);
1670 __r._M_ptr = nullptr;
1671 return *this;
1672 }
1673
1674 template<typename _Yp>
1675 _Assignable<_Yp>
1676 operator=(__weak_ptr<_Yp, _Lp>&& __r) noexcept
1677 {
1678 _M_ptr = __r.lock().get();
1679 _M_refcount = std::move(__r._M_refcount);
1680 __r._M_ptr = nullptr;
1681 return *this;
1682 }
1683
1684 __shared_ptr<_Tp, _Lp>
1685 lock() const noexcept
1686 { return __shared_ptr<element_type, _Lp>(*this, std::nothrow); }
1687
1688 long
1689 use_count() const noexcept
1690 { return _M_refcount._M_get_use_count(); }
1691
1692 bool
1693 expired() const noexcept
1694 { return _M_refcount._M_get_use_count() == 0; }
1695
1696 template<typename _Tp1>
1697 bool
1698 owner_before(const __shared_ptr<_Tp1, _Lp>& __rhs) const noexcept
1699 { return _M_refcount._M_less(__rhs._M_refcount); }
1700
1701 template<typename _Tp1>
1702 bool
1703 owner_before(const __weak_ptr<_Tp1, _Lp>& __rhs) const noexcept
1704 { return _M_refcount._M_less(__rhs._M_refcount); }
1705
1706 void
1707 reset() noexcept
1708 { __weak_ptr().swap(*this); }
1709
1710 void
1711 swap(__weak_ptr& __s) noexcept
1712 {
1713 std::swap(_M_ptr, __s._M_ptr);
1714 _M_refcount._M_swap(__s._M_refcount);
1715 }
1716
1717 private:
1718 // Used by __enable_shared_from_this.
1719 void
1720 _M_assign(_Tp* __ptr, const __shared_count<_Lp>& __refcount) noexcept
1721 {
1722 if (use_count() == 0)
1723 {
1724 _M_ptr = __ptr;
1725 _M_refcount = __refcount;
1726 }
1727 }
1728
1729 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1730 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1731 friend class __enable_shared_from_this<_Tp, _Lp>;
1732 friend class enable_shared_from_this<_Tp>;
1733
1734 element_type* _M_ptr; // Contained pointer.
1735 __weak_count<_Lp> _M_refcount; // Reference counter.
1736 };
1737
1738 // 20.7.2.3.6 weak_ptr specialized algorithms.
1739 template<typename _Tp, _Lock_policy _Lp>
1740 inline void
1741 swap(__weak_ptr<_Tp, _Lp>& __a, __weak_ptr<_Tp, _Lp>& __b) noexcept
1742 { __a.swap(__b); }
1743
1744 template<typename _Tp, typename _Tp1>
1745 struct _Sp_owner_less : public binary_function<_Tp, _Tp, bool>
1746 {
1747 bool
1748 operator()(const _Tp& __lhs, const _Tp& __rhs) const noexcept
1749 { return __lhs.owner_before(__rhs); }
1750
1751 bool
1752 operator()(const _Tp& __lhs, const _Tp1& __rhs) const noexcept
1753 { return __lhs.owner_before(__rhs); }
1754
1755 bool
1756 operator()(const _Tp1& __lhs, const _Tp& __rhs) const noexcept
1757 { return __lhs.owner_before(__rhs); }
1758 };
1759
1760 template<>
1761 struct _Sp_owner_less<void, void>
1762 {
1763 template<typename _Tp, typename _Up>
1764 auto
1765 operator()(const _Tp& __lhs, const _Up& __rhs) const noexcept
1766 -> decltype(__lhs.owner_before(__rhs))
1767 { return __lhs.owner_before(__rhs); }
1768
1769 using is_transparent = void;
1770 };
1771
1772 template<typename _Tp, _Lock_policy _Lp>
1773 struct owner_less<__shared_ptr<_Tp, _Lp>>
1774 : public _Sp_owner_less<__shared_ptr<_Tp, _Lp>, __weak_ptr<_Tp, _Lp>>
1775 { };
1776
1777 template<typename _Tp, _Lock_policy _Lp>
1778 struct owner_less<__weak_ptr<_Tp, _Lp>>
1779 : public _Sp_owner_less<__weak_ptr<_Tp, _Lp>, __shared_ptr<_Tp, _Lp>>
1780 { };
1781
1782
1783 template<typename _Tp, _Lock_policy _Lp>
1784 class __enable_shared_from_this
1785 {
1786 protected:
1787 constexpr __enable_shared_from_this() noexcept { }
1788
1789 __enable_shared_from_this(const __enable_shared_from_this&) noexcept { }
1790
1791 __enable_shared_from_this&
1792 operator=(const __enable_shared_from_this&) noexcept
1793 { return *this; }
1794
1795 ~__enable_shared_from_this() { }
1796
1797 public:
1798 __shared_ptr<_Tp, _Lp>
1799 shared_from_this()
1800 { return __shared_ptr<_Tp, _Lp>(this->_M_weak_this); }
1801
1802 __shared_ptr<const _Tp, _Lp>
1803 shared_from_this() const
1804 { return __shared_ptr<const _Tp, _Lp>(this->_M_weak_this); }
1805
1806#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1807 __weak_ptr<_Tp, _Lp>
1808 weak_from_this() noexcept
1809 { return this->_M_weak_this; }
1810
1811 __weak_ptr<const _Tp, _Lp>
1812 weak_from_this() const noexcept
1813 { return this->_M_weak_this; }
1814#endif
1815
1816 private:
1817 template<typename _Tp1>
1818 void
1819 _M_weak_assign(_Tp1* __p, const __shared_count<_Lp>& __n) const noexcept
1820 { _M_weak_this._M_assign(__p, __n); }
1821
1822 friend const __enable_shared_from_this*
1823 __enable_shared_from_this_base(const __shared_count<_Lp>&,
1824 const __enable_shared_from_this* __p)
1825 { return __p; }
1826
1827 template<typename, _Lock_policy>
1828 friend class __shared_ptr;
1829
1830 mutable __weak_ptr<_Tp, _Lp> _M_weak_this;
1831 };
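_M_weak_this is seeded by __shared_ptr::_M_enable_shared_from_this_with at the moment a shared_ptr first takes ownership, which is why shared_from_this() is only valid on an already-owned object. Public-API sketch (Session is invented):

    #include <memory>

    struct Session : std::enable_shared_from_this<Session> {
        std::shared_ptr<Session> self() { return shared_from_this(); }
    };

    int main() {
        auto s = std::make_shared<Session>();
        auto t = s->self();       // shares s's control block
        // Session u; u.self();   // undefined before C++17;
        //                        // throws std::bad_weak_ptr since C++17
        return static_cast<int>(s.use_count());  // 2
    }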
1832
1833 template<typename _Tp, _Lock_policy _Lp, typename _Alloc, typename... _Args>
1834 inline __shared_ptr<_Tp, _Lp>
1835 __allocate_shared(const _Alloc& __a, _Args&&... __args)
1836 {
1837 return __shared_ptr<_Tp, _Lp>(_Sp_make_shared_tag(), __a,
1838 std::forward<_Args>(__args)...);
1839 }
1840
1841 template<typename _Tp, _Lock_policy _Lp, typename... _Args>
1842 inline __shared_ptr<_Tp, _Lp>
1843 __make_shared(_Args&&... __args)
1844 {
1845 typedef typename std::remove_const<_Tp>::type _Tp_nc;
1846 return std::__allocate_shared<_Tp, _Lp>(std::allocator<_Tp_nc>(),
1847 std::forward<_Args>(__args)...);
1848 }
1849
1850 /// std::hash specialization for __shared_ptr.
1851 template<typename _Tp, _Lock_policy _Lp>
1852 struct hash<__shared_ptr<_Tp, _Lp>>
1853 : public __hash_base<size_t, __shared_ptr<_Tp, _Lp>>
1854 {
1855 size_t
1856 operator()(const __shared_ptr<_Tp, _Lp>& __s) const noexcept
1857 {
1858 return hash<typename __shared_ptr<_Tp, _Lp>::element_type*>()(
1859 __s.get());
1860 }
1861 };
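Hashing simply delegates to the stored pointer, so shared_ptr keys behave consistently with operator== in unordered containers. A short sketch:

    #include <memory>
    #include <unordered_set>

    int main() {
        auto p = std::make_shared<int>(5);
        std::unordered_set<std::shared_ptr<int>> seen;
        seen.insert(p);
        // Equality and hashing both follow get(), so p is found again.
        return static_cast<int>(seen.count(p));  // 1
    }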
1862
1863_GLIBCXX_END_NAMESPACE_VERSION
1864} // namespace
1865
1866#endif // _SHARED_PTR_BASE_H

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/ext/atomicity.h

1// Support for atomic operations -*- C++ -*-
2
3// Copyright (C) 2004-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file ext/atomicity.h
26 * This file is a GNU extension to the Standard C++ Library.
27 */
28
29#ifndef _GLIBCXX_ATOMICITY_H
30#define _GLIBCXX_ATOMICITY_H 1
31
32#pragma GCC system_header
33
34#include <bits/c++config.h>
35#include <bits/gthr.h>
36#include <bits/atomic_word.h>
37
38namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
39{
40_GLIBCXX_BEGIN_NAMESPACE_VERSION
41
42 // Functions for portable atomic access.
43 // To abstract locking primitives across all thread policies, use:
44 // __exchange_and_add_dispatch
45 // __atomic_add_dispatch
46#ifdef _GLIBCXX_ATOMIC_BUILTINS
47 static inline _Atomic_word
48 __exchange_and_add(volatile _Atomic_word* __mem, int __val)
49    { return __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
50
51 static inline void
52 __atomic_add(volatile _Atomic_word* __mem, int __val)
53    { __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
54#else
55 _Atomic_word
56 __attribute__ ((__unused__))
57 __exchange_and_add(volatile _Atomic_word*, int) throw ();
58
59 void
60 __attribute__ ((__unused__))
61 __atomic_add(volatile _Atomic_word*, int) throw ();
62#endif
63
64 static inline _Atomic_word
65 __exchange_and_add_single(_Atomic_word* __mem, int __val)
66 {
67 _Atomic_word __result = *__mem;
39: Use of memory after it is freed
68 *__mem += __val;
69 return __result;
70 }
71
72 static inline void
73 __atomic_add_single(_Atomic_word* __mem, int __val)
74 { *__mem += __val; }
75
76 static inline _Atomic_word
77 __attribute__ ((__unused__))
78 __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
79 {
80#ifdef __GTHREADS
81 if (__gthread_active_p())
37: Taking false branch
82 return __exchange_and_add(__mem, __val);
83 else
84 return __exchange_and_add_single(__mem, __val);
38: Calling '__exchange_and_add_single'
85#else
86 return __exchange_and_add_single(__mem, __val);
87#endif
88 }
89
90 static inline void
91 __attribute__ ((__unused__))
92 __atomic_add_dispatch(_Atomic_word* __mem, int __val)
93 {
94#ifdef __GTHREADS
95 if (__gthread_active_p())
96 __atomic_add(__mem, __val);
97 else
98 __atomic_add_single(__mem, __val);
99#else
100 __atomic_add_single(__mem, __val);
101#endif
102 }
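The dispatch pair is what keeps reference counting cheap in single-threaded programs: the atomic read-modify-write is paid for only when __gthread_active_p() says threads are really in play. A rough portable analogue using std::atomic (threads_active is an invented stand-in for the internal __gthread_active_p):

    #include <atomic>

    // Hypothetical stand-in; the real test is platform-specific.
    bool threads_active() { return true; }

    // Fetch-and-add that pays for atomicity only when it can race.
    int exchange_and_add_dispatch(std::atomic<int>& mem, int val) {
        if (threads_active())
            return mem.fetch_add(val, std::memory_order_acq_rel);
        // Single-threaded fast path, mirroring __exchange_and_add_single.
        int result = mem.load(std::memory_order_relaxed);
        mem.store(result + val, std::memory_order_relaxed);
        return result;
    }

    int main() {
        std::atomic<int> refcount(1);
        return exchange_and_add_dispatch(refcount, 1);  // 1; refcount == 2
    }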
103
104_GLIBCXX_END_NAMESPACE_VERSION
105} // namespace
106
107// Even if the CPU doesn't need a memory barrier, we need to ensure
108// that the compiler doesn't reorder memory accesses across the
109// barriers.
110#ifndef _GLIBCXX_READ_MEM_BARRIER
111#define _GLIBCXX_READ_MEM_BARRIER __atomic_thread_fence (__ATOMIC_ACQUIRE)
112#endif
113#ifndef _GLIBCXX_WRITE_MEM_BARRIER
114#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence (__ATOMIC_RELEASE)
115#endif
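In portable code the closest analogue to these macros is std::atomic_thread_fence, which constrains compiler and CPU reordering together; a loose sketch, not the library's own definition:

    #include <atomic>

    inline void read_mem_barrier()  { std::atomic_thread_fence(std::memory_order_acquire); }
    inline void write_mem_barrier() { std::atomic_thread_fence(std::memory_order_release); }

    int main() { write_mem_barrier(); read_mem_barrier(); }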
116
117#endif