LLVM 17.0.0git
Parallel.cpp
Go to the documentation of this file.
1//===- llvm/Support/Parallel.cpp - Parallel algorithms --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "llvm/Config/llvm-config.h"
13
14#include <atomic>
15#include <future>
16#include <stack>
17#include <thread>
18#include <vector>
19
21
22namespace llvm {
23namespace parallel {
24#if LLVM_ENABLE_THREADS
25
#ifdef _WIN32
// On Windows the thread-local index is kept internal (static) and exposed
// only through the getThreadIndex() accessor defined here.
// NOTE(review): presumably this avoids exporting a thread_local variable
// across DLL boundaries — confirm against the declaration in Parallel.h.
static thread_local unsigned threadIndex;

unsigned getThreadIndex() { return threadIndex; }
#else
// On non-Windows targets the variable itself has external linkage; the
// accessor is presumably provided elsewhere (e.g. inline in the header).
thread_local unsigned threadIndex;
#endif
33
34namespace detail {
35
36namespace {
37
/// Interface for anything that can accept closures and run them
/// asynchronously on the caller's behalf.
class Executor {
public:
  virtual ~Executor() = default;

  /// Enqueue \p Func for asynchronous execution.
  virtual void add(std::function<void()> Func) = 0;

  /// Return the process-wide default executor (defined later in this file).
  static Executor *getDefaultExecutor();
};
46
/// An implementation of an Executor that runs closures on a thread pool
/// in FILO order (a stack): the most recently added task runs first.
class ThreadPoolExecutor : public Executor {
public:
  explicit ThreadPoolExecutor(ThreadPoolStrategy S = hardware_concurrency()) {
    unsigned ThreadCount = S.compute_thread_count();
    // Spawn all but one of the threads in another thread as spawning threads
    // can take a while.
    Threads.reserve(ThreadCount);
    Threads.resize(1);
    std::lock_guard<std::mutex> Lock(Mutex);
    // Use operator[] before creating the thread to avoid data race in .size()
    // in "safe libc++" mode.
    auto &Thread0 = Threads[0];
    Thread0 = std::thread([this, ThreadCount, S] {
      // Thread 0 spawns the remaining ThreadCount-1 workers and then becomes
      // a worker itself. The reserve() above guarantees these emplace_back()
      // calls never reallocate, so appending here does not invalidate
      // Thread0 and does not move the vector's buffer under the main thread.
      for (unsigned I = 1; I < ThreadCount; ++I) {
        // Re-check Stop after each spawn so stop() can abort creation early.
        Threads.emplace_back([=] { work(S, I); });
        if (Stop)
          break;
      }
      // Tell stop() that no further threads will be created.
      ThreadsCreated.set_value();
      work(S, 0);
    });
  }

  // Request shutdown. Stop is flipped while holding Mutex so a worker cannot
  // miss it between its predicate check and blocking in Cond.wait(). Then all
  // workers are woken, and we wait until thread creation has completed so no
  // new thread can be spawned after stop() returns. Idempotent. Does NOT
  // wait for the workers to exit — that is the destructor's job.
  void stop() {
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      if (Stop)
        return;
      Stop = true;
    }
    Cond.notify_all();
    ThreadsCreated.get_future().wait();
  }

  ~ThreadPoolExecutor() override {
    stop();
    std::thread::id CurrentThreadId = std::this_thread::get_id();
    // Destruction may be triggered from one of the pool's own threads;
    // joining the current thread would never return, so detach that one
    // and join all the others.
    for (std::thread &T : Threads)
      if (T.get_id() == CurrentThreadId)
        T.detach();
      else
        T.join();
  }

  // Hooks for ManagedStatic (see getDefaultExecutor()). Creator builds the
  // pool from the global strategy. Deleter only *stops* the pool — it
  // deliberately does not delete it, so a fast exit via llvm_shutdown()
  // need not join the worker threads.
  struct Creator {
    static void *call() { return new ThreadPoolExecutor(strategy); }
  };
  struct Deleter {
    static void call(void *Ptr) { ((ThreadPoolExecutor *)Ptr)->stop(); }
  };

  // Push one task (it lands on top of the FILO stack) and wake one worker.
  void add(std::function<void()> F) override {
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      WorkStack.push(std::move(F));
    }
    Cond.notify_one();
  }

private:
  // Worker main loop: record this thread's pool index, apply the strategy's
  // per-thread settings, then repeatedly pop and run tasks until stopped.
  // The lock is released while a task runs so other workers make progress.
  void work(ThreadPoolStrategy S, unsigned ThreadID) {
    threadIndex = ThreadID;
    S.apply_thread_strategy(ThreadID);
    while (true) {
      std::unique_lock<std::mutex> Lock(Mutex);
      Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
      if (Stop)
        break;
      auto Task = std::move(WorkStack.top());
      WorkStack.pop();
      Lock.unlock();
      Task();
    }
  }

  std::atomic<bool> Stop{false};               // set once, by stop()
  std::stack<std::function<void()>> WorkStack; // pending tasks (FILO)
  std::mutex Mutex;                            // guards WorkStack and Stop transition
  std::condition_variable Cond;
  std::promise<void> ThreadsCreated;           // fulfilled when spawning is done
  std::vector<std::thread> Threads;
};
131
Executor *Executor::getDefaultExecutor() {
  // The ManagedStatic enables the ThreadPoolExecutor to be stopped via
  // llvm_shutdown() which allows a "clean" fast exit, e.g. via _exit(). This
  // stops the thread pool and waits for any worker thread creation to complete
  // but does not wait for the threads to finish. The wait for worker thread
  // creation to complete is important as it prevents intermittent crashes on
  // Windows due to a race condition between thread creation and process exit.
  //
  // The ThreadPoolExecutor will only be destroyed when the static unique_ptr to
  // it is destroyed, i.e. in a normal full exit. The ThreadPoolExecutor
  // destructor ensures it has been stopped and waits for worker threads to
  // finish. The wait is important as it prevents intermittent crashes on
  // Windows when the process is doing a full exit.
  //
  // The Windows crashes appear to only occur with the MSVC static runtimes and
  // are more frequent with the debug static runtime.
  //
  // This also prevents intermittent deadlocks on exit with the MinGW runtime.

  static ManagedStatic<ThreadPoolExecutor, ThreadPoolExecutor::Creator,
                       ThreadPoolExecutor::Deleter>
      ManagedExec;
  // The unique_ptr shares the same lazily-created pool object; its
  // destruction on a normal full exit runs ~ThreadPoolExecutor (stop + join),
  // while ManagedStatic's Deleter only stops the pool on llvm_shutdown().
  static std::unique_ptr<ThreadPoolExecutor> Exec(&(*ManagedExec));
  return Exec.get();
}
157} // namespace
158} // namespace detail
159#endif
160
// Count of live TaskGroups; used to let only the first (outermost) group run
// its tasks in parallel, as explained in the comment that follows.
static std::atomic<int> TaskGroupInstances;
162
// Latch::sync() called by the dtor may cause one thread to block. It is a
// deadlock if all threads in the default executor are blocked. To prevent the
// deadlock, only allow the first TaskGroup to run tasks in parallel. In the
// scenario of nested parallel_for_each(), only the outermost one runs in
// parallel.
// NOTE(review): extraction gap — the TaskGroup constructor and the
// TaskGroup::~TaskGroup() signature (and, judging by the source-line
// numbering, one more statement before the closing brace) were dropped by
// the doc scraper. The statements below are the tail of the destructor.
// TODO: restore this definition from upstream llvm/lib/Support/Parallel.cpp
// before compiling this file.
  // We must ensure that all the workloads have finished before decrementing the
  // instances count.
  L.sync();
}
174
175void TaskGroup::spawn(std::function<void()> F) {
176#if LLVM_ENABLE_THREADS
177 if (Parallel) {
178 L.inc();
179 detail::Executor::getDefaultExecutor()->add([&, F = std::move(F)] {
180 F();
181 L.dec();
182 });
183 return;
184 }
185#endif
186 F();
187}
188
189void TaskGroup::execute(std::function<void()> F) {
190 if (parallel::strategy.ThreadsRequested == 1)
191 F();
192 else
193 spawn(F);
194}
195} // namespace parallel
196} // namespace llvm
197
198void llvm::parallelFor(size_t Begin, size_t End,
199 llvm::function_ref<void(size_t)> Fn) {
200 // If we have zero or one items, then do not incur the overhead of spinning up
201 // a task group. They are surprisingly expensive, and because they do not
202 // support nested parallelism, a single entry task group can block parallel
203 // execution underneath them.
204#if LLVM_ENABLE_THREADS
205 auto NumItems = End - Begin;
206 if (NumItems > 1 && parallel::strategy.ThreadsRequested != 1) {
207 // Limit the number of tasks to MaxTasksPerGroup to limit job scheduling
208 // overhead on large inputs.
209 auto TaskSize = NumItems / parallel::detail::MaxTasksPerGroup;
210 if (TaskSize == 0)
211 TaskSize = 1;
212
214 for (; Begin + TaskSize < End; Begin += TaskSize) {
215 TG.spawn([=, &Fn] {
216 for (size_t I = Begin, E = Begin + TaskSize; I != E; ++I)
217 Fn(I);
218 });
219 }
220 if (Begin != End) {
221 TG.spawn([=, &Fn] {
222 for (size_t I = Begin; I != End; ++I)
223 Fn(I);
224 });
225 }
226 return;
227 }
228#endif
229
230 for (; Begin != End; ++Begin)
231 Fn(Begin);
232}
SmallVector< MachineOperand, 4 > Cond
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
ManagedStatic - This transparently changes the behavior of global statics to be lazily constructed on...
Definition: ManagedStatic.h:83
This tells how a thread pool will be used.
Definition: Threading.h:116
void apply_thread_strategy(unsigned ThreadPoolNum) const
Assign the current thread to an ideal hardware CPU or NUMA node.
An efficient, type-erasing, non-owning reference to a callable.
void execute(std::function< void()> f)
Definition: Parallel.cpp:189
void spawn(std::function< void()> f)
Definition: Parallel.cpp:175
static std::atomic< int > TaskGroupInstances
Definition: Parallel.cpp:161
ThreadPoolStrategy strategy
Definition: Parallel.cpp:20
unsigned getThreadIndex()
Definition: Parallel.h:44
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
ThreadPoolStrategy hardware_concurrency(unsigned ThreadCount=0)
Returns a default thread strategy where all available hardware resources are to be used,...
Definition: Threading.h:185
void parallelFor(size_t Begin, size_t End, function_ref< void(size_t)> Fn)
Definition: Parallel.cpp:198