#include "llvm/Support/ThreadPool.h"

#include "llvm/ADT/ScopeExit.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/ExponentialBackoff.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#if LLVM_ENABLE_THREADS

StdThreadPool::StdThreadPool(ThreadPoolStrategy S)
    : Strategy(S), MaxThreadCount(S.compute_thread_count()) {
  if (Strategy.UseJobserver)
    TheJobserver = JobserverClient::getInstance();
}
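
// A minimal usage sketch (illustrative, not part of this file), assuming
// only the public API from llvm/Support/ThreadPool.h and
// llvm/Support/Threading.h:
//
// \code
//   StdThreadPool Pool(hardware_concurrency(4)); // at most 4 workers
//   for (int I = 0; I != 8; ++I)
//     Pool.async([I] { /* work on item I */ });
//   Pool.wait(); // blocks until every queued task has run
// \endcode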
// Create workers on demand, up to `requested`, but never beyond
// MaxThreadCount.
void StdThreadPool::grow(int requested) {
  llvm::sys::ScopedWriter LockGuard(ThreadsLock);
  if (Threads.size() >= MaxThreadCount)
    return; // Already hit the max thread pool size.
  int newThreadCount = std::min<int>(requested, MaxThreadCount);
  while (static_cast<int>(Threads.size()) < newThreadCount) {
    int ThreadID = Threads.size();
    Threads.emplace_back([this, ThreadID] {
      set_thread_name(formatv("llvm-worker-{0}", ThreadID));
      Strategy.apply_thread_strategy(ThreadID);
      if (TheJobserver)
        processTasksWithJobserver();
      else
        processTasks(nullptr);
    });
  }
}
#ifndef NDEBUG
// The stack of task groups the current worker thread is running tasks for,
// used to assert against deadlocking recursive waits.
static LLVM_THREAD_LOCAL std::vector<ThreadPoolTaskGroup *>
    *CurrentThreadTaskGroups = nullptr;
#endif
// Process tasks until the pool shuts down and the queue drains, or, if
// WaitingForGroup is non-null, until all tasks in that group have finished
// (this is how a recursive wait() keeps the thread busy instead of
// deadlocking).
void StdThreadPool::processTasks(ThreadPoolTaskGroup *WaitingForGroup) {
  while (true) {
    std::function<void()> Task;
    ThreadPoolTaskGroup *GroupOfTask;
    {
      std::unique_lock<std::mutex> LockGuard(QueueLock);
      bool workCompletedForGroup = false; // Result of workCompletedUnlocked().
      // Wait for a task to be queued, for shutdown, or for the awaited
      // group to finish.
      QueueCondition.wait(LockGuard, [&] {
        return !EnableFlag || !Tasks.empty() ||
               (WaitingForGroup != nullptr &&
                (workCompletedForGroup =
                     workCompletedUnlocked(WaitingForGroup)));
      });
      // Exit conditions.
      if (!EnableFlag && Tasks.empty())
        return;
      if (WaitingForGroup != nullptr && workCompletedForGroup)
        return;
      // We have a task; signal that this thread is active *before* popping
      // the queue, so that wait() still detects in-flight work even when
      // the queue is empty.
      ++ActiveThreads;
      Task = std::move(Tasks.front().first);
      GroupOfTask = Tasks.front().second;
      // Active tasks are also counted per group: ActiveThreads alone never
      // reaches 0 while a recursive wait() is in progress.
      if (GroupOfTask != nullptr)
        ++ActiveGroups[GroupOfTask]; // Increments, or sets to 1 if new.
      Tasks.pop_front();
    }
#ifndef NDEBUG
    if (CurrentThreadTaskGroups == nullptr)
      CurrentThreadTaskGroups = new std::vector<ThreadPoolTaskGroup *>;
    CurrentThreadTaskGroups->push_back(GroupOfTask);
#endif

    // Run the task we just grabbed.
    Task();

#ifndef NDEBUG
    CurrentThreadTaskGroups->pop_back();
    if (CurrentThreadTaskGroups->empty()) {
      delete CurrentThreadTaskGroups;
      CurrentThreadTaskGroups = nullptr;
    }
#endif

    bool Notify;
    bool NotifyGroup;
    {
      // Adjust ActiveThreads under the lock, in case someone is blocked in
      // StdThreadPool::wait().
      std::lock_guard<std::mutex> LockGuard(QueueLock);
      --ActiveThreads;
      if (GroupOfTask != nullptr) {
        auto A = ActiveGroups.find(GroupOfTask);
        if (--(A->second) == 0)
          ActiveGroups.erase(A);
      }
      Notify = workCompletedUnlocked(GroupOfTask);
      NotifyGroup = GroupOfTask != nullptr && Notify;
    }
    // If this was the last active task, wake up threads blocked in wait().
    if (Notify)
      CompletionCondition.notify_all();
    // If this task belonged to a group, also wake threads waiting inside
    // processTasks() on QueueCondition, so a recursive wait() can return
    // once its group has finished.
    if (NotifyGroup)
      QueueCondition.notify_all();
  }
}
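
// How the per-group accounting above surfaces in the public API (sketch,
// assuming ThreadPoolTaskGroup from llvm/Support/ThreadPool.h):
//
// \code
//   StdThreadPool Pool(hardware_concurrency());
//   ThreadPoolTaskGroup Group(Pool);
//   Group.async([] { /* task bound to Group */ });
//   Group.async([] { /* second task in the same group */ });
//   Group.wait(); // returns once this group's tasks are done, even if
//                 // unrelated tasks are still queued in the pool
// \endcode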
/// Main loop for worker threads when cooperating with an external jobserver
/// (e.g. GNU make's -j slot pool): a job slot must be held while a task
/// runs, so parallelism stays within the build's shared limit.
void StdThreadPool::processTasksWithJobserver() {
  while (true) {
    // Acquire a job slot, polling with exponential backoff instead of
    // spinning. NOTE: the timeout below is an assumption; the original
    // parameters are elided in this excerpt.
    JobSlot Slot;
    ExponentialBackoff Backoff(std::chrono::hours(24));
    bool AcquiredToken = false;
    do {
      { // Stop polling if the pool is shutting down.
        std::unique_lock<std::mutex> LockGuard(QueueLock);
        if (!EnableFlag)
          return;
      }
      Slot = TheJobserver->tryAcquire();
      if (Slot.isValid()) {
        AcquiredToken = true;
        break;
      }
    } while (Backoff.waitForNextAttempt());

    if (!AcquiredToken)
      report_fatal_error("Timed out acquiring a jobserver job slot.");

    // Return the slot to the jobserver even if a task throws or we leave
    // this scope early.
    auto SlotReleaser =
        make_scope_exit([&] { TheJobserver->release(std::move(Slot)); });

    // While holding the job slot, run tasks from the internal queue.
    while (true) {
      std::function<void()> Task;
      ThreadPoolTaskGroup *GroupOfTask = nullptr;
      {
        std::unique_lock<std::mutex> LockGuard(QueueLock);
        QueueCondition.wait(LockGuard,
                            [&] { return !EnableFlag || !Tasks.empty(); });
        // Shutting down with an empty queue: the thread can exit.
        if (!EnableFlag && Tasks.empty())
          return;
        // Queue drained: leave the inner loop to release the job slot.
        if (Tasks.empty())
          break;
        // Mark the task active before unlocking so wait() sees it.
        ++ActiveThreads;
        Task = std::move(Tasks.front().first);
        GroupOfTask = Tasks.front().second;
        if (GroupOfTask != nullptr)
          ++ActiveGroups[GroupOfTask];
        Tasks.pop_front();
      }

      Task(); // Run the task while the job slot is held.

      // Update counters and wake waiters if everything is done.
      {
        std::lock_guard<std::mutex> LockGuard(QueueLock);
        --ActiveThreads;
        if (GroupOfTask != nullptr) {
          auto A = ActiveGroups.find(GroupOfTask);
          if (--(A->second) == 0)
            ActiveGroups.erase(A);
        }
      }
      if (workCompletedUnlocked(nullptr))
        CompletionCondition.notify_all();
    }
  }
}
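
// Opting into the jobserver path goes through the strategy, as the
// constructor above suggests. A sketch, assuming UseJobserver is a plain
// settable flag and that a jobserver was advertised to this process (e.g.
// via MAKEFLAGS under `make -jN`):
//
// \code
//   ThreadPoolStrategy S = hardware_concurrency();
//   S.UseJobserver = true; // draw parallelism from the shared slot pool
//   StdThreadPool Pool(S);
// \endcode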
// Returns true if all tasks have finished; with a non-null Group, returns
// true once that group has no task queued or running. Must be called with
// QueueLock held.
bool StdThreadPool::workCompletedUnlocked(ThreadPoolTaskGroup *Group) const {
  if (Group == nullptr)
    return !ActiveThreads && Tasks.empty();
  return ActiveGroups.count(Group) == 0 &&
         !is_contained(make_second_range(Tasks), Group);
}
void StdThreadPool::wait() {
  assert(!isWorkerThread()); // Waiting from a worker thread would deadlock.
  // Wait for all threads to complete and the queue to be empty.
  std::unique_lock<std::mutex> LockGuard(QueueLock);
  CompletionCondition.wait(LockGuard,
                           [&] { return workCompletedUnlocked(nullptr); });
}
void StdThreadPool::wait(ThreadPoolTaskGroup &Group) {
  // Non-worker threads simply block until the group has no task queued or
  // running.
  if (!isWorkerThread()) {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    CompletionCondition.wait(LockGuard,
                             [&] { return workCompletedUnlocked(&Group); });
    return;
  }
  // A task must not wait on its own group; that would deadlock.
  assert(CurrentThreadTaskGroups == nullptr ||
         !is_contained(*CurrentThreadTaskGroups, &Group));
  // A worker thread keeps processing other tasks while it waits, so a
  // recursive wait() from inside a task cannot stall the pool.
  processTasks(&Group);
}
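
// The worker-thread branch above is what makes nested waits safe: a task
// may create and wait on a *different* group (sketch):
//
// \code
//   StdThreadPool Pool(hardware_concurrency());
//   ThreadPoolTaskGroup Outer(Pool);
//   Outer.async([&Pool] {
//     ThreadPoolTaskGroup Inner(Pool); // a distinct group
//     Inner.async([] { /* nested work */ });
//     Inner.wait(); // processes other queued tasks instead of blocking
//   });
//   Outer.wait();
// \endcode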
bool StdThreadPool::isWorkerThread() const {
  llvm::sys::ScopedReader LockGuard(ThreadsLock);
  llvm::thread::id CurrentThreadId = llvm::this_thread::get_id();
  for (const llvm::thread &Thread : Threads)
    if (CurrentThreadId == Thread.get_id())
      return true;
  return false;
}
// The destructor joins all threads, waiting for completion.
StdThreadPool::~StdThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
  QueueCondition.notify_all();
  llvm::sys::ScopedReader LockGuard(ThreadsLock);
  for (auto &Worker : Threads)
    Worker.join();
}

#endif // LLVM_ENABLE_THREADS
// No threads are launched; warn if a multi-threaded strategy was requested.
SingleThreadExecutor::SingleThreadExecutor(ThreadPoolStrategy S) {
  int ThreadCount = S.compute_thread_count();
  if (ThreadCount != 1) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void SingleThreadExecutor::wait() {
  // Sequential implementation: run the queued tasks on the calling thread.
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front().first);
    Tasks.pop_front();
    Task();
  }
}

SingleThreadExecutor::~SingleThreadExecutor() {
  // Run any pending tasks before tearing down.
  wait();
}
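
// Code written against ThreadPoolInterface works unchanged when
// LLVM_ENABLE_THREADS is off; tasks are simply deferred and then run
// inline once wait() is reached (sketch):
//
// \code
//   SingleThreadExecutor Pool;  // any strategy argument is ignored
//   Pool.async([] { /* queued, not yet run */ });
//   Pool.wait();                // runs the queued task on this thread
// \endcode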