Bug Summary

File: projects/openmp/runtime/src/kmp_tasking.cpp
Warning: line 2978, column 30
Dereference of null pointer
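
The checker behind this warning class (core.NullDereference) flags execution
paths on which a pointer that may be null is dereferenced without a guard. The
flagged expression at kmp_tasking.cpp:2978 lies beyond the excerpt below; as a
minimal illustration of the reported pattern (hypothetical names, not the
runtime's actual code path):

    struct thread_data_t {
      int td_deque_ntasks;
    };

    // Hypothetical lookup that can return nullptr.
    thread_data_t *lookup_thread_data(int gtid);

    int ntasks_or_zero(int gtid) {
      thread_data_t *td = lookup_thread_data(gtid);
      // Reading td->td_deque_ntasks before this guard is exactly the
      // "Dereference of null pointer" pattern the analyzer reports.
      if (td == nullptr)
        return 0;
      return td->td_deque_ntasks;
    }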

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name kmp_tasking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D omp_EXPORTS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/openmp/runtime/src -I /build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -I /build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/i18n -I /build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/include/50 -I /build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/thirdparty/ittnotify -U NDEBUG -D _GNU_SOURCE -D _REENTRANT -D _FORTIFY_SOURCE=2 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-switch -Wno-missing-field-initializers -Wno-missing-braces -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/projects/openmp/runtime/src -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fno-rtti -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/kmp_tasking.cpp -faddrsig
/*
 * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#include "tsan_annotations.h"

/* forward declaration */
static void __kmp_enable_tasking(kmp_task_team_t *task_team,
                                 kmp_info_t *this_thr);
static void __kmp_alloc_task_deque(kmp_info_t *thread,
                                   kmp_thread_data_t *thread_data);
static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
                                           kmp_task_team_t *task_team);

#if OMP_45_ENABLED
static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
#endif

#ifdef BUILD_TIED_TASK_STACK

// __kmp_trace_task_stack: print the tied tasks from the task stack in order
// from top to bottom
//
// gtid: global thread identifier for thread containing stack
// thread_data: thread data for task team thread containing stack
// threshold: value above which the trace statement triggers
// location: string identifying call site of this function (for trace)
static void __kmp_trace_task_stack(kmp_int32 gtid,
                                   kmp_thread_data_t *thread_data,
                                   int threshold, char *location) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_taskdata_t **stack_top = task_stack->ts_top;
  kmp_int32 entries = task_stack->ts_entries;
  kmp_taskdata_t *tied_task;

  KA_TRACE(
      threshold,
      ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
       "first_block = %p, stack_top = %p \n",
       location, gtid, entries, task_stack->ts_first_block, stack_top));

  KMP_DEBUG_ASSERT(stack_top != NULL);
  KMP_DEBUG_ASSERT(entries > 0);

  while (entries != 0) {
    KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
    // fix up ts_top if we need to pop from previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
      kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);

      stack_block = stack_block->sb_prev;
      stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
    }

    // finish bookkeeping
    stack_top--;
    entries--;

    tied_task = *stack_top;

    KMP_DEBUG_ASSERT(tied_task != NULL);
    KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);

    KA_TRACE(threshold,
             ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
              "stack_top=%p, tied_task=%p\n",
              location, gtid, entries, stack_top, tied_task));
  }
  KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);

  KA_TRACE(threshold,
           ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
            location, gtid));
}

// __kmp_init_task_stack: initialize the task stack for the first time
// after a thread_data structure is created.
// It should not be necessary to do this again (assuming the stack works).
//
// gtid: global thread identifier of calling thread
// thread_data: thread data for task team thread containing stack
static void __kmp_init_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *first_block;

  // set up the first block of the stack
  first_block = &task_stack->ts_first_block;
  task_stack->ts_top = (kmp_taskdata_t **)first_block;
  memset((void *)first_block, '\0',
         TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));

  // initialize the stack to be empty
  task_stack->ts_entries = TASK_STACK_EMPTY;
  first_block->sb_next = NULL;
  first_block->sb_prev = NULL;
}

// __kmp_free_task_stack: free the task stack when thread_data is destroyed.
//
// gtid: global thread identifier for calling thread
// thread_data: thread info for thread containing stack
static void __kmp_free_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;

  KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
  // free from the second block of the stack
  while (stack_block != NULL) {
    kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;

    stack_block->sb_next = NULL;
    stack_block->sb_prev = NULL;
    if (stack_block != &task_stack->ts_first_block) {
      __kmp_thread_free(thread,
                        stack_block); // free the block, if not the first
    }
    stack_block = next_block;
  }
  // initialize the stack to be empty
  task_stack->ts_entries = 0;
  task_stack->ts_top = NULL;
}

// __kmp_push_task_stack: Push the tied task onto the task stack.
// Grow the stack if necessary by allocating another block.
//
// gtid: global thread identifier for calling thread
// thread: thread info for thread containing stack
// tied_task: the task to push on the stack
static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
                                  kmp_taskdata_t *tied_task) {
  // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;

  if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
    return; // Don't push anything on stack if team or team tasks are serialized
  }

  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);

  KA_TRACE(20,
           ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
            gtid, thread, tied_task));
  // Store entry
  *(task_stack->ts_top) = tied_task;

  // Do bookkeeping for next push
  task_stack->ts_top++;
  task_stack->ts_entries++;

  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    // Find beginning of this task block
    kmp_stack_block_t *stack_block =
        (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);

    // Check if we already have a block
    if (stack_block->sb_next !=
        NULL) { // reset ts_top to beginning of next block
      task_stack->ts_top = &stack_block->sb_next->sb_block[0];
    } else { // Alloc new block and link it up
      kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
          thread, sizeof(kmp_stack_block_t));

      task_stack->ts_top = &new_block->sb_block[0];
      stack_block->sb_next = new_block;
      new_block->sb_prev = stack_block;
      new_block->sb_next = NULL;

      KA_TRACE(
          30,
          ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
           gtid, tied_task, new_block));
    }
  }
  KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
                tied_task));
}

// __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
// the task, just check to make sure it matches the ending task passed in.
//
// gtid: global thread identifier for the calling thread
// thread: thread info structure containing stack
// tied_task: the task popped off the stack
// ending_task: the task that is ending (should match popped task)
static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
                                 kmp_taskdata_t *ending_task) {
  // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_taskdata_t *tied_task;

  if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
    // Don't pop anything from stack if team or team tasks are serialized
    return;
  }

  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
  KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);

  KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
                thread));

  // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);

    stack_block = stack_block->sb_prev;
    task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
  }

  // finish bookkeeping
  task_stack->ts_top--;
  task_stack->ts_entries--;

  tied_task = *(task_stack->ts_top);

  KMP_DEBUG_ASSERT(tied_task != NULL);
  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
  KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly

  KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
                tied_task));
  return;
}
#endif /* BUILD_TIED_TASK_STACK */
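
// Note on the index arithmetic above and in the deque code below: the sizes
// involved rely on powers of two, so the masks TASK_STACK_INDEX_MASK and
// TASK_DEQUE_MASK (the latter expands to td_deque_size - 1) turn a modulo
// into a single AND. A minimal sketch of the idiom (illustrative constants,
// not the runtime's):
//
//   kmp_uint32 size = 256;               // must be a power of two
//   kmp_uint32 mask = size - 1;
//   kmp_uint32 next = (tail + 1) & mask; // same as (tail + 1) % size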

// __kmp_push_task: Add a task to the thread's deque
static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_task_team_t *task_team = thread->th.th_task_team;
  kmp_int32 tid = __kmp_tid_from_gtid(gtid);
  kmp_thread_data_t *thread_data;

  KA_TRACE(20,
           ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));

  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
    // untied task needs to increment counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
    KMP_DEBUG_USE_VAR(counter);
    KA_TRACE(
        20,
        ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
         gtid, counter, taskdata));
  }

  // The first check avoids building task_team thread data if serialized
  if (taskdata->td_flags.task_serial) {
    KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }

  // Now that serialized tasks have returned, we can assume that we are not in
  // immediate exec mode
  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
  if (!KMP_TASKING_ENABLED(task_team)) {
    __kmp_enable_tasking(task_team, thread);
  }
  KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
  KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);

  // Find tasking deque specific to encountering thread
  thread_data = &task_team->tt.tt_threads_data[tid];

  // No lock needed since only owner can allocate
  if (thread_data->td.td_deque == NULL) {
    __kmp_alloc_task_deque(thread, thread_data);
  }

  // Check if deque is full
  if (TCR_4(thread_data->td.td_deque_ntasks) >=
      TASK_DEQUE_SIZE(thread_data->td)) {
    KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }

  // Lock the deque for the task push operation
  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);

#if OMP_45_ENABLED
  // Need to recheck as we can get a proxy task from a thread outside of OpenMP
  if (TCR_4(thread_data->td.td_deque_ntasks) >=
      TASK_DEQUE_SIZE(thread_data->td)) {
    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
    KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }
#else
  // Must have room since no thread can add tasks but calling thread
  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
                   TASK_DEQUE_SIZE(thread_data->td));
#endif

  thread_data->td.td_deque[thread_data->td.td_deque_tail] =
      taskdata; // Push taskdata
  // Wrap index.
  thread_data->td.td_deque_tail =
      (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
  TCW_4(thread_data->td.td_deque_ntasks,
        TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count

  KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
                "task=%p ntasks=%d head=%u tail=%u\n",
                gtid, taskdata, thread_data->td.td_deque_ntasks,
                thread_data->td.td_deque_head, thread_data->td.td_deque_tail));

  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);

  return TASK_SUCCESSFULLY_PUSHED;
}
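
// Note: __kmp_push_task above tests deque capacity once without the lock (a
// cheap early exit) and again after taking td_deque_lock, because with
// OMP_45_ENABLED a proxy task can be pushed from a thread outside the team
// between the two tests. A minimal sketch of that check / lock / re-check
// shape (std::mutex standing in for the bootstrap lock; full() and push()
// are placeholders):
//
//   if (full()) return TASK_NOT_PUSHED;          // unlocked fast-path check
//   std::lock_guard<std::mutex> g(deque_mutex);
//   if (full()) return TASK_NOT_PUSHED;          // re-check under the lock
//   push();                                      // safe: lock held, room left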

// __kmp_pop_current_task_from_thread: set up current task from called thread
// when team ends
//
// this_thr: thread structure to set current_task in.
void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
                "this_thread=%p, curtask=%p, "
                "curtask_parent=%p\n",
                0, this_thr, this_thr->th.th_current_task,
                this_thr->th.th_current_task->td_parent));

  this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;

  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
                "this_thread=%p, curtask=%p, "
                "curtask_parent=%p\n",
                0, this_thr, this_thr->th.th_current_task,
                this_thr->th.th_current_task->td_parent));
}

// __kmp_push_current_task_to_thread: set up current task in called thread for a
// new team
//
// this_thr: thread structure to set up
// team: team for implicit task data
// tid: thread within team to set up
void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
                                       int tid) {
  // current task of the thread is a parent of the new just created implicit
  // tasks of new team
  KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
                "curtask=%p "
                "parent_task=%p\n",
                tid, this_thr, this_thr->th.th_current_task,
                team->t.t_implicit_task_taskdata[tid].td_parent));

  KMP_DEBUG_ASSERT(this_thr != NULL);

  if (tid == 0) {
    if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
      team->t.t_implicit_task_taskdata[0].td_parent =
          this_thr->th.th_current_task;
      this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
    }
  } else {
    team->t.t_implicit_task_taskdata[tid].td_parent =
        team->t.t_implicit_task_taskdata[0].td_parent;
    this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
  }

  KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
                "curtask=%p "
                "parent_task=%p\n",
                tid, this_thr, this_thr->th.th_current_task,
                team->t.t_implicit_task_taskdata[tid].td_parent));
}

// __kmp_task_start: bookkeeping for a task starting execution
//
// GTID: global thread id of calling thread
// task: task starting execution
// current_task: task suspending
static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
                             kmp_taskdata_t *current_task) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_info_t *thread = __kmp_threads[gtid];

  KA_TRACE(10,
           ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
            gtid, taskdata, current_task));

  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

  // mark currently executing task as suspended
  // TODO: GEH - make sure root team implicit task is initialized properly.
  // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
  current_task->td_flags.executing = 0;

// Add task to stack if tied
#ifdef BUILD_TIED_TASK_STACK
  if (taskdata->td_flags.tiedness == TASK_TIED) {
    __kmp_push_task_stack(gtid, thread, taskdata);
  }
#endif /* BUILD_TIED_TASK_STACK */

  // mark starting task as executing and as current task
  thread->th.th_current_task = taskdata;

  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
                   taskdata->td_flags.tiedness == TASK_UNTIED);
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
                   taskdata->td_flags.tiedness == TASK_UNTIED);
  taskdata->td_flags.started = 1;
  taskdata->td_flags.executing = 1;
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);

  // GEH TODO: shouldn't we pass some sort of location identifier here?
  // APT: yes, we will pass location here.
  // need to store current thread state (in a thread or taskdata structure)
  // before setting work_state, otherwise wrong state is set after end of task

  KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));

  return;
}

#if OMPT_SUPPORT
//------------------------------------------------------------------------------
// __ompt_task_init:
// Initialize OMPT fields maintained by a task. This will only be called after
// ompt_start_tool, so we already know whether ompt is enabled or not.

static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
  // The calls to __ompt_task_init already have the ompt_enabled condition.
  task->ompt_task_info.task_data.value = 0;
  task->ompt_task_info.frame.exit_frame = NULL;
  task->ompt_task_info.frame.enter_frame = NULL;
#if OMP_40_ENABLED
  task->ompt_task_info.ndeps = 0;
  task->ompt_task_info.deps = NULL;
#endif /* OMP_40_ENABLED */
}

// __ompt_task_start:
// Build and trigger task-begin event
static inline void __ompt_task_start(kmp_task_t *task,
                                     kmp_taskdata_t *current_task,
                                     kmp_int32 gtid) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  ompt_task_status_t status = ompt_task_switch;
  if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
    status = ompt_task_yield;
    __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
  }
  /* let OMPT know that we're about to run this task */
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(current_task->ompt_task_info.task_data), status,
        &(taskdata->ompt_task_info.task_data));
  }
  taskdata->ompt_task_info.scheduling_parent = current_task;
}

// __ompt_task_finish:
// Build and trigger final task-schedule event
static inline void
__ompt_task_finish(kmp_task_t *task, kmp_taskdata_t *resumed_task,
                   ompt_task_status_t status = ompt_task_complete) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
      taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
    status = ompt_task_cancel;
  }

  /* let OMPT know that we're returning to the callee task */
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(taskdata->ompt_task_info.task_data), status,
        &((resumed_task ? resumed_task
                        : (taskdata->ompt_task_info.scheduling_parent
                               ? taskdata->ompt_task_info.scheduling_parent
                               : taskdata->td_parent))
              ->ompt_task_info.task_data));
  }
}
#endif

template <bool ompt>
static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
                                               kmp_task_t *task,
                                               void *frame_address,
                                               void *return_address) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;

  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
                "current_task=%p\n",
                gtid, loc_ref, taskdata, current_task));

  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
    // untied task needs to increment counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
    KMP_DEBUG_USE_VAR(counter);
    KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
                  "incremented for task %p\n",
                  gtid, counter, taskdata));
  }

  taskdata->td_flags.task_serial =
      1; // Execute this task immediately, not deferred.
  __kmp_task_start(gtid, task, current_task);

#if OMPT_SUPPORT
  if (ompt) {
    if (current_task->ompt_task_info.frame.enter_frame == NULL) {
      current_task->ompt_task_info.frame.enter_frame =
          taskdata->ompt_task_info.frame.exit_frame = frame_address;
    }
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(parent_info->task_data), &(parent_info->frame),
          &(taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
          return_address);
    }
    __ompt_task_start(task, current_task, gtid);
  }
#endif // OMPT_SUPPORT

  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
                loc_ref, taskdata));
}

#if OMPT_SUPPORT
OMPT_NOINLINE
static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *task,
                                           void *frame_address,
                                           void *return_address) {
  __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
                                           return_address);
}
#endif // OMPT_SUPPORT

// __kmpc_omp_task_begin_if0: report that a given serialized task has started
// execution
//
// loc_ref: source location information; points to beginning of task block.
// gtid: global thread number.
// task: task thunk for the started task.
void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                               kmp_task_t *task) {
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled)) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
                                   OMPT_GET_FRAME_ADDRESS(1),
                                   OMPT_LOAD_RETURN_ADDRESS(gtid));
    return;
  }
#endif
  __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
}

#ifdef TASK_UNUSED
// __kmpc_omp_task_begin: report that a given task has started execution
// NEVER GENERATED BY COMPILER, DEPRECATED!!!
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;

  KA_TRACE(
      10,
      ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
       gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));

  __kmp_task_start(gtid, task, current_task);

  KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));
  return;
}
#endif // TASK_UNUSED

// __kmp_free_task: free the current task space and the space for shareds
//
// gtid: Global thread ID of calling thread
// taskdata: task to free
// thread: thread data structure of caller
static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
                            kmp_info_t *thread) {
  KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
                taskdata));

  // Check to make sure all flags and counters have the correct values
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
  KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
                   taskdata->td_flags.task_serial == 1);
  KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);

  taskdata->td_flags.freed = 1;
  ANNOTATE_HAPPENS_BEFORE(taskdata);
// deallocate the taskdata and shared variable blocks associated with this task
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, taskdata);
#else /* ! USE_FAST_MEMORY */
  __kmp_thread_free(thread, taskdata);
#endif

  KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
}

// __kmp_free_task_and_ancestors: free the current task and ancestors without
// children
//
// gtid: Global thread ID of calling thread
// taskdata: task to free
// thread: thread data structure of caller
static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
                                          kmp_taskdata_t *taskdata,
                                          kmp_info_t *thread) {
#if OMP_45_ENABLED
  // Proxy tasks must always be allowed to free their parents
  // because they can be run in background even in serial mode.
  kmp_int32 team_serial =
      (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
      !taskdata->td_flags.proxy;
#else
  kmp_int32 team_serial =
      taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser;
#endif
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

  kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
  KMP_DEBUG_ASSERT(children >= 0);

  // Now, go up the ancestor tree to see if any ancestors can now be freed.
  while (children == 0) {
    kmp_taskdata_t *parent_taskdata = taskdata->td_parent;

    KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
                  "and freeing itself\n",
                  gtid, taskdata));

    // --- Deallocate my ancestor task ---
    __kmp_free_task(gtid, taskdata, thread);

    taskdata = parent_taskdata;

    // Stop checking ancestors at implicit task instead of walking up ancestor
    // tree to avoid premature deallocation of ancestors.
    if (team_serial || taskdata->td_flags.tasktype == TASK_IMPLICIT)
      return;

    // Predecrement simulated by "- 1" calculation
    children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
    KMP_DEBUG_ASSERT(children >= 0);
  }

  KA_TRACE(
      20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
           "not freeing it yet\n",
           gtid, taskdata, children));
}
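
// Note: KMP_ATOMIC_DEC expands to std::atomic<>::fetch_sub, which returns the
// value *before* the subtraction; the "- 1" above converts that to the
// post-decrement child count. A minimal illustration (sketch, not runtime
// code):
//
//   std::atomic<kmp_int32> n{3};
//   kmp_int32 remaining = n.fetch_sub(1, std::memory_order_acq_rel) - 1;
//   // remaining == 2, the count after this thread's decrement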
696
697// __kmp_task_finish: bookkeeping to do when a task finishes execution
698//
699// gtid: global thread ID for calling thread
700// task: task to be finished
701// resumed_task: task to be resumed. (may be NULL if task is serialized)
702template <bool ompt>
703static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
704 kmp_taskdata_t *resumed_task) {
705 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task)(((kmp_taskdata_t *)task) - 1);
706 kmp_info_t *thread = __kmp_threads[gtid];
707 kmp_task_team_t *task_team =
708 thread->th.th_task_team; // might be NULL for serial teams...
709 kmp_int32 children = 0;
710
711 KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
712 "task %p\n",
713 gtid, taskdata, resumed_task));
714
715 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
716
717// Pop task from stack if tied
718#ifdef BUILD_TIED_TASK_STACK
719 if (taskdata->td_flags.tiedness == TASK_TIED) {
720 __kmp_pop_task_stack(gtid, thread, taskdata);
721 }
722#endif /* BUILD_TIED_TASK_STACK */
723
724 if (taskdata->td_flags.tiedness == TASK_UNTIED) {
725 // untied task needs to check the counter so that the task structure is not
726 // freed prematurely
727 kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
728 KA_TRACE(
729 20,
730 ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
731 gtid, counter, taskdata));
732 if (counter > 0) {
733 // untied task is not done, to be continued possibly by other thread, do
734 // not free it now
735 if (resumed_task == NULL) {
736 KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
737 resumed_task = taskdata->td_parent; // In a serialized task, the resumed
738 // task is the parent
739 }
740 thread->th.th_current_task = resumed_task; // restore current_task
741 resumed_task->td_flags.executing = 1; // resume previous task
742 KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
743 "resuming task %p\n",
744 gtid, taskdata, resumed_task));
745 return;
746 }
747 }
748#if OMPT_SUPPORT
749 if (ompt)
750 __ompt_task_finish(task, resumed_task);
751#endif
752
753 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
754 taskdata->td_flags.complete = 1; // mark the task as completed
755 KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
756 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
757
758 // Only need to keep track of count if team parallel and tasking not
759 // serialized
760 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
761 // Predecrement simulated by "- 1" calculation
762 children =
763 KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
764 KMP_DEBUG_ASSERT(children >= 0);
765#if OMP_40_ENABLED
766 if (taskdata->td_taskgroup)
767 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
768 __kmp_release_deps(gtid, taskdata);
769#if OMP_45_ENABLED
770 } else if (task_team && task_team->tt.tt_found_proxy_tasks) {
771 // if we found proxy tasks there could exist a dependency chain
772 // with the proxy task as origin
773 __kmp_release_deps(gtid, taskdata);
774#endif // OMP_45_ENABLED
775#endif // OMP_40_ENABLED
776 }
777
778 // td_flags.executing must be marked as 0 after __kmp_release_deps has been
779 // called. Otherwise, if a task is executed immediately from the release_deps
780 // code, the flag will be reset to 1 again by this same function
781 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
782 taskdata->td_flags.executing = 0; // suspend the finishing task
783
784 KA_TRACE(
785 20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
786 gtid, taskdata, children));
787
788#if OMP_40_ENABLED
789 /* If the task's destructor thunk flag has been set, we need to invoke the
790 destructor thunk that has been generated by the compiler. The code is
791 placed here, since at this point other tasks might have been released
792 hence overlapping the destructor invocations with some other work in the
793 released tasks. The OpenMP spec is not specific on when the destructors
794 are invoked, so we should be free to choose. */
795 if (taskdata->td_flags.destructors_thunk) {
796 kmp_routine_entry_t destr_thunk = task->data1.destructors;
797 KMP_ASSERT(destr_thunk);
798 destr_thunk(gtid, task);
799 }
800#endif // OMP_40_ENABLED
801
802 // bookkeeping for resuming task:
803 // GEH - note tasking_ser => task_serial
804 KMP_DEBUG_ASSERT(
805 (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
806 taskdata->td_flags.task_serial);
807 if (taskdata->td_flags.task_serial) {
808 if (resumed_task == NULL) {
809 resumed_task = taskdata->td_parent; // In a serialized task, the resumed
810 // task is the parent
811 }
812 } else {
813 KMP_DEBUG_ASSERT(resumed_task !=
814 NULL); // verify that resumed task is passed as argument
815 }
816
817 // Free this task and then ancestor tasks if they have no children.
818 // Restore th_current_task first as suggested by John:
819 // johnmc: if an asynchronous inquiry peers into the runtime system
820 // it doesn't see the freed task as the current task.
821 thread->th.th_current_task = resumed_task;
822 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
823
824 // TODO: GEH - make sure root team implicit task is initialized properly.
825 // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
826 resumed_task->td_flags.executing = 1; // resume previous task
827
828 KA_TRACE(
829 10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
830 gtid, taskdata, resumed_task));
831
832 return;
833}
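// [Editor's note: hedged summary of the untied-task counter check near the
// top of __kmp_task_finish. Each (re)start of an untied task increments
// td_untied_count and each finish decrements it; only the decrement that
// reaches zero falls through to the real completion path, so the task
// structure cannot be freed while another thread may still resume it.]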
834
835template <bool ompt>
836static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
837 kmp_int32 gtid,
838 kmp_task_t *task) {
839 KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
840 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
841 // this routine will provide task to resume
842 __kmp_task_finish<ompt>(gtid, task, NULL);
843
844 KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
845 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
846
847#if OMPT_SUPPORT
848 if (ompt) {
849 omp_frame_t *ompt_frame;
850 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
851 ompt_frame->enter_frame = NULL;
852 }
853#endif
854
855 return;
856}
857
858#if OMPT_SUPPORT
859OMPT_NOINLINE
860void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
861 kmp_task_t *task) {
862 __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
863}
864#endif // OMPT_SUPPORT
865
866// __kmpc_omp_task_complete_if0: report that a task has completed execution
867//
868// loc_ref: source location information; points to end of task block.
869// gtid: global thread number.
870// task: task thunk for the completed task.
871void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
872 kmp_task_t *task) {
873#if OMPT_SUPPORT
874 if (UNLIKELY(ompt_enabled.enabled)) {
875 __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
876 return;
877 }
878#endif
879 __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
880}
881
882#ifdef TASK_UNUSED
883// __kmpc_omp_task_complete: report that a task has completed execution
884// NEVER GENERATED BY COMPILER, DEPRECATED!!!
885void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
886 kmp_task_t *task) {
887 KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
888 loc_ref, KMP_TASK_TO_TASKDATA(task)));
889
890 __kmp_task_finish<false>(gtid, task,
891 NULL); // Not sure how to find task to resume
892
893 KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
894 loc_ref, KMP_TASK_TO_TASKDATA(task)));
895 return;
896}
897#endif // TASK_UNUSED
898
899// __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
900// task for a given thread
901//
902// loc_ref: reference to source location of parallel region
903// this_thr: thread data structure corresponding to implicit task
904// team: team for this_thr
905// tid: thread id of given thread within team
906// set_curr_task: TRUE if need to push current task to thread
907// NOTE: Routine does not set up the implicit task ICVs. This is assumed to
908// have already been done elsewhere.
909// TODO: Get better loc_ref. Value passed in may be NULL
910void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
911 kmp_team_t *team, int tid, int set_curr_task) {
912 kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
913
914 KF_TRACE(
915 10,
916 ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
917 tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
918
919 task->td_task_id = KMP_GEN_TASK_ID();
920 task->td_team = team;
921 // task->td_parent = NULL; // fix for CQ230101 (broken parent task info
922 // in debugger)
923 task->td_ident = loc_ref;
924 task->td_taskwait_ident = NULL;
925 task->td_taskwait_counter = 0;
926 task->td_taskwait_thread = 0;
927
928 task->td_flags.tiedness = TASK_TIED;
929 task->td_flags.tasktype = TASK_IMPLICIT;
930#if OMP_45_ENABLED
931 task->td_flags.proxy = TASK_FULL;
932#endif
933
934 // All implicit tasks are executed immediately, not deferred
935 task->td_flags.task_serial = 1;
936 task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
937 task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
938
939 task->td_flags.started = 1;
940 task->td_flags.executing = 1;
941 task->td_flags.complete = 0;
942 task->td_flags.freed = 0;
943
944#if OMP_40_ENABLED
945 task->td_depnode = NULL;
946#endif
947 task->td_last_tied = task;
948
949 if (set_curr_task) { // only do this init first time thread is created
950 KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
951 // Not used: don't need to deallocate implicit task
952 KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
953#if OMP_40_ENABLED
954 task->td_taskgroup = NULL; // An implicit task does not have taskgroup
955 task->td_dephash = NULL;
956#endif
957 __kmp_push_current_task_to_thread(this_thr, team, tid);
958 } else {
959 KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
960 KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
961 }
962
963#if OMPT_SUPPORT
964 if (UNLIKELY(ompt_enabled.enabled))
965 __ompt_task_init(task, tid);
966#endif
967
968 KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
969 team, task));
970}
971
972// __kmp_finish_implicit_task: Release resources associated with implicit tasks
973// at the end of parallel regions. Some resources are kept for reuse in the next
974// parallel region.
975//
976// thread: thread data structure corresponding to implicit task
977void __kmp_finish_implicit_task(kmp_info_t *thread) {
978 kmp_taskdata_t *task = thread->th.th_current_task;
979 if (task->td_dephash)
980 __kmp_dephash_free_entries(thread, task->td_dephash);
981}
982
983// __kmp_free_implicit_task: Release resources associated with implicit tasks
984// when these tasks are destroyed
985//
986// thread: thread data structure corresponding to implicit task
987void __kmp_free_implicit_task(kmp_info_t *thread) {
988 kmp_taskdata_t *task = thread->th.th_current_task;
989 if (task && task->td_dephash) {
990 __kmp_dephash_free(thread, task->td_dephash);
991 task->td_dephash = NULL;
992 }
993}
994
995// Round up a size to a multiple of val, where val is a power of two: used to
996// insert padding between structures co-allocated using a single malloc() call
997static size_t __kmp_round_up_to_val(size_t size, size_t val) {
998 if (size & (val - 1)) {
999 size &= ~(val - 1);
1000 if (size <= KMP_SIZE_T_MAX - val) {
1001 size += val; // Round up if there is no overflow.
1002 }
1003 }
1004 return size;
1005} // __kmp_round_up_to_val
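// [Editor's note: a worked example of the rounding above, assuming val is a
// power of two such as sizeof(void *) == 8:
//   __kmp_round_up_to_val(61, 8): 61 & 7 == 5 (misaligned), 61 & ~7 == 56,
//   then 56 + 8 == 64, so 64 is returned.
//   __kmp_round_up_to_val(64, 8): 64 & 7 == 0, so 64 is returned unchanged.]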
1006
1007// __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1008//
1009// loc_ref: source location information
1010// gtid: global thread number.
1011// flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1012// task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1013// sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including
1014// private vars accessed in task.
1015// sizeof_shareds: Size in bytes of array of pointers to shared vars accessed
1016// in task.
1017// task_entry: Pointer to task code entry point generated by compiler.
1018// returns: a pointer to the allocated kmp_task_t structure (task).
1019kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1020 kmp_tasking_flags_t *flags,
1021 size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1022 kmp_routine_entry_t task_entry) {
1023 kmp_task_t *task;
1024 kmp_taskdata_t *taskdata;
1025 kmp_info_t *thread = __kmp_threads[gtid];
1026 kmp_team_t *team = thread->th.th_team;
1027 kmp_taskdata_t *parent_task = thread->th.th_current_task;
1028 size_t shareds_offset;
1029
1030 if (!TCR_4(__kmp_init_middle))
1031 __kmp_middle_initialize();
1032
1033 KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1034 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1035 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1036 sizeof_shareds, task_entry));
1037
1038 if (parent_task->td_flags.final) {
1039 if (flags->merged_if0) {
1040 }
1041 flags->final = 1;
1042 }
1043 if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1044 // Untied task encountered causes the TSC algorithm to check entire deque of
1045 // the victim thread. If no untied task encountered, then checking the head
1046 // of the deque should be enough.
1047 KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1048 }
1049
1050#if OMP_45_ENABLED
1051 if (flags->proxy == TASK_PROXY) {
1052 flags->tiedness = TASK_UNTIED;
1053 flags->merged_if0 = 1;
1054
1055 /* are we running in a sequential parallel or tskm_immediate_exec... we need
1056 tasking support enabled */
1057 if ((thread->th.th_task_team) == NULL) {
1058 /* This should only happen if the team is serialized
1059 setup a task team and propagate it to the thread */
1060 KMP_DEBUG_ASSERT(team->t.t_serialized);
1061 KA_TRACE(30,
1062 ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1063 gtid));
1064 __kmp_task_team_setup(
1065 thread, team,
1066 1); // 1 indicates setup the current team regardless of nthreads
1067 thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1068 }
1069 kmp_task_team_t *task_team = thread->th.th_task_team;
1070
1071 /* tasking must be enabled now as the task might not be pushed */
1072 if (!KMP_TASKING_ENABLED(task_team)) {
1073 KA_TRACE(
1074 30,
1075 ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1076 __kmp_enable_tasking(task_team, thread);
1077 kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1078 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1079 // No lock needed since only owner can allocate
1080 if (thread_data->td.td_deque == NULL) {
1081 __kmp_alloc_task_deque(thread, thread_data);
1082 }
1083 }
1084
1085 if (task_team->tt.tt_found_proxy_tasks == FALSE)
1086 TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1087 }
1088#endif
1089
1090 // Calculate shared structure offset including padding after kmp_task_t struct
1091 // to align pointers in shared struct
1092 shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1093 shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
1094
1095 // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1096 KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1097 shareds_offset));
1098 KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1099 sizeof_shareds));
1100
1101// Avoid double allocation here by combining shareds with taskdata
1102#if USE_FAST_MEMORY
1103 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1104 sizeof_shareds);
1105#else /* ! USE_FAST_MEMORY */
1106 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1107 sizeof_shareds);
1108#endif /* USE_FAST_MEMORY */
1109 ANNOTATE_HAPPENS_AFTER(taskdata);
1110
1111 task = KMP_TASKDATA_TO_TASK(taskdata);
1112
1113// Make sure task & taskdata are aligned appropriately
1114#if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1115 KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1116 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1117#else
1118 KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1119 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1120#endif
1121 if (sizeof_shareds > 0) {
1122 // Avoid double allocation here by combining shareds with taskdata
1123 task->shareds = &((char *)taskdata)[shareds_offset];
1124 // Make sure shareds struct is aligned to pointer size
1125 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1126 0);
1127 } else {
1128 task->shareds = NULL;
1129 }
1130 task->routine = task_entry;
1131 task->part_id = 0; // AC: Always start with 0 part id
1132
1133 taskdata->td_task_id = KMP_GEN_TASK_ID();
1134 taskdata->td_team = team;
1135 taskdata->td_alloc_thread = thread;
1136 taskdata->td_parent = parent_task;
1137 taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1138 KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1139 taskdata->td_ident = loc_ref;
1140 taskdata->td_taskwait_ident = NULL;
1141 taskdata->td_taskwait_counter = 0;
1142 taskdata->td_taskwait_thread = 0;
1143 KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1144#if OMP_45_ENABLED
1145 // avoid copying icvs for proxy tasks
1146 if (flags->proxy == TASK_FULL)
1147#endif
1148 copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1149
1150 taskdata->td_flags.tiedness = flags->tiedness;
1151 taskdata->td_flags.final = flags->final;
1152 taskdata->td_flags.merged_if0 = flags->merged_if0;
1153#if OMP_40_ENABLED
1154 taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
1155#endif // OMP_40_ENABLED
1156#if OMP_45_ENABLED
1157 taskdata->td_flags.proxy = flags->proxy;
1158 taskdata->td_task_team = thread->th.th_task_team;
1159 taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1160#endif
1161 taskdata->td_flags.tasktype = TASK_EXPLICIT;
1162
1163 // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1164 taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1165
1166 // GEH - TODO: fix this to copy parent task's value of team_serial flag
1167 taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1168
1169 // GEH - Note we serialize the task if the team is serialized to make sure
1170 // implicit parallel region tasks are not left until program termination to
1171 // execute. Also, it helps locality to execute immediately.
1172
1173 taskdata->td_flags.task_serial =
1174 (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1175 taskdata->td_flags.tasking_ser);
1176
1177 taskdata->td_flags.started = 0;
1178 taskdata->td_flags.executing = 0;
1179 taskdata->td_flags.complete = 0;
1180 taskdata->td_flags.freed = 0;
1181
1182 taskdata->td_flags.native = flags->native;
1183
1184 KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1185 // start at one because counts current task and children
1186 KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1187#if OMP_40_ENABLED
1188 taskdata->td_taskgroup =
1189 parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1190 taskdata->td_dephash = NULL;
1191 taskdata->td_depnode = NULL;
1192#endif
1193 if (flags->tiedness == TASK_UNTIED)
1194 taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1195 else
1196 taskdata->td_last_tied = taskdata;
1197
1198#if OMPT_SUPPORT
1199 if (UNLIKELY(ompt_enabled.enabled))
1200 __ompt_task_init(taskdata, gtid);
1201#endif
1202// Only need to keep track of child task counts if team parallel and tasking not
1203// serialized or if it is a proxy task
1204#if OMP_45_ENABLED
1205 if (flags->proxy == TASK_PROXY ||
1206 !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1207#else
1208 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1209#endif
1210 {
1211 KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1212#if OMP_40_ENABLED
1213 if (parent_task->td_taskgroup)
1214 KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
1215#endif
1216 // Only need to keep track of allocated child tasks for explicit tasks since
1217 // implicit not deallocated
1218 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1219 KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1220 }
1221 }
1222
1223 KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1224 gtid, taskdata, taskdata->td_parent));
1225 ANNOTATE_HAPPENS_BEFORE(task);
1226
1227 return task;
1228}
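// [Editor's note: illustrative sketch of the single-allocation layout built
// above; offsets follow the code, exact field sizes are the runtime's.
//   [ kmp_taskdata_t | kmp_task_t + private vars | pad | shareds ]
//   ^taskdata        ^task = taskdata + 1        ^taskdata + shareds_offset
// This is why KMP_TASK_TO_TASKDATA(task) is ((kmp_taskdata_t *)task) - 1,
// KMP_TASKDATA_TO_TASK(taskdata) is (kmp_task_t *)(taskdata + 1), and
// task->shareds points at &((char *)taskdata)[shareds_offset].]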
1229
1230kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1231 kmp_int32 flags, size_t sizeof_kmp_task_t,
1232 size_t sizeof_shareds,
1233 kmp_routine_entry_t task_entry) {
1234 kmp_task_t *retval;
1235 kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1236
1237 input_flags->native = FALSE;
1238// __kmp_task_alloc() sets up all other runtime flags
1239
1240#if OMP_45_ENABLED
1241 KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) "
1242 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1243 gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1244 input_flags->proxy ? "proxy" : "", sizeof_kmp_task_t,
1245 sizeof_shareds, task_entry));
1246#else
1247 KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s) "
1248 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1249 gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1250 sizeof_kmp_task_t, sizeof_shareds, task_entry));
1251#endif
1252
1253 retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1254 sizeof_shareds, task_entry);
1255
1256 KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1257
1258 return retval;
1259}
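// [Editor's note: hedged sketch of the flags reinterpretation above. The
// compiler hands the runtime a packed kmp_int32; the runtime views the same
// 32 bits through the kmp_tasking_flags_t bitfield:
//   kmp_int32 flags = /* bits set by the compiler */ 1;
//   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
//   if (input_flags->tiedness) { /* tied task */ }
// This relies on both types being exactly 32 bits wide, as the runtime
// assumes.]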
1260
1261// __kmp_invoke_task: invoke the specified task
1262//
1263// gtid: global thread ID of caller
1264// task: the task to invoke
1265// current_task: the task to resume after task invocation
1266static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1267 kmp_taskdata_t *current_task) {
1268 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1269 kmp_info_t *thread;
1270#if OMP_40_ENABLED
1271 int discard = 0 /* false */;
1272#endif
1273 KA_TRACE(
1274 30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1275 gtid, taskdata, current_task));
1276 KMP_DEBUG_ASSERT(task);
1277#if OMP_45_ENABLED
1278 if (taskdata->td_flags.proxy == TASK_PROXY &&
1279 taskdata->td_flags.complete == 1) {
1280 // This is a proxy task that was already completed but it needs to run
1281 // its bottom-half finish
1282 KA_TRACE(
1283 30,
1284 ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1285 gtid, taskdata));
1286
1287 __kmp_bottom_half_finish_proxy(gtid, task);
1288
1289 KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1290 "proxy task %p, resuming task %p\n",
1291 gtid, taskdata, current_task));
1292
1293 return;
1294 }
1295#endif
1296
1297#if OMPT_SUPPORT
1298 // For untied tasks, the first task executed only calls __kmpc_omp_task and
1299 // does not execute code.
1300 ompt_thread_info_t oldInfo;
1301 if (UNLIKELY(ompt_enabled.enabled)) {
1302 // Store the threads states and restore them after the task
1303 thread = __kmp_threads[gtid];
1304 oldInfo = thread->th.ompt_thread_info;
1305 thread->th.ompt_thread_info.wait_id = 0;
1306 thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1307 ? omp_state_work_serial
1308 : omp_state_work_parallel;
1309 taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
1310 }
1311#endif
1312
1313#if OMP_45_ENABLED
1314 // Proxy tasks are not handled by the runtime
1315 if (taskdata->td_flags.proxy != TASK_PROXY) {
1316#endif
1317 ANNOTATE_HAPPENS_AFTER(task);
1318 __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1319#if OMP_45_ENABLED
1320 }
1321#endif
1322
1323#if OMP_40_ENABLED
1324 // TODO: cancel tasks if the parallel region has also been cancelled
1325 // TODO: check if this sequence can be hoisted above __kmp_task_start
1326 // if cancellation has been enabled for this run ...
1327 if (__kmp_omp_cancellation) {
1328 thread = __kmp_threads[gtid];
1329 kmp_team_t *this_team = thread->th.th_team;
1330 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1331 if ((taskgroup && taskgroup->cancel_request) ||
1332 (this_team->t.t_cancel_request == cancel_parallel)) {
1333#if OMPT_SUPPORT && OMPT_OPTIONAL
1334 ompt_data_t *task_data;
1335 if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1336 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1337 ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1338 task_data,
1339 ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1340 : ompt_cancel_parallel) |
1341 ompt_cancel_discarded_task,
1342 NULL);
1343 }
1344#endif
1345 KMP_COUNT_BLOCK(TASK_cancelled);
1346 // this task belongs to a task group and we need to cancel it
1347 discard = 1 /* true */;
1348 }
1349 }
1350
1351 // Invoke the task routine and pass in relevant data.
1352 // Thunks generated by gcc take a different argument list.
1353 if (!discard) {
1354 if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1355 taskdata->td_last_tied = current_task->td_last_tied;
1356 KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1357 }
1358#if KMP_STATS_ENABLED
1359 KMP_COUNT_BLOCK(TASK_executed);
1360 switch (KMP_GET_THREAD_STATE()) {
1361 case FORK_JOIN_BARRIER:
1362 KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1363 break;
1364 case PLAIN_BARRIER:
1365 KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1366 break;
1367 case TASKYIELD:
1368 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1369 break;
1370 case TASKWAIT:
1371 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1372 break;
1373 case TASKGROUP:
1374 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1375 break;
1376 default:
1377 KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1378 break;
1379 }
1380#endif // KMP_STATS_ENABLED
1381#endif // OMP_40_ENABLED
1382
1383// OMPT task begin
1384#if OMPT_SUPPORT
1385 if (UNLIKELY(ompt_enabled.enabled))
1386 __ompt_task_start(task, current_task, gtid);
1387#endif
1388
1389#if USE_ITT_BUILD && USE_ITT_NOTIFY
1390 kmp_uint64 cur_time;
1391 kmp_int32 kmp_itt_count_task =
1392 __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1393 current_task->td_flags.tasktype == TASK_IMPLICIT;
1394 if (kmp_itt_count_task) {
1395 thread = __kmp_threads[gtid];
1396 // Time outer level explicit task on barrier for adjusting imbalance time
1397 if (thread->th.th_bar_arrive_time)
1398 cur_time = __itt_get_timestamp();
1399 else
1400 kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1401 }
1402#endif
1403
1404#ifdef KMP_GOMP_COMPAT
1405 if (taskdata->td_flags.native) {
1406 ((void (*)(void *))(*(task->routine)))(task->shareds);
1407 } else
1408#endif /* KMP_GOMP_COMPAT */
1409 {
1410 (*(task->routine))(gtid, task);
1411 }
1412 KMP_POP_PARTITIONED_TIMER();
1413
1414#if USE_ITT_BUILD && USE_ITT_NOTIFY
1415 if (kmp_itt_count_task) {
1416 // Barrier imbalance - adjust arrive time with the task duration
1417 thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1418 }
1419#endif
1420
1421#if OMP_40_ENABLED
1422 }
1423#endif // OMP_40_ENABLED
1424
1425
1426#if OMP_45_ENABLED
1427 // Proxy tasks are not handled by the runtime
1428 if (taskdata->td_flags.proxy != TASK_PROXY) {
1429#endif
1430 ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
1431#if OMPT_SUPPORT
1432 if (UNLIKELY(ompt_enabled.enabled)) {
1433 thread->th.ompt_thread_info = oldInfo;
1434 if (taskdata->td_flags.tiedness == TASK_TIED) {
1435 taskdata->ompt_task_info.frame.exit_frame = NULL;
1436 }
1437 __kmp_task_finish<true>(gtid, task, current_task);
1438 } else
1439#endif
1440 __kmp_task_finish<false>(gtid, task, current_task);
1441#if OMP_45_ENABLED
1442 }
1443#endif
1444
1445 KA_TRACE(
1446 30,
1447 ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1448 gtid, taskdata, current_task));
1449 return;
1450}
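// [Editor's note: hedged sketch of the two entry-point conventions dispatched
// in the invocation block above:
//   kmp_int32 kmp_entry(kmp_int32 gtid, kmp_task_t *task); // native KMP call
//   void gomp_entry(void *shareds);                        // GOMP-compat thunk
// The KMP_GOMP_COMPAT branch casts task->routine to the second signature and
// passes task->shareds directly.]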
1451
1452// __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1453//
1454// loc_ref: location of original task pragma (ignored)
1455// gtid: Global Thread ID of encountering thread
1456// new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task''
1457// Returns:
1458// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1459// be resumed later.
1460// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1461// resumed later.
1462kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1463 kmp_task_t *new_task) {
1464 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1465
1466 KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1467 loc_ref, new_taskdata));
1468
1469#if OMPT_SUPPORT
1470 kmp_taskdata_t *parent;
1471 if (UNLIKELY(ompt_enabled.enabled)) {
1472 parent = new_taskdata->td_parent;
1473 if (ompt_enabled.ompt_callback_task_create) {
1474 ompt_data_t task_data = ompt_data_none;
1475 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1476 parent ? &(parent->ompt_task_info.task_data) : &task_data,
1477 parent ? &(parent->ompt_task_info.frame) : NULL,
1478 &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1479 OMPT_GET_RETURN_ADDRESS(0));
1480 }
1481 }
1482#endif
1483
1484 /* Should we execute the new task or queue it? For now, let's just always try
1485 to queue it. If the queue fills up, then we'll execute it. */
1486
1487 if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1488 { // Execute this task immediately
1489 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1490 new_taskdata->td_flags.task_serial = 1;
1491 __kmp_invoke_task(gtid, new_task, current_task);
1492 }
1493
1494 KA_TRACE(
1495 10,
1496 ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1497 "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1498 gtid, loc_ref, new_taskdata));
1499
1500 ANNOTATE_HAPPENS_BEFORE(new_task);
1501#if OMPT_SUPPORT
1502 if (UNLIKELY(ompt_enabled.enabled)) {
1503 parent->ompt_task_info.frame.enter_frame = NULL;
1504 }
1505#endif
1506 return TASK_CURRENT_NOT_QUEUED;
1507}
1508
1509// __kmp_omp_task: Schedule a non-thread-switchable task for execution
1510//
1511// gtid: Global Thread ID of encountering thread
1512// new_task: non-thread-switchable task thunk allocated by __kmp_omp_task_alloc()
1513// serialize_immediate: if TRUE then if the task is executed immediately its
1514// execution will be serialized
1515// Returns:
1516// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1517// be resumed later.
1518// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1519// resumed later.
1520kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1521 bool serialize_immediate) {
1522 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1523
1524/* Should we execute the new task or queue it? For now, let's just always try to
1525 queue it. If the queue fills up, then we'll execute it. */
1526#if OMP_45_ENABLED
1527 if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1528 __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1529#else
1530 if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1531#endif
1532 { // Execute this task immediately
1533 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1534 if (serialize_immediate)
1535 new_taskdata->td_flags.task_serial = 1;
1536 __kmp_invoke_task(gtid, new_task, current_task);
1537 }
1538
1539 ANNOTATE_HAPPENS_BEFORE(new_task);
1540 return TASK_CURRENT_NOT_QUEUED;
1541}
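// [Editor's note: minimal sketch of the deferral policy above, hypothetical
// names only:
//   if (proxy || push(deque, task) == NOT_PUSHED) // deque full or tasking off
//     invoke_inline(task);                        // run in encountering thread
// The runtime always tries to queue first; inline execution is the fallback,
// and proxy tasks (OMP_45_ENABLED) bypass the push entirely.]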
1542
1543// __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1544// non-thread-switchable task from the parent thread only!
1545//
1546// loc_ref: location of original task pragma (ignored)
1547// gtid: Global Thread ID of encountering thread
1548// new_task: non-thread-switchable task thunk allocated by
1549// __kmp_omp_task_alloc()
1550// Returns:
1551// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1552// be resumed later.
1553// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1554// resumed later.
1555kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1556 kmp_task_t *new_task) {
1557 kmp_int32 res;
1558 KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1559
1560#if KMP_DEBUG || OMPT_SUPPORT
1561 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1562#endif
1563 KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1564 new_taskdata));
1565
1566#if OMPT_SUPPORT
1567 kmp_taskdata_t *parent = NULL;
1568 if (UNLIKELY(ompt_enabled.enabled)) {
1569 if (!new_taskdata->td_flags.started) {
1570 OMPT_STORE_RETURN_ADDRESS(gtid);
1571 parent = new_taskdata->td_parent;
1572 if (!parent->ompt_task_info.frame.enter_frame) {
1573 parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1574 }
1575 if (ompt_enabled.ompt_callback_task_create) {
1576 ompt_data_t task_data = ompt_data_none;
1577 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1578 parent ? &(parent->ompt_task_info.task_data) : &task_data,
1579 parent ? &(parent->ompt_task_info.frame) : NULL,
1580 &(new_taskdata->ompt_task_info.task_data),
1581 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1582 OMPT_LOAD_RETURN_ADDRESS(gtid));
1583 }
1584 } else {
1585 // We are scheduling the continuation of an UNTIED task.
1586 // Scheduling back to the parent task.
1587 __ompt_task_finish(new_task,
1588 new_taskdata->ompt_task_info.scheduling_parent,
1589 ompt_task_switch);
1590 new_taskdata->ompt_task_info.frame.exit_frame = NULL;
1591 }
1592 }
1593#endif
1594
1595 res = __kmp_omp_task(gtid, new_task, true);
1596
1597 KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1598 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1599 gtid, loc_ref, new_taskdata));
1600#if OMPT_SUPPORT
1601 if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1602 parent->ompt_task_info.frame.enter_frame = NULL;
1603 }
1604#endif
1605 return res;
1606}
1607
1608 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
1609 // a taskloop task with the correct OMPT return address
1610 //
1611 // loc_ref: location of original task pragma (ignored)
1612 // gtid: Global Thread ID of encountering thread
1613 // new_task: non-thread-switchable task thunk allocated by
1614 //           __kmp_omp_task_alloc()
1615 // codeptr_ra: return address for OMPT callback
1616 // Returns:
1617 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1618 //    be resumed later.
1619 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1620 //    resumed later.
1621 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
1622                                   kmp_task_t *new_task, void *codeptr_ra) {
1623   kmp_int32 res;
1624   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1625
1626 #if KMP_DEBUG || OMPT_SUPPORT
1627   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1628 #endif
1629   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1630                 new_taskdata));
1631
1632 #if OMPT_SUPPORT
1633   kmp_taskdata_t *parent = NULL;
1634   if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1635     parent = new_taskdata->td_parent;
1636     if (!parent->ompt_task_info.frame.enter_frame)
1637       parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1638     if (ompt_enabled.ompt_callback_task_create) {
1639       ompt_data_t task_data = ompt_data_none;
1640       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1641           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1642           parent ? &(parent->ompt_task_info.frame) : NULL,
1643           &(new_taskdata->ompt_task_info.task_data),
1644           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1645           codeptr_ra);
1646     }
1647   }
1648 #endif
1649
1650   res = __kmp_omp_task(gtid, new_task, true);
1651
1652   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1653                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1654                 gtid, loc_ref, new_taskdata));
1655 #if OMPT_SUPPORT
1656   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1657     parent->ompt_task_info.frame.enter_frame = NULL;
1658   }
1659 #endif
1660   return res;
1661 }
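
For context, a minimal user-level sketch of the construct this wrapper ultimately serves; the compiler outlines the loop and the generated tasks reach __kmp_omp_task through __kmp_omp_taskloop_task. The grainsize value and the process() helper are illustrative only, not taken from this file; compile with an OpenMP compiler, e.g. clang++ -fopenmp.

void process(int i);

void run(int n) {
#pragma omp parallel
#pragma omp single
#pragma omp taskloop grainsize(4) // each generated task covers >= 4 iterations
  for (int i = 0; i < n; ++i)
    process(i);
}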
1662
1663 template <bool ompt>
1664 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1665                                               void *frame_address,
1666                                               void *return_address) {
1667   kmp_taskdata_t *taskdata;
1668   kmp_info_t *thread;
1669   int thread_finished = FALSE;
1670   KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1671
1672   KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1673
1674   if (__kmp_tasking_mode != tskm_immediate_exec) {
1675     thread = __kmp_threads[gtid];
1676     taskdata = thread->th.th_current_task;
1677
1678 #if OMPT_SUPPORT && OMPT_OPTIONAL
1679     ompt_data_t *my_task_data;
1680     ompt_data_t *my_parallel_data;
1681
1682     if (ompt) {
1683       my_task_data = &(taskdata->ompt_task_info.task_data);
1684       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1685
1686       taskdata->ompt_task_info.frame.enter_frame = frame_address;
1687
1688       if (ompt_enabled.ompt_callback_sync_region) {
1689         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1690             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1691             my_task_data, return_address);
1692       }
1693
1694       if (ompt_enabled.ompt_callback_sync_region_wait) {
1695         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1696             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1697             my_task_data, return_address);
1698       }
1699     }
1700 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1701
1702 // Debugger: The taskwait is active. Store location and thread encountered the
1703 // taskwait.
1704 #if USE_ITT_BUILD
1705 // Note: These values are used by ITT events as well.
1706 #endif /* USE_ITT_BUILD */
1707     taskdata->td_taskwait_counter += 1;
1708     taskdata->td_taskwait_ident = loc_ref;
1709     taskdata->td_taskwait_thread = gtid + 1;
1710
1711 #if USE_ITT_BUILD
1712     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1713     if (itt_sync_obj != NULL)
1714       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1715 #endif /* USE_ITT_BUILD */
1716
1717     bool must_wait =
1718         !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1719
1720 #if OMP_45_ENABLED
1721     must_wait = must_wait || (thread->th.th_task_team != NULL &&
1722                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
1723 #endif
1724     if (must_wait) {
1725       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
1726                              &(taskdata->td_incomplete_child_tasks)),
1727                        0U);
1728       while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
1729         flag.execute_tasks(thread, gtid, FALSE,
1730                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1731                            __kmp_task_stealing_constraint);
1732       }
1733     }
1734 #if USE_ITT_BUILD
1735     if (itt_sync_obj != NULL)
1736       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1737 #endif /* USE_ITT_BUILD */
1738
1739     // Debugger: The taskwait is completed. Location remains, but thread is
1740     // negated.
1741     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1742
1743 #if OMPT_SUPPORT && OMPT_OPTIONAL
1744     if (ompt) {
1745       if (ompt_enabled.ompt_callback_sync_region_wait) {
1746         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1747             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1748             my_task_data, return_address);
1749       }
1750       if (ompt_enabled.ompt_callback_sync_region) {
1751         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1752             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1753             my_task_data, return_address);
1754       }
1755       taskdata->ompt_task_info.frame.enter_frame = NULL;
1756     }
1757 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1758
1759     ANNOTATE_HAPPENS_AFTER(taskdata);
1760   }
1761
1762   KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1763                 "returning TASK_CURRENT_NOT_QUEUED\n",
1764                 gtid, taskdata));
1765
1766   return TASK_CURRENT_NOT_QUEUED;
1767 }
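
A simplified, single-threaded sketch of the wait loop at lines 1724-1733, using stand-in types rather than the runtime's own (MiniPool and its members are invented for illustration): a thread at taskwait does not block; it keeps executing queued tasks until its counter of incomplete children drains to zero.

#include <atomic>
#include <functional>
#include <queue>
#include <utility>

struct MiniPool {
  std::queue<std::function<void()>> tasks;      // stand-in for the task deque
  std::atomic<unsigned> incomplete_children{0}; // cf. td_incomplete_child_tasks

  void spawn(std::function<void()> f) {
    incomplete_children.fetch_add(1, std::memory_order_relaxed);
    tasks.push(std::move(f));
  }

  void execute_one() { // cf. flag.execute_tasks(...)
    std::function<void()> f = std::move(tasks.front());
    tasks.pop();
    f();
    incomplete_children.fetch_sub(1, std::memory_order_acq_rel);
  }

  void taskwait() { // same shape as the must_wait loop above
    while (incomplete_children.load(std::memory_order_acquire) != 0)
      execute_one();
  }
};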
1768
1769 #if OMPT_SUPPORT && OMPT_OPTIONAL
1770 OMPT_NOINLINE
1771 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1772                                           void *frame_address,
1773                                           void *return_address) {
1774   return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1775                                             return_address);
1776 }
1777 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1778
1779 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1780 // complete
1781 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1782 #if OMPT_SUPPORT && OMPT_OPTIONAL
1783   if (UNLIKELY(ompt_enabled.enabled)) {
1784     OMPT_STORE_RETURN_ADDRESS(gtid);
1785     return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(1),
1786                                     OMPT_LOAD_RETURN_ADDRESS(gtid));
1787   }
1788 #endif
1789   return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1790 }
1791
1792 // __kmpc_omp_taskyield: switch to a different task
1793 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1794   kmp_taskdata_t *taskdata;
1795   kmp_info_t *thread;
1796   int thread_finished = FALSE;
1797
1798   KMP_COUNT_BLOCK(OMP_TASKYIELD);
1799   KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
1800
1801   KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1802                 gtid, loc_ref, end_part));
1803
1804   if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
1805     thread = __kmp_threads[gtid];
1806     taskdata = thread->th.th_current_task;
1807 // Should we model this as a task wait or not?
1808 // Debugger: The taskwait is active. Store location and thread encountered the
1809 // taskwait.
1810 #if USE_ITT_BUILD
1811 // Note: These values are used by ITT events as well.
1812 #endif /* USE_ITT_BUILD */
1813     taskdata->td_taskwait_counter += 1;
1814     taskdata->td_taskwait_ident = loc_ref;
1815     taskdata->td_taskwait_thread = gtid + 1;
1816
1817 #if USE_ITT_BUILD
1818     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1819     if (itt_sync_obj != NULL)
1820       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1821 #endif /* USE_ITT_BUILD */
1822     if (!taskdata->td_flags.team_serial) {
1823       kmp_task_team_t *task_team = thread->th.th_task_team;
1824       if (task_team != NULL) {
1825         if (KMP_TASKING_ENABLED(task_team)) {
1826 #if OMPT_SUPPORT
1827           if (UNLIKELY(ompt_enabled.enabled))
1828             thread->th.ompt_thread_info.ompt_task_yielded = 1;
1829 #endif
1830           __kmp_execute_tasks_32(
1831               thread, gtid, NULL, FALSE,
1832               &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1833               __kmp_task_stealing_constraint);
1834 #if OMPT_SUPPORT
1835           if (UNLIKELY(ompt_enabled.enabled))
1836             thread->th.ompt_thread_info.ompt_task_yielded = 0;
1837 #endif
1838         }
1839       }
1840     }
1841 #if USE_ITT_BUILD
1842     if (itt_sync_obj != NULL)
1843       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1844 #endif /* USE_ITT_BUILD */
1845
1846     // Debugger: The taskwait is completed. Location remains, but thread is
1847     // negated.
1848     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1849   }
1850
1851   KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
1852                 "returning TASK_CURRENT_NOT_QUEUED\n",
1853                 gtid, taskdata));
1854
1855   return TASK_CURRENT_NOT_QUEUED;
1856 }
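
A user-level sketch of the construct served above, modeled on the familiar lock-polling example (the helper below is illustrative, not from this file); taskyield is only a scheduling hint, and as the code above shows it degrades to a no-op when tasking is not enabled.

#include <omp.h>

void poll_then_work(omp_lock_t *lock) {
  while (!omp_test_lock(lock)) {
#pragma omp taskyield // hint: let this thread run other tasks while waiting
  }
  // ... exclusive work under the lock ...
  omp_unset_lock(lock);
}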
1857
1858 #if OMP_50_ENABLED
1859 // Task Reduction implementation
1860
1861 typedef struct kmp_task_red_flags {
1862   unsigned lazy_priv : 1; // hint: (1) use lazy allocation (big objects)
1863   unsigned reserved31 : 31;
1864 } kmp_task_red_flags_t;
1865
1866 // internal structure for reduction data item related info
1867 typedef struct kmp_task_red_data {
1868   void *reduce_shar; // shared reduction item
1869   size_t reduce_size; // size of data item
1870   void *reduce_priv; // thread specific data
1871   void *reduce_pend; // end of private data for comparison op
1872   void *reduce_init; // data initialization routine
1873   void *reduce_fini; // data finalization routine
1874   void *reduce_comb; // data combiner routine
1875   kmp_task_red_flags_t flags; // flags for additional info from compiler
1876 } kmp_task_red_data_t;
1877
1878 // structure sent us by compiler - one per reduction item
1879 typedef struct kmp_task_red_input {
1880   void *reduce_shar; // shared reduction item
1881   size_t reduce_size; // size of data item
1882   void *reduce_init; // data initialization routine
1883   void *reduce_fini; // data finalization routine
1884   void *reduce_comb; // data combiner routine
1885   kmp_task_red_flags_t flags; // flags for additional info from compiler
1886 } kmp_task_red_input_t;
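
A hand-written sketch (not actual compiler output) of how one reduction item, say for reduction(+ : sum) over a long, could be described through the kmp_task_red_input_t layout above; red_init, red_comb, and init_sum_reduction are invented names.

static void red_init(void *p) { *(long *)p = 0; }                 // identity
static void red_comb(void *lhs, void *rhs) { *(long *)lhs += *(long *)rhs; }

static void *init_sum_reduction(int gtid, long *sum) {
  kmp_task_red_input_t in;
  in.reduce_shar = sum;              // the shared accumulator
  in.reduce_size = sizeof(long);     // rounded up to a cache line internally
  in.reduce_init = (void *)red_init; // optional; may be NULL
  in.reduce_fini = NULL;             // nothing to destroy for a long
  in.reduce_comb = (void *)red_comb; // combiner is mandatory (asserted below)
  in.flags.lazy_priv = 0;            // eager per-thread storage
  in.flags.reserved31 = 0;
  return __kmpc_task_reduction_init(gtid, 1, &in);
}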
1887
1888 /*!
1889 @ingroup TASKING
1890 @param gtid Global thread ID
1891 @param num Number of data items to reduce
1892 @param data Array of data for reduction
1893 @return The taskgroup identifier
1894
1895 Initialize task reduction for the taskgroup.
1896 */
1897 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
1898   kmp_info_t *thread = __kmp_threads[gtid];
1899   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
1900   kmp_int32 nth = thread->th.th_team_nproc;
1901   kmp_task_red_input_t *input = (kmp_task_red_input_t *)data;
1902   kmp_task_red_data_t *arr;
1903
1904   // check input data just in case
1905   KMP_ASSERT(tg != NULL);
1906   KMP_ASSERT(data != NULL);
1907   KMP_ASSERT(num > 0);
1908   if (nth == 1) {
1909     KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
1910                   gtid, tg));
1911     return (void *)tg;
1912   }
1913   KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
1914                 gtid, tg, num));
1915   arr = (kmp_task_red_data_t *)__kmp_thread_malloc(
1916       thread, num * sizeof(kmp_task_red_data_t));
1917   for (int i = 0; i < num; ++i) {
1918     void (*f_init)(void *) = (void (*)(void *))(input[i].reduce_init);
1919     size_t size = input[i].reduce_size - 1;
1920     // round the size up to cache line per thread-specific item
1921     size += CACHE_LINE - size % CACHE_LINE;
1922     KMP_ASSERT(input[i].reduce_comb != NULL); // combiner is mandatory
1923     arr[i].reduce_shar = input[i].reduce_shar;
1924     arr[i].reduce_size = size;
1925     arr[i].reduce_init = input[i].reduce_init;
1926     arr[i].reduce_fini = input[i].reduce_fini;
1927     arr[i].reduce_comb = input[i].reduce_comb;
1928     arr[i].flags = input[i].flags;
1929     if (!input[i].flags.lazy_priv) {
1930       // allocate cache-line aligned block and fill it with zeros
1931       arr[i].reduce_priv = __kmp_allocate(nth * size);
1932       arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
1933       if (f_init != NULL) {
1934         // initialize thread-specific items
1935         for (int j = 0; j < nth; ++j) {
1936           f_init((char *)(arr[i].reduce_priv) + j * size);
1937         }
1938       }
1939     } else {
1940       // only allocate space for pointers now,
1941       // objects will be lazily allocated/initialized once requested
1942       arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
1943     }
1944   }
1945   tg->reduce_data = (void *)arr;
1946   tg->reduce_num_data = num;
1947   return (void *)tg;
1948 }
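
The arithmetic at lines 1919-1921 is the usual round-up-to-a-multiple idiom; a standalone equivalent for this build's CACHE_LINE of 64, assuming n >= 1 (the reduce_size of a real item is nonzero):

#include <cstddef>

static size_t round_up_to_cache_line(size_t n) {
  size_t size = n - 1;    // n >= 1 assumed; n == 0 would wrap around
  size += 64 - size % 64; // e.g. 1 -> 64, 64 -> 64, 65 -> 128
  return size;
}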
1949
1950 /*!
1951 @ingroup TASKING
1952 @param gtid Global thread ID
1953 @param tskgrp The taskgroup ID (optional)
1954 @param data Shared location of the item
1955 @return The pointer to per-thread data
1956
1957 Get thread-specific location of data item
1958 */
1959 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
1960   kmp_info_t *thread = __kmp_threads[gtid];
1961   kmp_int32 nth = thread->th.th_team_nproc;
1962   if (nth == 1)
1963     return data; // nothing to do
1964
1965   kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
1966   if (tg == NULL)
1967     tg = thread->th.th_current_task->td_taskgroup;
1968   KMP_ASSERT(tg != NULL);
1969   kmp_task_red_data_t *arr = (kmp_task_red_data_t *)(tg->reduce_data);
1970   kmp_int32 num = tg->reduce_num_data;
1971   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1972
1973   KMP_ASSERT(data != NULL);
1974   while (tg != NULL) {
1975     for (int i = 0; i < num; ++i) {
1976       if (!arr[i].flags.lazy_priv) {
1977         if (data == arr[i].reduce_shar ||
1978             (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
1979           return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
1980       } else {
1981         // check shared location first
1982         void **p_priv = (void **)(arr[i].reduce_priv);
1983         if (data == arr[i].reduce_shar)
1984           goto found;
1985         // check if we get some thread specific location as parameter
1986         for (int j = 0; j < nth; ++j)
1987           if (data == p_priv[j])
1988             goto found;
1989         continue; // not found, continue search
1990       found:
1991         if (p_priv[tid] == NULL) {
1992           // allocate thread specific object lazily
1993           void (*f_init)(void *) = (void (*)(void *))(arr[i].reduce_init);
1994           p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
1995           if (f_init != NULL) {
1996             f_init(p_priv[tid]);
1997           }
1998         }
1999         return p_priv[tid];
2000       }
2001     }
2002     tg = tg->parent;
2003     arr = (kmp_task_red_data_t *)(tg->reduce_data);
2004     num = tg->reduce_num_data;
2005   }
2006   KMP_ASSERT2(0, "Unknown task reduction item");
2007   return NULL; // ERROR, this line never executed
2008 }
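
A minimal stand-in sketch of the eager-case lookup above (Item and th_data are invented names): because the per-thread copies sit in one contiguous, cache-line-strided block, matching data against either the shared item or the [reduce_priv, reduce_pend) range is enough to locate this thread's slot by pointer arithmetic.

#include <cstddef>

struct Item {
  void *shar;  // shared reduction item (cf. reduce_shar)
  char *priv;  // start of nth per-thread copies (cf. reduce_priv)
  char *pend;  // one past the last copy (cf. reduce_pend)
  size_t size; // per-thread stride, a cache-line multiple
};

static void *th_data(const Item &it, const void *data, int tid) {
  if (data == it.shar ||
      (data >= (const void *)it.priv && data < (const void *)it.pend))
    return it.priv + tid * it.size; // this thread's private copy
  return nullptr;                   // not this item; caller keeps searching
}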
2009
2010 // Finalize task reduction.
2011 // Called from __kmpc_end_taskgroup()
2012 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2013   kmp_int32 nth = th->th.th_team_nproc;
2014   KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
2015   kmp_task_red_data_t *arr = (kmp_task_red_data_t *)tg->reduce_data;
2016   kmp_int32 num = tg->reduce_num_data;
2017   for (int i = 0; i < num; ++i) {
2018     void *sh_data = arr[i].reduce_shar;
2019     void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2020     void (*f_comb)(void *, void *) =
2021         (void (*)(void *, void *))(arr[i].reduce_comb);
2022     if (!arr[i].flags.lazy_priv) {
2023       void *pr_data = arr[i].reduce_priv;
2024       size_t size = arr[i].reduce_size;
2025       for (int j = 0; j < nth; ++j) {
2026         void *priv_data = (char *)pr_data + j * size;
2027         f_comb(sh_data, priv_data); // combine results
2028         if (f_fini)
2029           f_fini(priv_data); // finalize if needed
2030       }
2031     } else {
2032       void **pr_data = (void **)(arr[i].reduce_priv);
2033       for (int j = 0; j < nth; ++j) {
2034         if (pr_data[j] != NULL) {
2035           f_comb(sh_data, pr_data[j]); // combine results
2036           if (f_fini)
2037             f_fini(pr_data[j]); // finalize if needed
2038           __kmp_free(pr_data[j]);
2039         }
2040       }
2041     }
2042     __kmp_free(arr[i].reduce_priv);
2043   }
2044   __kmp_thread_free(th, arr);
2045   tg->reduce_data = NULL;
2046   tg->reduce_num_data = 0;
2047 }
2048 #endif
2049
2050 #if OMP_40_ENABLED
2051 // __kmpc_taskgroup: Start a new taskgroup
2052 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2053   kmp_info_t *thread = __kmp_threads[gtid];
2054   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2055   kmp_taskgroup_t *tg_new =
2056       (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2057   KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2058   KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2059   KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2060   tg_new->parent = taskdata->td_taskgroup;
2061 #if OMP_50_ENABLED
2062   tg_new->reduce_data = NULL;
2063   tg_new->reduce_num_data = 0;
2064 #endif
2065   taskdata->td_taskgroup = tg_new;
2066
2067 #if OMPT_SUPPORT && OMPT_OPTIONAL
2068   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2069     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2070     if (!codeptr)
2071       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2072     kmp_team_t *team = thread->th.th_team;
2073     ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2074     // FIXME: I think this is wrong for lwt!
2075     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2076
2077     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2078         ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2079         &(my_task_data), codeptr);
2080   }
2081 #endif
2082 }
2083
2084 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2085 // and its descendants are complete
2086 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2087   kmp_info_t *thread = __kmp_threads[gtid];
2088   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2089   kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2090   int thread_finished = FALSE;
2091
2092 #if OMPT_SUPPORT && OMPT_OPTIONAL
2093   kmp_team_t *team;
2094   ompt_data_t my_task_data;
2095   ompt_data_t my_parallel_data;
2096   void *codeptr;
2097   if (UNLIKELY(ompt_enabled.enabled)) {
2098     team = thread->th.th_team;
2099     my_task_data = taskdata->ompt_task_info.task_data;
2100     // FIXME: I think this is wrong for lwt!
2101     my_parallel_data = team->t.ompt_team_info.parallel_data;
2102     codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2103     if (!codeptr)
2104       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2105   }
2106 #endif
2107
2108   KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2109   KMP_DEBUG_ASSERT(taskgroup != NULL);
2110   KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2111
2112   if (__kmp_tasking_mode != tskm_immediate_exec) {
2113     // mark task as waiting not on a barrier
2114     taskdata->td_taskwait_counter += 1;
2115     taskdata->td_taskwait_ident = loc;
2116     taskdata->td_taskwait_thread = gtid + 1;
2117 #if USE_ITT_BUILD
2118     // For ITT the taskgroup wait is similar to taskwait until we need to
2119     // distinguish them
2120     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2121     if (itt_sync_obj != NULL)
2122       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
2123 #endif /* USE_ITT_BUILD */
2124
2125 #if OMPT_SUPPORT && OMPT_OPTIONAL
2126     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2127       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2128           ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2129           &(my_task_data), codeptr);
2130     }
2131 #endif
2132
2133 #if OMP_45_ENABLED
2134     if (!taskdata->td_flags.team_serial ||
2135         (thread->th.th_task_team != NULL &&
2136          thread->th.th_task_team->tt.tt_found_proxy_tasks))
2137 #else
2138     if (!taskdata->td_flags.team_serial)
2139 #endif
2140     {
2141       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)),
2142                        0U);
2143       while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2144         flag.execute_tasks(thread, gtid, FALSE,
2145                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2146                            __kmp_task_stealing_constraint);
2147       }
2148     }
2149     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2150
2151 #if OMPT_SUPPORT && OMPT_OPTIONAL
2152     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2153       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2154           ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2155           &(my_task_data), codeptr);
2156     }
2157 #endif
2158
2159 #if USE_ITT_BUILD
2160     if (itt_sync_obj != NULL)
2161       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
2162 #endif /* USE_ITT_BUILD */
2163   }
2164   KMP_DEBUG_ASSERT(taskgroup->count == 0);
2165
2166 #if OMP_50_ENABLED
2167   if (taskgroup->reduce_data != NULL) // need to reduce?
2168     __kmp_task_reduction_fini(thread, taskgroup);
2169 #endif
2170   // Restore parent taskgroup for the current task
2171   taskdata->td_taskgroup = taskgroup->parent;
2172   __kmp_thread_free(thread, taskgroup);
2173
2174   KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2175                 gtid, taskdata));
2176   ANNOTATE_HAPPENS_AFTER(taskdata);
2177
2178 #if OMPT_SUPPORT && OMPT_OPTIONAL
2179   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2180     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2181         ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2182         &(my_task_data), codeptr);
2183   }
2184 #endif
2185 }
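
For context, the user-level construct behind this pair: a #pragma omp taskgroup lowers to __kmpc_taskgroup() on entry and __kmpc_end_taskgroup() at the closing brace, where the encountering thread helps execute tasks until taskgroup->count drains. The tree-walk body below is illustrative only.

struct node { struct node *left, *right; };
void visit(struct node *n);

void walk_all(struct node *root) {
#pragma omp taskgroup
  {
#pragma omp task
    visit(root); // visit() may spawn further descendant tasks
  } // implicit wait here for the task and all of its descendants
}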
2186 #endif
2187
2188 // __kmp_remove_my_task: remove a task from my own deque
2189 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2190                                         kmp_task_team_t *task_team,
2191                                         kmp_int32 is_constrained) {
2192   kmp_task_t *task;
2193   kmp_taskdata_t *taskdata;
2194   kmp_thread_data_t *thread_data;
2195   kmp_uint32 tail;
2196
2197   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2198   KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2199                    NULL); // Caller should check this condition
2200
2201   thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2202
2203   KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2204                 gtid, thread_data->td.td_deque_ntasks,
2205                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2206
2207   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2208     KA_TRACE(10,
2209              ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2210               "ntasks=%d head=%u tail=%u\n",
2211               gtid, thread_data->td.td_deque_ntasks,
2212               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2213     return NULL;
2214   }
2215
2216   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2217
2218   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2219     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2220     KA_TRACE(10,
2221              ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2222               "ntasks=%d head=%u tail=%u\n",
2223               gtid, thread_data->td.td_deque_ntasks,
2224               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2225     return NULL;
2226   }
2227
2228   tail = (thread_data->td.td_deque_tail - 1) &
2229          TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2230   taskdata = thread_data->td.td_deque[tail];
2231
2232   if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
2233     // we need to check if the candidate obeys task scheduling constraint (TSC)
2234     // only descendant of all deferred tied tasks can be scheduled, checking
2235     // the last one is enough, as it in turn is the descendant of all others
2236     kmp_taskdata_t *current = thread->th.th_current_task->td_last_tied;
2237     KMP_DEBUG_ASSERT(current != NULL);
2238     // check if last tied task is not suspended on barrier
2239     if (current->td_flags.tasktype == TASK_EXPLICIT ||
2240         current->td_taskwait_thread > 0) { // <= 0 on barrier
2241       kmp_int32 level = current->td_level;
2242       kmp_taskdata_t *parent = taskdata->td_parent;
2243       while (parent != current && parent->td_level > level) {
2244         parent = parent->td_parent; // check generation up to the level of the
2245         // current task
2246         KMP_DEBUG_ASSERT(parent != NULL);
2247       }
2248       if (parent != current) {
2249         // The TSC does not allow stealing the victim task
2250         __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2251         KA_TRACE(10,
2252                  ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2253                   "ntasks=%d head=%u tail=%u\n",
2254                   gtid, thread_data->td.td_deque_ntasks,
2255                   thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2256         return NULL;
2257       }
2258     }
2259   }
2260
2261   thread_data->td.td_deque_tail = tail;
2262   TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2263
2264   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2265
2266   KA_TRACE(10, ("__kmp_remove_my_task(exit #2): T#%d task %p removed: "
2267                 "ntasks=%d head=%u tail=%u\n",
2268                 gtid, taskdata, thread_data->td.td_deque_ntasks,
2269                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2270
2271   task = KMP_TASKDATA_TO_TASK(taskdata);
2272   return task;
2273 }
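
A standalone sketch of the wrap-around index arithmetic used above; TASK_DEQUE_MASK works because td_deque_size is a power of two, so a single AND replaces a modulo for both the tail decrement (popping one's own tasks, LIFO) and the head increment (stealing, FIFO).

#include <cassert>
#include <cstdint>

static uint32_t pop_index(uint32_t tail, uint32_t size) {
  assert((size & (size - 1)) == 0); // power-of-two size required
  return (tail - 1) & (size - 1);   // e.g. size=256: tail 0 wraps to 255
}

static uint32_t steal_index(uint32_t head, uint32_t size) {
  return (head + 1) & (size - 1);   // e.g. size=256: head 255 wraps to 0
}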
2274
2275 // __kmp_steal_task: remove a task from another thread's deque
2276 // Assume that calling thread has already checked existence of
2277 // task_team thread_data before calling this routine.
2278 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2279                                     kmp_task_team_t *task_team,
2280                                     std::atomic<kmp_int32> *unfinished_threads,
2281                                     int *thread_finished,
2282                                     kmp_int32 is_constrained) {
2283   kmp_task_t *task;
2284   kmp_taskdata_t *taskdata;
2285   kmp_taskdata_t *current;
2286   kmp_thread_data_t *victim_td, *threads_data;
2287   kmp_int32 level, target;
2288   kmp_int32 victim_tid;
2289
2290   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2291
2292   threads_data = task_team->tt.tt_threads_data;
2293   KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2294
2295   victim_tid = victim_thr->th.th_info.ds.ds_tid;
2296   victim_td = &threads_data[victim_tid];
2297
2298   KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2299                 "task_team=%p ntasks=%d head=%u tail=%u\n",
2300                 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2301                 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2302                 victim_td->td.td_deque_tail));
2303
2304   if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2305     KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2306                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2307                   gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2308                   victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2309                   victim_td->td.td_deque_tail));
2310     return NULL;
2311   }
2312
2313   __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2314
2315   int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2316   // Check again after we acquire the lock
2317   if (ntasks == 0) {
2318     __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2319     KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2320                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2321                   gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2322                   victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2323     return NULL;
2324   }
2325
2326   KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2327
2328   taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2329   if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
2330     // we need to check if the candidate obeys task scheduling constraint (TSC)
2331     // only descendant of all deferred tied tasks can be scheduled, checking
2332     // the last one is enough, as it in turn is the descendant of all others
2333     current = __kmp_threads[gtid]->th.th_current_task->td_last_tied;
2334     KMP_DEBUG_ASSERT(current != NULL);
2335     // check if last tied task is not suspended on barrier
2336     if (current->td_flags.tasktype == TASK_EXPLICIT ||
2337         current->td_taskwait_thread > 0) { // <= 0 on barrier
2338       level = current->td_level;
2339       kmp_taskdata_t *parent = taskdata->td_parent;
2340       while (parent != current && parent->td_level > level) {
2341         parent = parent->td_parent; // check generation up to the level of the
2342         // current task
2343         KMP_DEBUG_ASSERT(parent != NULL);
2344       }
2345       if (parent != current) {
2346         if (!task_team->tt.tt_untied_task_encountered) {
2347           // The TSC does not allow stealing the victim task
2348           __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2349           KA_TRACE(10,
2350                    ("__kmp_steal_task(exit #3): T#%d could not steal from "
2351                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2352                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2353                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2354           return NULL;
2355         }
2356         taskdata = NULL; // will check other tasks in victim's deque
2357       }
2358     }
2359   }
2360   if (taskdata != NULL) {
2361     // Bump head pointer and Wrap.
2362     victim_td->td.td_deque_head =
2363         (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2364   } else {
2365     int i;
2366     // walk through victim's deque trying to steal any task
2367     target = victim_td->td.td_deque_head;
2368     for (i = 1; i < ntasks; ++i) {
2369       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2370       taskdata = victim_td->td.td_deque[target];
2371       if (taskdata->td_flags.tiedness == TASK_TIED) {
2372         // check if the candidate obeys the TSC
2373         kmp_taskdata_t *parent = taskdata->td_parent;
2374         // check generation up to the level of the current task
2375         while (parent != current && parent->td_level > level) {
2376           parent = parent->td_parent;
2377           KMP_DEBUG_ASSERT(parent != NULL);
2378         }
2379         if (parent != current) {
2380           // The TSC does not allow stealing the candidate
2381           taskdata = NULL;
2382           continue;
2383         } else {
2384           // found victim tied task
2385           break;
2386         }
2387       } else {
2388         // found victim untied task
2389         break;
2390       }
2391     }
2392     if (taskdata == NULL) {
2393       // No appropriate candidate to steal found
2394       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2395       KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2396                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2397                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2398                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2399       return NULL;
2400     }
2401     int prev = target;
2402     for (i = i + 1; i < ntasks; ++i) {
2403       // shift remaining tasks in the deque left by 1
2404       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2405       victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2406       prev = target;
2407     }
2408     KMP_DEBUG_ASSERT(
2409         victim_td->td.td_deque_tail ==
2410         (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
2411     victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2412   }
2413   if (*thread_finished) {
2414     // We need to un-mark this victim as a finished victim. This must be done
2415     // before releasing the lock, or else other threads (starting with the
2416     // master victim) might be prematurely released from the barrier!!!
2417     kmp_int32 count;
2418
2419     count = KMP_ATOMIC_INC(unfinished_threads);
2420
2421     KA_TRACE(
2422         20,
2423         ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2424          gtid, count + 1, task_team));
2425
2426     *thread_finished = FALSE;
2427   }
2428   TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2429
2430   __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2431
2432   KMP_COUNT_BLOCK(TASK_stolen);
2433   KA_TRACE(10,
2434            ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2435             "task_team=%p ntasks=%d head=%u tail=%u\n",
2436             gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2437             ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2438
2439   task = KMP_TASKDATA_TO_TASK(taskdata);
2440   return task;
2441 }
2442
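The deque bookkeeping above relies on the task deque size being a power of two, so TASK_DEQUE_MASK can wrap head/tail indices with a bitwise AND, as in the `(target + 1) & TASK_DEQUE_MASK(victim_td->td)` check at line 2410. A minimal standalone sketch of that ring-buffer scheme follows; RingDeque, push_tail, and steal_head are illustrative names, not the runtime's API.

// Standalone sketch of the power-of-two ring buffer used for task deques.
#include <cassert>
#include <cstdint>
#include <vector>

struct RingDeque {
  std::vector<int> slots; // stands in for kmp_taskdata_t *
  uint32_t head = 0, tail = 0, ntasks = 0;

  explicit RingDeque(uint32_t size) : slots(size) {
    assert((size & (size - 1)) == 0 && "size must be a power of two");
  }
  uint32_t mask() const { return (uint32_t)slots.size() - 1; } // TASK_DEQUE_MASK analogue

  bool push_tail(int task) {    // owner pushes at the tail
    if (ntasks == slots.size()) return false;
    slots[tail] = task;
    tail = (tail + 1) & mask(); // wrap with the mask
    ++ntasks;
    return true;
  }
  bool steal_head(int *out) {   // thief steals from the head
    if (ntasks == 0) return false;
    *out = slots[head];
    head = (head + 1) & mask();
    --ntasks;
    return true;
  }
};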
2443// __kmp_execute_tasks_template: Choose and execute tasks until either the
2444// condition is satisfied (return true) or there are none left (return false).
2445//
2446// final_spin is TRUE if this is the spin at the release barrier.
2447// thread_finished indicates whether the thread is finished executing all
2448// the tasks it has on its deque, and is at the release barrier.
2449// spinner is the location on which to spin.
2450// spinner == NULL means only execute a single task and return.
2451// checker is the value to check to terminate the spin.
2452template <class C>
2453static inline int __kmp_execute_tasks_template(
2454 kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2455    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2456 kmp_int32 is_constrained) {
2457 kmp_task_team_t *task_team = thread->th.th_task_team;
2458 kmp_thread_data_t *threads_data;
2459 kmp_task_t *task;
2460 kmp_info_t *other_thread;
2461 kmp_taskdata_t *current_task = thread->th.th_current_task;
2462 std::atomic<kmp_int32> *unfinished_threads;
2463 kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2464 tid = thread->th.th_info.ds.ds_tid;
2465
2466  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2467  KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2468
2469  if (task_team == NULL || current_task == NULL)
2470    return FALSE;
2471
2472  KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2473                "*thread_finished=%d\n",
2474                gtid, final_spin, *thread_finished));
2475
2476  thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2477  threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2478  KMP_DEBUG_ASSERT(threads_data != NULL);
2479
2480  nthreads = task_team->tt.tt_nproc;
2481  unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2482#if OMP_45_ENABLED
2483  KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2484#else
2485  KMP_DEBUG_ASSERT(nthreads > 1);
2486#endif
2487  KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
2488
2489 while (1) { // Outer loop keeps trying to find tasks in case of single thread
2490 // getting tasks from target constructs
2491 while (1) { // Inner loop to find a task and execute it
2492      task = NULL;
2493 if (use_own_tasks) { // check on own queue first
2494 task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2495 }
2496      if ((task == NULL) && (nthreads > 1)) { // Steal a task
2497 int asleep = 1;
2498 use_own_tasks = 0;
2499 // Try to steal from the last place I stole from successfully.
2500 if (victim_tid == -2) { // haven't stolen anything yet
2501 victim_tid = threads_data[tid].td.td_deque_last_stolen;
2502 if (victim_tid !=
2503 -1) // if we have a last stolen from victim, get the thread
2504 other_thread = threads_data[victim_tid].td.td_thr;
2505 }
2506 if (victim_tid != -1) { // found last victim
2507 asleep = 0;
2508 } else if (!new_victim) { // no recent steals and we haven't already
2509 // used a new victim; select a random thread
2510 do { // Find a different thread to steal work from.
2511 // Pick a random thread. Initial plan was to cycle through all the
2512 // threads, and only return if we tried to steal from every thread,
2513 // and failed. Arch says that's not such a great idea.
2514 victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2515 if (victim_tid >= tid) {
2516 ++victim_tid; // Adjusts random distribution to exclude self
2517 }
2518 // Found a potential victim
2519 other_thread = threads_data[victim_tid].td.td_thr;
2520 // There is a slight chance that __kmp_enable_tasking() did not wake
2521 // up all threads waiting at the barrier. If victim is sleeping,
2522 // then wake it up. Since we were going to pay the cache miss
2523 // penalty for referencing another thread's kmp_info_t struct
2524 // anyway,
2525 // the check shouldn't cost too much performance at this point. In
2526 // extra barrier mode, tasks do not sleep at the separate tasking
2527 // barrier, so this isn't a problem.
2528 asleep = 0;
2529 if ((__kmp_tasking_mode == tskm_task_teams) &&
2530              (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2531              (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2532               NULL)) {
2533            asleep = 1;
2534            __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2535                                      other_thread->th.th_sleep_loc);
2536            // A sleeping thread should not have any tasks on its queue.
2537 // There is a slight possibility that it resumes, steals a task
2538 // from another thread, which spawns more tasks, all in the time
2539 // that it takes this thread to check => don't write an assertion
2540 // that the victim's queue is empty. Try stealing from a
2541 // different thread.
2542 }
2543 } while (asleep);
2544 }
2545
2546 if (!asleep) {
2547 // We have a victim to try to steal from
2548 task = __kmp_steal_task(other_thread, gtid, task_team,
2549 unfinished_threads, thread_finished,
2550 is_constrained);
2551 }
2552        if (task != NULL) { // set last stolen to victim
2553 if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2554 threads_data[tid].td.td_deque_last_stolen = victim_tid;
2555 // The pre-refactored code did not try more than 1 successful new
2556            // victim, unless the last one generated more local tasks;
2557 // new_victim keeps track of this
2558 new_victim = 1;
2559 }
2560 } else { // No tasks found; unset last_stolen
2561          KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2562 victim_tid = -2; // no successful victim found
2563 }
2564 }
2565
2566      if (task == NULL) // break out of tasking loop
2567 break;
2568
2569// Found a task; execute it
2570#if USE_ITT_BUILD && USE_ITT_NOTIFY
2571      if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2572        if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2573          // get the object reliably
2574          itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2575        }
2576        __kmp_itt_task_starting(itt_sync_obj);
2577      }
2578#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2579      __kmp_invoke_task(gtid, task, current_task);
2580#if USE_ITT_BUILD
2581      if (itt_sync_obj != NULL)
2582        __kmp_itt_task_finished(itt_sync_obj);
2583#endif /* USE_ITT_BUILD */
2584 // If this thread is only partway through the barrier and the condition is
2585 // met, then return now, so that the barrier gather/release pattern can
2586 // proceed. If this thread is in the last spin loop in the barrier,
2587 // waiting to be released, we know that the termination condition will not
2588      // be satisfied, so don't waste any cycles checking it.
2589      if (flag == NULL || (!final_spin && flag->done_check())) {
2590        KA_TRACE(
2591            15,
2592            ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2593             gtid));
2594        return TRUE;
2595      }
2596      if (thread->th.th_task_team == NULL) {
2597        break;
2598      }
2599      // Yield before executing next task
2600      KMP_YIELD(__kmp_library == library_throughput);
2601 // If execution of a stolen task results in more tasks being placed on our
2602 // run queue, reset use_own_tasks
2603      if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2604        KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2605                      "other tasks, restart\n",
2606                      gtid));
2607 use_own_tasks = 1;
2608 new_victim = 0;
2609 }
2610 }
2611
2612// The task source has been exhausted. If in final spin loop of barrier, check
2613// if termination condition is satisfied.
2614#if OMP_45_ENABLED
2615    // The work queue may be empty but there might be proxy tasks still
2616    // executing
2617    if (final_spin &&
2618        KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0)
2619#else
2620 if (final_spin)
2621#endif
2622 {
2623 // First, decrement the #unfinished threads, if that has not already been
2624 // done. This decrement might be to the spin location, and result in the
2625 // termination condition being satisfied.
2626 if (!*thread_finished) {
2627 kmp_int32 count;
2628
2629        count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
2630        KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2631                      "unfinished_threads to %d task_team=%p\n",
2632                      gtid, count, task_team));
2633        *thread_finished = TRUE;
2634 }
2635
2636 // It is now unsafe to reference thread->th.th_team !!!
2637 // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2638 // thread to pass through the barrier, where it might reset each thread's
2639 // th.th_team field for the next parallel region. If we can steal more
2640 // work, we know that this has not happened yet.
2641      if (flag != NULL && flag->done_check()) {
2642        KA_TRACE(
2643            15,
2644            ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2645             gtid));
2646        return TRUE;
2647 }
2648 }
2649
2650 // If this thread's task team is NULL, master has recognized that there are
2651 // no more tasks; bail out
2652    if (thread->th.th_task_team == NULL) {
2653      KA_TRACE(15,
2654               ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2655      return FALSE;
2656 }
2657
2658#if OMP_45_ENABLED
2659 // We could be getting tasks from target constructs; if this is the only
2660 // thread, keep trying to execute tasks from own queue
2661 if (nthreads == 1)
2662 use_own_tasks = 1;
2663 else
2664#endif
2665 {
2666      KA_TRACE(15,
2667               ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
2668      return FALSE;
2669 }
2670 }
2671}
2672
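The victim-selection trick inside the inner loop above (lines 2514-2517) draws a random number in [0, nthreads-2] and shifts values greater than or equal to tid up by one, which yields a uniform choice over every thread except the caller. A small self-contained sketch of the same adjustment; pick_victim is a hypothetical name, and std::mt19937 merely stands in for __kmp_get_random.

// Sketch: uniform random victim that can never be the calling thread.
#include <cstdio>
#include <random>

int pick_victim(int tid, int nthreads, std::mt19937 &rng) {
  std::uniform_int_distribution<int> dist(0, nthreads - 2);
  int victim = dist(rng); // plays the role of __kmp_get_random(thread) % (nthreads - 1)
  if (victim >= tid)
    ++victim;             // adjust distribution to exclude self
  return victim;
}

int main() {
  std::mt19937 rng(42);
  for (int i = 0; i < 5; ++i)
    std::printf("victim=%d\n", pick_victim(/*tid=*/2, /*nthreads=*/4, rng)); // never 2
}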
2673int __kmp_execute_tasks_32(
2674    kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
2675    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2676    kmp_int32 is_constrained) {
2677  return __kmp_execute_tasks_template(
2678      thread, gtid, flag, final_spin,
2679      thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2680}
2681
2682int __kmp_execute_tasks_64(
2683    kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
2684    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2685    kmp_int32 is_constrained) {
2686  return __kmp_execute_tasks_template(
2687      thread, gtid, flag, final_spin,
2688      thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2689}
2690
2691int __kmp_execute_tasks_oncore(
2692    kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
2693    int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2694    kmp_int32 is_constrained) {
2695  return __kmp_execute_tasks_template(
2696      thread, gtid, flag, final_spin,
2697      thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2698}
2699
2700// __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
2701// next barrier so they can assist in executing enqueued tasks.
2702// First thread in allocates the task team atomically.
2703static void __kmp_enable_tasking(kmp_task_team_t *task_team,
2704                                 kmp_info_t *this_thr) {
2705  kmp_thread_data_t *threads_data;
2706  int nthreads, i, is_init_thread;
2707
2708  KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
2709                __kmp_gtid_from_thread(this_thr)));
2710
2711  KMP_DEBUG_ASSERT(task_team != NULL);
2712  KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
2713
2714  nthreads = task_team->tt.tt_nproc;
2715  KMP_DEBUG_ASSERT(nthreads > 0);
2716  KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
2717
2718  // Allocate or increase the size of threads_data if necessary
2719  is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
2720
2721  if (!is_init_thread) {
2722    // Some other thread already set up the array.
2723    KA_TRACE(
2724        20,
2725        ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
2726         __kmp_gtid_from_thread(this_thr)));
2727    return;
2728  }
2729  threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2730  KMP_DEBUG_ASSERT(threads_data != NULL);
2731
2732  if ((__kmp_tasking_mode == tskm_task_teams) &&
2733      (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
2734    // Release any threads sleeping at the barrier, so that they can steal
2735    // tasks and execute them. In extra barrier mode, tasks do not sleep
2736    // at the separate tasking barrier, so this isn't a problem.
2737    for (i = 0; i < nthreads; i++) {
2738      volatile void *sleep_loc;
2739      kmp_info_t *thread = threads_data[i].td.td_thr;
2740
2741      if (i == this_thr->th.th_info.ds.ds_tid) {
2742        continue;
2743      }
2744      // Since we haven't locked the thread's suspend mutex lock at this
2745      // point, there is a small window where a thread might be putting
2746      // itself to sleep, but hasn't set the th_sleep_loc field yet.
2747      // To work around this, __kmp_execute_tasks_template() periodically checks
2748      // to see if other threads are sleeping (using the same random mechanism
2749      // that is used for task stealing) and awakens them if they are.
2750      if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
2751          NULL) {
2752        KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
2753                      __kmp_gtid_from_thread(this_thr),
2754                      __kmp_gtid_from_thread(thread)));
2755        __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
2756      } else {
2757        KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
2758                      __kmp_gtid_from_thread(this_thr),
2759                      __kmp_gtid_from_thread(thread)));
2760      }
2761    }
2762  }
2763
2764  KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
2765                __kmp_gtid_from_thread(this_thr)));
2766}
2767
2768/* // TODO: Check the comment consistency
2769 * Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
2770 * like a shadow of the kmp_team_t data struct, with a different lifetime.
2771 * After a child thread checks into a barrier and calls __kmp_release() from
2772 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
2773 * longer assume that the kmp_team_t structure is intact (at any moment, the
2774 * master thread may exit the barrier code and free the team data structure,
2775 * and return the threads to the thread pool).
2776 *
2777 * This does not work with the tasking code, as the thread is still
2778 * expected to participate in the execution of any tasks that may have been
2779 * spawned by a member of the team, and the thread still needs access
2780 * to each thread in the team, so that it can steal work from it.
2781 *
2782 * Enter the existence of the kmp_task_team_t struct. It employs a reference
2783 * counting mechanism, and is allocated by the master thread before calling
2784 * __kmp_<barrier_kind>_release, and then is released by the last thread to
2785 * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
2786 * of the kmp_task_team_t structs for consecutive barriers can overlap
2787 * (and will, unless the master thread is the last thread to exit the barrier
2788 * release phase, which is not typical).
2789 *
2790 * The existence of such a struct is useful outside the context of tasking,
2791 * but for now, I'm trying to keep it specific to the OMP_30_ENABLED macro,
2792 * so that any performance differences show up when comparing the 2.5 vs. 3.0
2793 * libraries.
2794 *
2795 * We currently use the existence of the threads array as an indicator that
2796 * tasks were spawned since the last barrier. If the structure is to be
2797 * useful outside the context of tasking, then this will have to change, but
2798 * not setting the field minimizes the performance impact of tasking on
2799 * barriers, when no explicit tasks were spawned (pushed, actually).
2800 */
2801
2802static kmp_task_team_t *__kmp_free_task_teams =
2803    NULL; // Free list for task_team data structures
2804// Lock for task team data structures
2805kmp_bootstrap_lock_t __kmp_task_team_lock =
2806    KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
2807
2808// __kmp_alloc_task_deque:
2809// Allocates a task deque for a particular thread, and initializes the necessary
2810// data structures relating to the deque. This only happens once per thread
2811// per task team since task teams are recycled. No lock is needed during
2812// allocation since each thread allocates its own deque.
2813static void __kmp_alloc_task_deque(kmp_info_t *thread,
2814 kmp_thread_data_t *thread_data) {
2815 __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
2816  KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
2817
2818  // Initialize last stolen task field to "none"
2819  thread_data->td.td_deque_last_stolen = -1;
2820
2821  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
2822  KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
2823  KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
2824
2825  KE_TRACE(
2826      10,
2827      ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
2828       __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
2829  // Allocate space for task deque, and zero the deque
2830  // Cannot use __kmp_thread_calloc() because threads not around for
2831  // kmp_reap_task_team( ).
2832  thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
2833      INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
2834  thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
2835}
2836
2837// __kmp_realloc_task_deque:
2838// Re-allocates a task deque for a particular thread, copies the content from
2839// the old deque and adjusts the necessary data structures relating to the
2840// deque. This operation must be done with the deque_lock being held
2841static void __kmp_realloc_task_deque(kmp_info_t *thread,
2842                                     kmp_thread_data_t *thread_data) {
2843  kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
2844  kmp_int32 new_size = 2 * size;
2845
2846  KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
2847                "%d] for thread_data %p\n",
2848                __kmp_gtid_from_thread(thread), size, new_size, thread_data));
2849
2850  kmp_taskdata_t **new_deque =
2851      (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
2852
2853  int i, j;
2854  for (i = thread_data->td.td_deque_head, j = 0; j < size;
2855       i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
2856    new_deque[j] = thread_data->td.td_deque[i];
2857
2858  __kmp_free(thread_data->td.td_deque);
2859
2860  thread_data->td.td_deque_head = 0;
2861  thread_data->td.td_deque_tail = size;
2862  thread_data->td.td_deque = new_deque;
2863  thread_data->td.td_deque_size = new_size;
2864}
2865
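Since __kmp_realloc_task_deque is only reached when the deque is full, copying exactly size elements starting at the old head linearizes the ring: the oldest task lands at index 0, head resets to 0, and tail becomes the old size. A hedged standalone sketch of that unwrap-and-double step; grow is a hypothetical name and std::vector stands in for the runtime's raw allocation.

// Sketch: double a full power-of-two ring buffer, unwrapping as we copy.
#include <cstdint>
#include <vector>

void grow(std::vector<int> &slots, uint32_t &head, uint32_t &tail) {
  uint32_t size = (uint32_t)slots.size(); // old capacity, a power of two
  std::vector<int> bigger(2 * size);
  for (uint32_t i = head, j = 0; j < size; i = (i + 1) & (size - 1), ++j)
    bigger[j] = slots[i];                 // unwrap: oldest element lands at index 0
  slots.swap(bigger);
  head = 0;
  tail = size;                            // deque was full, so ntasks == old size
}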
2866// __kmp_free_task_deque:
2867// Deallocates a task deque for a particular thread. Happens at library
2868// deallocation so don't need to reset all thread data fields.
2869static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
2870  if (thread_data->td.td_deque != NULL) {
2871    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2872    TCW_4(thread_data->td.td_deque_ntasks, 0);
2873    __kmp_free(thread_data->td.td_deque);
2874    thread_data->td.td_deque = NULL;
2875    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2876  }
2877
2878#ifdef BUILD_TIED_TASK_STACK
2879 // GEH: Figure out what to do here for td_susp_tied_tasks
2880 if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
2881 __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
2882 }
2883#endif // BUILD_TIED_TASK_STACK
2884}
2885
2886// __kmp_realloc_task_threads_data:
2887// Allocates a threads_data array for a task team, either by allocating an
2888// initial array or enlarging an existing array. Only the first thread to get
2889// the lock allocs or enlarges the array and re-initializes the array elements.
2890// That thread returns "TRUE", the rest return "FALSE".
2891// Assumes that the new array size is given by task_team -> tt.tt_nproc.
2892// The current size is given by task_team -> tt.tt_max_threads.
2893static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
2894                                           kmp_task_team_t *task_team) {
2895  kmp_thread_data_t **threads_data_p;
2896  kmp_int32 nthreads, maxthreads;
2897  int is_init_thread = FALSE;
2898
2899  if (TCR_4(task_team->tt.tt_found_tasks)) {
    1: Taking false branch
2900    // Already reallocated and initialized.
2901    return FALSE;
2902  }
2903
2904  threads_data_p = &task_team->tt.tt_threads_data;
2905  nthreads = task_team->tt.tt_nproc;
2906  maxthreads = task_team->tt.tt_max_threads;
2907
2908  // All threads must lock when they encounter the first task of the implicit
2909  // task region to make sure threads_data fields are (re)initialized before
2910  // used.
2911  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
2912
2913  if (!TCR_4(task_team->tt.tt_found_tasks)) {
    2: Assuming the condition is true
    3: Taking true branch
2914    // first thread to enable tasking
2915    kmp_team_t *team = thread->th.th_team;
2916    int i;
2917
2918    is_init_thread = TRUE;
2919    if (maxthreads < nthreads) {
    4: Assuming 'maxthreads' is >= 'nthreads'
    5: Taking false branch
2920
2921      if (*threads_data_p != NULL) {
2922        kmp_thread_data_t *old_data = *threads_data_p;
2923        kmp_thread_data_t *new_data = NULL;
2924
2925        KE_TRACE(
2926            10,
2927            ("__kmp_realloc_task_threads_data: T#%d reallocating "
2928             "threads data for task_team %p, new_size = %d, old_size = %d\n",
2929             __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
2930        // Reallocate threads_data to have more elements than current array
2931        // Cannot use __kmp_thread_realloc() because threads not around for
2932        // kmp_reap_task_team( ). Note all new array entries are initialized
2933        // to zero by __kmp_allocate().
2934        new_data = (kmp_thread_data_t *)__kmp_allocate(
2935            nthreads * sizeof(kmp_thread_data_t));
2936        // copy old data to new data
2937        KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
2938                     (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
2939
2940#ifdef BUILD_TIED_TASK_STACK
2941        // GEH: Figure out if this is the right thing to do
2942        for (i = maxthreads; i < nthreads; i++) {
2943          kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2944          __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2945        }
2946#endif // BUILD_TIED_TASK_STACK
2947        // Install the new data and free the old data
2948        (*threads_data_p) = new_data;
2949        __kmp_free(old_data);
2950      } else {
2951        KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
2952                      "threads data for task_team %p, size = %d\n",
2953                      __kmp_gtid_from_thread(thread), task_team, nthreads));
2954        // Make the initial allocate for threads_data array, and zero entries
2955        // Cannot use __kmp_thread_calloc() because threads not around for
2956        // kmp_reap_task_team( ).
2957        ANNOTATE_IGNORE_WRITES_BEGIN();
2958        *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
2959            nthreads * sizeof(kmp_thread_data_t));
2960        ANNOTATE_IGNORE_WRITES_END();
2961#ifdef BUILD_TIED_TASK_STACK
2962        // GEH: Figure out if this is the right thing to do
2963        for (i = 0; i < nthreads; i++) {
2964          kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2965          __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2966        }
2967#endif // BUILD_TIED_TASK_STACK
2968      }
2969      task_team->tt.tt_max_threads = nthreads;
2970    } else {
2971      // If array has (more than) enough elements, go ahead and use it
2972      KMP_DEBUG_ASSERT(*threads_data_p != NULL);
2973    }
2974
2975    // initialize threads_data pointers back to thread_info structures
2976    for (i = 0; i < nthreads; i++) {
    6: Assuming 'i' is < 'nthreads'
    7: Loop condition is true. Entering loop body
2977      kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
    8: 'thread_data' initialized to a null pointer value
2978      thread_data->td.td_thr = team->t.t_threads[i];
    9: Dereference of null pointer
2979
2980      if (thread_data->td.td_deque_last_stolen >= nthreads) {
2981        // The last stolen field survives across teams / barrier, and the number
2982        // of threads may have changed. It's possible (likely?) that a new
2983        // parallel region will exhibit the same behavior as previous region.
2984        thread_data->td.td_deque_last_stolen = -1;
2985      }
2986    }
2987
2988    KMP_MB();
2989    TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
2990  }
2991
2992  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
2993  return is_init_thread;
2994}
2995
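This function contains the defect this report flags (steps 1-9 above): on the path where tt_found_tasks is false and maxthreads >= nthreads, the analyzer cannot prove *threads_data_p non-NULL, because the KMP_DEBUG_ASSERT at line 2972 expands to a call that is not known to halt, so `&(*threads_data_p)[i]` at line 2977 can bind thread_data to a null pointer, which line 2978 then dereferences. Below is a generic sketch of the defect class and of an analyzer-visible guard, with illustrative types; it is not the upstream patch.

// Sketch: a debug-time assert (whether compiled away under NDEBUG or simply
// not marked noreturn) does not remove the NULL path for the analyzer; an
// explicit guard does.
#include <cassert>
#include <cstddef>

struct ThreadData { int td_thr; };

void init_entries(ThreadData *threads_data, int nthreads) {
  assert(threads_data != NULL); // analyzer still considers the NULL path
  if (threads_data == NULL)     // hard guard the analyzer can see
    return;
  for (int i = 0; i < nthreads; i++)
    threads_data[i].td_thr = i; // safe: threads_data proven non-NULL above
}

int main() {
  ThreadData data[4];
  init_entries(data, 4); // normal use
  init_entries(NULL, 4); // would have crashed without the guard
}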
2996// __kmp_free_task_threads_data:
2997// Deallocates a threads_data array for a task team, including any attached
2998// tasking deques. Only occurs at library shutdown.
2999static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3000 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3001  if (task_team->tt.tt_threads_data != NULL) {
3002    int i;
3003    for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3004      __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3005    }
3006    __kmp_free(task_team->tt.tt_threads_data);
3007    task_team->tt.tt_threads_data = NULL;
3008 }
3009 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3010}
3011
3012// __kmp_allocate_task_team:
3013// Allocates a task team associated with a specific team, taking it from
3014// the global task team free list if possible. Also initializes data
3015// structures.
3016static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3017 kmp_team_t *team) {
3018  kmp_task_team_t *task_team = NULL;
3019  int nthreads;
3020
3021  KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3022                (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3023
3024  if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3025    // Take a task team from the task team pool
3026    __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3027    if (__kmp_free_task_teams != NULL) {
3028      task_team = __kmp_free_task_teams;
3029      TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3030      task_team->tt.tt_next = NULL;
3031    }
3032    __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3033  }
3034
3035  if (task_team == NULL) {
3036    KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3037                  "task team for team %p\n",
3038                  __kmp_gtid_from_thread(thread), team));
3039    // Allocate a new task team if one is not available.
3040    // Cannot use __kmp_thread_malloc() because threads not around for
3041    // kmp_reap_task_team( ).
3042    task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3043    __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3044    // AC: __kmp_allocate zeroes returned memory
3045    // task_team -> tt.tt_threads_data = NULL;
3046    // task_team -> tt.tt_max_threads = 0;
3047    // task_team -> tt.tt_next = NULL;
3048  }
3049
3050  TCW_4(task_team->tt.tt_found_tasks, FALSE);
3051#if OMP_45_ENABLED
3052  TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3053#endif
3054  task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3055
3056  KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3057  TCW_4(task_team->tt.tt_active, TRUE);
3058
3059  KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
3060                "unfinished_threads init'd to %d\n",
3061                (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
3062                KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
3063 return task_team;
3064}
3065
3066// __kmp_free_task_team:
3067// Frees the task team associated with a specific thread, and adds it
3068// to the global task team free list.
3069void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
3070  KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
3071                thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
3072
3073  // Put task team back on free list
3074  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3075
3076  KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3077  task_team->tt.tt_next = __kmp_free_task_teams;
3078  TCW_PTR(__kmp_free_task_teams, task_team);
3079
3080 __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3081}
3082
3083// __kmp_reap_task_teams:
3084// Free all the task teams on the task team free list.
3085// Should only be done during library shutdown.
3086// Cannot do anything that needs a thread structure or gtid since they are
3087// already gone.
3088void __kmp_reap_task_teams(void) {
3089 kmp_task_team_t *task_team;
3090
3091  if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3092    // Free all task_teams on the free list
3093    __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3094    while ((task_team = __kmp_free_task_teams) != NULL) {
3095      __kmp_free_task_teams = task_team->tt.tt_next;
3096      task_team->tt.tt_next = NULL;
3097
3098      // Free threads_data if necessary
3099      if (task_team->tt.tt_threads_data != NULL) {
3100        __kmp_free_task_threads_data(task_team);
3101      }
3102      __kmp_free(task_team);
3103 }
3104 __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3105 }
3106}
3107
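The three routines above implement a classic lock-protected free list: __kmp_allocate_task_team pops a recycled struct if one is available, __kmp_free_task_team pushes at the head, and __kmp_reap_task_teams drains the list at shutdown. A minimal sketch of the same pattern; std::mutex stands in for the bootstrap lock, and the types and names are illustrative.

// Sketch: intrusive singly linked free list guarded by one lock.
#include <mutex>

struct TaskTeam { TaskTeam *next = nullptr; };

static TaskTeam *free_list = nullptr;
static std::mutex free_list_lock; // stands in for __kmp_task_team_lock

TaskTeam *acquire_team() {
  {
    std::lock_guard<std::mutex> g(free_list_lock);
    if (free_list) {       // reuse a recycled team if available
      TaskTeam *t = free_list;
      free_list = t->next;
      t->next = nullptr;
      return t;
    }
  }
  return new TaskTeam();   // otherwise allocate fresh
}

void release_team(TaskTeam *t) { // push back for reuse at the head
  std::lock_guard<std::mutex> g(free_list_lock);
  t->next = free_list;
  free_list = t;
}

void reap_teams() { // shutdown: free everything on the list
  std::lock_guard<std::mutex> g(free_list_lock);
  while (TaskTeam *t = free_list) {
    free_list = t->next;
    delete t;
  }
}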
3108// __kmp_wait_to_unref_task_teams:
3109// Some threads could still be in the fork barrier release code, possibly
3110// trying to steal tasks. Wait for each thread to unreference its task team.
3111void __kmp_wait_to_unref_task_teams(void) {
3112 kmp_info_t *thread;
3113 kmp_uint32 spins;
3114 int done;
3115
3116  KMP_INIT_YIELD(spins);
3117
3118  for (;;) {
3119    done = TRUE;
3120
3121    // TODO: GEH - this may be wrong because some sync would be necessary
3122    // in case threads are added to the pool during the traversal. Need to
3123    // verify that lock for thread pool is held when calling this routine.
3124    for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3125         thread = thread->th.th_next_pool) {
3126#if KMP_OS_WINDOWS
3127      DWORD exit_val;
3128#endif
3129      if (TCR_PTR(thread->th.th_task_team) == NULL) {
3130        KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3131                      __kmp_gtid_from_thread(thread)));
3132        continue;
3133      }
3134#if KMP_OS_WINDOWS
3135      // TODO: GEH - add this check for Linux* OS / OS X* as well?
3136      if (!__kmp_is_thread_alive(thread, &exit_val)) {
3137        thread->th.th_task_team = NULL;
3138        continue;
3139      }
3140#endif
3141
3142      done = FALSE; // Because th_task_team pointer is not NULL for this thread
3143
3144      KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3145                    "unreference task_team\n",
3146                    __kmp_gtid_from_thread(thread)));
3147
3148      if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3149        volatile void *sleep_loc;
3150        // If the thread is sleeping, awaken it.
3151        if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3152            NULL) {
3153          KA_TRACE(
3154              10,
3155              ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3156               __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3157          __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3158        }
3159      }
3160    }
3161    if (done) {
3162      break;
3163    }
3164
3165    // If we are oversubscribed, or have waited a bit (and library mode is
3166    // throughput), yield. Pause is in the following code.
3167    KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
3168    KMP_YIELD_SPIN(spins); // Yields only if KMP_LIBRARY=throughput
3169 }
3170}
3171
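The KMP_INIT_YIELD / KMP_YIELD / KMP_YIELD_SPIN macros above implement a spin-then-yield wait: poll the condition and periodically hand the core back to the OS scheduler instead of burning it. A rough standalone equivalent follows; the counter values are illustrative, not the runtime's tuning.

// Sketch: spin-wait with periodic yield back to the scheduler.
#include <atomic>
#include <thread>

void wait_until(std::atomic<bool> &done) {
  int spins = 64;                       // plays the role of KMP_INIT_YIELD
  while (!done.load(std::memory_order_acquire)) {
    if (--spins == 0) {                 // KMP_YIELD_SPIN analogue
      std::this_thread::yield();        // give up the core periodically
      spins = 32;                       // reload value, __kmp_yield_next analogue
    }
  }
}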
3172// __kmp_task_team_setup: Create a task_team for the current team, but use
3173// an already created, unused one if it already exists.
3174void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3175  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3176
3177  // If this task_team hasn't been created yet, allocate it. It will be used in
3178  // the region after the next.
3179  // If it exists, it is the current task team and shouldn't be touched yet as
3180  // it may still be in use.
3181  if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3182      (always || team->t.t_nproc > 1)) {
3183    team->t.t_task_team[this_thr->th.th_task_state] =
3184        __kmp_allocate_task_team(this_thr, team);
3185    KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
3186                  "for team %d at parity=%d\n",
3187                  __kmp_gtid_from_thread(this_thr),
3188                  team->t.t_task_team[this_thr->th.th_task_state],
3189                  ((team != NULL) ? team->t.t_id : -1),
3190                  this_thr->th.th_task_state));
3191  }
3192
3193  // After threads exit the release, they will call sync, and then point to this
3194  // other task_team; make sure it is allocated and properly initialized. As
3195  // threads spin in the barrier release phase, they will continue to use the
3196  // previous task_team struct(above), until they receive the signal to stop
3197  // checking for tasks (they can't safely reference the kmp_team_t struct,
3198  // which could be reallocated by the master thread). No task teams are formed
3199  // for serialized teams.
3200  if (team->t.t_nproc > 1) {
3201    int other_team = 1 - this_thr->th.th_task_state;
3202    if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3203      team->t.t_task_team[other_team] =
3204          __kmp_allocate_task_team(this_thr, team);
3205      KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
3206                    "task_team %p for team %d at parity=%d\n",
3207                    __kmp_gtid_from_thread(this_thr),
3208                    team->t.t_task_team[other_team],
3209                    ((team != NULL) ? team->t.t_id : -1), other_team));
3210    } else { // Leave the old task team struct in place for the upcoming region;
3211      // adjust as needed
3212      kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3213      if (!task_team->tt.tt_active ||
3214          team->t.t_nproc != task_team->tt.tt_nproc) {
3215        TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3216        TCW_4(task_team->tt.tt_found_tasks, FALSE);
3217#if OMP_45_ENABLED
3218        TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3219#endif
3220        KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
3221                          team->t.t_nproc);
3222        TCW_4(task_team->tt.tt_active, TRUE);
3223      }
3224      // if team size has changed, the first thread to enable tasking will
3225      // realloc threads_data if necessary
3226      KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
3227                    "%p for team %d at parity=%d\n",
3228                    __kmp_gtid_from_thread(this_thr),
3229                    team->t.t_task_team[other_team],
3230                    ((team != NULL) ? team->t.t_id : -1), other_team));
3232 }
3233}
3234
3235// __kmp_task_team_sync: Propagation of task team data from team to threads
3236// which happens just after the release phase of a team barrier. This may be
3237// called by any thread, but only for teams with # threads > 1.
3238void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3239 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3240
3241 // Toggle the th_task_state field, to switch which task_team this thread
3242 // refers to
3243 this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
3244 // It is now safe to propagate the task team pointer from the team struct to
3245 // the current thread.
3246 TCW_PTR(this_thr->th.th_task_team,
3247         team->t.t_task_team[this_thr->th.th_task_state]);
3248 KA_TRACE(20,
3249          ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3250           "%p from Team #%d (parity=%d)\n",
3251           __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3252           ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
3253}
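// --- Illustrative sketch (not from the original kmp_tasking.cpp): a minimal
// model of the parity toggle implemented above. Two task-team slots alternate
// between barrier generations, and flipping a 0/1 state then indexing a
// two-element array selects the slot for the upcoming region. The names below
// are hypothetical stand-ins for th_task_state and t_task_team[2].
namespace toy_parity_sketch {
struct team_t { void *task_team[2]; };
inline void sync(int &task_state, void *&my_task_team, team_t &team) {
  task_state = 1 - task_state;               // toggle parity: 0 <-> 1
  my_task_team = team.task_team[task_state]; // adopt the slot for the new region
}
} // namespace toy_parity_sketch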
3254
3255// __kmp_task_team_wait: Master thread waits for outstanding tasks after the
3256// barrier gather phase. Only called by master thread if #threads in team > 1 or
3257// if proxy tasks were created.
3258//
3259// wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3260// by optionally passing 0 as the last argument. When wait is zero, the master
3261// thread does not wait for unfinished_threads to reach 0.
3262void __kmp_task_team_wait(
3263 kmp_info_t *this_thr,
3264 kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3265 kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3266
3267 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3268 KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3269
3270 if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3271 if (wait) {
3272 KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
"(for unfinished_threads to reach 0) on task_team = %p\n", __kmp_gtid_from_thread
(this_thr), task_team); }
3273 "(for unfinished_threads to reach 0) on task_team = %p\n",if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
"(for unfinished_threads to reach 0) on task_team = %p\n", __kmp_gtid_from_thread
(this_thr), task_team); }
3274 __kmp_gtid_from_thread(this_thr), task_team))if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
"(for unfinished_threads to reach 0) on task_team = %p\n", __kmp_gtid_from_thread
(this_thr), task_team); }
;
3275 // Worker threads may have dropped through to release phase, but could
3276 // still be executing tasks. Wait here for tasks to complete. To avoid
3277 // memory contention, only master thread checks termination condition.
3278 kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
3279                        &task_team->tt.tt_unfinished_threads),
3280                  0U);
3281 flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3282 }
3283 // Deactivate the old task team, so that the worker threads will stop
3284 // referencing it while spinning.
3285 KA_TRACE(
3286     20,
3287     ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
3288      "setting active to false, setting local and team's pointer to NULL\n",
3289      __kmp_gtid_from_thread(this_thr), task_team));
3290#if OMP_45_ENABLED
3291 KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3292                  task_team->tt.tt_found_proxy_tasks == TRUE);
3293 TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3294#else
3295 KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1);
3296#endif
3297 KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3298 TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3299 KMP_MB();
3300
3301 TCW_PTR(this_thr->th.th_task_team, NULL);
3302 }
3303}
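// --- Illustrative sketch (not from the original kmp_tasking.cpp): the wait
// above reduces to one thread blocking until an atomic counter of unfinished
// threads drains to zero, with release/acquire pairing so the tasks' side
// effects are visible afterwards. A minimal std::atomic version, assuming
// each worker calls worker_done() exactly once; kmp_flag_32 layers spinning,
// back-off, and task execution on top of this idea.
#include <atomic>
namespace toy_task_team_wait_sketch {
inline void worker_done(std::atomic<unsigned> &unfinished) {
  unfinished.fetch_sub(1, std::memory_order_release); // publish our work
}
inline void master_wait(const std::atomic<unsigned> &unfinished) {
  while (unfinished.load(std::memory_order_acquire) > 0) {
    // the real flag.wait() would pause/yield and may execute tasks here
  }
}
} // namespace toy_task_team_wait_sketch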
3304
3305// __kmp_tasking_barrier:
3306// This routine may only be called when __kmp_tasking_mode == tskm_extra_barrier.
3307// Internal function to execute all tasks prior to a regular barrier or a join
3308// barrier. It is a full barrier itself, which unfortunately turns regular
3309// barriers into double barriers and join barriers into 1 1/2 barriers.
3310void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3311 std::atomic<kmp_uint32> *spin = RCAST(
3312     std::atomic<kmp_uint32> *,
3313     &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3314 int flag = FALSE;
3315 KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3316
3317#if USE_ITT_BUILD
3318 KMP_FSYNC_SPIN_INIT(spin, NULL);
3319#endif /* USE_ITT_BUILD */
3320 kmp_flag_32 spin_flag(spin, 0U);
3321 while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3322                                 &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3323#if USE_ITT_BUILD
3324 // TODO: What about itt_sync_obj??
3325 KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
3326#endif /* USE_ITT_BUILD */
3327
3328 if (TCR_4(__kmp_global.g.g_done)) {
3329 if (__kmp_global.g.g_abort)
3330 __kmp_abort_thread();
3331 break;
3332 }
3333 KMP_YIELD(TRUE); // GH: We always yield here
3334 }
3335#if USE_ITT_BUILD
3336 KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
3337#endif /* USE_ITT_BUILD */
3338}
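// --- Illustrative sketch (not from the original kmp_tasking.cpp): the loop
// above is a task-executing spin barrier: rather than idling, the spinning
// thread keeps draining tasks until the shared counter reaches zero. In
// outline, with execute_one_task() and runtime_shutting_down as hypothetical
// stand-ins for spin_flag.execute_tasks() and the __kmp_global.g.g_done check:
//
//   while (counter->load(std::memory_order_acquire) != 0) {
//     if (!execute_one_task()) // nothing available to run right now
//       yield();               // KMP_YIELD(TRUE) in the real loop
//     if (runtime_shutting_down)
//       break;
//   }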
3339
3340#if OMP_45_ENABLED
3341
3342// __kmp_give_task puts a task into a given thread queue if:
3343// - the queue for that thread was created
3344// - there's space in that queue
3345// Because of this, __kmp_push_task needs to check if there's space after
3346// getting the lock
3347static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3348 kmp_int32 pass) {
3349 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3350 kmp_task_team_t *task_team = taskdata->td_task_team;
3351
3352 KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3353               taskdata, tid));
3354
3355 // If task_team is NULL something went really bad...
3356 KMP_DEBUG_ASSERT(task_team != NULL);
3357
3358 bool result = false;
3359 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3360
3361 if (thread_data->td.td_deque == NULL) {
3362 // There's no queue in this thread, go find another one
3363 // We're guaranteed that at least one thread has a queue
3364 KA_TRACE(30,
3365          ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3366           tid, taskdata));
3367 return result;
3368 }
3369
3370 if (TCR_4(thread_data->td.td_deque_ntasks) >=
3371     TASK_DEQUE_SIZE(thread_data->td)) {
3372 KA_TRACE(
3373     30,
3374     ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3375      taskdata, tid));
3376
3377 // if this deque is bigger than the pass ratio give a chance to another
3378 // thread
3379 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3380 return result;
3381
3382 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3383 __kmp_realloc_task_deque(thread, thread_data);
3384
3385 } else {
3386
3387 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3388
3389 if (TCR_4(thread_data->td.td_deque_ntasks) >=
3390     TASK_DEQUE_SIZE(thread_data->td)) {
3391 KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3392               "thread %d.\n",
3393               taskdata, tid));
3394
3395 // if this deque is bigger than the pass ratio give a chance to another
3396 // thread
3397 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3398 goto release_and_exit;
3399
3400 __kmp_realloc_task_deque(thread, thread_data);
3401 }
3402 }
3403
3404 // lock is held here, and there is space in the deque
3405
3406 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3407 // Wrap index.
3408 thread_data->td.td_deque_tail =
3409     (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3410 TCW_4(thread_data->td.td_deque_ntasks,
3411       TCR_4(thread_data->td.td_deque_ntasks) + 1);
3412
3413 result = true;
3414 KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",if (kmp_a_debug >= 30) { __kmp_debug_printf ("__kmp_give_task: successfully gave task %p to thread %d.\n"
, taskdata, tid); }
3415 taskdata, tid))if (kmp_a_debug >= 30) { __kmp_debug_printf ("__kmp_give_task: successfully gave task %p to thread %d.\n"
, taskdata, tid); }
;
3416
3417release_and_exit:
3418 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3419
3420 return result;
3421}
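// --- Illustrative sketch (not from the original kmp_tasking.cpp): the tail
// update above assumes the deque size is a power of two, so that
// TASK_DEQUE_MASK(td) == td_deque_size - 1 and "(tail + 1) & mask" wraps the
// ring index without a division. A self-contained check of that identity:
#include <cassert>
namespace toy_deque_wrap_sketch {
inline unsigned next_index(unsigned tail, unsigned size_pow2) {
  return (tail + 1) & (size_pow2 - 1); // equivalent to (tail + 1) % size_pow2
}
inline void demo() {
  assert(next_index(254, 256) == 255); // normal advance
  assert(next_index(255, 256) == 0);   // wrap-around at the end of the ring
}
} // namespace toy_deque_wrap_sketch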
3422
3423/* The finish of the proxy tasks is divided in two pieces:
3424 - the top half is the one that can be done from a thread outside the team
3425 - the bottom half must be run from a thread within the team
3426
3427 In order to run the bottom half the task gets queued back into one of the
3428 threads of the team. Once the td_incomplete_child_task counter of the parent
3429 is decremented the threads can leave the barriers. So, the bottom half needs
3430 to be queued before the counter is decremented. The top half is therefore
3431 divided in two parts:
3432 - things that can be run before queuing the bottom half
3433 - things that must be run after queuing the bottom half
3434
3435 This creates a second race as the bottom half can free the task before the
3436 second top half is executed. To avoid this we use the
3437 td_incomplete_child_task of the proxy task to synchronize the top and bottom
3438 half. */
3439static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3440 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3441 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3442 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3443 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3444
3445 taskdata->td_flags.complete = 1; // mark the task as completed
3446
3447 if (taskdata->td_taskgroup)
3448 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3449
3450 // Create an imaginary child for this task so the bottom half cannot
3451 // release the task before we have completed the second top half
3452 KMP_ATOMIC_INC(&taskdata->td_incomplete_child_tasks);
3453}
3454
3455static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3456 kmp_int32 children = 0;
3457
3458 // Predecrement simulated by "- 1" calculation
3459 children =
3460     KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3461 KMP_DEBUG_ASSERT(children >= 0);
3462
3463 // Remove the imaginary child
3464 KMP_ATOMIC_DEC(&taskdata->td_incomplete_child_tasks);
3465}
3466
3467static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3468 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3469 kmp_info_t *thread = __kmp_threads[gtid];
3470
3471 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3472 KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3473                  1); // top half must run before bottom half
3474
3475 // We need to wait to make sure the top half is finished
3476 // Spinning here should be ok as this should happen quickly
3477 while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) > 0)
3478   ;
3479
3480 __kmp_release_deps(gtid, taskdata);
3481 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3482}
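// --- Illustrative sketch (not from the original kmp_tasking.cpp): how the
// three halves above interlock through the proxy's own child counter. The
// first top half bumps it (the "imaginary child"), the second top half drops
// it, and the bottom half spins on it before freeing, so the task cannot be
// released between the two top halves even if the bottom half starts early:
//
//   __kmp_first_top_half_finish_proxy(td);  // complete=1, INC imaginary child
//   /* bottom half may be queued and may start running here */
//   __kmp_second_top_half_finish_proxy(td); // DEC parent, DEC imaginary child
//   /* bottom half: spins while td_incomplete_child_tasks > 0, then frees */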
3483
3484/*!
3485@ingroup TASKING
3486@param gtid Global Thread ID of encountering thread
3487@param ptask Task whose execution is completed
3488
3489Execute the completion of a proxy task from a thread that is part of the
3490team. Run the first and bottom halves directly.
3491*/
3492void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3493 KMP_DEBUG_ASSERT(ptask != NULL);
3494 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3495 KA_TRACE(
3496     10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3497          gtid, taskdata));
3498
3499 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3500
3501 __kmp_first_top_half_finish_proxy(taskdata);
3502 __kmp_second_top_half_finish_proxy(taskdata);
3503 __kmp_bottom_half_finish_proxy(gtid, ptask);
3504
3505 KA_TRACE(10,
3506          ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3507           gtid, taskdata));
3508}
3509
3510/*!
3511@ingroup TASKING
3512@param ptask Task whose execution is completed
3513
3514Execute the completion of a proxy task from a thread that may not belong to
3515the team.
3516*/
3517void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3518 KMP_DEBUG_ASSERT(ptask != NULL);
3519 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3520
3521 KA_TRACE(
3522     10,
3523     ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3524      taskdata));
3525
3526 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3527
3528 __kmp_first_top_half_finish_proxy(taskdata);
3529
3530 // Enqueue task to complete bottom half completion from a thread within the
3531 // corresponding team
3532 kmp_team_t *team = taskdata->td_team;
3533 kmp_int32 nthreads = team->t.t_nproc;
3534 kmp_info_t *thread;
3535
3536 // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3537 // but we cannot use __kmp_get_random here
3538 kmp_int32 start_k = 0;
3539 kmp_int32 pass = 1;
3540 kmp_int32 k = start_k;
3541
3542 do {
3543 // For now we're just linearly trying to find a thread
3544 thread = team->t.t_threads[k];
3545 k = (k + 1) % nthreads;
3546
3547 // we did a full pass through all the threads
3548 if (k == start_k)
3549 pass = pass << 1;
3550
3551 } while (!__kmp_give_task(thread, k, ptask, pass));
3552
3553 __kmp_second_top_half_finish_proxy(taskdata);
3554
3555 KA_TRACE(
3556     10,
3557     ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
3558      taskdata));
3559}
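// --- Illustrative sketch (not from the original kmp_tasking.cpp): the
// enqueue loop above walks the team round-robin and doubles "pass" after each
// full circle, so __kmp_give_task accepts ever fuller deques before
// reallocating or giving up. A minimal model, with try_give as a hypothetical
// stand-in for __kmp_give_task:
namespace toy_give_task_sketch {
template <typename TryGive> // TryGive: bool(int tid, int pass)
inline void place_somewhere(int nthreads, TryGive try_give) {
  int pass = 1;
  int start_k = 0, k = start_k;
  for (;;) {
    int tid = k;
    k = (k + 1) % nthreads;
    if (k == start_k)
      pass <<= 1; // completed a full circle: relax the fullness threshold
    if (try_give(tid, pass))
      break;
  }
}
} // namespace toy_give_task_sketch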
3560
3561// __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
3562// for taskloop
3563//
3564// thread: allocating thread
3565// task_src: pointer to source task to be duplicated
3566// returns: a pointer to the allocated kmp_task_t structure (task).
3567kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
3568 kmp_task_t *task;
3569 kmp_taskdata_t *taskdata;
3570 kmp_taskdata_t *taskdata_src;
3571 kmp_taskdata_t *parent_task = thread->th.th_current_task;
3572 size_t shareds_offset;
3573 size_t task_size;
3574
3575 KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,if (kmp_a_debug >= 10) { __kmp_debug_printf ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n"
, thread, task_src); }
3576 task_src))if (kmp_a_debug >= 10) { __kmp_debug_printf ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n"
, thread, task_src); }
;
3577 taskdata_src = KMP_TASK_TO_TASKDATA(task_src)(((kmp_taskdata_t *)task_src) - 1);
3578 KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==if (!(taskdata_src->td_flags.proxy == 0)) { __kmp_debug_assert
("taskdata_src->td_flags.proxy == 0", "/build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/kmp_tasking.cpp"
, 3579); }
3579 TASK_FULL)if (!(taskdata_src->td_flags.proxy == 0)) { __kmp_debug_assert
("taskdata_src->td_flags.proxy == 0", "/build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/kmp_tasking.cpp"
, 3579); }
; // it should not be proxy task
3580 KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT)if (!(taskdata_src->td_flags.tasktype == 1)) { __kmp_debug_assert
("taskdata_src->td_flags.tasktype == 1", "/build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/kmp_tasking.cpp"
, 3580); }
;
3581 task_size = taskdata_src->td_size_alloc;
3582
3583 // Allocate a kmp_taskdata_t block and a kmp_task_t block.
3584 KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,if (kmp_a_debug >= 30) { __kmp_debug_printf ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n"
, thread, task_size); }
3585 task_size))if (kmp_a_debug >= 30) { __kmp_debug_printf ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n"
, thread, task_size); }
;
3586#if USE_FAST_MEMORY3
3587 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size)___kmp_fast_allocate((thread), (task_size), "/build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/kmp_tasking.cpp"
, 3587)
;
3588#else
3589 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size)___kmp_thread_malloc((thread), (task_size), "/build/llvm-toolchain-snapshot-8~svn345461/projects/openmp/runtime/src/kmp_tasking.cpp"
, 3589)
;
3590#endif /* USE_FAST_MEMORY */
3591 KMP_MEMCPYmemcpy(taskdata, taskdata_src, task_size);
3592
3593 task = KMP_TASKDATA_TO_TASK(taskdata);
3594
3595 // Initialize new task (only specific fields not affected by memcpy)
3596 taskdata->td_task_id = KMP_GEN_TASK_ID();
3597 if (task->shareds != NULL) { // need to set up shareds pointer
3598 shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
3599 task->shareds = &((char *)taskdata)[shareds_offset];
3600 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
3601                  0);
3602 }
3603 taskdata->td_alloc_thread = thread;
3604 taskdata->td_parent = parent_task;
3605 taskdata->td_taskgroup =
3606 parent_task
3607 ->td_taskgroup; // task inherits the taskgroup from the parent task
3608
3609 // Only need to keep track of child task counts if team parallel and tasking
3610 // not serialized
3611 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
3612 KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
3613 if (parent_task->td_taskgroup)
3614   KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
3615 // Only need to keep track of allocated child tasks for explicit tasks since
3616 // implicit not deallocated
3617 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
3618   KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
3619 }
3620
3621 KA_TRACE(20,
3622          ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
3623           thread, taskdata, taskdata->td_parent));
3624#if OMPT_SUPPORT
3625 if (UNLIKELY(ompt_enabled.enabled))
3626 __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
3627#endif
3628 return task;
3629}
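// --- Illustrative sketch (not from the original kmp_tasking.cpp): the
// duplication above byte-copies a single allocation that holds the
// kmp_taskdata_t header immediately followed by the kmp_task_t payload, so an
// interior pointer such as task->shareds must be rebased by its offset from
// the old block. A minimal model of that fix-up (blob is hypothetical):
#include <cstddef>
#include <cstdlib>
#include <cstring>
namespace toy_task_dup_sketch {
struct blob {
  char *interior; // stands in for task->shareds
  char data[64];  // stands in for the rest of the header + task block
};
inline blob *dup(const blob *src) {
  blob *dst = static_cast<blob *>(std::malloc(sizeof(blob)));
  if (dst == nullptr)
    return nullptr;
  std::memcpy(dst, src, sizeof(blob)); // raw copy, like KMP_MEMCPY above
  if (src->interior != nullptr) {      // rebase: same offset, new base address
    std::size_t off = src->interior - reinterpret_cast<const char *>(src);
    dst->interior = reinterpret_cast<char *>(dst) + off;
  }
  return dst;
}
} // namespace toy_task_dup_sketch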
3630
3631// Routine optionally generated by the compiler for setting the lastprivate flag
3632// and calling needed constructors for private/firstprivate objects
3633// (used to form taskloop tasks from pattern task)
3634// Parameters: dest task, src task, lastprivate flag.
3635typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
3636
3637KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
3638
3639// class to encapsulate manipulating loop bounds in a taskloop task.
3640// this abstracts away the Intel vs GOMP taskloop interface for setting/getting
3641// the loop bound variables.
3642class kmp_taskloop_bounds_t {
3643 kmp_task_t *task;
3644 const kmp_taskdata_t *taskdata;
3645 size_t lower_offset;
3646 size_t upper_offset;
3647
3648public:
3649 kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
3650 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
3651 lower_offset((char *)lb - (char *)task),
3652 upper_offset((char *)ub - (char *)task) {
3653 KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
3654 KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
3655 }
3656 kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
3657 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
3658 lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
3659 size_t get_lower_offset() const { return lower_offset; }
3660 size_t get_upper_offset() const { return upper_offset; }
3661 kmp_uint64 get_lb() const {
3662 kmp_int64 retval;
3663#if defined(KMP_GOMP_COMPAT)
3664 // Intel task just returns the lower bound normally
3665 if (!taskdata->td_flags.native) {
3666 retval = *(kmp_int64 *)((char *)task + lower_offset);
3667 } else {
3668 // GOMP task has to take into account the sizeof(long)
3669 if (taskdata->td_size_loop_bounds == 4) {
3670 kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
3671 retval = (kmp_int64)*lb;
3672 } else {
3673 kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
3674 retval = (kmp_int64)*lb;
3675 }
3676 }
3677#else
3678 retval = *(kmp_int64 *)((char *)task + lower_offset);
3679#endif // defined(KMP_GOMP_COMPAT)
3680 return retval;
3681 }
3682 kmp_uint64 get_ub() const {
3683 kmp_int64 retval;
3684#if defined(KMP_GOMP_COMPAT)
3685 // Intel task just returns the upper bound normally
3686 if (!taskdata->td_flags.native) {
3687 retval = *(kmp_int64 *)((char *)task + upper_offset);
3688 } else {
3689 // GOMP task has to take into account the sizeof(long)
3690 if (taskdata->td_size_loop_bounds == 4) {
3691 kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
3692 retval = (kmp_int64)*ub;
3693 } else {
3694 kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
3695 retval = (kmp_int64)*ub;
3696 }
3697 }
3698#else
3699 retval = *(kmp_int64 *)((char *)task + upper_offset);
3700#endif // defined(KMP_GOMP_COMPAT)
3701 return retval;
3702 }
3703 void set_lb(kmp_uint64 lb) {
3704#if defined(KMP_GOMP_COMPAT)
3705 // Intel task just sets the lower bound normally
3706 if (!taskdata->td_flags.native) {
3707 *(kmp_uint64 *)((char *)task + lower_offset) = lb;
3708 } else {
3709 // GOMP task has to take into account the sizeof(long)
3710 if (taskdata->td_size_loop_bounds == 4) {
3711 kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
3712 *lower = (kmp_uint32)lb;
3713 } else {
3714 kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
3715 *lower = (kmp_uint64)lb;
3716 }
3717 }
3718#else
3719 *(kmp_uint64 *)((char *)task + lower_offset) = lb;
3720#endif // defined(KMP_GOMP_COMPAT)
3721 }
3722 void set_ub(kmp_uint64 ub) {
3723#if defined(KMP_GOMP_COMPAT)
3724 // Intel task just sets the upper bound normally
3725 if (!taskdata->td_flags.native) {
3726 *(kmp_uint64 *)((char *)task + upper_offset) = ub;
3727 } else {
3728 // GOMP task has to take into account the sizeof(long)
3729 if (taskdata->td_size_loop_bounds == 4) {
3730 kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
3731 *upper = (kmp_uint32)ub;
3732 } else {
3733 kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
3734 *upper = (kmp_uint64)ub;
3735 }
3736 }
3737#else
3738 *(kmp_uint64 *)((char *)task + upper_offset) = ub;
3739#endif // defined(KMP_GOMP_COMPAT)
3740 }
3741};
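// --- Illustrative sketch (not from the original kmp_tasking.cpp): the two
// layouts the accessors above abstract over. For an Intel-style task the
// 64-bit bounds live at fixed byte offsets inside kmp_task_t; for a
// GOMP-native task they sit in shareds as two adjacent C "long"s whose width
// (4 or 8 bytes, per the static assert above) is kept in td_size_loop_bounds.
// Roughly:
//
//   Intel: *(kmp_uint64 *)((char *)task + lower_offset)  // lb
//          *(kmp_uint64 *)((char *)task + upper_offset)  // ub
//   GOMP:  ((long *)task->shareds)[0]                    // lb
//          ((long *)task->shareds)[1]                    // ub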
3742
3743// __kmp_taskloop_linear: Start tasks of the taskloop linearly
3744//
3745// loc Source location information
3746// gtid Global thread ID
3747// task Pattern task, exposes the loop iteration range
3748// lb Pointer to loop lower bound in task structure
3749// ub Pointer to loop upper bound in task structure
3750// st Loop stride
3751// ub_glob Global upper bound (used for lastprivate check)
3752// num_tasks Number of tasks to execute
3753// grainsize Number of loop iterations per task
3754// extras Number of chunks with grainsize+1 iterations
3755// tc Iterations count
3756// task_dup Tasks duplication routine
3757// codeptr_ra Return address for OMPT events
3758void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
3759 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3760 kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3761 kmp_uint64 grainsize, kmp_uint64 extras,
3762 kmp_uint64 tc,
3763#if OMPT_SUPPORT
3764 void *codeptr_ra,
3765#endif
3766 void *task_dup) {
3767 KMP_COUNT_BLOCK(OMP_TASKLOOP);
3768 KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
3769 p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3770 // compiler provides global bounds here
3771 kmp_taskloop_bounds_t task_bounds(task, lb, ub);
3772 kmp_uint64 lower = task_bounds.get_lb();
3773 kmp_uint64 upper = task_bounds.get_ub();
3774 kmp_uint64 i;
3775 kmp_info_t *thread = __kmp_threads[gtid];
3776 kmp_taskdata_t *current_task = thread->th.th_current_task;
3777 kmp_task_t *next_task;
3778 kmp_int32 lastpriv = 0;
3779
3780 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3781 KMP_DEBUG_ASSERT(num_tasks > extras);
3782 KMP_DEBUG_ASSERT(num_tasks > 0);
3783 KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
"extras %lld, i=%lld,%lld(%d)%lld, dup %p\n", gtid, num_tasks
, grainsize, extras, lower, upper, ub_glob, st, task_dup); }
3784 "extras %lld, i=%lld,%lld(%d)%lld, dup %p\n",if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
"extras %lld, i=%lld,%lld(%d)%lld, dup %p\n", gtid, num_tasks
, grainsize, extras, lower, upper, ub_glob, st, task_dup); }
3785 gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
"extras %lld, i=%lld,%lld(%d)%lld, dup %p\n", gtid, num_tasks
, grainsize, extras, lower, upper, ub_glob, st, task_dup); }
3786 task_dup))if (kmp_a_debug >= 20) { __kmp_debug_printf ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
"extras %lld, i=%lld,%lld(%d)%lld, dup %p\n", gtid, num_tasks
, grainsize, extras, lower, upper, ub_glob, st, task_dup); }
;
3787
3788 // Launch num_tasks tasks, assign grainsize iterations each task
3789 for (i = 0; i < num_tasks; ++i) {
3790 kmp_uint64 chunk_minus_1;
3791 if (extras == 0) {
3792 chunk_minus_1 = grainsize - 1;
3793 } else {
3794 chunk_minus_1 = grainsize;
3795 --extras; // first extras iterations get bigger chunk (grainsize+1)
3796 }
3797 upper = lower + st * chunk_minus_1;
3798 if (i == num_tasks - 1) {
3799 // schedule the last task, set lastprivate flag if needed
3800 if (st == 1) { // most common case
3801 KMP_DEBUG_ASSERT(upper == *ub);
3802 if (upper == ub_glob)
3803 lastpriv = 1;
3804 } else if (st > 0) { // positive loop stride
3805 KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
3806 if ((kmp_uint64)st > ub_glob - upper)
3807 lastpriv = 1;
3808 } else { // negative loop stride
3809 KMP_DEBUG_ASSERT(upper + st < *ub);
3810 if (upper - ub_glob < (kmp_uint64)(-st))
3811 lastpriv = 1;
3812 }
3813 }
3814 next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
3815 kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
3816 kmp_taskloop_bounds_t next_task_bounds =
3817 kmp_taskloop_bounds_t(next_task, task_bounds);
3818
3819 // adjust task-specific bounds
3820 next_task_bounds.set_lb(lower);
3821 if (next_taskdata->td_flags.native) {
3822 next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
3823 } else {
3824 next_task_bounds.set_ub(upper);
3825 }
3826 if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates, etc.
3827 ptask_dup(next_task, task, lastpriv);
3828 KA_TRACE(40,
3829          ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
3830           "upper %lld stride %lld, (offsets %p %p)\n",
3831           gtid, i, next_task, lower, upper, st,
3832           next_task_bounds.get_lower_offset(),
3833           next_task_bounds.get_upper_offset()));
3834#if OMPT_SUPPORT
3835 __kmp_omp_taskloop_task(NULL, gtid, next_task,
3836 codeptr_ra); // schedule new task
3837#else
3838 __kmp_omp_task(gtid, next_task, true); // schedule new task
3839#endif
3840 lower = upper + st; // adjust lower bound for the next iteration
3841 }
3842 // free the pattern task and exit
3843 __kmp_task_start(gtid, task, current_task); // make internal bookkeeping
3844 // do not execute the pattern task, just do internal bookkeeping
3845 __kmp_task_finish<false>(gtid, task, current_task);
3846}
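// --- Worked example (not from the original source): with tc = 10 iterations
// split into num_tasks = 4 and grainsize = 2, the invariant checked above
// gives extras = 10 - 4 * 2 = 2, so the loop hands out chunks of 3, 3, 2, 2
// iterations; the first "extras" tasks take grainsize + 1 iterations each and
// tc == num_tasks * grainsize + extras holds throughout.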
3847
3848// Structure to keep taskloop parameters for auxiliary task
3849// kept in the shareds of the task structure.
3850typedef struct __taskloop_params {
3851 kmp_task_t *task;
3852 kmp_uint64 *lb;
3853 kmp_uint64 *ub;
3854 void *task_dup;
3855 kmp_int64 st;
3856 kmp_uint64 ub_glob;
3857 kmp_uint64 num_tasks;
3858 kmp_uint64 grainsize;
3859 kmp_uint64 extras;
3860 kmp_uint64 tc;
3861 kmp_uint64 num_t_min;
3862#if OMPT_SUPPORT
3863 void *codeptr_ra;
3864#endif
3865} __taskloop_params_t;
3866
3867void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
3868 kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
3869 kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
3870#if OMPT_SUPPORT
3871 void *,
3872#endif
3873 void *);
3874
3875// Execute part of the taskloop submitted as a task.
3876int __kmp_taskloop_task(int gtid, void *ptask) {
3877 __taskloop_params_t *p =
3878 (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
3879 kmp_task_t *task = p->task;
3880 kmp_uint64 *lb = p->lb;
3881 kmp_uint64 *ub = p->ub;
3882 void *task_dup = p->task_dup;
3883 // p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3884 kmp_int64 st = p->st;
3885 kmp_uint64 ub_glob = p->ub_glob;
3886 kmp_uint64 num_tasks = p->num_tasks;
3887 kmp_uint64 grainsize = p->grainsize;
3888 kmp_uint64 extras = p->extras;
3889 kmp_uint64 tc = p->tc;
3890 kmp_uint64 num_t_min = p->num_t_min;
3891#if OMPT_SUPPORT
3892 void *codeptr_ra = p->codeptr_ra;
3893#endif
3894#if KMP_DEBUG
3895 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3896 KMP_DEBUG_ASSERT(task != NULL);
3897 KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
3898               " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
3899               gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
3900               task_dup));
3901#endif
3902 KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
3903 if (num_tasks > num_t_min)
3904 __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3905 grainsize, extras, tc, num_t_min,
3906#if OMPT_SUPPORT
3907 codeptr_ra,
3908#endif
3909 task_dup);
3910 else
3911 __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3912 grainsize, extras, tc,
3913#if OMPT_SUPPORT
3914 codeptr_ra,
3915#endif
3916 task_dup);
3917
3918 KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid))if (kmp_a_debug >= 40) { __kmp_debug_printf ("__kmp_taskloop_task(exit): T#%d\n"
, gtid); }
;
3919 return 0;
3920}
3921
3922// Schedule part of the taskloop as a task,
3923// execute the rest of the taskloop.
3924//
3925// loc Source location information
3926// gtid Global thread ID
3927// task Pattern task, exposes the loop iteration range
3928// lb Pointer to loop lower bound in task structure
3929// ub Pointer to loop upper bound in task structure
3930// st Loop stride
3931// ub_glob Global upper bound (used for lastprivate check)
3932// num_tasks Number of tasks to execute
3933// grainsize Number of loop iterations per task
3934// extras Number of chunks with grainsize+1 iterations
3935// tc Iterations count
3936// num_t_min Threshold to launch tasks recursively
3937// task_dup Tasks duplication routine
3938// codeptr_ra Return address for OMPT events
3939void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
3940 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3941 kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3942 kmp_uint64 grainsize, kmp_uint64 extras,
3943 kmp_uint64 tc, kmp_uint64 num_t_min,
3944#if OMPT_SUPPORT
3945 void *codeptr_ra,
3946#endif
3947 void *task_dup) {
3948#if KMP_DEBUG
3949 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3950 KMP_DEBUG_ASSERT(task != NULL);
3951 KMP_DEBUG_ASSERT(num_tasks > num_t_min);
3952 KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
3953               " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
3954               gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
3955               task_dup));
3956#endif
3957 p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3958 kmp_uint64 lower = *lb;
3959 kmp_info_t *thread = __kmp_threads[gtid];
3960 // kmp_taskdata_t *current_task = thread->th.th_current_task;
3961 kmp_task_t *next_task;
3962 size_t lower_offset =
3963 (char *)lb - (char *)task; // remember offset of lb in the task structure
3964 size_t upper_offset =
3965 (char *)ub - (char *)task; // remember offset of ub in the task structure
3966
3967 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3968 KMP_DEBUG_ASSERT(num_tasks > extras);
3969 KMP_DEBUG_ASSERT(num_tasks > 0);
3970
3971 // split the loop in two halves
3972 kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
3973 kmp_uint64 gr_size0 = grainsize;
3974 kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
3975 kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
3976 if (n_tsk0 <= extras) {
3977 gr_size0++; // integrate extras into grainsize
3978 ext0 = 0; // no extra iters in 1st half
3979 ext1 = extras - n_tsk0; // remaining extras
3980 tc0 = gr_size0 * n_tsk0;
3981 tc1 = tc - tc0;
3982 } else { // n_tsk0 > extras
3983 ext1 = 0; // no extra iters in 2nd half
3984 ext0 = extras;
3985 tc1 = grainsize * n_tsk1;
3986 tc0 = tc - tc1;
3987 }
3988 ub0 = lower + st * (tc0 - 1);
3989 lb1 = ub0 + st;
3990
3991 // create pattern task for 2nd half of the loop
3992 next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
3993 // adjust lower bound (upper bound is not changed) for the 2nd half
3994 *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
3995 if (ptask_dup != NULL) // construct firstprivates, etc.
3996 ptask_dup(next_task, task, 0);
3997 *ub = ub0; // adjust upper bound for the 1st half
3998
3999 // create auxiliary task for 2nd half of the loop
4000 kmp_task_t *new_task =
4001 __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
4002 sizeof(__taskloop_params_t), &__kmp_taskloop_task);
4003 __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
4004 p->task = next_task;
4005 p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
4006 p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
4007 p->task_dup = task_dup;
4008 p->st = st;
4009 p->ub_glob = ub_glob;
4010 p->num_tasks = n_tsk1;
4011 p->grainsize = grainsize;
4012 p->extras = ext1;
4013 p->tc = tc1;
4014 p->num_t_min = num_t_min;
4015#if OMPT_SUPPORT
4016 p->codeptr_ra = codeptr_ra;
4017#endif
4018
4019#if OMPT_SUPPORT
4020 // schedule new task with correct return address for OMPT events
4021 __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
4022#else
4023 __kmp_omp_task(gtid, new_task, true); // schedule new task
4024#endif
4025
4026 // execute the 1st half of current subrange
4027 if (n_tsk0 > num_t_min)
4028 __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
4029 ext0, tc0, num_t_min,
4030#if OMPT_SUPPORT
4031 codeptr_ra,
4032#endif
4033 task_dup);
4034 else
4035 __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
4036 gr_size0, ext0, tc0,
4037#if OMPT_SUPPORT
4038 codeptr_ra,
4039#endif
4040 task_dup);
4041
4042 KA_TRACE(40, ("__kmpc_taskloop_recur(exit): T#%d\n", gtid))if (kmp_a_debug >= 40) { __kmp_debug_printf ("__kmpc_taskloop_recur(exit): T#%d\n"
, gtid); }
;
4043}
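// --- Worked example (not from the original source): splitting num_tasks = 5,
// grainsize = 2, extras = 1 (tc = 11) gives n_tsk0 = 2 and n_tsk1 = 3. Since
// n_tsk0 > extras, the else-branch above runs: ext0 = 1, ext1 = 0,
// tc1 = 2 * 3 = 6 and tc0 = 11 - 6 = 5, so both halves preserve the invariant
// tc == num_tasks * grainsize + extras (5 == 2*2 + 1 and 6 == 3*2 + 0).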
4044
4045/*!
4046@ingroup TASKING
4047@param loc Source location information
4048@param gtid Global thread ID
4049@param task Task structure
4050@param if_val Value of the if clause
4051@param lb Pointer to loop lower bound in task structure
4052@param ub Pointer to loop upper bound in task structure
4053@param st Loop stride
4054@param nogroup Flag, 1 if nogroup clause specified, 0 otherwise
4055@param sched Schedule specified 0/1/2 for none/grainsize/num_tasks
4056@param grainsize Schedule value if specified
4057@param task_dup Tasks duplication routine
4058
4059Execute the taskloop construct.
4060*/
4061void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4062 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
4063 int sched, kmp_uint64 grainsize, void *task_dup) {
4064 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4065 KMP_DEBUG_ASSERT(task != NULL);
4066
4067 if (nogroup == 0) {
4068#if OMPT_SUPPORT && OMPT_OPTIONAL
4069 OMPT_STORE_RETURN_ADDRESS(gtid);
4070#endif
4071 __kmpc_taskgroup(loc, gtid);
4072 }
4073
4074 // =========================================================================
4075 // calculate loop parameters
4076 kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4077 kmp_uint64 tc;
4078 // compiler provides global bounds here
4079 kmp_uint64 lower = task_bounds.get_lb();
4080 kmp_uint64 upper = task_bounds.get_ub();
4081 kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
4082 kmp_uint64 num_tasks = 0, extras = 0;
4083 kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
4084 kmp_info_t *thread = __kmp_threads[gtid];
4085 kmp_taskdata_t *current_task = thread->th.th_current_task;
4086
4087 KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
4088 "grain %llu(%d), dup %p\n",
4089 gtid, taskdata, lower, upper, st, grainsize, sched, task_dup));
4090
4091 // compute trip count
4092 if (st == 1) { // most common case
4093 tc = upper - lower + 1;
4094 } else if (st < 0) {
4095 tc = (lower - upper) / (-st) + 1;
4096 } else { // st > 0
4097 tc = (upper - lower) / st + 1;
4098 }
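Editor's note: all three branches compute an inclusive trip count for the normalized bounds; e.g. lower = 0, upper = 9, st = 2 gives tc = (9 - 0) / 2 + 1 = 5 (iterations 0, 2, 4, 6, 8), and a negative stride walks downward, so lower is the larger bound. A standalone check with hypothetical values:

    // trip_count.cpp -- standalone check of the trip-count formulas above.
    #include <cassert>
    typedef unsigned long long u64;
    typedef long long s64;
    static u64 trip_count(u64 lower, u64 upper, s64 st) {
      if (st == 1) return upper - lower + 1;
      if (st < 0) return (lower - upper) / (-st) + 1;
      return (upper - lower) / st + 1;
    }
    int main() {
      assert(trip_count(0, 9, 1) == 10);
      assert(trip_count(0, 9, 2) == 5);  // 0, 2, 4, 6, 8
      assert(trip_count(0, 9, 3) == 4);  // 0, 3, 6, 9
      assert(trip_count(9, 0, -2) == 5); // 9, 7, 5, 3, 1
      return 0;
    }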
4099 if (tc == 0) {
4100 KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
4101 // free the pattern task and exit
4102 __kmp_task_start(gtid, task, current_task);
4103 // do not execute anything for zero-trip loop
4104 __kmp_task_finish<false>(gtid, task, current_task);
4105 return;
4106 }
4107
4108#if OMPT_SUPPORT && OMPT_OPTIONAL
4109 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
4110 ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
4111 if (ompt_enabled.ompt_callback_work) {
4112 ompt_callbacks.ompt_callback(ompt_callback_work)(
4113 ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
4114 &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4115 }
4116#endif
4117
4118 if (num_tasks_min == 0)
4119 // TODO: can we choose a better default heuristic?
4120 num_tasks_min =
4121 KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
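Editor's note: INITIAL_TASK_DEQUE_SIZE expands to 1 << 8 in this build (visible in the macro expansion the report attached here), so the default threshold is min(team_size * 10, 256): an 8-thread team gets num_tasks_min = 80, while any team of 26 or more threads saturates at 256. Loops that would generate more tasks than this threshold take the recursive path below.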
4122
4123 // compute num_tasks/grainsize based on the input provided
4124 switch (sched) {
4125 case 0: // no schedule clause specified, we can choose the default
4126 // let's try to schedule (team_size*10) tasks
4127 grainsize = thread->th.th_team_nproc * 10; // fall through to the num_tasks case
4128 case 2: // num_tasks provided
4129 if (grainsize > tc) {
4130 num_tasks = tc; // too big num_tasks requested, adjust values
4131 grainsize = 1;
4132 extras = 0;
4133 } else {
4134 num_tasks = grainsize;
4135 grainsize = tc / num_tasks;
4136 extras = tc % num_tasks;
4137 }
4138 break;
4139 case 1: // grainsize provided
4140 if (grainsize > tc) {
4141 num_tasks = 1; // too big grainsize requested, adjust values
4142 grainsize = tc;
4143 extras = 0;
4144 } else {
4145 num_tasks = tc / grainsize;
4146 // adjust grainsize for balanced distribution of iterations
4147 grainsize = tc / num_tasks;
4148 extras = tc % num_tasks;
4149 }
4150 break;
4151 default:
4152 KMP_ASSERT2(0, "unknown scheduling of taskloop");
4153 }
4154 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4155 KMP_DEBUG_ASSERT(num_tasks > extras);
4156 KMP_DEBUG_ASSERT(num_tasks > 0);
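Editor's note: a standalone check of the switch above, exercising both clause kinds with hypothetical values. For sched = 1 the requested grainsize can be raised to rebalance the distribution: tc = 100 with grainsize(30) yields num_tasks = 3, adjusted grainsize = 33, extras = 1, i.e. chunks of 34, 33, 33.

    // sched_calc.cpp -- standalone check of the num_tasks/grainsize math;
    // 'val' carries either the grainsize or the num_tasks clause value.
    #include <cassert>
    typedef unsigned long long u64;
    static void calc(int sched, u64 val, u64 tc, u64 &num_tasks,
                     u64 &grainsize, u64 &extras) {
      if (sched == 2) {                  // num_tasks clause
        num_tasks = val > tc ? tc : val; // never more tasks than iterations
      } else {                           // sched == 1: grainsize clause
        num_tasks = val > tc ? 1 : tc / val;
      }
      grainsize = tc / num_tasks;        // rebalanced chunk size
      extras = tc % num_tasks;           // tasks that get one extra iteration
      assert(tc == num_tasks * grainsize + extras);
    }
    int main() {
      u64 n, g, e;
      calc(1, 30, 100, n, g, e);           // grainsize(30) over 100 iters
      assert(n == 3 && g == 33 && e == 1); // chunks: 34, 33, 33
      calc(2, 8, 100, n, g, e);            // num_tasks(8) over 100 iters
      assert(n == 8 && g == 12 && e == 4); // four chunks of 13, four of 12
      return 0;
    }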
4157 // =========================================================================
4158
4159 // check the if clause value first
4160 // also force GOMP_taskloop (taskdata->td_flags.native) onto the linear path
4161 if (if_val == 0) { // if(0) specified, mark task as serial
4162 taskdata->td_flags.task_serial = 1;
4163 taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
4164 // always start serial tasks linearly
4165 __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4166 grainsize, extras, tc,
4167#if OMPT_SUPPORT
4168 OMPT_GET_RETURN_ADDRESS(0),
4169#endif
4170 task_dup);
4171 // !taskdata->td_flags.native => currently force linear spawning of tasks
4172 // for GOMP_taskloop
4173 } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
4174 KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
4175 "(%lld), grain %llu, extras %llu\n",
4176 gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4177 __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4178 grainsize, extras, tc, num_tasks_min,
4179#if OMPT_SUPPORT
4180 OMPT_GET_RETURN_ADDRESS(0),
4181#endif
4182 task_dup);
4183 } else {
4184 KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
4185 "(%lld), grain %llu, extras %llu\n",
4186 gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4187 __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4188 grainsize, extras, tc,
4189#if OMPT_SUPPORT
4190 OMPT_GET_RETURN_ADDRESS(0),
4191#endif
4192 task_dup);
4193 }
4194
4195#if OMPT_SUPPORT && OMPT_OPTIONAL
4196 if (ompt_enabled.ompt_callback_work) {
4197 ompt_callbacks.ompt_callback(ompt_callback_work)(
4198 ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
4199 &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4200 }
4201#endif
4202
4203 if (nogroup == 0) {
4204#if OMPT_SUPPORT && OMPT_OPTIONAL
4205 OMPT_STORE_RETURN_ADDRESS(gtid);
4206#endif
4207 __kmpc_end_taskgroup(loc, gtid);
4208 }
4209 KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
4210}
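Editor's note: a minimal user-level program that drives this entry point; compiled with an OpenMP toolchain (e.g. clang++ -fopenmp), the taskloop below reaches __kmpc_taskloop with sched = 1 and grainsize = 500, and the switch above computes num_tasks = 20, grainsize = 500, extras = 0 for the 10000-iteration trip count.

    // taskloop_user.cpp -- user-level construct exercising __kmpc_taskloop.
    #include <cstdio>
    int main() {
      static int a[10000];
    #pragma omp parallel
    #pragma omp single
    #pragma omp taskloop grainsize(500)
      for (int i = 0; i < 10000; ++i)
        a[i] = i; // each generated task fills a 500-iteration chunk
      std::printf("a[9999] = %d\n", a[9999]); // prints 9999
      return 0;
    }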
4211
4212#endif