File: projects/openmp/runtime/src/kmp_threadprivate.cpp
Warning: line 205, column 23: Dereference of null pointer
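
The flagged expression is on source line 205 (marked in the listing below), inside __kmp_common_destroy: the analyzer has found a path through the surrounding loops on which it believes the pointer dereferenced to reach th.th_pri_common can be null. Whether that path is feasible depends on runtime invariants (for instance, that the __kmp_threads[gtid] entry checked a few lines earlier stays non-null) which the analyzer does not model. As a minimal, self-contained illustration of this diagnostic class (editor's sketch, not code from this file):

    #include <stddef.h>
    struct node { struct node *next; int val; };

    int first_val(struct node *head) {
      if (head == NULL) {
        /* the path on which head is known to be null falls through... */
      }
      return head->val; /* ...so the analyzer warns: dereference of null pointer */
    }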
1 | /*
2 |  * kmp_threadprivate.cpp -- OpenMP threadprivate support library
3 |  */
4 |
5 | //===----------------------------------------------------------------------===//
6 | //
7 | // The LLVM Compiler Infrastructure
8 | //
9 | // This file is dual licensed under the MIT and the University of Illinois Open
10 | // Source Licenses. See LICENSE.txt for details.
11 | //
12 | //===----------------------------------------------------------------------===//
13 |
14 | #include "kmp.h"
15 | #include "kmp_i18n.h"
16 | #include "kmp_itt.h"
17 |
18 | #define USE_CHECKS_COMMON
19 |
20 | #define KMP_INLINE_SUBR 1
21 |
22 | void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
23 |                                            void *data_addr, size_t pc_size);
24 | struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
25 |                                                 void *data_addr,
26 |                                                 size_t pc_size);
27 |
28 | struct shared_table __kmp_threadprivate_d_table;
29 |
30 | static
31 | #ifdef KMP_INLINE_SUBR
32 |     __forceinline
33 | #endif
34 |     struct private_common *
35 |     __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
36 |                                          void *pc_addr)
37 |
38 | {
39 |   struct private_common *tn;
40 |
41 | #ifdef KMP_TASK_COMMON_DEBUG
42 |   KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
43 |                 "address %p\n",
44 |                 gtid, pc_addr));
45 |   dump_list();
46 | #endif
47 |
48 |   for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
49 |     if (tn->gbl_addr == pc_addr) {
50 | #ifdef KMP_TASK_COMMON_DEBUG
51 |       KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
52 |                     "node %p on list\n",
53 |                     gtid, pc_addr));
54 | #endif
55 |       return tn;
56 |     }
57 |   }
58 |   return 0;
59 | }
60 |
61 | static
62 | #ifdef KMP_INLINE_SUBR
63 |     __forceinline
64 | #endif
65 |     struct shared_common *
66 |     __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
67 |                                   void *pc_addr) {
68 |   struct shared_common *tn;
69 |
70 |   for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
71 |     if (tn->gbl_addr == pc_addr) {
72 | #ifdef KMP_TASK_COMMON_DEBUG
73 |       KC_TRACE(
74 |           10,
75 |           ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
76 |            gtid, pc_addr));
77 | #endif
78 |       return tn;
79 |     }
80 |   }
81 |   return 0;
82 | }
83 |
84 | // Create a template for the data initialized storage. Either the template is
85 | // NULL indicating zero fill, or the template is a copy of the original data.
86 | static struct private_data *__kmp_init_common_data(void *pc_addr,
87 |                                                    size_t pc_size) {
88 |   struct private_data *d;
89 |   size_t i;
90 |   char *p;
91 |
92 |   d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
93 |   /*
94 |   d->data = 0; // AC: commented out because __kmp_allocate zeroes the
95 |   memory
96 |   d->next = 0;
97 |   */
98 |   d->size = pc_size;
99 |   d->more = 1;
100 |
101 |   p = (char *)pc_addr;
102 |
103 |   for (i = pc_size; i > 0; --i) {
104 |     if (*p++ != '\0') {
105 |       d->data = __kmp_allocate(pc_size);
106 |       KMP_MEMCPY(d->data, pc_addr, pc_size);
107 |       break;
108 |     }
109 |   }
110 |
111 |   return d;
112 | }
113 |
114 | // Initialize the data area from the template.
115 | static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
116 |   char *addr = (char *)pc_addr;
117 |   int i, offset;
118 |
119 |   for (offset = 0; d != 0; d = d->next) {
120 |     for (i = d->more; i > 0; --i) {
121 |       if (d->data == 0)
122 |         memset(&addr[offset], '\0', d->size);
123 |       else
124 |         KMP_MEMCPY(&addr[offset], d->data, d->size);
125 |       offset += d->size;
126 |     }
127 |   }
128 | }
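/* [Editor's note, not in the original source] How the private_data template
   above encodes an initial image, as a small worked example:

     static int x[4];                 // all-zero initializer
     -> template: { size = 16, more = 1, data = NULL }       // zero-fill

     static int y[4] = {1, 2, 3, 4};  // nonzero bytes present
     -> template: { size = 16, more = 1, data = copy of y }  // memcpy source

   __kmp_copy_common_data then replays the list: for each node it writes
   d->more consecutive blocks of d->size bytes, zeroing when d->data is NULL
   and copying from d->data otherwise. */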
129 |
130 | /* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
131 | void __kmp_common_initialize(void) {
132 |   if (!TCR_4(__kmp_init_common)) {
133 |     int q;
134 | #ifdef KMP_DEBUG
135 |     int gtid;
136 | #endif
137 |
138 |     __kmp_threadpriv_cache_list = NULL;
139 |
140 | #ifdef KMP_DEBUG
141 |     /* verify the uber masters were initialized */
142 |     for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
143 |       if (__kmp_root[gtid]) {
144 |         KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
145 |         for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
146 |           KMP_DEBUG_ASSERT(
147 |               !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
148 |         /* __kmp_root[ gtid ]-> r.r_uber_thread ->
149 |          * th.th_pri_common -> data[ q ] = 0;*/
150 |       }
151 | #endif /* KMP_DEBUG */
152 |
153 |     for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
154 |       __kmp_threadprivate_d_table.data[q] = 0;
155 |
156 |     TCW_4(__kmp_init_common, TRUE);
157 |   }
158 | }
159 |
160 | /* Call all destructors for threadprivate data belonging to all threads.
161 |    Currently unused! */
162 | void __kmp_common_destroy(void) {
163 |   if (TCR_4(__kmp_init_common)) {
164 |     int q;
165 |
166 |     TCW_4(__kmp_init_common, FALSE);
167 |
168 |     for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
169 |       int gtid;
170 |       struct private_common *tn;
171 |       struct shared_common *d_tn;
172 |
173 |       /* C++ destructors need to be called once per thread before exiting.
174 |          Don't call destructors for master thread though unless we used copy
175 |          constructor */
176 |
177 |       for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
178 |            d_tn = d_tn->next) {
179 |         if (d_tn->is_vec) {
180 |           if (d_tn->dt.dtorv != 0) {
181 |             for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
182 |               if (__kmp_threads[gtid]) {
183 |                 if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
184 |                                        : (!KMP_UBER_GTID(gtid))) {
185 |                   tn = __kmp_threadprivate_find_task_common(
186 |                       __kmp_threads[gtid]->th.th_pri_common, gtid,
187 |                       d_tn->gbl_addr);
188 |                   if (tn) {
189 |                     (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
190 |                   }
191 |                 }
192 |               }
193 |             }
194 |             if (d_tn->obj_init != 0) {
195 |               (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
196 |             }
197 |           }
198 |         } else {
199 |           if (d_tn->dt.dtor != 0) {
200 |             for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
201 |               if (__kmp_threads[gtid]) {
202 |                 if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
203 |                                        : (!KMP_UBER_GTID(gtid))) {
204 |                   tn = __kmp_threadprivate_find_task_common(
205 |                       __kmp_threads[gtid]->th.th_pri_common, gtid, // <-- col 23: "Dereference of null pointer" reported here
206 |                       d_tn->gbl_addr);
207 |                   if (tn) {
208 |                     (*d_tn->dt.dtor)(tn->par_addr);
209 |                   }
210 |                 }
211 |               }
212 |             }
213 |             if (d_tn->obj_init != 0) {
214 |               (*d_tn->dt.dtor)(d_tn->obj_init);
215 |             }
216 |           }
217 |         }
218 |       }
219 |       __kmp_threadprivate_d_table.data[q] = 0;
220 |     }
221 |   }
222 | }
223 |
224 | /* Call all destructors for threadprivate data belonging to this thread */
225 | void __kmp_common_destroy_gtid(int gtid) {
226 |   struct private_common *tn;
227 |   struct shared_common *d_tn;
228 |
229 |   if (!TCR_4(__kmp_init_gtid)) {
230 |     // This is possible when one of multiple roots initiates early library
231 |     // termination in a sequential region while other teams are active, and its
232 |     // child threads are about to end.
233 |     return;
234 |   }
235 |
236 |   KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
237 |   if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {
238 |
239 |     if (TCR_4(__kmp_init_common)) {
240 |
241 |       /* Cannot do this here since not all threads have destroyed their data */
242 |       /* TCW_4(__kmp_init_common, FALSE); */
243 |
244 |       for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {
245 |
246 |         d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
247 |                                              tn->gbl_addr);
248 |
249 |         KMP_DEBUG_ASSERT(d_tn);
250 |
251 |         if (d_tn->is_vec) {
252 |           if (d_tn->dt.dtorv != 0) {
253 |             (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
254 |           }
255 |           if (d_tn->obj_init != 0) {
256 |             (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
257 |           }
258 |         } else {
259 |           if (d_tn->dt.dtor != 0) {
260 |             (void)(*d_tn->dt.dtor)(tn->par_addr);
261 |           }
262 |           if (d_tn->obj_init != 0) {
263 |             (void)(*d_tn->dt.dtor)(d_tn->obj_init);
264 |           }
265 |         }
266 |       }
267 |       KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
268 |                     "complete\n",
269 |                     gtid));
270 |     }
271 |   }
272 | }
273 |
274 | #ifdef KMP_TASK_COMMON_DEBUG
275 | static void dump_list(void) {
276 |   int p, q;
277 |
278 |   for (p = 0; p < __kmp_all_nth; ++p) {
279 |     if (!__kmp_threads[p])
280 |       continue;
281 |     for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
282 |       if (__kmp_threads[p]->th.th_pri_common->data[q]) {
283 |         struct private_common *tn;
284 |
285 |         KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));
286 |
287 |         for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
288 |              tn = tn->next) {
289 |           KC_TRACE(10,
290 |                    ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
291 |                     tn->gbl_addr, tn->par_addr));
292 |         }
293 |       }
294 |     }
295 |   }
296 | }
297 | #endif /* KMP_TASK_COMMON_DEBUG */
298 |
299 | // NOTE: this routine is to be called only from the serial part of the program.
300 | void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
301 |                                            void *data_addr, size_t pc_size) {
302 |   struct shared_common **lnk_tn, *d_tn;
303 |   KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
304 |                    __kmp_threads[gtid]->th.th_root->r.r_active == 0);
305 |
306 |   d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
307 |                                        pc_addr);
308 |
309 |   if (d_tn == 0) {
310 |     d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
311 |
312 |     d_tn->gbl_addr = pc_addr;
313 |     d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
314 |     /*
315 |     d_tn->obj_init = 0; // AC: commented out because __kmp_allocate
316 |     zeroes the memory
317 |     d_tn->ct.ctor = 0;
318 |     d_tn->cct.cctor = 0;;
319 |     d_tn->dt.dtor = 0;
320 |     d_tn->is_vec = FALSE;
321 |     d_tn->vec_len = 0L;
322 |     */
323 |     d_tn->cmn_size = pc_size;
324 |
325 |     __kmp_acquire_lock(&__kmp_global_lock, gtid);
326 |
327 |     lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);
328 |
329 |     d_tn->next = *lnk_tn;
330 |     *lnk_tn = d_tn;
331 |
332 |     __kmp_release_lock(&__kmp_global_lock, gtid);
333 |   }
334 | }
335 |
336 | struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
337 |                                                 void *data_addr,
338 |                                                 size_t pc_size) {
339 |   struct private_common *tn, **tt;
340 |   struct shared_common *d_tn;
341 |
342 |   /* +++++++++ START OF CRITICAL SECTION +++++++++ */
343 |   __kmp_acquire_lock(&__kmp_global_lock, gtid);
344 |
345 |   tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));
346 |
347 |   tn->gbl_addr = pc_addr;
348 |
349 |   d_tn = __kmp_find_shared_task_common(
350 |       &__kmp_threadprivate_d_table, gtid,
351 |       pc_addr); /* Only the MASTER data table exists. */
352 |
353 |   if (d_tn != 0) {
354 |     /* This threadprivate variable has already been seen. */
355 |
356 |     if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
357 |       d_tn->cmn_size = pc_size;
358 |
359 |       if (d_tn->is_vec) {
360 |         if (d_tn->ct.ctorv != 0) {
361 |           /* Construct from scratch so no prototype exists */
362 |           d_tn->obj_init = 0;
363 |         } else if (d_tn->cct.cctorv != 0) {
364 |           /* Now data initialize the prototype since it was previously
365 |            * registered */
366 |           d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
367 |           (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
368 |         } else {
369 |           d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
370 |         }
371 |       } else {
372 |         if (d_tn->ct.ctor != 0) {
373 |           /* Construct from scratch so no prototype exists */
374 |           d_tn->obj_init = 0;
375 |         } else if (d_tn->cct.cctor != 0) {
376 |           /* Now data initialize the prototype since it was previously
377 |              registered */
378 |           d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
379 |           (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
380 |         } else {
381 |           d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
382 |         }
383 |       }
384 |     }
385 |   } else {
386 |     struct shared_common **lnk_tn;
387 |
388 |     d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
389 |     d_tn->gbl_addr = pc_addr;
390 |     d_tn->cmn_size = pc_size;
391 |     d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
392 |     /*
393 |     d_tn->obj_init = 0; // AC: commented out because __kmp_allocate
394 |     zeroes the memory
395 |     d_tn->ct.ctor = 0;
396 |     d_tn->cct.cctor = 0;
397 |     d_tn->dt.dtor = 0;
398 |     d_tn->is_vec = FALSE;
399 |     d_tn->vec_len = 0L;
400 |     */
401 |     lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);
402 |
403 |     d_tn->next = *lnk_tn;
404 |     *lnk_tn = d_tn;
405 |   }
406 |
407 |   tn->cmn_size = d_tn->cmn_size;
408 |
409 |   if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
410 |     tn->par_addr = (void *)pc_addr;
411 |   } else {
412 |     tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
413 |   }
414 |
415 |   __kmp_release_lock(&__kmp_global_lock, gtid);
416 |   /* +++++++++ END OF CRITICAL SECTION +++++++++ */
417 |
418 | #ifdef USE_CHECKS_COMMON
419 |   if (pc_size > d_tn->cmn_size) {
420 |     KC_TRACE(
421 |         10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
422 |              " ,%" KMP_UINTPTR_SPEC ")\n",
423 |              pc_addr, pc_size, d_tn->cmn_size));
424 |     KMP_FATAL(TPCommonBlocksInconsist);
425 |   }
426 | #endif /* USE_CHECKS_COMMON */
427 |
428 |   tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);
429 |
430 | #ifdef KMP_TASK_COMMON_DEBUG
431 |   if (*tt != 0) {
432 |     KC_TRACE(
433 |         10,
434 |         ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
435 |          gtid, pc_addr));
436 |   }
437 | #endif
438 |   tn->next = *tt;
439 |   *tt = tn;
440 |
441 | #ifdef KMP_TASK_COMMON_DEBUG
442 |   KC_TRACE(10,
443 |            ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
444 |             gtid, pc_addr));
445 |   dump_list();
446 | #endif
447 |
448 |   /* Link the node into a simple list */
449 |
450 |   tn->link = __kmp_threads[gtid]->th.th_pri_head;
451 |   __kmp_threads[gtid]->th.th_pri_head = tn;
452 |
453 |   if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
454 |     return tn;
455 |
456 |   /* if C++ object with copy constructor, use it;
457 |    * else if C++ object with constructor, use it for the non-master copies only;
458 |    * else use pod_init and memcpy
459 |    *
460 |    * C++ constructors need to be called once for each non-master thread on
461 |    * allocate
462 |    * C++ copy constructors need to be called once for each thread on allocate */
463 |
464 |   /* C++ object with constructors/destructors; don't call constructors for
465 |      master thread though */
466 |   if (d_tn->is_vec) {
467 |     if (d_tn->ct.ctorv != 0) {
468 |       (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
469 |     } else if (d_tn->cct.cctorv != 0) {
470 |       (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
471 |     } else if (tn->par_addr != tn->gbl_addr) {
472 |       __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
473 |     }
474 |   } else {
475 |     if (d_tn->ct.ctor != 0) {
476 |       (void)(*d_tn->ct.ctor)(tn->par_addr);
477 |     } else if (d_tn->cct.cctor != 0) {
478 |       (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
479 |     } else if (tn->par_addr != tn->gbl_addr) {
480 |       __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
481 |     }
482 |   }
483 |   /* !BUILD_OPENMP_C
484 |      if (tn->par_addr != tn->gbl_addr)
485 |      __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */
486 |
487 |   return tn;
488 | }
489 |
490 | /* ------------------------------------------------------------------------ */
491 | /* We are currently parallel, and we know the thread id. */
492 | /* ------------------------------------------------------------------------ */
493 |
494 | /*!
495 |  @ingroup THREADPRIVATE
496 |
497 |  @param loc source location information
498 |  @param data pointer to data being privatized
499 |  @param ctor pointer to constructor function for data
500 |  @param cctor pointer to copy constructor function for data
501 |  @param dtor pointer to destructor function for data
502 |
503 |  Register constructors and destructors for thread private data.
504 |  This function is called when executing in parallel, when we know the thread id.
505 | */
506 | void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
507 |                                    kmpc_cctor cctor, kmpc_dtor dtor) {
508 |   struct shared_common *d_tn, **lnk_tn;
509 |
510 |   KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));
511 |
512 | #ifdef USE_CHECKS_COMMON
513 |   /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
514 |   KMP_ASSERT(cctor == 0);
515 | #endif /* USE_CHECKS_COMMON */
516 |
517 |   /* Only the global data table exists. */
518 |   d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);
519 |
520 |   if (d_tn == 0) {
521 |     d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
522 |     d_tn->gbl_addr = data;
523 |
524 |     d_tn->ct.ctor = ctor;
525 |     d_tn->cct.cctor = cctor;
526 |     d_tn->dt.dtor = dtor;
527 |     /*
528 |     d_tn->is_vec = FALSE; // AC: commented out because __kmp_allocate
529 |     zeroes the memory
530 |     d_tn->vec_len = 0L;
531 |     d_tn->obj_init = 0;
532 |     d_tn->pod_init = 0;
533 |     */
534 |     lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);
535 |
536 |     d_tn->next = *lnk_tn;
537 |     *lnk_tn = d_tn;
538 |   }
539 | }
540 |
541 | void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
542 |                            size_t size) {
543 |   void *ret;
544 |   struct private_common *tn;
545 |
546 |   KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));
547 |
548 | #ifdef USE_CHECKS_COMMON
549 |   if (!__kmp_init_serial)
550 |     KMP_FATAL(RTLNotInitialized);
551 | #endif /* USE_CHECKS_COMMON */
552 |
553 |   if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
554 |     /* The parallel address will NEVER overlap with the data_address */
555 |     /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
556 |      * data_address; use data_address = data */
557 |
558 |     KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
559 |                   global_tid));
560 |     kmp_threadprivate_insert_private_data(global_tid, data, data, size);
561 |
562 |     ret = data;
563 |   } else {
564 |     KC_TRACE(
565 |         50,
566 |         ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
567 |          global_tid, data));
568 |     tn = __kmp_threadprivate_find_task_common(
569 |         __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);
570 |
571 |     if (tn) {
572 |       KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
573 | #ifdef USE_CHECKS_COMMON
574 |       if ((size_t)size > tn->cmn_size) {
575 |         KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
576 |                       " ,%" KMP_UINTPTR_SPEC ")\n",
577 |                       data, size, tn->cmn_size));
578 |         KMP_FATAL(TPCommonBlocksInconsist);
579 |       }
580 | #endif /* USE_CHECKS_COMMON */
581 |     } else {
582 |       /* The parallel address will NEVER overlap with the data_address */
583 |       /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
584 |        * data_address = data */
585 |       KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
586 |       tn = kmp_threadprivate_insert(global_tid, data, data, size);
587 |     }
588 |
589 |     ret = tn->par_addr;
590 |   }
591 |   KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
592 |                 global_tid, ret));
593 |
594 |   return ret;
595 | }
596 |
597 | static kmp_cached_addr_t *__kmp_find_cache(void *data) {
598 |   kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
599 |   while (ptr && ptr->data != data)
600 |     ptr = ptr->next;
601 |   return ptr;
602 | }
603 |
604 | /*!
605 |  @ingroup THREADPRIVATE
606 |  @param loc source location information
607 |  @param global_tid global thread number
608 |  @param data pointer to data to privatize
609 |  @param size size of data to privatize
610 |  @param cache pointer to cache
611 |  @return pointer to private storage
612 |
613 |  Allocate private storage for threadprivate data.
614 | */
615 | void *
616 | __kmpc_threadprivate_cached(ident_t *loc,
617 |                             kmp_int32 global_tid, // gtid.
618 |                             void *data, // Pointer to original global variable.
619 |                             size_t size, // Size of original global variable.
620 |                             void ***cache) {
621 |   KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
622 |                 "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
623 |                 global_tid, *cache, data, size));
624 |
625 |   if (TCR_PTR(*cache) == 0) {
626 |     __kmp_acquire_lock(&__kmp_global_lock, global_tid);
627 |
628 |     if (TCR_PTR(*cache) == 0) {
629 |       __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
630 |       // Compiler often passes in NULL cache, even if it's already been created
631 |       void **my_cache;
632 |       kmp_cached_addr_t *tp_cache_addr;
633 |       // Look for an existing cache
634 |       tp_cache_addr = __kmp_find_cache(data);
635 |       if (!tp_cache_addr) { // Cache was never created; do it now
636 |         __kmp_tp_cached = 1;
637 |         KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(
638 |                            sizeof(void *) * __kmp_tp_capacity +
639 |                            sizeof(kmp_cached_addr_t)););
640 |         // No need to zero the allocated memory; __kmp_allocate does that.
641 |         KC_TRACE(50, ("__kmpc_threadprivate_cached: T#%d allocated cache at "
642 |                       "address %p\n",
643 |                       global_tid, my_cache));
644 |         /* TODO: free all this memory in __kmp_common_destroy using
645 |          * __kmp_threadpriv_cache_list */
646 |         /* Add address of mycache to linked list for cleanup later */
647 |         tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
648 |         tp_cache_addr->addr = my_cache;
649 |         tp_cache_addr->data = data;
650 |         tp_cache_addr->compiler_cache = cache;
651 |         tp_cache_addr->next = __kmp_threadpriv_cache_list;
652 |         __kmp_threadpriv_cache_list = tp_cache_addr;
653 |       } else { // A cache was already created; use it
654 |         my_cache = tp_cache_addr->addr;
655 |         tp_cache_addr->compiler_cache = cache;
656 |       }
657 |       KMP_MB();
658 |
659 |       TCW_PTR(*cache, my_cache);
660 |       __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
661 |
662 |       KMP_MB();
663 |     }
664 |     __kmp_release_lock(&__kmp_global_lock, global_tid);
665 |   }
666 |
667 |   void *ret;
668 |   if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
669 |     ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);
670 |
671 |     TCW_PTR((*cache)[global_tid], ret);
672 |   }
673 |   KC_TRACE(10,
674 |            ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
675 |             global_tid, ret));
676 |   return ret;
677 | }
678 |
679 | // This function should only be called when both __kmp_tp_cached_lock and
680 | // kmp_forkjoin_lock are held.
681 | void __kmp_threadprivate_resize_cache(int newCapacity) {
682 |   KC_TRACE(10, ("__kmp_threadprivate_resize_cache: called with size: %d\n",
683 |                 newCapacity));
684 |
685 |   kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
686 |
687 |   while (ptr) {
688 |     if (ptr->data) { // this location has an active cache; resize it
689 |       void **my_cache;
690 |       KMP_ITT_IGNORE(my_cache =
691 |                          (void **)__kmp_allocate(sizeof(void *) * newCapacity +
692 |                                                  sizeof(kmp_cached_addr_t)););
693 |       // No need to zero the allocated memory; __kmp_allocate does that.
694 |       KC_TRACE(50, ("__kmp_threadprivate_resize_cache: allocated cache at %p\n",
695 |                     my_cache));
696 |       // Now copy old cache into new cache
697 |       void **old_cache = ptr->addr;
698 |       for (int i = 0; i < __kmp_tp_capacity; ++i) {
699 |         my_cache[i] = old_cache[i];
700 |       }
701 |
702 |       // Add address of new my_cache to linked list for cleanup later
703 |       kmp_cached_addr_t *tp_cache_addr;
704 |       tp_cache_addr = (kmp_cached_addr_t *)&my_cache[newCapacity];
705 |       tp_cache_addr->addr = my_cache;
706 |       tp_cache_addr->data = ptr->data;
707 |       tp_cache_addr->compiler_cache = ptr->compiler_cache;
708 |       tp_cache_addr->next = __kmp_threadpriv_cache_list;
709 |       __kmp_threadpriv_cache_list = tp_cache_addr;
710 |
711 |       // Copy new cache to compiler's location: We can copy directly
712 |       // to (*compiler_cache) if compiler guarantees it will keep
713 |       // using the same location for the cache. This is not yet true
714 |       // for some compilers, in which case we have to check if
715 |       // compiler_cache is still pointing at old cache, and if so, we
716 |       // can point it at the new cache with an atomic compare&swap
717 |       // operation. (Old method will always work, but we should shift
718 |       // to new method (commented line below) when Intel and Clang
719 |       // compilers use new method.)
720 |       (void)KMP_COMPARE_AND_STORE_PTR(tp_cache_addr->compiler_cache, old_cache,
721 |                                       my_cache);
722 |       // TCW_PTR(*(tp_cache_addr->compiler_cache), my_cache);
723 |
724 |       // If the store doesn't happen here, the compiler's old behavior will
725 |       // inevitably call __kmpc_threadprivate_cache with a new location for the
726 |       // cache, and that function will store the resized cache there at that
727 |       // point.
728 |
729 |       // Nullify old cache's data pointer so we skip it next time
730 |       ptr->data = NULL;
731 |     }
732 |     ptr = ptr->next;
733 |   }
734 |   // After all caches are resized, update __kmp_tp_capacity to the new size
735 |   *(volatile int *)&__kmp_tp_capacity = newCapacity;
736 | }
737 |
738 | /*!
739 |  @ingroup THREADPRIVATE
740 |  @param loc source location information
741 |  @param data pointer to data being privatized
742 |  @param ctor pointer to constructor function for data
743 |  @param cctor pointer to copy constructor function for data
744 |  @param dtor pointer to destructor function for data
745 |  @param vector_length length of the vector (bytes or elements?)
746 |  Register vector constructors and destructors for thread private data.
747 | */
748 | void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
749 |                                        kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
750 |                                        kmpc_dtor_vec dtor,
751 |                                        size_t vector_length) {
752 |   struct shared_common *d_tn, **lnk_tn;
753 |
754 |   KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));
755 |
756 | #ifdef USE_CHECKS_COMMON
757 |   /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
758 |   KMP_ASSERT(cctor == 0);
759 | #endif /* USE_CHECKS_COMMON */
760 |
761 |   d_tn = __kmp_find_shared_task_common(
762 |       &__kmp_threadprivate_d_table, -1,
763 |       data); /* Only the global data table exists. */
764 |
765 |   if (d_tn == 0) {
766 |     d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
767 |     d_tn->gbl_addr = data;
768 |
769 |     d_tn->ct.ctorv = ctor;
770 |     d_tn->cct.cctorv = cctor;
771 |     d_tn->dt.dtorv = dtor;
772 |     d_tn->is_vec = TRUE;
773 |     d_tn->vec_len = (size_t)vector_length;
774 |     // d_tn->obj_init = 0; // AC: __kmp_allocate zeroes the memory
775 |     // d_tn->pod_init = 0;
776 |     lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);
777 |
778 |     d_tn->next = *lnk_tn;
779 |     *lnk_tn = d_tn;
780 |   }
781 | }
782 |
783 | void __kmp_cleanup_threadprivate_caches() {
784 |   kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
785 |
786 |   while (ptr) {
787 |     void **cache = ptr->addr;
788 |     __kmp_threadpriv_cache_list = ptr->next;
789 |     if (*ptr->compiler_cache)
790 |       *ptr->compiler_cache = NULL;
791 |     ptr->compiler_cache = NULL;
792 |     ptr->data = NULL;
793 |     ptr->addr = NULL;
794 |     ptr->next = NULL;
795 |     // Threadprivate data pointed at by cache entries are destroyed at end of
796 |     // __kmp_launch_thread with __kmp_common_destroy_gtid.
797 |     __kmp_free(cache); // implicitly frees ptr too
798 |     ptr = __kmp_threadpriv_cache_list;
799 |   }
800 | }
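
For orientation before the kmp.h listing: __kmpc_threadprivate_cached above is the entry point that compiler-generated code calls at each reference to a threadprivate variable. A sketch of the calling pattern, under the assumption that the compiler names its emitted symbols gv and gv_cache (hypothetical names; the signature comes from the listing):

    static int gv = 42;            /* original global: #pragma omp threadprivate(gv) */
    static void **gv_cache = NULL; /* per-variable cache slot, zero-initialized      */

    void use_gv(ident_t *loc) {
      kmp_int32 gtid = __kmpc_global_thread_num(loc);
      /* The first call allocates the gtid-indexed cache and this thread's
         private copy (via __kmpc_threadprivate); later calls just return
         the cached pointer. */
      int *my_gv = (int *)__kmpc_threadprivate_cached(loc, gtid, &gv,
                                                      sizeof(gv), &gv_cache);
      *my_gv += 1; /* operates on the calling thread's private copy */
    }

File: projects/openmp/runtime/src/kmp.h (included above; listing follows)
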
1 | /*! \file */ |
2 | /* |
3 | * kmp.h -- KPTS runtime header file. |
4 | */ |
5 | |
6 | //===----------------------------------------------------------------------===// |
7 | // |
8 | // The LLVM Compiler Infrastructure |
9 | // |
10 | // This file is dual licensed under the MIT and the University of Illinois Open |
11 | // Source Licenses. See LICENSE.txt for details. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef KMP_H |
16 | #define KMP_H |
17 | |
18 | #include "kmp_config.h" |
19 | |
20 | /* #define BUILD_PARALLEL_ORDERED 1 */ |
21 | |
22 | /* This fix replaces gettimeofday with clock_gettime for better scalability on |
23 | the Altix. Requires user code to be linked with -lrt. */ |
24 | //#define FIX_SGI_CLOCK |
25 | |
26 | /* Defines for OpenMP 3.0 tasking and auto scheduling */ |
27 | |
28 | #ifndef KMP_STATIC_STEAL_ENABLED
29 | #define KMP_STATIC_STEAL_ENABLED 1
30 | #endif |
31 | |
32 | #define TASK_CURRENT_NOT_QUEUED 0
33 | #define TASK_CURRENT_QUEUED 1
34 | |
35 | #ifdef BUILD_TIED_TASK_STACK |
36 | #define TASK_STACK_EMPTY 0 // entries when the stack is empty |
37 | #define TASK_STACK_BLOCK_BITS 5 // Used in TASK_STACK_SIZE and TASK_STACK_MASK |
38 | // Number of entries in each task stack array |
39 | #define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS) |
40 | // Mask for determining index into stack block |
41 | #define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1) |
42 | #endif // BUILD_TIED_TASK_STACK |
43 | |
44 | #define TASK_NOT_PUSHED 1
45 | #define TASK_SUCCESSFULLY_PUSHED 0
46 | #define TASK_TIED 1
47 | #define TASK_UNTIED 0
48 | #define TASK_EXPLICIT 1
49 | #define TASK_IMPLICIT 0
50 | #define TASK_PROXY 1
51 | #define TASK_FULL 0
52 | |
53 | #define KMP_CANCEL_THREADS |
54 | #define KMP_THREAD_ATTR |
55 | |
56 | // Android does not have pthread_cancel. Undefine KMP_CANCEL_THREADS if being |
57 | // built on Android |
58 | #if defined(__ANDROID__) |
59 | #undef KMP_CANCEL_THREADS |
60 | #endif |
61 | |
62 | #include <signal.h> |
63 | #include <stdarg.h> |
64 | #include <stddef.h> |
65 | #include <stdio.h> |
66 | #include <stdlib.h> |
67 | #include <string.h> |
68 | /* include <ctype.h> don't use; problems with /MD on Windows* OS NT due to bad |
69 | Microsoft library. Some macros provided below to replace these functions */ |
70 | #ifndef __ABSOFT_WIN |
71 | #include <sys/types.h> |
72 | #endif |
73 | #include <limits.h> |
74 | #include <time.h> |
75 | |
76 | #include <errno.h>
77 | |
78 | #include "kmp_os.h" |
79 | |
80 | #include "kmp_safe_c_api.h" |
81 | |
82 | #if KMP_STATS_ENABLED
83 | class kmp_stats_list; |
84 | #endif |
85 | |
86 | #if KMP_USE_HIER_SCHED
87 | // Only include hierarchical scheduling if affinity is supported |
88 | #undef KMP_USE_HIER_SCHED
89 | #define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
90 | #endif |
91 | |
92 | #if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
93 | #include "hwloc.h" |
94 | #ifndef HWLOC_OBJ_NUMANODE |
95 | #define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE |
96 | #endif |
97 | #ifndef HWLOC_OBJ_PACKAGE |
98 | #define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET |
99 | #endif |
100 | #endif |
101 | |
102 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64
103 | #include <xmmintrin.h> |
104 | #endif |
105 | |
106 | #include "kmp_debug.h" |
107 | #include "kmp_lock.h" |
108 | #include "kmp_version.h" |
109 | #if USE_DEBUGGER
110 | #include "kmp_debugger.h" |
111 | #endif |
112 | #include "kmp_i18n.h" |
113 | |
114 | #define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
115 | |
116 | #include "kmp_wrapper_malloc.h" |
117 | #if KMP_OS_UNIX
118 | #include <unistd.h> |
119 | #if !defined NSIG && defined _NSIG
120 | #define NSIG _NSIG
121 | #endif |
122 | #endif |
123 | |
124 | #if KMP_OS_LINUX
125 | #pragma weak clock_gettime |
126 | #endif |
127 | |
128 | #if OMPT_SUPPORT
129 | #include "ompt-internal.h" |
130 | #endif |
131 | |
132 | // 0 - no fast memory allocation, alignment: 8-byte on x86, 16-byte on x64. |
133 | // 3 - fast allocation using sync, non-sync free lists of any size, non-self |
134 | // free lists of limited size. |
135 | #ifndef USE_FAST_MEMORY
136 | #define USE_FAST_MEMORY 3
137 | #endif |
138 | |
139 | #ifndef KMP_NESTED_HOT_TEAMS
140 | #define KMP_NESTED_HOT_TEAMS 0
141 | #define USE_NESTED_HOT_ARG(x)
142 | #else
143 | #if KMP_NESTED_HOT_TEAMS
144 | #if OMP_40_ENABLED
145 | #define USE_NESTED_HOT_ARG(x) , x
146 | #else
147 | // Nested hot teams feature depends on omp 4.0, disable it for earlier versions
148 | #undef KMP_NESTED_HOT_TEAMS
149 | #define KMP_NESTED_HOT_TEAMS 0
150 | #define USE_NESTED_HOT_ARG(x)
151 | #endif
152 | #else
153 | #define USE_NESTED_HOT_ARG(x)
154 | #endif |
155 | #endif |
156 | |
157 | // Assume using BGET compare_exchange instruction instead of lock by default. |
158 | #ifndef USE_CMP_XCHG_FOR_BGET
159 | #define USE_CMP_XCHG_FOR_BGET 1
160 | #endif |
161 | |
162 | // Test to see if queuing lock is better than bootstrap lock for bget |
163 | // #ifndef USE_QUEUING_LOCK_FOR_BGET |
164 | // #define USE_QUEUING_LOCK_FOR_BGET |
165 | // #endif |
166 | |
167 | #define KMP_NSEC_PER_SEC 1000000000L
168 | #define KMP_USEC_PER_SEC 1000000L
169 | |
170 | /*! |
171 | @ingroup BASIC_TYPES |
172 | @{ |
173 | */ |
174 | |
175 | /*! |
176 | Values for bit flags used in the ident_t to describe the fields. |
177 | */ |
178 | enum { |
179 | /*! Use trampoline for internal microtasks */ |
180 | KMP_IDENT_IMB = 0x01, |
181 | /*! Use c-style ident structure */ |
182 | KMP_IDENT_KMPC = 0x02, |
183 | /* 0x04 is no longer used */ |
184 | /*! Entry point generated by auto-parallelization */ |
185 | KMP_IDENT_AUTOPAR = 0x08, |
186 | /*! Compiler generates atomic reduction option for kmpc_reduce* */ |
187 | KMP_IDENT_ATOMIC_REDUCE = 0x10, |
188 | /*! To mark a 'barrier' directive in user code */ |
189 | KMP_IDENT_BARRIER_EXPL = 0x20, |
190 | /*! To mark implicit barriers. */
191 | KMP_IDENT_BARRIER_IMPL = 0x0040, |
192 | KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0, |
193 | KMP_IDENT_BARRIER_IMPL_FOR = 0x0040, |
194 | KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0, |
195 | |
196 | KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140, |
197 | KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0, |
198 | |
199 | /*! To mark a static loop in OMPT callbacks */ |
200 | KMP_IDENT_WORK_LOOP = 0x200, |
201 | /*! To mark a sections directive in OMPT callbacks */ |
202 | KMP_IDENT_WORK_SECTIONS = 0x400, |
203 | /*! To mark a distribute construct in OMPT callbacks */
204 | KMP_IDENT_WORK_DISTRIBUTE = 0x800, |
205 | /*! Atomic hint; bottom four bits as omp_sync_hint_t. Top four reserved and |
206 | not currently used. If one day we need more bits, then we can use |
207 | an invalid combination of hints to mean that another, larger field |
208 | should be used in a different flag. */ |
209 | KMP_IDENT_ATOMIC_HINT_MASK = 0xFF0000, |
210 | KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000, |
211 | KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000, |
212 | KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000, |
213 | KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000, |
214 | }; |
215 | |
216 | /*! |
217 | * The ident structure that describes a source location. |
218 | */ |
219 | typedef struct ident { |
220 | kmp_int32 reserved_1; /**< might be used in Fortran; see above */ |
221 | kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC |
222 | identifies this union member */ |
223 | kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */ |
224 | #if USE_ITT_BUILD
225 | /* but currently used for storing region-specific ITT */ |
226 | /* contextual information. */ |
227 | #endif /* USE_ITT_BUILD */ |
228 | kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */ |
229 | char const *psource; /**< String describing the source location. |
230 | The string is composed of semi-colon separated fields |
231 | which describe the source file, the function and a pair |
232 | of line numbers that delimit the construct. */ |
233 | } ident_t; |
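/* [Editor's note, not in the original header] Example of the psource layout
   described above, assuming the usual ";file;function;begin-line;end-line;;"
   encoding (field values hypothetical):
       ";foo.c;main;4;9;;"
   The runtime's placeholder for unknown locations follows the same shape,
   e.g. ";unknown;unknown;0;0;;". */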
234 | /*! |
235 | @} |
236 | */ |
237 | |
238 | // Some forward declarations. |
239 | typedef union kmp_team kmp_team_t; |
240 | typedef struct kmp_taskdata kmp_taskdata_t; |
241 | typedef union kmp_task_team kmp_task_team_t; |
242 | typedef union kmp_team kmp_team_p; |
243 | typedef union kmp_info kmp_info_p; |
244 | typedef union kmp_root kmp_root_p; |
245 | |
246 | #ifdef __cplusplus
247 | extern "C" { |
248 | #endif |
249 | |
250 | /* ------------------------------------------------------------------------ */ |
251 | |
252 | /* Pack two 32-bit signed integers into a 64-bit signed integer */ |
253 | /* ToDo: Fix word ordering for big-endian machines. */ |
254 | #define KMP_PACK_64(HIGH_32, LOW_32) \
255 |   ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
256 | |
257 | // Generic string manipulation macros. Assume that _x is of type char * |
258 | #define SKIP_WS(_x) \
259 | { \ |
260 | while (*(_x) == ' ' || *(_x) == '\t') \ |
261 | (_x)++; \ |
262 | } |
263 | #define SKIP_DIGITS(_x){ while (*(_x) >= '0' && *(_x) <= '9') (_x)++; } \ |
264 | { \ |
265 | while (*(_x) >= '0' && *(_x) <= '9') \ |
266 | (_x)++; \ |
267 | } |
268 | #define SKIP_TOKEN(_x){ while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x ) >= 'a' && *(_x) <= 'z') || (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') (_x)++; } \ |
269 | { \ |
270 | while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \ |
271 | (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \ |
272 | (_x)++; \ |
273 | } |
274 | #define SKIP_TO(_x, _c){ while (*(_x) != '\0' && *(_x) != (_c)) (_x)++; } \ |
275 | { \ |
276 | while (*(_x) != '\0' && *(_x) != (_c)) \ |
277 | (_x)++; \ |
278 | } |
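/* Illustrative usage (a sketch, not from this file): scanning "  42,foo;"
   with these macros:

     char *p = buf;
     SKIP_WS(p);       // p -> "42,foo;"
     SKIP_DIGITS(p);   // p -> ",foo;"
     if (*p == ',')
       p++;
     SKIP_TOKEN(p);    // p -> ";"
     SKIP_TO(p, ';');  // no-op here; stops at ';' or at the terminating NUL
*/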
279 | |
280 | /* ------------------------------------------------------------------------ */ |
281 | |
282 | #define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
283 | #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
284 | |
285 | /* ------------------------------------------------------------------------ */ |
286 | /* Enumeration types */ |
287 | |
288 | enum kmp_state_timer { |
289 | ts_stop, |
290 | ts_start, |
291 | ts_pause, |
292 | |
293 | ts_last_state |
294 | }; |
295 | |
296 | enum dynamic_mode { |
297 | dynamic_default, |
298 | #ifdef USE_LOAD_BALANCE
299 | dynamic_load_balance, |
300 | #endif /* USE_LOAD_BALANCE */ |
301 | dynamic_random, |
302 | dynamic_thread_limit, |
303 | dynamic_max |
304 | }; |
305 | |
306 | /* external schedule constants, duplicate enum omp_sched in omp.h in order to |
307 | * not include it here */ |
308 | #ifndef KMP_SCHED_TYPE_DEFINED |
309 | #define KMP_SCHED_TYPE_DEFINED |
310 | typedef enum kmp_sched { |
311 | kmp_sched_lower = 0, // lower and upper bounds are for routine parameter check |
312 | // Note: need to adjust __kmp_sch_map global array in case enum is changed |
313 | kmp_sched_static = 1, // mapped to kmp_sch_static_chunked (33) |
314 | kmp_sched_dynamic = 2, // mapped to kmp_sch_dynamic_chunked (35) |
315 | kmp_sched_guided = 3, // mapped to kmp_sch_guided_chunked (36) |
316 | kmp_sched_auto = 4, // mapped to kmp_sch_auto (38) |
317 | kmp_sched_upper_std = 5, // upper bound for standard schedules |
318 | kmp_sched_lower_ext = 100, // lower bound of Intel extension schedules |
319 | kmp_sched_trapezoidal = 101, // mapped to kmp_sch_trapezoidal (39) |
320 | #if KMP_STATIC_STEAL_ENABLED
321 | kmp_sched_static_steal = 102, // mapped to kmp_sch_static_steal (44) |
322 | #endif |
323 | kmp_sched_upper, |
324 | kmp_sched_default = kmp_sched_static // default scheduling |
325 | } kmp_sched_t; |
326 | #endif |
327 | |
328 | /*! |
329 | @ingroup WORK_SHARING |
330 | * Describes the loop schedule to be used for a parallel for loop. |
331 | */ |
332 | enum sched_type : kmp_int32 { |
333 | kmp_sch_lower = 32, /**< lower bound for unordered values */ |
334 | kmp_sch_static_chunked = 33, |
335 | kmp_sch_static = 34, /**< static unspecialized */ |
336 | kmp_sch_dynamic_chunked = 35, |
337 | kmp_sch_guided_chunked = 36, /**< guided unspecialized */ |
338 | kmp_sch_runtime = 37, |
339 | kmp_sch_auto = 38, /**< auto */ |
340 | kmp_sch_trapezoidal = 39, |
341 | |
342 | /* accessible only through KMP_SCHEDULE environment variable */ |
343 | kmp_sch_static_greedy = 40, |
344 | kmp_sch_static_balanced = 41, |
345 | /* accessible only through KMP_SCHEDULE environment variable */ |
346 | kmp_sch_guided_iterative_chunked = 42, |
347 | kmp_sch_guided_analytical_chunked = 43, |
348 | /* accessible only through KMP_SCHEDULE environment variable */ |
349 | kmp_sch_static_steal = 44, |
350 | |
351 | #if OMP_45_ENABLED
352 | /* static with chunk adjustment (e.g., simd) */ |
353 | kmp_sch_static_balanced_chunked = 45, |
354 | kmp_sch_guided_simd = 46, /**< guided with chunk adjustment */ |
355 | kmp_sch_runtime_simd = 47, /**< runtime with chunk adjustment */ |
356 | #endif |
357 | |
358 | /* accessible only through KMP_SCHEDULE environment variable */ |
359 | kmp_sch_upper, /**< upper bound for unordered values */ |
360 | |
361 | kmp_ord_lower = 64, /**< lower bound for ordered values, must be power of 2 */ |
362 | kmp_ord_static_chunked = 65, |
363 | kmp_ord_static = 66, /**< ordered static unspecialized */ |
364 | kmp_ord_dynamic_chunked = 67, |
365 | kmp_ord_guided_chunked = 68, |
366 | kmp_ord_runtime = 69, |
367 | kmp_ord_auto = 70, /**< ordered auto */ |
368 | kmp_ord_trapezoidal = 71, |
369 | kmp_ord_upper, /**< upper bound for ordered values */ |
370 | |
371 | #if OMP_40_ENABLED
372 | /* Schedules for Distribute construct */ |
373 | kmp_distribute_static_chunked = 91, /**< distribute static chunked */ |
374 | kmp_distribute_static = 92, /**< distribute static unspecialized */ |
375 | #endif |
376 | |
377 | /* For the "nomerge" versions, kmp_dispatch_next*() will always return a |
378 | single iteration/chunk, even if the loop is serialized. For the schedule |
379 | types listed above, the entire iteration vector is returned if the loop is |
380 | serialized. This doesn't work for gcc/gcomp sections. */ |
381 | kmp_nm_lower = 160, /**< lower bound for nomerge values */ |
382 | |
383 | kmp_nm_static_chunked = |
384 | (kmp_sch_static_chunked - kmp_sch_lower + kmp_nm_lower), |
385 | kmp_nm_static = 162, /**< static unspecialized */ |
386 | kmp_nm_dynamic_chunked = 163, |
387 | kmp_nm_guided_chunked = 164, /**< guided unspecialized */ |
388 | kmp_nm_runtime = 165, |
389 | kmp_nm_auto = 166, /**< auto */ |
390 | kmp_nm_trapezoidal = 167, |
391 | |
392 | /* accessible only through KMP_SCHEDULE environment variable */ |
393 | kmp_nm_static_greedy = 168, |
394 | kmp_nm_static_balanced = 169, |
395 | /* accessible only through KMP_SCHEDULE environment variable */ |
396 | kmp_nm_guided_iterative_chunked = 170, |
397 | kmp_nm_guided_analytical_chunked = 171, |
398 | kmp_nm_static_steal = |
399 | 172, /* accessible only through OMP_SCHEDULE environment variable */ |
400 | |
401 | kmp_nm_ord_static_chunked = 193, |
402 | kmp_nm_ord_static = 194, /**< ordered static unspecialized */ |
403 | kmp_nm_ord_dynamic_chunked = 195, |
404 | kmp_nm_ord_guided_chunked = 196, |
405 | kmp_nm_ord_runtime = 197, |
406 | kmp_nm_ord_auto = 198, /**< auto */ |
407 | kmp_nm_ord_trapezoidal = 199, |
408 | kmp_nm_upper, /**< upper bound for nomerge values */ |
409 | |
410 | #if OMP_45_ENABLED
411 | /* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers. Since |
412 | we need to distinguish the three possible cases (no modifier, monotonic |
413 | modifier, nonmonotonic modifier), we need separate bits for each modifier. |
414 |    The absence of monotonic does not imply nonmonotonic, especially since the
415 |    4.5 spec says that the behaviour of the "no modifier" case is
416 |    implementation defined, but will become "nonmonotonic" in 5.0.
417 | |
418 | Since we're passing a full 32 bit value, we can use a couple of high bits |
419 | for these flags; out of paranoia we avoid the sign bit. |
420 | |
421 | These modifiers can be or-ed into non-static schedules by the compiler to |
422 | pass the additional information. They will be stripped early in the |
423 | processing in __kmp_dispatch_init when setting up schedules, so most of the |
424 | code won't ever see schedules with these bits set. */ |
425 | kmp_sch_modifier_monotonic = |
426 | (1 << 29), /**< Set if the monotonic schedule modifier was present */ |
427 | kmp_sch_modifier_nonmonotonic = |
428 | (1 << 30), /**< Set if the nonmonotonic schedule modifier was present */ |
429 | |
430 | #define SCHEDULE_WITHOUT_MODIFIERS(s)                                         \
431 |   (enum sched_type)(                                                          \
432 |       (s) & ~(kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic))
433 | #define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
434 | #define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
435 | #define SCHEDULE_HAS_NO_MODIFIERS(s)                                          \
436 |   (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
437 | #else |
438 | /* By doing this we hope to avoid multiple tests on OMP_45_ENABLED. Compilers |
439 | can now eliminate tests on compile time constants and dead code that results |
440 | from them, so we can leave code guarded by such an if in place. */ |
441 | #define SCHEDULE_WITHOUT_MODIFIERS(s) (s)
442 | #define SCHEDULE_HAS_MONOTONIC(s) false
443 | #define SCHEDULE_HAS_NONMONOTONIC(s) false
444 | #define SCHEDULE_HAS_NO_MODIFIERS(s) true
445 | #endif |
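/* Illustrative example: a compiler may pass
     kmp_sch_dynamic_chunked | kmp_sch_modifier_nonmonotonic
   as the schedule; then
     SCHEDULE_WITHOUT_MODIFIERS(s) == kmp_sch_dynamic_chunked and
     SCHEDULE_HAS_NONMONOTONIC(s) is true
   (when OMP_45_ENABLED; otherwise the no-op definitions above apply). */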
446 | |
447 | kmp_sch_default = kmp_sch_static /**< default scheduling algorithm */ |
448 | }; |
449 | |
450 | /* Type to keep runtime schedule set via OMP_SCHEDULE or omp_set_schedule() */ |
451 | typedef union kmp_r_sched { |
452 | struct { |
453 | enum sched_type r_sched_type; |
454 | int chunk; |
455 | }; |
456 | kmp_int64 sched; |
457 | } kmp_r_sched_t; |
458 | |
459 | extern enum sched_type __kmp_sch_map[]; // map OMP 3.0 schedule types with our |
460 | // internal schedule types |
461 | |
462 | enum library_type { |
463 | library_none, |
464 | library_serial, |
465 | library_turnaround, |
466 | library_throughput |
467 | }; |
468 | |
469 | #if KMP_OS_LINUX
470 | enum clock_function_type { |
471 | clock_function_gettimeofday, |
472 | clock_function_clock_gettime |
473 | }; |
474 | #endif /* KMP_OS_LINUX */ |
475 | |
476 | #if KMP_MIC_SUPPORTED
477 | enum mic_type { non_mic, mic1, mic2, mic3, dummy }; |
478 | #endif |
479 | |
480 | /* -- fast reduction stuff ------------------------------------------------ */ |
481 | |
482 | #undef KMP_FAST_REDUCTION_BARRIER
483 | #define KMP_FAST_REDUCTION_BARRIER 1
484 |
485 | #undef KMP_FAST_REDUCTION_CORE_DUO
486 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64
487 | #define KMP_FAST_REDUCTION_CORE_DUO 1
488 | #endif |
489 | |
490 | enum _reduction_method { |
491 | reduction_method_not_defined = 0, |
492 | critical_reduce_block = (1 << 8), |
493 | atomic_reduce_block = (2 << 8), |
494 | tree_reduce_block = (3 << 8), |
495 | empty_reduce_block = (4 << 8) |
496 | }; |
497 | |
498 | // Description of the packed_reduction_method variable:
499 | // The packed_reduction_method variable packs two enum values into its low
500 | // two bytes:
501 | // byte 0: (packed_reduction_method & 0x000000FF) is the 'enum barrier_type'
502 | //   value of the barrier that will be used in fast reduction:
503 | //   bs_plain_barrier or bs_reduction_barrier
504 | // byte 1: (packed_reduction_method & 0x0000FF00) is the reduction method
505 | //   that will be used in fast reduction;
506 | // The reduction method is of 'enum _reduction_method' type and is defined so
507 | // that the bits of byte 0 stay empty; no shift instruction is needed
508 | // while packing/unpacking
509 | |
510 | #if KMP_FAST_REDUCTION_BARRIER
511 | #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type)    \
512 |   ((reduction_method) | (barrier_type))
513 |
514 | #define UNPACK_REDUCTION_METHOD(packed_reduction_method)                      \
515 |   ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
516 |
517 | #define UNPACK_REDUCTION_BARRIER(packed_reduction_method)                     \
518 |   ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
519 | #else |
520 | #define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type)    \
521 |   (reduction_method)
522 |
523 | #define UNPACK_REDUCTION_METHOD(packed_reduction_method)                      \
524 |   (packed_reduction_method)
525 |
526 | #define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
527 | #endif |
528 | |
529 | #define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block)\
530 |   ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) ==                     \
531 |    (which_reduction_block))
532 |
533 | #if KMP_FAST_REDUCTION_BARRIER
534 | #define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER                              \
535 |   (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
536 |
537 | #define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER                                  \
538 |   (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
539 | #endif |
540 | |
541 | typedef int PACKED_REDUCTION_METHOD_T; |
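/* Illustrative example: packing a tree reduction with the reduction barrier
   and querying it later (bs_reduction_barrier is an 'enum barrier_type'
   value declared elsewhere in the runtime):

     PACKED_REDUCTION_METHOD_T prm = PACK_REDUCTION_METHOD_AND_BARRIER(
         tree_reduce_block, bs_reduction_barrier);
     // TEST_REDUCTION_METHOD(prm, tree_reduce_block) is true;
     // UNPACK_REDUCTION_BARRIER(prm) yields bs_reduction_barrier.
*/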
542 | |
543 | /* -- end of fast reduction stuff ----------------------------------------- */ |
544 | |
545 | #if KMP_OS_WINDOWS
546 | #define USE_CBLKDATA |
547 | #pragma warning(push) |
548 | #pragma warning(disable : 271 310) |
549 | #include <windows.h> |
550 | #pragma warning(pop) |
551 | #endif |
552 | |
553 | #if KMP_OS_UNIX
554 | #include <dlfcn.h> |
555 | #include <pthread.h> |
556 | #endif |
557 | |
558 | /* Only Linux* OS and Windows* OS support thread affinity. */ |
559 | #if KMP_AFFINITY_SUPPORTED
560 | |
561 | // GROUP_AFFINITY is already defined for _MSC_VER>=1600 (VS2010 and later). |
562 | #if KMP_OS_WINDOWS
563 | #if _MSC_VER < 1600 |
564 | typedef struct GROUP_AFFINITY { |
565 | KAFFINITY Mask; |
566 | WORD Group; |
567 | WORD Reserved[3]; |
568 | } GROUP_AFFINITY; |
569 | #endif /* _MSC_VER < 1600 */ |
570 | #if KMP_GROUP_AFFINITY
571 | extern int __kmp_num_proc_groups; |
572 | #else |
573 | static const int __kmp_num_proc_groups = 1; |
574 | #endif /* KMP_GROUP_AFFINITY */ |
575 | typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD); |
576 | extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount; |
577 | |
578 | typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void); |
579 | extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount; |
580 | |
581 | typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *); |
582 | extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity; |
583 | |
584 | typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *, |
585 | GROUP_AFFINITY *); |
586 | extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity; |
587 | #endif /* KMP_OS_WINDOWS */ |
588 | |
589 | #if KMP_USE_HWLOC
590 | extern hwloc_topology_t __kmp_hwloc_topology; |
591 | extern int __kmp_hwloc_error; |
592 | extern int __kmp_numa_detected; |
593 | extern int __kmp_tile_depth; |
594 | #endif |
595 | |
596 | extern size_t __kmp_affin_mask_size; |
597 | #define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
598 | #define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
599 | #define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
600 | #define KMP_CPU_SET_ITERATE(i, mask)                                          \
601 |   for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
602 | #define KMP_CPU_SET(i, mask) (mask)->set(i)
603 | #define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
604 | #define KMP_CPU_CLR(i, mask) (mask)->clear(i)
605 | #define KMP_CPU_ZERO(mask) (mask)->zero()
606 | #define KMP_CPU_COPY(dest, src) (dest)->copy(src)
607 | #define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
608 | #define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
609 | #define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
610 | #define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
611 | #define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
612 | #define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
613 | #define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
614 | #define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
615 | #define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
616 | #define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
617 | #define KMP_CPU_ALLOC_ARRAY(arr, n)                                           \
618 |   (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
619 | #define KMP_CPU_FREE_ARRAY(arr, n)                                            \
620 |   __kmp_affinity_dispatch->deallocate_mask_array(arr)
621 | #define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
622 | #define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
623 | #define __kmp_get_system_affinity(mask, abort_bool)                           \
624 |   (mask)->get_system_affinity(abort_bool)
625 | #define __kmp_set_system_affinity(mask, abort_bool)                           \
626 |   (mask)->set_system_affinity(abort_bool)
627 | #define __kmp_get_proc_group(mask) (mask)->get_proc_group()
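/* Illustrative usage (a sketch of the assumed pattern): pin the calling
   thread to a single OS proc using the wrappers above.

     kmp_affin_mask_t *mask;
     KMP_CPU_ALLOC(mask);
     KMP_CPU_ZERO(mask);
     KMP_CPU_SET(proc, mask);               // proc: an OS processor id
     __kmp_set_system_affinity(mask, TRUE);
     KMP_CPU_FREE(mask);
*/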
628 | |
629 | class KMPAffinity { |
630 | public: |
631 | class Mask { |
632 | public: |
633 | void *operator new(size_t n); |
634 | void operator delete(void *p); |
635 | void *operator new[](size_t n); |
636 | void operator delete[](void *p); |
637 | virtual ~Mask() {} |
638 | // Set bit i to 1 |
639 | virtual void set(int i) {} |
640 | // Return bit i |
641 | virtual bool is_set(int i) const { return false; } |
642 | // Set bit i to 0 |
643 | virtual void clear(int i) {} |
644 | // Zero out entire mask |
645 | virtual void zero() {} |
646 | // Copy src into this mask |
647 | virtual void copy(const Mask *src) {} |
648 | // this &= rhs |
649 | virtual void bitwise_and(const Mask *rhs) {} |
650 | // this |= rhs |
651 | virtual void bitwise_or(const Mask *rhs) {} |
652 | // this = ~this |
653 | virtual void bitwise_not() {} |
654 | // API for iterating over an affinity mask |
655 | // for (int i = mask->begin(); i != mask->end(); i = mask->next(i)) |
656 | virtual int begin() const { return 0; } |
657 | virtual int end() const { return 0; } |
658 | virtual int next(int previous) const { return 0; } |
659 | // Set the system's affinity to this affinity mask's value |
660 | virtual int set_system_affinity(bool abort_on_error) const { return -1; } |
661 | // Set this affinity mask to the current system affinity |
662 | virtual int get_system_affinity(bool abort_on_error) { return -1; } |
663 | // Only 1 DWORD in the mask should have any procs set. |
664 | // Return the appropriate index, or -1 for an invalid mask. |
665 | virtual int get_proc_group() const { return -1; } |
666 | }; |
667 | void *operator new(size_t n); |
668 | void operator delete(void *p); |
669 | // Need virtual destructor |
670 | virtual ~KMPAffinity() = default; |
671 | // Determine if affinity is capable |
672 | virtual void determine_capable(const char *env_var) {} |
673 | // Bind the current thread to os proc |
674 | virtual void bind_thread(int proc) {} |
675 | // Factory functions to allocate/deallocate a mask |
676 | virtual Mask *allocate_mask() { return nullptr; } |
677 | virtual void deallocate_mask(Mask *m) {} |
678 | virtual Mask *allocate_mask_array(int num) { return nullptr; } |
679 | virtual void deallocate_mask_array(Mask *m) {} |
680 | virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; } |
681 | static void pick_api(); |
682 | static void destroy_api(); |
683 | enum api_type { |
684 | NATIVE_OS |
685 | #if KMP_USE_HWLOC
686 | , |
687 | HWLOC |
688 | #endif |
689 | }; |
690 | virtual api_type get_api_type() const { |
691 |     KMP_ASSERT(0);
692 | return NATIVE_OS; |
693 | } |
694 | |
695 | private: |
696 | static bool picked_api; |
697 | }; |
698 | |
699 | typedef KMPAffinity::Mask kmp_affin_mask_t; |
700 | extern KMPAffinity *__kmp_affinity_dispatch; |
701 | |
702 | // Declare local char buffers with this size for printing debug and info |
703 | // messages, using __kmp_affinity_print_mask(). |
704 | #define KMP_AFFIN_MASK_PRINT_LEN 1024
705 | |
706 | enum affinity_type { |
707 | affinity_none = 0, |
708 | affinity_physical, |
709 | affinity_logical, |
710 | affinity_compact, |
711 | affinity_scatter, |
712 | affinity_explicit, |
713 | affinity_balanced, |
714 |   affinity_disabled, // not used outside the env var parser
715 | affinity_default |
716 | }; |
717 | |
718 | enum affinity_gran { |
719 | affinity_gran_fine = 0, |
720 | affinity_gran_thread, |
721 | affinity_gran_core, |
722 | affinity_gran_tile, |
723 | affinity_gran_numa, |
724 | affinity_gran_package, |
725 | affinity_gran_node, |
726 | #if KMP_GROUP_AFFINITY
727 |   // The "group" granularity isn't necessarily coarser than all of the
728 | // other levels, but we put it last in the enum. |
729 | affinity_gran_group, |
730 | #endif /* KMP_GROUP_AFFINITY */ |
731 | affinity_gran_default |
732 | }; |
733 | |
734 | enum affinity_top_method { |
735 | affinity_top_method_all = 0, // try all (supported) methods, in order |
736 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64
737 | affinity_top_method_apicid, |
738 | affinity_top_method_x2apicid, |
739 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
740 | affinity_top_method_cpuinfo, // KMP_CPUINFO_FILE is usable on Windows* OS, too |
741 | #if KMP_GROUP_AFFINITY
742 | affinity_top_method_group, |
743 | #endif /* KMP_GROUP_AFFINITY */ |
744 | affinity_top_method_flat, |
745 | #if KMP_USE_HWLOC
746 | affinity_top_method_hwloc, |
747 | #endif |
748 | affinity_top_method_default |
749 | }; |
750 | |
751 | #define affinity_respect_mask_default (-1)
752 | |
753 | extern enum affinity_type __kmp_affinity_type; /* Affinity type */ |
754 | extern enum affinity_gran __kmp_affinity_gran; /* Affinity granularity */ |
755 | extern int __kmp_affinity_gran_levels; /* corresponding int value */ |
756 | extern int __kmp_affinity_dups; /* Affinity duplicate masks */ |
757 | extern enum affinity_top_method __kmp_affinity_top_method; |
758 | extern int __kmp_affinity_compact; /* Affinity 'compact' value */ |
759 | extern int __kmp_affinity_offset; /* Affinity offset value */ |
760 | extern int __kmp_affinity_verbose; /* Was verbose specified for KMP_AFFINITY? */ |
761 | extern int __kmp_affinity_warnings; /* KMP_AFFINITY warnings enabled? */
762 | extern int __kmp_affinity_respect_mask; // Respect process' init affinity mask? |
763 | extern char *__kmp_affinity_proclist; /* proc ID list */ |
764 | extern kmp_affin_mask_t *__kmp_affinity_masks; |
765 | extern unsigned __kmp_affinity_num_masks; |
766 | extern void __kmp_affinity_bind_thread(int which); |
767 | |
768 | extern kmp_affin_mask_t *__kmp_affin_fullMask; |
769 | extern char *__kmp_cpuinfo_file; |
770 | |
771 | #endif /* KMP_AFFINITY_SUPPORTED */ |
772 | |
773 | #if OMP_40_ENABLED
774 | |
775 | // This needs to be kept in sync with the values in omp.h !!! |
776 | typedef enum kmp_proc_bind_t { |
777 | proc_bind_false = 0, |
778 | proc_bind_true, |
779 | proc_bind_master, |
780 | proc_bind_close, |
781 | proc_bind_spread, |
782 | proc_bind_intel, // use KMP_AFFINITY interface |
783 | proc_bind_default |
784 | } kmp_proc_bind_t; |
785 | |
786 | typedef struct kmp_nested_proc_bind_t { |
787 | kmp_proc_bind_t *bind_types; |
788 | int size; |
789 | int used; |
790 | } kmp_nested_proc_bind_t; |
791 | |
792 | extern kmp_nested_proc_bind_t __kmp_nested_proc_bind; |
793 | |
794 | #endif /* OMP_40_ENABLED */ |
795 | |
796 | #if KMP_AFFINITY_SUPPORTED
797 | #define KMP_PLACE_ALL (-1)
798 | #define KMP_PLACE_UNDEFINED (-2)
799 | // Is KMP_AFFINITY being used instead of OMP_PROC_BIND/OMP_PLACES?
800 | #define KMP_AFFINITY_NON_PROC_BIND                                            \
801 |   ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false ||                \
802 |     __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) &&               \
803 |    (__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
804 | #endif /* KMP_AFFINITY_SUPPORTED */ |
805 | |
806 | extern int __kmp_affinity_num_places; |
807 | |
808 | #if OMP_40_ENABLED
809 | typedef enum kmp_cancel_kind_t { |
810 | cancel_noreq = 0, |
811 | cancel_parallel = 1, |
812 | cancel_loop = 2, |
813 | cancel_sections = 3, |
814 | cancel_taskgroup = 4 |
815 | } kmp_cancel_kind_t; |
816 | #endif // OMP_40_ENABLED |
817 | |
818 | // KMP_HW_SUBSET support: |
819 | typedef struct kmp_hws_item { |
820 | int num; |
821 | int offset; |
822 | } kmp_hws_item_t; |
823 | |
824 | extern kmp_hws_item_t __kmp_hws_socket; |
825 | extern kmp_hws_item_t __kmp_hws_node; |
826 | extern kmp_hws_item_t __kmp_hws_tile; |
827 | extern kmp_hws_item_t __kmp_hws_core; |
828 | extern kmp_hws_item_t __kmp_hws_proc; |
829 | extern int __kmp_hws_requested; |
830 | extern int __kmp_hws_abs_flag; // absolute or per-item number requested |
831 | |
832 | /* ------------------------------------------------------------------------ */ |
833 | |
834 | #define KMP_PAD(type, sz)                                                     \
835 |   (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
836 |
837 | // We need to avoid using -1 as a GTID as +1 is added to the gtid
838 | // when storing it in a lock, and the value 0 is reserved.
839 | #define KMP_GTID_DNE (-2) /* Does not exist */
840 | #define KMP_GTID_SHUTDOWN (-3) /* Library is shutting down */
841 | #define KMP_GTID_MONITOR (-4) /* Monitor thread ID */
842 | #define KMP_GTID_UNKNOWN (-5) /* Is not known */
843 | #define KMP_GTID_MIN (-6) /* Minimal gtid for low bound check in DEBUG */
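/* Illustrative example: KMP_PAD rounds a type's size up to a multiple of sz,
   so for a 12-byte struct and a 64-byte cache line,
     KMP_PAD(struct foo, 64) == 12 + (64 - ((12 - 1) % 64) - 1) == 64,
   keeping each element of an array of padded records on its own line. */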
844 | |
845 | #if OMP_50_ENABLED
846 | /* OpenMP 5.0 Memory Management support */ |
847 | extern int __kmp_memkind_available; |
848 | extern int __kmp_hbw_mem_available; |
849 | typedef void *omp_allocator_t; |
850 | extern const omp_allocator_t *OMP_NULL_ALLOCATOR; |
851 | extern const omp_allocator_t *omp_default_mem_alloc; |
852 | extern const omp_allocator_t *omp_large_cap_mem_alloc; |
853 | extern const omp_allocator_t *omp_const_mem_alloc; |
854 | extern const omp_allocator_t *omp_high_bw_mem_alloc; |
855 | extern const omp_allocator_t *omp_low_lat_mem_alloc; |
856 | extern const omp_allocator_t *omp_cgroup_mem_alloc; |
857 | extern const omp_allocator_t *omp_pteam_mem_alloc; |
858 | extern const omp_allocator_t *omp_thread_mem_alloc; |
859 | extern const omp_allocator_t *__kmp_def_allocator; |
860 | |
861 | extern void __kmpc_set_default_allocator(int gtid, const omp_allocator_t *al); |
862 | extern const omp_allocator_t *__kmpc_get_default_allocator(int gtid); |
863 | extern void *__kmpc_alloc(int gtid, size_t sz, const omp_allocator_t *al); |
864 | extern void __kmpc_free(int gtid, void *ptr, const omp_allocator_t *al); |
865 | |
866 | extern void __kmp_init_memkind(); |
867 | extern void __kmp_fini_memkind(); |
868 | #endif // OMP_50_ENABLED |
869 | |
870 | /* ------------------------------------------------------------------------ */ |
871 | |
872 | #define KMP_UINT64_MAX                                                        \
873 |   (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
874 | |
875 | #define KMP_MIN_NTH 1
876 |
877 | #ifndef KMP_MAX_NTH
878 | #if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
879 | #define KMP_MAX_NTH PTHREAD_THREADS_MAX
880 | #else
881 | #define KMP_MAX_NTH INT_MAX
882 | #endif
883 | #endif /* KMP_MAX_NTH */
884 |
885 | #ifdef PTHREAD_STACK_MIN
886 | #define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
887 | #else
888 | #define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
889 | #endif
890 | |
891 | #define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
892 |
893 | #if KMP_ARCH_X86
894 | #define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
895 | #elif KMP_ARCH_X86_64
896 | #define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
897 | #define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
898 | #else
899 | #define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
900 | #endif
901 |
902 | #define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
903 | #define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
904 | #define KMP_MAX_MALLOC_POOL_INCR                                              \
905 |   (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
906 |
907 | #define KMP_MIN_STKOFFSET (0)
908 | #define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
909 | #if KMP_OS_DARWIN
910 | #define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
911 | #else
912 | #define KMP_DEFAULT_STKOFFSET CACHE_LINE
913 | #endif
914 |
915 | #define KMP_MIN_STKPADDING (0)
916 | #define KMP_MAX_STKPADDING (2 * 1024 * 1024)
917 |
918 | #define KMP_BLOCKTIME_MULTIPLIER                                              \
919 |   (1000) /* number of blocktime units per second */
920 | #define KMP_MIN_BLOCKTIME (0)
921 | #define KMP_MAX_BLOCKTIME                                                     \
922 |   (INT_MAX) /* Must be this for the "infinite" setting to work */
923 | #define KMP_DEFAULT_BLOCKTIME (200) /* __kmp_blocktime is in milliseconds */
924 | |
925 | #if KMP_USE_MONITOR |
926 | #define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024)) |
927 | #define KMP_MIN_MONITOR_WAKEUPS (1) // min times monitor wakes up per second |
928 | #define KMP_MAX_MONITOR_WAKEUPS (1000) // max times monitor can wake up per sec |
929 | |
930 | /* Calculate new number of monitor wakeups for a specific block time based on |
931 | previous monitor_wakeups. Only allow increasing number of wakeups */ |
932 | #define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups)               \
933 |   (((blocktime) == KMP_MAX_BLOCKTIME)                                         \
934 |        ? (monitor_wakeups)                                                    \
935 |        : ((blocktime) == KMP_MIN_BLOCKTIME)                                   \
936 |              ? KMP_MAX_MONITOR_WAKEUPS                                        \
937 |              : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
938 |                    ? (monitor_wakeups)                                        \
939 |                    : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
940 | |
941 | /* Calculate number of intervals for a specific block time based on |
942 | monitor_wakeups */ |
943 | #define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups)             \
944 |   (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) /      \
945 |    (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
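/* Illustrative example: with blocktime = 200 ms and monitor_wakeups = 1,
   KMP_WAKEUPS_FROM_BLOCKTIME(200, 1) == 1000 / 200 == 5 wakeups per second,
   and then KMP_INTERVALS_FROM_BLOCKTIME(200, 5) == (200 + 199) / 200 == 1
   full monitor interval per blocktime window. */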
946 | #else |
947 | #define KMP_BLOCKTIME(team, tid)                                              \
948 |   (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
949 | #if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
950 | // HW TSC is used to reduce overhead (clock tick instead of nanosecond).
951 | extern kmp_uint64 __kmp_ticks_per_msec;
952 | #if KMP_COMPILER_ICC
953 | #define KMP_NOW() ((kmp_uint64)_rdtsc())
954 | #else
955 | #define KMP_NOW() __kmp_hardware_timestamp()
956 | #endif
957 | #define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
958 | #define KMP_BLOCKTIME_INTERVAL(team, tid)                                     \
959 |   (KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
960 | #define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
961 | #else
962 | // System time is retrieved sporadically while blocking.
963 | extern kmp_uint64 __kmp_now_nsec();
964 | #define KMP_NOW() __kmp_now_nsec()
965 | #define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
966 | #define KMP_BLOCKTIME_INTERVAL(team, tid)                                     \
967 |   (KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
968 | #define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
969 | #endif
970 | #define KMP_YIELD_NOW()                                                       \
971 |   (KMP_NOW_MSEC() / KMP_MAX(__kmp_dflt_blocktime, 1) %                        \
972 |        (__kmp_yield_on_count + __kmp_yield_off_count) <                       \
973 |    (kmp_uint32)__kmp_yield_on_count)
974 | #endif // KMP_USE_MONITOR |
975 | |
976 | #define KMP_MIN_STATSCOLS 40
977 | #define KMP_MAX_STATSCOLS 4096
978 | #define KMP_DEFAULT_STATSCOLS 80
979 |
980 | #define KMP_MIN_INTERVAL 0
981 | #define KMP_MAX_INTERVAL (INT_MAX - 1)
982 | #define KMP_DEFAULT_INTERVAL 0
983 |
984 | #define KMP_MIN_CHUNK 1
985 | #define KMP_MAX_CHUNK (INT_MAX - 1)
986 | #define KMP_DEFAULT_CHUNK 1
987 |
988 | #define KMP_MIN_INIT_WAIT 1
989 | #define KMP_MAX_INIT_WAIT (INT_MAX / 2)
990 | #define KMP_DEFAULT_INIT_WAIT 2048U
991 |
992 | #define KMP_MIN_NEXT_WAIT 1
993 | #define KMP_MAX_NEXT_WAIT (INT_MAX / 2)
994 | #define KMP_DEFAULT_NEXT_WAIT 1024U
995 |
996 | #define KMP_DFLT_DISP_NUM_BUFF 7
997 | #define KMP_MAX_ORDERED 8
998 |
999 | #define KMP_MAX_FIELDS 32
1000 |
1001 | #define KMP_MAX_BRANCH_BITS 31
1002 |
1003 | #define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
1004 |
1005 | #define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
1006 |
1007 | #define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
1008 | |
1009 | /* Minimum number of threads before switch to TLS gtid (experimentally |
1010 | determined) */ |
1011 | /* josh TODO: what about OS X* tuning? */ |
1012 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1013 | #define KMP_TLS_GTID_MIN 5
1014 | #else
1015 | #define KMP_TLS_GTID_MIN INT_MAX
1016 | #endif
1017 | |
1018 | #define KMP_MASTER_TID(tid) ((tid) == 0)
1019 | #define KMP_WORKER_TID(tid) ((tid) != 0)
1020 |
1021 | #define KMP_MASTER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) == 0)
1022 | #define KMP_WORKER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) != 0)
1023 | #define KMP_INITIAL_GTID(gtid) ((gtid) == 0)
1024 |
1025 | #ifndef TRUE
1026 | #define FALSE 0
1027 | #define TRUE (!FALSE)
1028 | #endif |
1029 | |
1030 | /* NOTE: all of the following constants must be even */ |
1031 | |
1032 | #if KMP_OS_WINDOWS
1033 | #define KMP_INIT_WAIT 64U /* initial number of spin-tests */
1034 | #define KMP_NEXT_WAIT 32U /* subsequent number of spin-tests */
1035 | #elif KMP_OS_CNK
1036 | #define KMP_INIT_WAIT 16U /* initial number of spin-tests */
1037 | #define KMP_NEXT_WAIT 8U /* subsequent number of spin-tests */
1038 | #elif KMP_OS_LINUX
1039 | #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1040 | #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1041 | #elif KMP_OS_DARWIN
1042 | /* TODO: tune for KMP_OS_DARWIN */
1043 | #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1044 | #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1045 | #elif KMP_OS_FREEBSD
1046 | /* TODO: tune for KMP_OS_FREEBSD */
1047 | #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1048 | #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1049 | #elif KMP_OS_NETBSD
1050 | /* TODO: tune for KMP_OS_NETBSD */
1051 | #define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
1052 | #define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
1053 | #endif
1054 | |
1055 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1056 | typedef struct kmp_cpuid { |
1057 | kmp_uint32 eax; |
1058 | kmp_uint32 ebx; |
1059 | kmp_uint32 ecx; |
1060 | kmp_uint32 edx; |
1061 | } kmp_cpuid_t; |
1062 | extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p); |
1063 | #if KMP_ARCH_X86
1064 | extern void __kmp_x86_pause(void); |
1065 | #elif KMP_MIC
1066 | // Performance testing on KNC (C0QS-7120 P/A/X/D, 61-core, 16 GB Memory) showed |
1067 | // regression after removal of extra PAUSE from KMP_YIELD_SPIN(). Changing |
1068 | // the delay from 100 to 300 showed even better performance than double PAUSE |
1069 | // on Spec OMP2001 and LCPC tasking tests, no regressions on EPCC. |
1070 | static inline void __kmp_x86_pause(void) { _mm_delay_32(300); } |
1071 | #else |
1072 | static inline void __kmp_x86_pause(void) { _mm_pause(); } |
1073 | #endif |
1074 | #define KMP_CPU_PAUSE() __kmp_x86_pause()
1075 | #elif KMP_ARCH_PPC64
1076 | #define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
1077 | #define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
1078 | #define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
1079 | #define KMP_CPU_PAUSE()                                                       \
1080 |   do {                                                                        \
1081 |     KMP_PPC64_PRI_LOW();                                                      \
1082 |     KMP_PPC64_PRI_MED();                                                      \
1083 |     KMP_PPC64_PRI_LOC_MB();                                                   \
1084 |   } while (0)
1085 | #else
1086 | #define KMP_CPU_PAUSE() /* nothing to do */
1087 | #endif |
1088 | |
1089 | #define KMP_INIT_YIELD(count)                                                 \
1090 |   { (count) = __kmp_yield_init; }
1091 |
1092 | #define KMP_YIELD(cond)                                                       \
1093 |   {                                                                           \
1094 |     KMP_CPU_PAUSE();                                                          \
1095 |     __kmp_yield((cond));                                                      \
1096 |   }
1097 | |
1098 | // Note the decrement of 2 in the following macros. With KMP_LIBRARY=turnaround,
1099 | // there should be no yielding, since the initial value from KMP_INIT_YIELD() is
1100 | // odd (an odd counter decremented by 2 never reaches zero).
1100 | |
1101 | #define KMP_YIELD_WHEN(cond, count)                                           \
1102 |   {                                                                           \
1103 |     KMP_CPU_PAUSE();                                                          \
1104 |     (count) -= 2;                                                             \
1105 |     if (!(count)) {                                                           \
1106 |       __kmp_yield(cond);                                                      \
1107 |       (count) = __kmp_yield_next;                                             \
1108 |     }                                                                         \
1109 |   }
1110 | #define KMP_YIELD_SPIN(count)                                                 \
1111 |   {                                                                           \
1112 |     KMP_CPU_PAUSE();                                                          \
1113 |     (count) -= 2;                                                             \
1114 |     if (!(count)) {                                                           \
1115 |       __kmp_yield(1);                                                         \
1116 |       (count) = __kmp_yield_next;                                             \
1117 |     }                                                                         \
1118 |   }
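/* Illustrative spin-wait sketch (assumed pattern, not from this file):

     kmp_uint32 spins;
     KMP_INIT_YIELD(spins);
     while (!flag_is_set()) {  // flag_is_set(): hypothetical predicate
       KMP_YIELD_SPIN(spins);  // pause; yields each time the count runs out
     }
*/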
1119 | |
1120 | /* ------------------------------------------------------------------------ */ |
1121 | /* Support datatypes for the orphaned construct nesting checks. */ |
1122 | /* ------------------------------------------------------------------------ */ |
1123 | |
1124 | enum cons_type { |
1125 | ct_none, |
1126 | ct_parallel, |
1127 | ct_pdo, |
1128 | ct_pdo_ordered, |
1129 | ct_psections, |
1130 | ct_psingle, |
1131 | |
1132 | /* the following must be left in order and not split up */ |
1133 | ct_taskq, |
1134 | ct_task, // really task inside non-ordered taskq, considered worksharing type |
1135 | ct_task_ordered, /* really task inside ordered taskq, considered a worksharing |
1136 | type */ |
1137 | /* the preceding must be left in order and not split up */ |
1138 | |
1139 | ct_critical, |
1140 | ct_ordered_in_parallel, |
1141 | ct_ordered_in_pdo, |
1142 | ct_ordered_in_taskq, |
1143 | ct_master, |
1144 | ct_reduce, |
1145 | ct_barrier |
1146 | }; |
1147 | |
1148 | /* test to see if we are in a taskq construct */ |
1149 | #define IS_CONS_TYPE_TASKQ(ct)                                                \
1150 |   (((int)(ct)) >= ((int)ct_taskq) && ((int)(ct)) <= ((int)ct_task_ordered))
1151 | #define IS_CONS_TYPE_ORDERED(ct)                                              \
1152 |   ((ct) == ct_pdo_ordered || (ct) == ct_task_ordered)
1153 | |
1154 | struct cons_data { |
1155 | ident_t const *ident; |
1156 | enum cons_type type; |
1157 | int prev; |
1158 | kmp_user_lock_p |
1159 | name; /* address exclusively for critical section name comparison */ |
1160 | }; |
1161 | |
1162 | struct cons_header { |
1163 | int p_top, w_top, s_top; |
1164 | int stack_size, stack_top; |
1165 | struct cons_data *stack_data; |
1166 | }; |
1167 | |
1168 | struct kmp_region_info { |
1169 | char *text; |
1170 |   int offset[KMP_MAX_FIELDS];
1171 |   int length[KMP_MAX_FIELDS];
1172 | }; |
1173 | |
1174 | /* ---------------------------------------------------------------------- */ |
1175 | /* ---------------------------------------------------------------------- */ |
1176 | |
1177 | #if KMP_OS_WINDOWS
1178 | typedef HANDLE kmp_thread_t; |
1179 | typedef DWORD kmp_key_t; |
1180 | #endif /* KMP_OS_WINDOWS */ |
1181 | |
1182 | #if KMP_OS_UNIX
1183 | typedef pthread_t kmp_thread_t; |
1184 | typedef pthread_key_t kmp_key_t; |
1185 | #endif |
1186 | |
1187 | extern kmp_key_t __kmp_gtid_threadprivate_key; |
1188 | |
1189 | typedef struct kmp_sys_info { |
1190 | long maxrss; /* the maximum resident set size utilized (in kilobytes) */ |
1191 | long minflt; /* the number of page faults serviced without any I/O */ |
1192 | long majflt; /* the number of page faults serviced that required I/O */ |
1193 | long nswap; /* the number of times a process was "swapped" out of memory */ |
1194 | long inblock; /* the number of times the file system had to perform input */ |
1195 | long oublock; /* the number of times the file system had to perform output */ |
1196 |   long nvcsw; /* the number of times a context switch was performed voluntarily */
1197 | long nivcsw; /* the number of times a context switch was forced */ |
1198 | } kmp_sys_info_t; |
1199 | |
1200 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1201 | typedef struct kmp_cpuinfo { |
1202 | int initialized; // If 0, other fields are not initialized. |
1203 | int signature; // CPUID(1).EAX |
1204 | int family; // CPUID(1).EAX[27:20]+CPUID(1).EAX[11:8] (Extended Family+Family) |
1205 | int model; // ( CPUID(1).EAX[19:16] << 4 ) + CPUID(1).EAX[7:4] ( ( Extended |
1206 | // Model << 4 ) + Model) |
1207 | int stepping; // CPUID(1).EAX[3:0] ( Stepping ) |
1208 | int sse2; // 0 if SSE2 instructions are not supported, 1 otherwise. |
1209 | int rtm; // 0 if RTM instructions are not supported, 1 otherwise. |
1210 | int cpu_stackoffset; |
1211 | int apic_id; |
1212 | int physical_id; |
1213 | int logical_id; |
1214 | kmp_uint64 frequency; // Nominal CPU frequency in Hz. |
1215 | char name[3 * sizeof(kmp_cpuid_t)]; // CPUID(0x80000002,0x80000003,0x80000004) |
1216 | } kmp_cpuinfo_t; |
1217 | #endif |
1218 | |
1219 | #if USE_ITT_BUILD
1220 | // We cannot include "kmp_itt.h" due to circular dependency. Declare the only |
1221 | // required type here. Later we will check the type meets requirements. |
1222 | typedef int kmp_itt_mark_t; |
1223 | #define KMP_ITT_DEBUG 0
1224 | #endif /* USE_ITT_BUILD */ |
1225 | |
1226 | /* Taskq data structures */ |
1227 | |
1228 | #define HIGH_WATER_MARK(nslots) (((nslots)*3) / 4)
1229 | // num thunks that each thread can simultaneously execute from a task queue
1230 | #define __KMP_TASKQ_THUNKS_PER_TH 1
1231 | |
1232 | /* flags for taskq_global_flags, kmp_task_queue_t tq_flags, kmpc_thunk_t |
1233 | th_flags */ |
1234 | |
1235 | #define TQF_IS_ORDERED 0x0001 // __kmpc_taskq interface, taskq ordered
1236 | // __kmpc_taskq interface, taskq with lastprivate list
1237 | #define TQF_IS_LASTPRIVATE 0x0002
1238 | #define TQF_IS_NOWAIT 0x0004 // __kmpc_taskq interface, end taskq nowait
1239 | // __kmpc_taskq interface, use heuristics to decide task queue size
1240 | #define TQF_HEURISTICS 0x0008
1241 |
1242 | // __kmpc_taskq interface, reserved for future use
1243 | #define TQF_INTERFACE_RESERVED1 0x0010
1244 | // __kmpc_taskq interface, reserved for future use
1245 | #define TQF_INTERFACE_RESERVED2 0x0020
1246 | // __kmpc_taskq interface, reserved for future use
1247 | #define TQF_INTERFACE_RESERVED3 0x0040
1248 | // __kmpc_taskq interface, reserved for future use
1249 | #define TQF_INTERFACE_RESERVED4 0x0080
1250 |
1251 | #define TQF_INTERFACE_FLAGS 0x00ff // all the __kmpc_taskq interface flags
1252 | // internal/read by instrumentation; only used with TQF_IS_LASTPRIVATE
1253 | #define TQF_IS_LAST_TASK 0x0100
1254 | // internal use only; this thunk->th_task is the taskq_task
1255 | #define TQF_TASKQ_TASK 0x0200
1256 | // internal use only; must release worker threads once ANY queued task
1257 | // exists (global)
1258 | #define TQF_RELEASE_WORKERS 0x0400
1259 | // internal use only; notify workers that master has finished enqueuing tasks
1260 | #define TQF_ALL_TASKS_QUEUED 0x0800
1261 | // internal use only: this queue encountered in parallel context: not serialized
1262 | #define TQF_PARALLEL_CONTEXT 0x1000
1263 | // internal use only; this queue is on the freelist and not in use
1264 | #define TQF_DEALLOCATED 0x2000
1265 |
1266 | #define TQF_INTERNAL_FLAGS 0x3f00 // all the internal use only flags
1267 | |
1268 | typedef struct KMP_ALIGN_CACHE kmpc_aligned_int32_t {
1269 | kmp_int32 ai_data; |
1270 | } kmpc_aligned_int32_t; |
1271 | |
1272 | typedef struct KMP_ALIGN_CACHE kmpc_aligned_queue_slot_t {
1273 | struct kmpc_thunk_t *qs_thunk; |
1274 | } kmpc_aligned_queue_slot_t; |
1275 | |
1276 | typedef struct kmpc_task_queue_t { |
1277 | /* task queue linkage fields for n-ary tree of queues (locked with global |
1278 | taskq_tree_lck) */ |
1279 | kmp_lock_t tq_link_lck; /* lock for child link, child next/prev links and |
1280 | child ref counts */ |
1281 | union { |
1282 | struct kmpc_task_queue_t *tq_parent; // pointer to parent taskq, not locked |
1283 | // for taskq internal freelists, locked with global taskq_freelist_lck |
1284 | struct kmpc_task_queue_t *tq_next_free; |
1285 | } tq; |
1286 | // pointer to linked-list of children, locked by tq's tq_link_lck |
1287 | volatile struct kmpc_task_queue_t *tq_first_child; |
1288 | // next child in linked-list, locked by parent tq's tq_link_lck |
1289 | struct kmpc_task_queue_t *tq_next_child; |
1290 | // previous child in linked-list, locked by parent tq's tq_link_lck |
1291 | struct kmpc_task_queue_t *tq_prev_child; |
1292 | // reference count of threads with access to this task queue |
1293 | volatile kmp_int32 tq_ref_count; |
1294 | /* (other than the thread executing the kmpc_end_taskq call) */ |
1295 | /* locked by parent tq's tq_link_lck */ |
1296 | |
1297 | /* shared data for task queue */ |
1298 | /* per-thread array of pointers to shared variable structures */ |
1299 | struct kmpc_aligned_shared_vars_t *tq_shareds; |
1300 | /* only one array element exists for all but outermost taskq */ |
1301 | |
1302 | /* bookkeeping for ordered task queue */ |
1303 | kmp_uint32 tq_tasknum_queuing; // ordered task # assigned while queuing tasks |
1304 | // ordered number of next task to be served (executed) |
1305 | volatile kmp_uint32 tq_tasknum_serving; |
1306 | |
1307 | /* thunk storage management for task queue */ |
1308 | kmp_lock_t tq_free_thunks_lck; /* lock for thunk freelist manipulation */ |
1309 | // thunk freelist, chained via th.th_next_free |
1310 | struct kmpc_thunk_t *tq_free_thunks; |
1311 | // space allocated for thunks for this task queue |
1312 | struct kmpc_thunk_t *tq_thunk_space; |
1313 | |
1314 | /* data fields for queue itself */ |
1315 | kmp_lock_t tq_queue_lck; /* lock for [de]enqueue operations: tq_queue, |
1316 | tq_head, tq_tail, tq_nfull */ |
1317 | /* array of queue slots to hold thunks for tasks */ |
1318 | kmpc_aligned_queue_slot_t *tq_queue; |
1319 | volatile struct kmpc_thunk_t *tq_taskq_slot; /* special slot for taskq task |
1320 | thunk, occupied if not NULL */ |
1321 | kmp_int32 tq_nslots; /* # of tq_thunk_space thunks alloc'd (not incl. |
1322 | tq_taskq_slot space) */ |
1323 | kmp_int32 tq_head; // enqueue puts item here (index into tq_queue array) |
1324 | kmp_int32 tq_tail; // dequeue takes item from here (index into tq_queue array) |
1325 | volatile kmp_int32 tq_nfull; // # of occupied entries in task queue right now |
1326 | kmp_int32 tq_hiwat; /* high-water mark for tq_nfull and queue scheduling */ |
1327 | volatile kmp_int32 tq_flags; /* TQF_xxx */ |
1328 | |
1329 | /* bookkeeping for outstanding thunks */ |
1330 | |
1331 | /* per-thread array for # of regular thunks currently being executed */ |
1332 | struct kmpc_aligned_int32_t *tq_th_thunks; |
1333 | kmp_int32 tq_nproc; /* number of thunks in the th_thunks array */ |
1334 | |
1335 | /* statistics library bookkeeping */ |
1336 | ident_t *tq_loc; /* source location information for taskq directive */ |
1337 | } kmpc_task_queue_t; |
1338 | |
1339 | typedef void (*kmpc_task_t)(kmp_int32 global_tid, struct kmpc_thunk_t *thunk); |
1340 | |
1341 | /* sizeof_shareds passed as arg to __kmpc_taskq call */ |
1342 | typedef struct kmpc_shared_vars_t { /* aligned during dynamic allocation */ |
1343 | kmpc_task_queue_t *sv_queue; /* (pointers to) shared vars */ |
1344 | } kmpc_shared_vars_t; |
1345 | |
1346 | typedef struct KMP_ALIGN_CACHE kmpc_aligned_shared_vars_t {
1347 | volatile struct kmpc_shared_vars_t *ai_data; |
1348 | } kmpc_aligned_shared_vars_t; |
1349 | |
1350 | /* sizeof_thunk passed as arg to kmpc_taskq call */ |
1351 | typedef struct kmpc_thunk_t { /* aligned during dynamic allocation */ |
1352 | union { /* field used for internal freelists too */ |
1353 | kmpc_shared_vars_t *th_shareds; |
1354 | struct kmpc_thunk_t *th_next_free; /* freelist of individual thunks within |
1355 | queue, head at tq_free_thunks */ |
1356 | } th; |
1357 | kmpc_task_t th_task; /* taskq_task if flags & TQF_TASKQ_TASK */ |
1358 | struct kmpc_thunk_t *th_encl_thunk; /* pointer to dynamically enclosing thunk |
1359 | on this thread's call stack */ |
1360 | // TQF_xxx(tq_flags interface plus possible internal flags) |
1361 | kmp_int32 th_flags; |
1362 | |
1363 | kmp_int32 th_status; |
1364 | kmp_uint32 th_tasknum; /* task number assigned in order of queuing, used for |
1365 | ordered sections */ |
1366 | /* private vars */ |
1367 | } kmpc_thunk_t; |
1368 | |
1369 | typedef struct KMP_ALIGN_CACHE kmp_taskq {
1370 | int tq_curr_thunk_capacity; |
1371 | |
1372 | kmpc_task_queue_t *tq_root; |
1373 | kmp_int32 tq_global_flags; |
1374 | |
1375 | kmp_lock_t tq_freelist_lck; |
1376 | kmpc_task_queue_t *tq_freelist; |
1377 | |
1378 | kmpc_thunk_t **tq_curr_thunk; |
1379 | } kmp_taskq_t; |
1380 | |
1381 | /* END Taskq data structures */ |
1382 | |
1383 | typedef kmp_int32 kmp_critical_name[8]; |
1384 | |
1385 | /*! |
1386 | @ingroup PARALLEL |
1387 | The type for a microtask which gets passed to @ref __kmpc_fork_call(). |
1388 | The arguments to the outlined function are |
1389 | @param global_tid the global thread identity of the thread executing the |
1390 | function. |
1391 | @param bound_tid the local identity of the thread executing the function
1392 | @param ... pointers to shared variables accessed by the function. |
1393 | */ |
1394 | typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...); |
1395 | typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth, |
1396 | ...); |
1397 | |
1398 | /*! |
1399 | @ingroup THREADPRIVATE |
1400 | @{ |
1401 | */ |
1402 | /* --------------------------------------------------------------------------- |
1403 | */ |
1404 | /* Threadprivate initialization/finalization function declarations */ |
1405 | |
1406 | /* for non-array objects: __kmpc_threadprivate_register() */ |
1407 | |
1408 | /*! |
1409 | Pointer to the constructor function. |
1410 | The first argument is the <tt>this</tt> pointer |
1411 | */ |
1412 | typedef void *(*kmpc_ctor)(void *); |
1413 | |
1414 | /*! |
1415 | Pointer to the destructor function. |
1416 | The first argument is the <tt>this</tt> pointer |
1417 | */ |
1418 | typedef void (*kmpc_dtor)( |
1419 | void * /*, size_t */); /* 2nd arg: magic number for KCC unused by Intel |
1420 | compiler */ |
1421 | /*! |
1422 | Pointer to an alternate constructor. |
1423 | The first argument is the <tt>this</tt> pointer. |
1424 | */ |
1425 | typedef void *(*kmpc_cctor)(void *, void *); |
1426 | |
1427 | /* for array objects: __kmpc_threadprivate_register_vec() */ |
1428 | /* First arg: "this" pointer */ |
1429 | /* Last arg: number of array elements */ |
1430 | /*! |
1431 | Array constructor. |
1432 | The first argument is the <tt>this</tt> pointer.
1433 | The second argument is the number of array elements.
1434 | */ |
1435 | typedef void *(*kmpc_ctor_vec)(void *, size_t); |
1436 | /*! |
1437 | Pointer to the array destructor function. |
1438 | The first argument is the <tt>this</tt> pointer |
1439 | The second argument is the number of array elements.
1440 | */ |
1441 | typedef void (*kmpc_dtor_vec)(void *, size_t); |
1442 | /*! |
1443 | Array constructor. |
1444 | The first argument is the <tt>this</tt> pointer.
1445 | The third argument is the number of array elements.
1446 | */ |
1447 | typedef void *(*kmpc_cctor_vec)(void *, void *, |
1448 | size_t); /* function unused by compiler */ |
1449 | |
1450 | /*! |
1451 | @} |
1452 | */ |
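/* Illustrative example (hypothetical compiler output, not part of this API's
   declarations): for a threadprivate C++ object the compiler can emit thin
   wrappers matching the typedefs above and hand them to the runtime:

     static void *my_ctor(void *this_ptr) {   // kmpc_ctor
       return new (this_ptr) MyType();
     }
     static void my_dtor(void *this_ptr) {    // kmpc_dtor
       ((MyType *)this_ptr)->~MyType();
     }
*/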
1453 | |
1454 | /* keeps track of threadprivate cache allocations for cleanup later */
1455 | typedef struct kmp_cached_addr { |
1456 | void **addr; /* address of allocated cache */ |
1457 | void ***compiler_cache; /* pointer to compiler's cache */ |
1458 | void *data; /* pointer to global data */ |
1459 | struct kmp_cached_addr *next; /* pointer to next cached address */ |
1460 | } kmp_cached_addr_t; |
1461 | |
1462 | struct private_data { |
1463 | struct private_data *next; /* The next descriptor in the list */ |
1464 | void *data; /* The data buffer for this descriptor */ |
1465 | int more; /* The repeat count for this descriptor */ |
1466 | size_t size; /* The data size for this descriptor */ |
1467 | }; |
1468 | |
1469 | struct private_common { |
1470 | struct private_common *next; |
1471 | struct private_common *link; |
1472 | void *gbl_addr; |
1473 | void *par_addr; /* par_addr == gbl_addr for MASTER thread */ |
1474 | size_t cmn_size; |
1475 | }; |
1476 | |
1477 | struct shared_common { |
1478 | struct shared_common *next; |
1479 | struct private_data *pod_init; |
1480 | void *obj_init; |
1481 | void *gbl_addr; |
1482 | union { |
1483 | kmpc_ctor ctor; |
1484 | kmpc_ctor_vec ctorv; |
1485 | } ct; |
1486 | union { |
1487 | kmpc_cctor cctor; |
1488 | kmpc_cctor_vec cctorv; |
1489 | } cct; |
1490 | union { |
1491 | kmpc_dtor dtor; |
1492 | kmpc_dtor_vec dtorv; |
1493 | } dt; |
1494 | size_t vec_len; |
1495 | int is_vec; |
1496 | size_t cmn_size; |
1497 | }; |
1498 | |
1499 | #define KMP_HASH_TABLE_LOG2 9 /* log2 of the hash table size */ |
1500 | #define KMP_HASH_TABLE_SIZE \ |
1501 | (1 << KMP_HASH_TABLE_LOG2) /* size of the hash table */ |
1502 | #define KMP_HASH_SHIFT 3 /* throw away this many low bits from the address */ |
1503 | #define KMP_HASH(x) \ |
1504 | ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1)) |
1505 | 
1506 | struct common_table { |
1507 | struct private_common *data[KMP_HASH_TABLE_SIZE]; |
1508 | }; |
1509 | 
1510 | struct shared_table { |
1511 | struct shared_common *data[KMP_HASH_TABLE_SIZE]; |
1512 | }; |
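The hash throws away the low KMP_HASH_SHIFT bits, which are constant for aligned pointers, and masks the rest into the power-of-two table; a standalone sketch of the same indexing (constants mirror the macros above, not runtime code):

#include <cstdint>
#include <cstddef>

static const int kShift = 3;              // KMP_HASH_SHIFT
static const size_t kTableSize = 1u << 9; // KMP_HASH_TABLE_SIZE

// Same computation as KMP_HASH(x): drop alignment bits, mask into the table.
static inline size_t hash_bucket(void *addr) {
  return ((uintptr_t)addr >> kShift) & (kTableSize - 1);
}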
1513 | |
1514 | /* ------------------------------------------------------------------------ */ |
1515 | |
1516 | #if KMP_USE_HIER_SCHED |
1517 | // Shared barrier data that exists inside a single unit of the scheduling |
1518 | // hierarchy |
1519 | typedef struct kmp_hier_private_bdata_t { |
1520 | kmp_int32 num_active; |
1521 | kmp_uint64 index; |
1522 | kmp_uint64 wait_val[2]; |
1523 | } kmp_hier_private_bdata_t; |
1524 | #endif |
1525 | |
1526 | typedef struct kmp_sched_flags { |
1527 | unsigned ordered : 1; |
1528 | unsigned nomerge : 1; |
1529 | unsigned contains_last : 1; |
1530 | #if KMP_USE_HIER_SCHED |
1531 | unsigned use_hier : 1; |
1532 | unsigned unused : 28; |
1533 | #else |
1534 | unsigned unused : 29; |
1535 | #endif |
1536 | } kmp_sched_flags_t; |
1537 | |
1538 | KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4); |
1539 | |
1540 | #if KMP_STATIC_STEAL_ENABLED |
1541 | typedef struct KMP_ALIGN_CACHE dispatch_private_info32 { |
1542 | kmp_int32 count; |
1543 | kmp_int32 ub; |
1544 | /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */ |
1545 | kmp_int32 lb; |
1546 | kmp_int32 st; |
1547 | kmp_int32 tc; |
1548 | kmp_int32 static_steal_counter; /* for static_steal only; maybe better to put |
1549 | after ub */ |
1550 | |
1551 | // KMP_ALIGN( 16 ) ensures ( if the KMP_ALIGN macro is turned on ) |
1552 | // a) parm3 is properly aligned and |
1553 | // b) all parm1-4 are in the same cache line. |
1554 | // Because parm1-4 are used together, performance seems to be better |
1555 | // if they are in the same line (not measured though). |
1556 | 
1557 | struct KMP_ALIGN(32) { // AC: changed 16 to 32 in order to simplify template |
1558 | kmp_int32 parm1; // structures in kmp_dispatch.cpp. This should |
1559 | kmp_int32 parm2; // make no real change at least while padding is off. |
1560 | kmp_int32 parm3; |
1561 | kmp_int32 parm4; |
1562 | }; |
1563 | |
1564 | kmp_uint32 ordered_lower; |
1565 | kmp_uint32 ordered_upper; |
1566 | #if KMP_OS_WINDOWS |
1567 | // This var can be placed in the hole between 'tc' and 'parm1', instead of |
1568 | // 'static_steal_counter'. It would be nice to measure execution times. |
1569 | // The conditional if/endif could be removed entirely. |
1570 | kmp_int32 last_upper; |
1571 | #endif /* KMP_OS_WINDOWS */ |
1572 | } dispatch_private_info32_t; |
1573 | |
1574 | typedef struct KMP_ALIGN_CACHE dispatch_private_info64 { |
1575 | kmp_int64 count; // current chunk number for static & static-steal scheduling |
1576 | kmp_int64 ub; /* upper-bound */ |
1577 | /* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */ |
1578 | kmp_int64 lb; /* lower-bound */ |
1579 | kmp_int64 st; /* stride */ |
1580 | kmp_int64 tc; /* trip count (number of iterations) */ |
1581 | kmp_int64 static_steal_counter; /* for static_steal only; maybe better to put |
1582 | after ub */ |
1583 | |
1584 | /* parm[1-4] are used in different ways by different scheduling algorithms */ |
1585 | |
1586 | // KMP_ALIGN( 32 ) ensures ( if the KMP_ALIGN macro is turned on ) |
1587 | // a) parm3 is properly aligned and |
1588 | // b) all parm1-4 are in the same cache line. |
1589 | // Because parm1-4 are used together, performance seems to be better |
1590 | // if they are in the same line (not measured though). |
1591 | 
1592 | struct KMP_ALIGN(32) { |
1593 | kmp_int64 parm1; |
1594 | kmp_int64 parm2; |
1595 | kmp_int64 parm3; |
1596 | kmp_int64 parm4; |
1597 | }; |
1598 | |
1599 | kmp_uint64 ordered_lower; |
1600 | kmp_uint64 ordered_upper; |
1601 | #if KMP_OS_WINDOWS |
1602 | // This var can be placed in the hole between 'tc' and 'parm1', instead of |
1603 | // 'static_steal_counter'. It would be nice to measure execution times. |
1604 | // The conditional if/endif could be removed entirely. |
1605 | kmp_int64 last_upper; |
1606 | #endif /* KMP_OS_WINDOWS */ |
1607 | } dispatch_private_info64_t; |
1608 | #else /* KMP_STATIC_STEAL_ENABLED */ |
1609 | typedef struct KMP_ALIGN_CACHE dispatch_private_info32 { |
1610 | kmp_int32 lb; |
1611 | kmp_int32 ub; |
1612 | kmp_int32 st; |
1613 | kmp_int32 tc; |
1614 | |
1615 | kmp_int32 parm1; |
1616 | kmp_int32 parm2; |
1617 | kmp_int32 parm3; |
1618 | kmp_int32 parm4; |
1619 | |
1620 | kmp_int32 count; |
1621 | |
1622 | kmp_uint32 ordered_lower; |
1623 | kmp_uint32 ordered_upper; |
1624 | #if KMP_OS_WINDOWS |
1625 | kmp_int32 last_upper; |
1626 | #endif /* KMP_OS_WINDOWS */ |
1627 | } dispatch_private_info32_t; |
1628 | |
1629 | typedef struct KMP_ALIGN_CACHE dispatch_private_info64 { |
1630 | kmp_int64 lb; /* lower-bound */ |
1631 | kmp_int64 ub; /* upper-bound */ |
1632 | kmp_int64 st; /* stride */ |
1633 | kmp_int64 tc; /* trip count (number of iterations) */ |
1634 | |
1635 | /* parm[1-4] are used in different ways by different scheduling algorithms */ |
1636 | kmp_int64 parm1; |
1637 | kmp_int64 parm2; |
1638 | kmp_int64 parm3; |
1639 | kmp_int64 parm4; |
1640 | |
1641 | kmp_int64 count; /* current chunk number for static scheduling */ |
1642 | |
1643 | kmp_uint64 ordered_lower; |
1644 | kmp_uint64 ordered_upper; |
1645 | #if KMP_OS_WINDOWS |
1646 | kmp_int64 last_upper; |
1647 | #endif /* KMP_OS_WINDOWS */ |
1648 | } dispatch_private_info64_t; |
1649 | #endif /* KMP_STATIC_STEAL_ENABLED */ |
1650 | |
1651 | typedef struct KMP_ALIGN_CACHE dispatch_private_info { |
1652 | union private_info { |
1653 | dispatch_private_info32_t p32; |
1654 | dispatch_private_info64_t p64; |
1655 | } u; |
1656 | enum sched_type schedule; /* scheduling algorithm */ |
1657 | kmp_sched_flags_t flags; /* flags (e.g., ordered, nomerge, etc.) */ |
1658 | kmp_int32 ordered_bumped; |
1659 | // To retain the structure size after making ordered_iteration scalar |
1660 | kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 3]; |
1661 | // Stack of buffers for nest of serial regions |
1662 | struct dispatch_private_info *next; |
1663 | kmp_int32 type_size; /* the size of types in private_info */ |
1664 | #if KMP_USE_HIER_SCHED |
1665 | kmp_int32 hier_id; |
1666 | void *parent; /* hierarchical scheduling parent pointer */ |
1667 | #endif |
1668 | enum cons_type pushed_ws; |
1669 | } dispatch_private_info_t; |
1670 | |
1671 | typedef struct dispatch_shared_info32 { |
1672 | /* chunk index under dynamic, number of idle threads under static-steal; |
1673 | iteration index otherwise */ |
1674 | volatile kmp_uint32 iteration; |
1675 | volatile kmp_uint32 num_done; |
1676 | volatile kmp_uint32 ordered_iteration; |
1677 | // Dummy to retain the structure size after making ordered_iteration scalar |
1678 | kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1]; |
1679 | } dispatch_shared_info32_t; |
1680 | |
1681 | typedef struct dispatch_shared_info64 { |
1682 | /* chunk index under dynamic, number of idle threads under static-steal; |
1683 | iteration index otherwise */ |
1684 | volatile kmp_uint64 iteration; |
1685 | volatile kmp_uint64 num_done; |
1686 | volatile kmp_uint64 ordered_iteration; |
1687 | // Dummy to retain the structure size after making ordered_iteration scalar |
1688 | kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3]; |
1689 | } dispatch_shared_info64_t; |
1690 | |
1691 | typedef struct dispatch_shared_info { |
1692 | union shared_info { |
1693 | dispatch_shared_info32_t s32; |
1694 | dispatch_shared_info64_t s64; |
1695 | } u; |
1696 | volatile kmp_uint32 buffer_index; |
1697 | #if OMP_45_ENABLED |
1698 | volatile kmp_int32 doacross_buf_idx; // teamwise index |
1699 | volatile kmp_uint32 *doacross_flags; // shared array of iteration flags (0/1) |
1700 | kmp_int32 doacross_num_done; // count finished threads |
1701 | #endif |
1702 | #if KMP_USE_HIER_SCHED |
1703 | void *hier; |
1704 | #endif |
1705 | #if KMP_USE_HWLOC |
1706 | // When linking with libhwloc, the ORDERED EPCC test slows down on big |
1707 | // machines (> 48 cores). Performance analysis showed that a cache thrash |
1708 | // was occurring and this padding helps alleviate the problem. |
1709 | char padding[64]; |
1710 | #endif |
1711 | } dispatch_shared_info_t; |
1712 | |
1713 | typedef struct kmp_disp { |
1714 | /* Vector for ORDERED SECTION */ |
1715 | void (*th_deo_fcn)(int *gtid, int *cid, ident_t *); |
1716 | /* Vector for END ORDERED SECTION */ |
1717 | void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *); |
1718 | |
1719 | dispatch_shared_info_t *th_dispatch_sh_current; |
1720 | dispatch_private_info_t *th_dispatch_pr_current; |
1721 | |
1722 | dispatch_private_info_t *th_disp_buffer; |
1723 | kmp_int32 th_disp_index; |
1724 | #if OMP_45_ENABLED |
1725 | kmp_int32 th_doacross_buf_idx; // thread's doacross buffer index |
1726 | volatile kmp_uint32 *th_doacross_flags; // pointer to shared array of flags |
1727 | union { // we can use union here because doacross cannot be used in |
1728 | // nonmonotonic loops |
1729 | kmp_int64 *th_doacross_info; // info on loop bounds |
1730 | kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable) |
1731 | }; |
1732 | #else |
1733 | #if KMP_STATIC_STEAL_ENABLED |
1734 | kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable) |
1735 | void *dummy_padding[1]; // make it 64 bytes on Intel(R) 64 |
1736 | #else |
1737 | void *dummy_padding[2]; // make it 64 bytes on Intel(R) 64 |
1738 | #endif |
1739 | #endif |
1740 | #if KMP_USE_INTERNODE_ALIGNMENT |
1741 | char more_padding[INTERNODE_CACHE_LINE]; |
1742 | #endif |
1743 | } kmp_disp_t; |
1744 | |
1745 | /* ------------------------------------------------------------------------ */ |
1746 | /* Barrier stuff */ |
1747 | |
1748 | /* constants for barrier state update */ |
1749 | #define KMP_INIT_BARRIER_STATE 0 /* should probably start from zero */ |
1750 | #define KMP_BARRIER_SLEEP_BIT 0 /* bit used for suspend/sleep part of state */ |
1751 | #define KMP_BARRIER_UNUSED_BIT 1 // bit that must never be set for valid state |
1752 | #define KMP_BARRIER_BUMP_BIT 2 /* lsb used for bump of go/arrived state */ |
1753 | 
1754 | #define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT) |
1755 | #define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT) |
1756 | #define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT) |
1757 | |
1758 | #if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT) |
1759 | #error "Barrier sleep bit must be smaller than barrier bump bit" |
1760 | #endif |
1761 | #if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT) |
1762 | #error "Barrier unused bit must be smaller than barrier bump bit" |
1763 | #endif |
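A standalone sketch of how the encoding above composes (not the runtime's code): the sleep flag lives below the bump bit, so the arrived/go counter can advance in KMP_BARRIER_STATE_BUMP units without touching it.

#include <cstdint>

static const uint64_t kSleepState = 1ull << 0; // KMP_BARRIER_SLEEP_STATE
static const uint64_t kStateBump = 1ull << 2;  // KMP_BARRIER_STATE_BUMP

static inline bool is_sleeping(uint64_t s) { return (s & kSleepState) != 0; }
// Advance one barrier epoch, clearing the sleep flag along the way.
static inline uint64_t bump_epoch(uint64_t s) {
  return (s & ~kSleepState) + kStateBump;
}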
1764 | |
1765 | // Constants for release barrier wait state: currently, hierarchical only |
1766 | #define KMP_BARRIER_NOT_WAITING 0 // Normal state; worker not in wait_sleep |
1767 | #define KMP_BARRIER_OWN_FLAG \ |
1768 | 1 // Normal state; worker waiting on own b_go flag in release |
1769 | #define KMP_BARRIER_PARENT_FLAG \ |
1770 | 2 // Special state; worker waiting on parent's b_go flag in release |
1771 | #define KMP_BARRIER_SWITCH_TO_OWN_FLAG \ |
1772 | 3 // Special state; tells worker to shift from parent to own b_go |
1773 | #define KMP_BARRIER_SWITCHING \ |
1774 | 4 // Special state; worker resets appropriate flag on wake-up |
1775 | 
1776 | #define KMP_NOT_SAFE_TO_REAP \ |
1777 | 0 // Thread th_reap_state: not safe to reap (tasking) |
1778 | #define KMP_SAFE_TO_REAP 1 // Thread th_reap_state: safe to reap (not tasking) |
1779 | |
1780 | enum barrier_type { |
1781 | bs_plain_barrier = 0, /* 0, All non-fork/join barriers (except reduction |
1782 | barriers if enabled) */ |
1783 | bs_forkjoin_barrier, /* 1, All fork/join (parallel region) barriers */ |
1784 | #if KMP_FAST_REDUCTION_BARRIER |
1785 | bs_reduction_barrier, /* 2, All barriers that are used in reduction */ |
1786 | #endif // KMP_FAST_REDUCTION_BARRIER |
1787 | bs_last_barrier /* Just a placeholder to mark the end */ |
1788 | }; |
1789 | |
1790 | // to work with reduction barriers just like with plain barriers |
1791 | #if !KMP_FAST_REDUCTION_BARRIER |
1792 | #define bs_reduction_barrier bs_plain_barrier |
1793 | #endif // KMP_FAST_REDUCTION_BARRIER |
1794 | |
1795 | typedef enum kmp_bar_pat { /* Barrier communication patterns */ |
1796 | bp_linear_bar = |
1797 | 0, /* Single level (degenerate) tree */ |
1798 | bp_tree_bar = |
1799 | 1, /* Balanced tree with branching factor 2^n */ |
1800 | bp_hyper_bar = |
1801 | 2, /* Hypercube-embedded tree with min branching |
1802 | factor 2^n */ |
1803 | bp_hierarchical_bar = 3, /* Machine hierarchy tree */ |
1804 | bp_last_bar /* Placeholder to mark the end */ |
1805 | } kmp_bar_pat_e; |
1806 | |
1807 | #define KMP_BARRIER_ICV_PUSH 1 |
1808 | |
1809 | /* Record for holding the values of the internal controls stack records */ |
1810 | typedef struct kmp_internal_control { |
1811 | int serial_nesting_level; /* corresponds to the value of the |
1812 | th_team_serialized field */ |
1813 | kmp_int8 nested; /* internal control for nested parallelism (per thread) */ |
1814 | kmp_int8 dynamic; /* internal control for dynamic adjustment of threads (per |
1815 | thread) */ |
1816 | kmp_int8 |
1817 | bt_set; /* internal control for whether blocktime is explicitly set */ |
1818 | int blocktime; /* internal control for blocktime */ |
1819 | #if KMP_USE_MONITOR |
1820 | int bt_intervals; /* internal control for blocktime intervals */ |
1821 | #endif |
1822 | int nproc; /* internal control for #threads for next parallel region (per |
1823 | thread) */ |
1824 | int max_active_levels; /* internal control for max_active_levels */ |
1825 | kmp_r_sched_t |
1826 | sched; /* internal control for runtime schedule {sched,chunk} pair */ |
1827 | #if OMP_40_ENABLED |
1828 | kmp_proc_bind_t proc_bind; /* internal control for affinity */ |
1829 | kmp_int32 default_device; /* internal control for default device */ |
1830 | #endif // OMP_40_ENABLED |
1831 | struct kmp_internal_control *next; |
1832 | } kmp_internal_control_t; |
1833 | |
1834 | static inline void copy_icvs(kmp_internal_control_t *dst, |
1835 | kmp_internal_control_t *src) { |
1836 | *dst = *src; |
1837 | } |
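The next field above lets these records chain into a per-thread stack of saved settings; a hedged sketch of a push over such a list (the helper and its caller-provided storage argument are invented, not runtime code):

// Snapshot 'cur' into 'storage' and link it above 'top'; returns the new top.
static inline kmp_internal_control_t *
push_icvs(kmp_internal_control_t *top, kmp_internal_control_t *cur,
          kmp_internal_control_t *storage) {
  copy_icvs(storage, cur); // struct copy of all ICVs
  storage->next = top;
  return storage;
}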
1838 | |
1839 | /* Thread barrier needs volatile barrier fields */ |
1840 | typedef struct KMP_ALIGN_CACHE kmp_bstate { |
1841 | // th_fixed_icvs is aligned by virtue of kmp_bstate being aligned (and all |
1842 | // uses of it). It is not explicitly aligned below, because we *don't* want |
1843 | // it to be padded -- instead, we fit b_go into the same cache line with |
1844 | // th_fixed_icvs, enabling NGO cache line stores in the hierarchical barrier. |
1845 | kmp_internal_control_t th_fixed_icvs; // Initial ICVs for the thread |
1846 | // Tuck b_go into end of th_fixed_icvs cache line, so it can be stored with |
1847 | // same NGO store |
1848 | volatile kmp_uint64 b_go; // STATE => task should proceed (hierarchical) |
1849 | KMP_ALIGN_CACHE volatile kmp_uint64 |
1850 | b_arrived; // STATE => task reached synch point. |
1851 | kmp_uint32 *skip_per_level; |
1852 | kmp_uint32 my_level; |
1853 | kmp_int32 parent_tid; |
1854 | kmp_int32 old_tid; |
1855 | kmp_uint32 depth; |
1856 | struct kmp_bstate *parent_bar; |
1857 | kmp_team_t *team; |
1858 | kmp_uint64 leaf_state; |
1859 | kmp_uint32 nproc; |
1860 | kmp_uint8 base_leaf_kids; |
1861 | kmp_uint8 leaf_kids; |
1862 | kmp_uint8 offset; |
1863 | kmp_uint8 wait_flag; |
1864 | kmp_uint8 use_oncore_barrier; |
1865 | #if USE_DEBUGGER |
1866 | // The following field is intended for the debugger solely. Only the worker |
1867 | // thread itself accesses this field: the worker increases it by 1 when it |
1868 | // arrives to a barrier. |
1869 | KMP_ALIGN_CACHE kmp_uint b_worker_arrived; |
1870 | #endif /* USE_DEBUGGER */ |
1871 | } kmp_bstate_t; |
1872 | |
1873 | union KMP_ALIGN_CACHE kmp_barrier_union { |
1874 | double b_align; /* use worst case alignment */ |
1875 | char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)]; |
1876 | kmp_bstate_t bb; |
1877 | }; |
1878 | |
1879 | typedef union kmp_barrier_union kmp_balign_t; |
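The KMP_PAD arithmetic visible in the expansions above rounds a size up to the next cache-line multiple; a standalone check of the formula (assumption: CACHE_LINE is 64):

#include <cstdio>

// n + (line - ((n - 1) % line) - 1) rounds n up to a multiple of 'line'.
#define PAD(n, line) ((n) + ((line) - (((n)-1) % (line)) - 1))

int main() {
  printf("%d %d %d\n", PAD(1, 64), PAD(64, 64), PAD(65, 64)); // 64 64 128
  return 0;
}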
1880 | |
1881 | /* Team barrier needs only non-volatile arrived counter */ |
1882 | union KMP_ALIGN_CACHE kmp_barrier_team_union { |
1883 | double b_align; /* use worst case alignment */ |
1884 | char b_pad[CACHE_LINE]; |
1885 | struct { |
1886 | kmp_uint64 b_arrived; /* STATE => task reached synch point. */ |
1887 | #if USE_DEBUGGER |
1888 | // The following two fields are intended for the debugger solely. Only |
1889 | // master of the team accesses these fields: the first one is increased by |
1890 | // 1 when master arrives to a barrier, the second one is increased by one |
1891 | // when all the threads arrived. |
1892 | kmp_uint b_master_arrived; |
1893 | kmp_uint b_team_arrived; |
1894 | #endif |
1895 | }; |
1896 | }; |
1897 | |
1898 | typedef union kmp_barrier_team_union kmp_balign_team_t; |
1899 | |
1900 | /* Padding for Linux* OS pthreads condition variables and mutexes used to signal |
1901 | threads when a condition changes. This is to work around an NPTL bug where |
1902 | padding was added to pthread_cond_t which caused the initialization routine |
1903 | to write outside of the structure if compiled on pre-NPTL threads. */ |
1904 | #if KMP_OS_WINDOWS |
1905 | typedef struct kmp_win32_mutex { |
1906 | /* The Lock */ |
1907 | CRITICAL_SECTION cs; |
1908 | } kmp_win32_mutex_t; |
1909 | |
1910 | typedef struct kmp_win32_cond { |
1911 | /* Count of the number of waiters. */ |
1912 | int waiters_count_; |
1913 | |
1914 | /* Serialize access to <waiters_count_> */ |
1915 | kmp_win32_mutex_t waiters_count_lock_; |
1916 | |
1917 | /* Number of threads to release via a <cond_broadcast> or a <cond_signal> */ |
1918 | int release_count_; |
1919 | |
1920 | /* Keeps track of the current "generation" so that we don't allow */ |
1921 | /* one thread to steal all the "releases" from the broadcast. */ |
1922 | int wait_generation_count_; |
1923 | |
1924 | /* A manual-reset event that's used to block and release waiting threads. */ |
1925 | HANDLE event_; |
1926 | } kmp_win32_cond_t; |
1927 | #endif |
1928 | |
1929 | #if KMP_OS_UNIX |
1930 | 
1931 | union KMP_ALIGN_CACHE kmp_cond_union { |
1932 | double c_align; |
1933 | char c_pad[CACHE_LINE]; |
1934 | pthread_cond_t c_cond; |
1935 | }; |
1936 | |
1937 | typedef union kmp_cond_union kmp_cond_align_t; |
1938 | |
1939 | union KMP_ALIGN_CACHE kmp_mutex_union { |
1940 | double m_align; |
1941 | char m_pad[CACHE_LINE]; |
1942 | pthread_mutex_t m_mutex; |
1943 | }; |
1944 | |
1945 | typedef union kmp_mutex_union kmp_mutex_align_t; |
1946 | |
1947 | #endif /* KMP_OS_UNIX */ |
1948 | |
1949 | typedef struct kmp_desc_base { |
1950 | void *ds_stackbase; |
1951 | size_t ds_stacksize; |
1952 | int ds_stackgrow; |
1953 | kmp_thread_t ds_thread; |
1954 | volatile int ds_tid; |
1955 | int ds_gtid; |
1956 | #if KMP_OS_WINDOWS |
1957 | volatile int ds_alive; |
1958 | DWORD ds_thread_id; |
1959 | /* ds_thread keeps the thread handle on Windows* OS. It is enough for RTL |
1960 | purposes. However, debugger support (libomp_db) cannot work with handles, |
1961 | because they are not comparable. For example, the debugger requests info about |
1962 | the thread with handle h. h is valid within the debugger process but |
1963 | meaningless within the debuggee process. Even if h is duplicated by a call to |
1964 | DuplicateHandle(), the resulting handle h' is valid within the debuggee |
1965 | process, but it is a *new* handle which does *not* equal any other handle in |
1966 | the debuggee... The only way to compare handles is to convert them to |
1967 | system-wide ids. GetThreadId() is available only in Longhorn and Server 2003. |
1968 | :-( In contrast, GetCurrentThreadId() is available on all Windows* OS flavours |
1969 | (including Windows* 95). Thus, we have to get the thread id by calling |
1970 | GetCurrentThreadId() from within the thread and save it so libomp_db can identify threads. */ |
1971 | #endif /* KMP_OS_WINDOWS */ |
1972 | } kmp_desc_base_t; |
1973 | |
1974 | typedef union KMP_ALIGN_CACHE kmp_desc { |
1975 | double ds_align; /* use worst case alignment */ |
1976 | char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)]; |
1977 | kmp_desc_base_t ds; |
1978 | } kmp_desc_t; |
1979 | |
1980 | typedef struct kmp_local { |
1981 | volatile int this_construct; /* count of single's encountered by thread */ |
1982 | void *reduce_data; |
1983 | #if KMP_USE_BGET |
1984 | void *bget_data; |
1985 | void *bget_list; |
1986 | #if !USE_CMP_XCHG_FOR_BGET |
1987 | #ifdef USE_QUEUING_LOCK_FOR_BGET |
1988 | kmp_lock_t bget_lock; /* Lock for accessing bget free list */ |
1989 | #else |
1990 | kmp_bootstrap_lock_t bget_lock; // Lock for accessing bget free list. Must be |
1991 | // bootstrap lock so we can use it at library |
1992 | // shutdown. |
1993 | #endif /* USE_QUEUING_LOCK_FOR_BGET */ |
1994 | #endif /* ! USE_CMP_XCHG_FOR_BGET */ |
1995 | #endif /* KMP_USE_BGET */ |
1996 | |
1997 | PACKED_REDUCTION_METHOD_T |
1998 | packed_reduction_method; /* stored by __kmpc_reduce*(), used by |
1999 | __kmpc_end_reduce*() */ |
2000 | |
2001 | } kmp_local_t; |
2002 | |
2003 | #define KMP_CHECK_UPDATE(a, b) \ |
2004 | if ((a) != (b)) \ |
2005 | (a) = (b) |
2006 | #define KMP_CHECK_UPDATE_SYNC(a, b) \ |
2007 | if ((a) != (b)) \ |
2008 | TCW_SYNC_PTR((a), (b)) |
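The guard exists to skip redundant stores that would needlessly dirty a shared cache line; a minimal standalone demonstration of the same pattern (names invented):

#define CHECK_UPDATE(a, b) \
  if ((a) != (b))          \
  (a) = (b)

int main() {
  int field = 5;
  CHECK_UPDATE(field, 5); // equal: no store, the cache line stays clean
  CHECK_UPDATE(field, 7); // differs: the store happens
  return field;           // 7
}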
2009 | |
2010 | #define get__blocktime(xteam, xtid) \ |
2011 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) |
2012 | #define get__bt_set(xteam, xtid) \ |
2013 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) |
2014 | #if KMP_USE_MONITOR |
2015 | #define get__bt_intervals(xteam, xtid) \ |
2016 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) |
2017 | #endif |
2018 | 
2019 | #define get__nested_2(xteam, xtid) \ |
2020 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nested) |
2021 | #define get__dynamic_2(xteam, xtid) \ |
2022 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic) |
2023 | #define get__nproc_2(xteam, xtid) \ |
2024 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc) |
2025 | #define get__sched_2(xteam, xtid) \ |
2026 | ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched) |
2027 | |
2028 | #define set__blocktime_team(xteam, xtid, xval) \ |
2029 | (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \ |
2030 | (xval)) |
2031 | |
2032 | #if KMP_USE_MONITOR |
2033 | #define set__bt_intervals_team(xteam, xtid, xval) \ |
2034 | (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \ |
2035 | (xval)) |
2036 | #endif |
2037 | |
2038 | #define set__bt_set_team(xteam, xtid, xval) \ |
2039 | (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval)) |
2040 | |
2041 | #define set__nested(xthread, xval) \ |
2042 | (((xthread)->th.th_current_task->td_icvs.nested) = (xval)) |
2043 | #define get__nested(xthread) \ |
2044 | (((xthread)->th.th_current_task->td_icvs.nested) ? (FTN_TRUE) : (FTN_FALSE)) |
2045 | 
2046 | #define set__dynamic(xthread, xval) \ |
2047 | (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval)) |
2048 | #define get__dynamic(xthread) \ |
2049 | (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE)) |
2050 | 
2051 | #define set__nproc(xthread, xval) \ |
2052 | (((xthread)->th.th_current_task->td_icvs.nproc) = (xval)) |
2053 | 
2054 | #define set__max_active_levels(xthread, xval) \ |
2055 | (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval)) |
2056 | 
2057 | #define set__sched(xthread, xval) \ |
2058 | (((xthread)->th.th_current_task->td_icvs.sched) = (xval)) |
2059 | |
2060 | #if OMP_40_ENABLED |
2061 | 
2062 | #define set__proc_bind(xthread, xval) \ |
2063 | (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval)) |
2064 | #define get__proc_bind(xthread) \ |
2065 | ((xthread)->th.th_current_task->td_icvs.proc_bind) |
2066 | |
2067 | #endif /* OMP_40_ENABLED */ |
2068 | |
2069 | // OpenMP tasking data structures |
2070 | |
2071 | typedef enum kmp_tasking_mode { |
2072 | tskm_immediate_exec = 0, |
2073 | tskm_extra_barrier = 1, |
2074 | tskm_task_teams = 2, |
2075 | tskm_max = 2 |
2076 | } kmp_tasking_mode_t; |
2077 | |
2078 | extern kmp_tasking_mode_t |
2079 | __kmp_tasking_mode; /* determines how/when to execute tasks */ |
2080 | extern int __kmp_task_stealing_constraint; |
2081 | #if OMP_40_ENABLED |
2082 | extern kmp_int32 __kmp_default_device; // Set via OMP_DEFAULT_DEVICE if |
2083 | // specified, defaults to 0 otherwise |
2084 | #endif |
2085 | #if OMP_45_ENABLED |
2086 | // Set via OMP_MAX_TASK_PRIORITY if specified, defaults to 0 otherwise |
2087 | extern kmp_int32 __kmp_max_task_priority; |
2088 | // Set via KMP_TASKLOOP_MIN_TASKS if specified, defaults to 0 otherwise |
2089 | extern kmp_uint64 __kmp_taskloop_min_tasks; |
2090 | #endif |
2091 | |
2092 | /* NOTE: kmp_taskdata_t and kmp_task_t structures allocated in single block with |
2093 | taskdata first */ |
2094 | #define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1) |
2095 | #define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1) |
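Both macros depend on the single-block layout noted above, [taskdata][task][private data]; each view is one record away from the other. A standalone sketch with stand-in structs:

#include <cstdlib>

struct taskdata_sketch { int id; };    // stand-in for kmp_taskdata_t
struct task_sketch { void *shareds; }; // stand-in for kmp_task_t

int main() {
  void *block = std::malloc(sizeof(taskdata_sketch) + sizeof(task_sketch));
  taskdata_sketch *td = (taskdata_sketch *)block;
  task_sketch *t = (task_sketch *)(td + 1);           // TASKDATA_TO_TASK
  taskdata_sketch *back = ((taskdata_sketch *)t) - 1; // TASK_TO_TASKDATA
  int ok = (back == td); // the conversions round-trip
  std::free(block);
  return ok ? 0 : 1;
}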
2096 | |
2097 | // The tt_found_tasks flag is a signal to all threads in the team that tasks |
2098 | // were spawned and queued since the previous barrier release. |
2099 | #define KMP_TASKING_ENABLED(task_team) \ |
2100 | (TCR_SYNC_4((task_team)->tt.tt_found_tasks) == TRUE) |
2101 | /*! |
2102 | @ingroup BASIC_TYPES |
2103 | @{ |
2104 | */ |
2105 | |
2106 | /*! |
2107 | */ |
2108 | typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *); |
2109 | |
2110 | #if OMP_40_ENABLED || OMP_45_ENABLED |
2111 | typedef union kmp_cmplrdata { |
2112 | #if OMP_45_ENABLED |
2113 | kmp_int32 priority; /**< priority specified by user for the task */ |
2114 | #endif // OMP_45_ENABLED |
2115 | #if OMP_40_ENABLED |
2116 | kmp_routine_entry_t |
2117 | destructors; /* pointer to function to invoke destructors of |
2118 | firstprivate C++ objects */ |
2119 | #endif // OMP_40_ENABLED |
2120 | /* future data */ |
2121 | } kmp_cmplrdata_t; |
2122 | #endif |
2123 | |
2124 | /* sizeof_kmp_task_t passed as arg to kmpc_omp_task call */ |
2125 | /*! |
2126 | */ |
2127 | typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */ |
2128 | void *shareds; /**< pointer to block of pointers to shared vars */ |
2129 | kmp_routine_entry_t |
2130 | routine; /**< pointer to routine to call for executing task */ |
2131 | kmp_int32 part_id; /**< part id for the task */ |
2132 | #if OMP_40_ENABLED || OMP_45_ENABLED |
2133 | kmp_cmplrdata_t |
2134 | data1; /* Two known optional additions: destructors and priority */ |
2135 | kmp_cmplrdata_t data2; /* Process destructors first, priority second */ |
2136 | /* future data */ |
2137 | #endif |
2138 | /* private vars */ |
2139 | } kmp_task_t; |
2140 | |
2141 | /*! |
2142 | @} |
2143 | */ |
2144 | |
2145 | #if OMP_40_ENABLED |
2146 | typedef struct kmp_taskgroup { |
2147 | std::atomic<kmp_int32> count; // number of allocated and incomplete tasks |
2148 | std::atomic<kmp_int32> |
2149 | cancel_request; // request for cancellation of this taskgroup |
2150 | struct kmp_taskgroup *parent; // parent taskgroup |
2151 | #if OMP_50_ENABLED |
2152 | // Block of data to perform task reduction |
2153 | void *reduce_data; // reduction related info |
2154 | kmp_int32 reduce_num_data; // number of data items to reduce |
2155 | #endif |
2156 | } kmp_taskgroup_t; |
2157 | |
2158 | // forward declarations |
2159 | typedef union kmp_depnode kmp_depnode_t; |
2160 | typedef struct kmp_depnode_list kmp_depnode_list_t; |
2161 | typedef struct kmp_dephash_entry kmp_dephash_entry_t; |
2162 | |
2163 | typedef struct kmp_depend_info { |
2164 | kmp_intptr_t base_addr; |
2165 | size_t len; |
2166 | struct { |
2167 | bool in : 1; |
2168 | bool out : 1; |
2169 | } flags; |
2170 | } kmp_depend_info_t; |
2171 | |
2172 | struct kmp_depnode_list { |
2173 | kmp_depnode_t *node; |
2174 | kmp_depnode_list_t *next; |
2175 | }; |
2176 | |
2177 | typedef struct kmp_base_depnode { |
2178 | kmp_depnode_list_t *successors; |
2179 | kmp_task_t *task; |
2180 | |
2181 | kmp_lock_t lock; |
2182 | |
2183 | #if KMP_SUPPORT_GRAPH_OUTPUT |
2184 | kmp_uint32 id; |
2185 | #endif |
2186 | |
2187 | std::atomic<kmp_int32> npredecessors; |
2188 | std::atomic<kmp_int32> nrefs; |
2189 | } kmp_base_depnode_t; |
2190 | |
2191 | union KMP_ALIGN_CACHE kmp_depnode { |
2192 | double dn_align; /* use worst case alignment */ |
2193 | char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)]; |
2194 | kmp_base_depnode_t dn; |
2195 | }; |
2196 | |
2197 | struct kmp_dephash_entry { |
2198 | kmp_intptr_t addr; |
2199 | kmp_depnode_t *last_out; |
2200 | kmp_depnode_list_t *last_ins; |
2201 | kmp_dephash_entry_t *next_in_bucket; |
2202 | }; |
2203 | |
2204 | typedef struct kmp_dephash { |
2205 | kmp_dephash_entry_t **buckets; |
2206 | size_t size; |
2207 | #ifdef KMP_DEBUG |
2208 | kmp_uint32 nelements; |
2209 | kmp_uint32 nconflicts; |
2210 | #endif |
2211 | } kmp_dephash_t; |
2212 | |
2213 | #endif |
2214 | |
2215 | #ifdef BUILD_TIED_TASK_STACK |
2216 | |
2217 | /* Tied Task stack definitions */ |
2218 | typedef struct kmp_stack_block { |
2219 | kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE]; |
2220 | struct kmp_stack_block *sb_next; |
2221 | struct kmp_stack_block *sb_prev; |
2222 | } kmp_stack_block_t; |
2223 | |
2224 | typedef struct kmp_task_stack { |
2225 | kmp_stack_block_t ts_first_block; // first block of stack entries |
2226 | kmp_taskdata_t **ts_top; // pointer to the top of stack |
2227 | kmp_int32 ts_entries; // number of entries on the stack |
2228 | } kmp_task_stack_t; |
2229 | |
2230 | #endif // BUILD_TIED_TASK_STACK |
2231 | |
2232 | typedef struct kmp_tasking_flags { /* Total struct must be exactly 32 bits */ |
2233 | /* Compiler flags */ /* Total compiler flags must be 16 bits */ |
2234 | unsigned tiedness : 1; /* task is either tied (1) or untied (0) */ |
2235 | unsigned final : 1; /* task is final(1) so execute immediately */ |
2236 | unsigned merged_if0 : 1; /* no __kmpc_task_{begin/complete}_if0 calls in if0 |
2237 | code path */ |
2238 | #if OMP_40_ENABLED |
2239 | unsigned destructors_thunk : 1; /* set if the compiler creates a thunk to |
2240 | invoke destructors from the runtime */ |
2241 | #if OMP_45_ENABLED |
2242 | unsigned proxy : 1; /* task is a proxy task (it will be executed outside the |
2243 | context of the RTL) */ |
2244 | unsigned priority_specified : 1; /* set if the compiler provides priority |
2245 | setting for the task */ |
2246 | unsigned reserved : 10; /* reserved for compiler use */ |
2247 | #else |
2248 | unsigned reserved : 12; /* reserved for compiler use */ |
2249 | #endif |
2250 | #else // OMP_40_ENABLED |
2251 | unsigned reserved : 13; /* reserved for compiler use */ |
2252 | #endif // OMP_40_ENABLED |
2253 | |
2254 | /* Library flags */ /* Total library flags must be 16 bits */ |
2255 | unsigned tasktype : 1; /* task is either explicit(1) or implicit (0) */ |
2256 | unsigned task_serial : 1; // task is executed immediately (1) or deferred (0) |
2257 | unsigned tasking_ser : 1; // all tasks in team are either executed immediately |
2258 | // (1) or may be deferred (0) |
2259 | unsigned team_serial : 1; // entire team is serial (1) [1 thread] or parallel |
2260 | // (0) [>= 2 threads] |
2261 | /* If either team_serial or tasking_ser is set, task team may be NULL */ |
2262 | /* Task State Flags: */ |
2263 | unsigned started : 1; /* 1==started, 0==not started */ |
2264 | unsigned executing : 1; /* 1==executing, 0==not executing */ |
2265 | unsigned complete : 1; /* 1==complete, 0==not complete */ |
2266 | unsigned freed : 1; /* 1==freed, 0==allocated */ |
2267 | unsigned native : 1; /* 1==gcc-compiled task, 0==intel */ |
2268 | unsigned reserved31 : 7; /* reserved for library use */ |
2269 | |
2270 | } kmp_tasking_flags_t; |
2271 | |
2272 | struct kmp_taskdata { /* aligned during dynamic allocation */ |
2273 | kmp_int32 td_task_id; /* id, assigned by debugger */ |
2274 | kmp_tasking_flags_t td_flags; /* task flags */ |
2275 | kmp_team_t *td_team; /* team for this task */ |
2276 | kmp_info_p *td_alloc_thread; /* thread that allocated data structures */ |
2277 | /* Currently not used except for perhaps IDB */ |
2278 | kmp_taskdata_t *td_parent; /* parent task */ |
2279 | kmp_int32 td_level; /* task nesting level */ |
2280 | std::atomic<kmp_int32> td_untied_count; // untied task active parts counter |
2281 | ident_t *td_ident; /* task identifier */ |
2282 | // Taskwait data. |
2283 | ident_t *td_taskwait_ident; |
2284 | kmp_uint32 td_taskwait_counter; |
2285 | kmp_int32 td_taskwait_thread; /* gtid + 1 of thread encountered taskwait */ |
2286 | KMP_ALIGN_CACHE kmp_internal_control_t |
2287 | td_icvs; /* Internal control variables for the task */ |
2288 | KMP_ALIGN_CACHE std::atomic<kmp_int32> |
2289 | td_allocated_child_tasks; /* Child tasks (+ current task) not yet |
2290 | deallocated */ |
2291 | std::atomic<kmp_int32> |
2292 | td_incomplete_child_tasks; /* Child tasks not yet complete */ |
2293 | #if OMP_40_ENABLED |
2294 | kmp_taskgroup_t |
2295 | *td_taskgroup; // Each task keeps pointer to its current taskgroup |
2296 | kmp_dephash_t |
2297 | *td_dephash; // Dependencies for children tasks are tracked from here |
2298 | kmp_depnode_t |
2299 | *td_depnode; // Pointer to graph node if this task has dependencies |
2300 | #endif // OMP_40_ENABLED |
2301 | #if OMP_45_ENABLED |
2302 | kmp_task_team_t *td_task_team; |
2303 | kmp_int32 td_size_alloc; // The size of task structure, including shareds etc. |
2304 | #if defined(KMP_GOMP_COMPAT) |
2305 | // 4 or 8 byte integers for the loop bounds in GOMP_taskloop |
2306 | kmp_int32 td_size_loop_bounds; |
2307 | #endif |
2308 | #endif // OMP_45_ENABLED |
2309 | kmp_taskdata_t *td_last_tied; // keep tied task for task scheduling constraint |
2310 | #if defined(KMP_GOMP_COMPAT) && OMP_45_ENABLED |
2311 | // GOMP sends in a copy function for copy constructors |
2312 | void (*td_copy_func)(void *, void *); |
2313 | #endif |
2314 | #if OMPT_SUPPORT |
2315 | ompt_task_info_t ompt_task_info; |
2316 | #endif |
2317 | }; // struct kmp_taskdata |
2318 | |
2319 | // Make sure padding above worked |
2320 | KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0); |
2321 | |
2322 | // Data for task team but per thread |
2323 | typedef struct kmp_base_thread_data { |
2324 | kmp_info_p *td_thr; // Pointer back to thread info |
2325 | // Used only in __kmp_execute_tasks_template, maybe not avail until task is |
2326 | // queued? |
2327 | kmp_bootstrap_lock_t td_deque_lock; // Lock for accessing deque |
2328 | kmp_taskdata_t * |
2329 | *td_deque; // Deque of tasks encountered by td_thr, dynamically allocated |
2330 | kmp_int32 td_deque_size; // Size of deque |
2331 | kmp_uint32 td_deque_head; // Head of deque (will wrap) |
2332 | kmp_uint32 td_deque_tail; // Tail of deque (will wrap) |
2333 | kmp_int32 td_deque_ntasks; // Number of tasks in deque |
2334 | // GEH: shouldn't this be volatile since used in while-spin? |
2335 | kmp_int32 td_deque_last_stolen; // Thread number of last successful steal |
2336 | #ifdef BUILD_TIED_TASK_STACK |
2337 | kmp_task_stack_t td_susp_tied_tasks; // Stack of suspended tied tasks for task |
2338 | // scheduling constraint |
2339 | #endif // BUILD_TIED_TASK_STACK |
2340 | } kmp_base_thread_data_t; |
2341 | |
2342 | #define TASK_DEQUE_BITS 8 // Used solely to define INITIAL_TASK_DEQUE_SIZE |
2343 | #define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS) |
2344 | 
2345 | #define TASK_DEQUE_SIZE(td) ((td).td_deque_size) |
2346 | #define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1) |
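Because the deque capacity stays a power of two, monotonically growing indices can be wrapped with the mask instead of a modulo; a standalone ring-buffer sketch of that indexing (no locking or bounds checks, unlike the runtime's deque):

#include <cstdint>

static const uint32_t kSize = 1u << 8;   // INITIAL_TASK_DEQUE_SIZE
static const uint32_t kMask = kSize - 1; // TASK_DEQUE_MASK analogue

static void *slots[kSize];
static uint32_t head = 0, tail = 0; // indices wrap via the mask, as noted above

static void push_tail(void *task) { slots[tail++ & kMask] = task; }
static void *pop_head() { return slots[head++ & kMask]; }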
2347 | |
2348 | typedef union KMP_ALIGN_CACHE kmp_thread_data { |
2349 | kmp_base_thread_data_t td; |
2350 | double td_align; /* use worst case alignment */ |
2351 | char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)]; |
2352 | } kmp_thread_data_t; |
2353 | |
2354 | // Data for task teams which are used when tasking is enabled for the team |
2355 | typedef struct kmp_base_task_team { |
2356 | kmp_bootstrap_lock_t |
2357 | tt_threads_lock; /* Lock used to allocate per-thread part of task team */ |
2358 | /* must be bootstrap lock since used at library shutdown*/ |
2359 | kmp_task_team_t *tt_next; /* For linking the task team free list */ |
2360 | kmp_thread_data_t |
2361 | *tt_threads_data; /* Array of per-thread structures for task team */ |
2362 | /* Data survives task team deallocation */ |
2363 | kmp_int32 tt_found_tasks; /* Have we found tasks and queued them while |
2364 | executing this team? */ |
2365 | /* TRUE means tt_threads_data is set up and initialized */ |
2366 | kmp_int32 tt_nproc; /* #threads in team */ |
2367 | kmp_int32 |
2368 | tt_max_threads; /* number of entries allocated for threads_data array */ |
2369 | #if OMP_45_ENABLED |
2370 | kmp_int32 |
2371 | tt_found_proxy_tasks; /* Have we found proxy tasks since last barrier */ |
2372 | #endif |
2373 | kmp_int32 tt_untied_task_encountered; |
2374 | |
2375 | KMP_ALIGN_CACHE |
2376 | std::atomic<kmp_int32> tt_unfinished_threads; /* #threads still active */ |
2377 | 
2378 | KMP_ALIGN_CACHE |
2379 | volatile kmp_uint32 |
2380 | tt_active; /* is the team still actively executing tasks */ |
2381 | } kmp_base_task_team_t; |
2382 | |
2383 | union KMP_ALIGN_CACHE kmp_task_team { |
2384 | kmp_base_task_team_t tt; |
2385 | double tt_align; /* use worst case alignment */ |
2386 | char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)]; |
2387 | }; |
2388 | |
2389 | #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5) |
2390 | // Free lists keep same-size free memory slots for fast memory allocation |
2391 | // routines |
2392 | typedef struct kmp_free_list { |
2393 | void *th_free_list_self; // Self-allocated tasks free list |
2394 | void *th_free_list_sync; // Self-allocated tasks stolen/returned by other |
2395 | // threads |
2396 | void *th_free_list_other; // Non-self free list (to be returned to owner's |
2397 | // sync list) |
2398 | } kmp_free_list_t; |
2399 | #endif |
2400 | #if KMP_NESTED_HOT_TEAMS |
2401 | // Hot teams array keeps hot teams and their sizes for given thread. Hot teams |
2402 | // are not put in teams pool, and they don't put threads in threads pool. |
2403 | typedef struct kmp_hot_team_ptr { |
2404 | kmp_team_p *hot_team; // pointer to hot_team of given nesting level |
2405 | kmp_int32 hot_team_nth; // number of threads allocated for the hot_team |
2406 | } kmp_hot_team_ptr_t; |
2407 | #endif |
2408 | #if OMP_40_ENABLED |
2409 | typedef struct kmp_teams_size { |
2410 | kmp_int32 nteams; // number of teams in a league |
2411 | kmp_int32 nth; // number of threads in each team of the league |
2412 | } kmp_teams_size_t; |
2413 | #endif |
2414 | |
2415 | // OpenMP thread data structures |
2416 | |
2417 | typedef struct KMP_ALIGN_CACHE kmp_base_info { |
2418 | /* Start with the readonly data which is cache aligned and padded. This is |
2419 | written before the thread starts working by the master. Uber masters may |
2420 | update themselves later. Usage does not consider serialized regions. */ |
2421 | kmp_desc_t th_info; |
2422 | kmp_team_p *th_team; /* team we belong to */ |
2423 | kmp_root_p *th_root; /* pointer to root of task hierarchy */ |
2424 | kmp_info_p *th_next_pool; /* next available thread in the pool */ |
2425 | kmp_disp_t *th_dispatch; /* thread's dispatch data */ |
2426 | int th_in_pool; /* in thread pool (32 bits for TCR/TCW) */ |
2427 | |
2428 | /* The following are cached from the team info structure */ |
2429 | /* TODO use these in more places as determined to be needed via profiling */ |
2430 | int th_team_nproc; /* number of threads in a team */ |
2431 | kmp_info_p *th_team_master; /* the team's master thread */ |
2432 | int th_team_serialized; /* team is serialized */ |
2433 | #if OMP_40_ENABLED |
2434 | microtask_t th_teams_microtask; /* save entry address for teams construct */ |
2435 | int th_teams_level; /* save initial level of teams construct */ |
2436 | /* it is 0 on device but may be any on host */ |
2437 | #endif |
2438 | |
2439 | /* The blocktime info is copied from the team struct to the thread struct */ |
2440 | /* at the start of a barrier, and the values stored in the team are used */ |
2441 | /* at points in the code where the team struct is no longer guaranteed */ |
2442 | /* to exist (from the POV of worker threads). */ |
2443 | #if KMP_USE_MONITOR |
2444 | int th_team_bt_intervals; |
2445 | int th_team_bt_set; |
2446 | #else |
2447 | kmp_uint64 th_team_bt_intervals; |
2448 | #endif |
2449 | |
2450 | #if KMP_AFFINITY_SUPPORTED |
2451 | kmp_affin_mask_t *th_affin_mask; /* thread's current affinity mask */ |
2452 | #endif |
2453 | #if OMP_50_ENABLED |
2454 | void *const *th_def_allocator; /* per implicit task default allocator */ |
2455 | #endif |
2456 | /* The data set by the master at reinit, then R/W by the worker */ |
2457 | KMP_ALIGN_CACHE int |
2458 | th_set_nproc; /* if > 0, then only use this request for the next fork */ |
2459 | #if KMP_NESTED_HOT_TEAMS |
2460 | kmp_hot_team_ptr_t *th_hot_teams; /* array of hot teams */ |
2461 | #endif |
2462 | #if OMP_40_ENABLED |
2463 | kmp_proc_bind_t |
2464 | th_set_proc_bind; /* if != proc_bind_default, use request for next fork */ |
2465 | kmp_teams_size_t |
2466 | th_teams_size; /* number of teams/threads in teams construct */ |
2467 | #if KMP_AFFINITY_SUPPORTED |
2468 | int th_current_place; /* place currently bound to */ |
2469 | int th_new_place; /* place to bind to in par reg */ |
2470 | int th_first_place; /* first place in partition */ |
2471 | int th_last_place; /* last place in partition */ |
2472 | #endif |
2473 | #endif |
2474 | #if USE_ITT_BUILD |
2475 | kmp_uint64 th_bar_arrive_time; /* arrival to barrier timestamp */ |
2476 | kmp_uint64 th_bar_min_time; /* minimum arrival time at the barrier */ |
2477 | kmp_uint64 th_frame_time; /* frame timestamp */ |
2478 | #endif /* USE_ITT_BUILD */ |
2479 | kmp_local_t th_local; |
2480 | struct private_common *th_pri_head; |
2481 | |
2482 | /* Now the data only used by the worker (after initial allocation) */ |
2483 | /* TODO the first serial team should actually be stored in the info_t |
2484 | structure. this will help reduce initial allocation overhead */ |
2485 | KMP_ALIGN_CACHE kmp_team_p |
2486 | *th_serial_team; /*serialized team held in reserve*/ |
2487 | |
2488 | #if OMPT_SUPPORT |
2489 | ompt_thread_info_t ompt_thread_info; |
2490 | #endif |
2491 | |
2492 | /* The following are also read by the master during reinit */ |
2493 | struct common_table *th_pri_common; |
2494 | |
2495 | volatile kmp_uint32 th_spin_here; /* thread-local location for spinning */ |
2496 | /* while awaiting queuing lock acquire */ |
2497 | |
2498 | volatile void *th_sleep_loc; // this points at a kmp_flag<T> |
2499 | |
2500 | ident_t *th_ident; |
2501 | unsigned th_x; // Random number generator data |
2502 | unsigned th_a; // Random number generator data |
2503 | |
2504 | /* Tasking-related data for the thread */ |
2505 | kmp_task_team_t *th_task_team; // Task team struct |
2506 | kmp_taskdata_t *th_current_task; // Innermost Task being executed |
2507 | kmp_uint8 th_task_state; // alternating 0/1 for task team identification |
2508 | kmp_uint8 *th_task_state_memo_stack; // Stack holding memos of th_task_state |
2509 | // at nested levels |
2510 | kmp_uint32 th_task_state_top; // Top element of th_task_state_memo_stack |
2511 | kmp_uint32 th_task_state_stack_sz; // Size of th_task_state_memo_stack |
2512 | kmp_uint32 th_reap_state; // Non-zero indicates thread is not |
2513 | // tasking, thus safe to reap |
2514 | |
2515 | /* More stuff for keeping track of active/sleeping threads (this part is |
2516 | written by the worker thread) */ |
2517 | kmp_uint8 th_active_in_pool; // included in count of #active threads in pool |
2518 | int th_active; // ! sleeping; 32 bits for TCR/TCW |
2519 | struct cons_header *th_cons; // used for consistency check |
2520 | #if KMP_USE_HIER_SCHED |
2521 | // used for hierarchical scheduling |
2522 | kmp_hier_private_bdata_t *th_hier_bar_data; |
2523 | #endif |
2524 | |
2525 | /* Add the synchronizing data which is cache aligned and padded. */ |
2526 | KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier]; |
2527 | |
2528 | KMP_ALIGN_CACHE volatile kmp_int32 |
2529 | th_next_waiting; /* gtid+1 of next thread on lock wait queue, 0 if none */ |
2530 | |
2531 | #if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5) |
2532 | #define NUM_LISTS 4 |
2533 | kmp_free_list_t th_free_lists[NUM_LISTS]; // Free lists for fast memory |
2534 | // allocation routines |
2535 | #endif |
2536 | |
2537 | #if KMP_OS_WINDOWS |
2538 | kmp_win32_cond_t th_suspend_cv; |
2539 | kmp_win32_mutex_t th_suspend_mx; |
2540 | int th_suspend_init; |
2541 | #endif |
2542 | #if KMP_OS_UNIX |
2543 | kmp_cond_align_t th_suspend_cv; |
2544 | kmp_mutex_align_t th_suspend_mx; |
2545 | int th_suspend_init_count; |
2546 | #endif |
2547 | |
2548 | #if USE_ITT_BUILD |
2549 | kmp_itt_mark_t th_itt_mark_single; |
2550 | // alignment ??? |
2551 | #endif /* USE_ITT_BUILD */ |
2552 | #if KMP_STATS_ENABLED |
2553 | kmp_stats_list *th_stats; |
2554 | #endif |
2555 | #if KMP_OS_UNIX |
2556 | std::atomic<bool> th_blocking; |
2557 | #endif |
2558 | } kmp_base_info_t; |
2559 | |
2560 | typedef union KMP_ALIGN_CACHE kmp_info { |
2561 | double th_align; /* use worst case alignment */ |
2562 | char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)]; |
2563 | kmp_base_info_t th; |
2564 | } kmp_info_t; |
2565 | |
2566 | // OpenMP thread team data structures |
2567 | |
2568 | typedef struct kmp_base_data { volatile kmp_uint32 t_value; } kmp_base_data_t; |
2569 | |
2570 | typedef union KMP_ALIGN_CACHE kmp_sleep_team { |
2571 | double dt_align; /* use worst case alignment */ |
2572 | char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)]; |
2573 | kmp_base_data_t dt; |
2574 | } kmp_sleep_team_t; |
2575 | |
2576 | typedef union KMP_ALIGN_CACHE kmp_ordered_team { |
2577 | double dt_align; /* use worst case alignment */ |
2578 | char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)]; |
2579 | kmp_base_data_t dt; |
2580 | } kmp_ordered_team_t; |
2581 | |
2582 | typedef int (*launch_t)(int gtid); |
2583 | |
2584 | /* Minimum number of ARGV entries to malloc if necessary */ |
2585 | #define KMP_MIN_MALLOC_ARGV_ENTRIES 100 |
2586 | |
2587 | // Set up how many argv pointers will fit in cache lines containing |
2588 | // t_inline_argv. Historically, we have supported at least 96 bytes. Using a |
2589 | // larger value for more space between the master write/worker read section and |
2590 | // read/write by all section seems to buy more performance on EPCC PARALLEL. |
2591 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
2592 | #define KMP_INLINE_ARGV_BYTES \ |
2593 | (4 * CACHE_LINE - \ |
2594 | ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \ |
2595 | sizeof(kmp_int16) + sizeof(kmp_uint32)) % \ |
2596 | CACHE_LINE)) |
2597 | #else |
2598 | #define KMP_INLINE_ARGV_BYTES \ |
2599 | (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE)) |
2600 | #endif |
2601 | #define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP) |
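A worked check of the entry count, assuming x86-64 sizes (8-byte pointers, 4-byte int and kmp_uint32, 2-byte kmp_int16, 1-byte kmp_int8):

  header bytes            = 3*8 + 2*4 + 2*1 + 2 + 4 = 40
  KMP_INLINE_ARGV_BYTES   = 4*64 - (40 % 64)        = 216
  KMP_INLINE_ARGV_ENTRIES = 216 / 8                 = 27 argv slots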
2602 | |
2603 | typedef struct KMP_ALIGN_CACHE kmp_base_team { |
2604 | // Synchronization Data |
2605 | // --------------------------------------------------------------------------- |
2606 | KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered; |
2607 | kmp_balign_team_t t_bar[bs_last_barrier]; |
2608 | std::atomic<int> t_construct; // count of single directive encountered by team |
2609 | char pad[sizeof(kmp_lock_t)]; // padding to maintain performance on big iron |
2610 | |
2611 | // Master only |
2612 | // --------------------------------------------------------------------------- |
2613 | KMP_ALIGN_CACHE int t_master_tid; // tid of master in parent team |
2614 | int t_master_this_cons; // "this_construct" single counter of master in parent |
2615 | // team |
2616 | ident_t *t_ident; // if volatile, have to change too much other crud to |
2617 | // volatile too |
2618 | kmp_team_p *t_parent; // parent team |
2619 | kmp_team_p *t_next_pool; // next free team in the team pool |
2620 | kmp_disp_t *t_dispatch; // thread's dispatch data |
2621 | kmp_task_team_t *t_task_team[2]; // Task team struct; switch between 2 |
2622 | #if OMP_40_ENABLED |
2623 | kmp_proc_bind_t t_proc_bind; // bind type for par region |
2624 | #endif // OMP_40_ENABLED |
2625 | #if USE_ITT_BUILD |
2626 | kmp_uint64 t_region_time; // region begin timestamp |
2627 | #endif /* USE_ITT_BUILD */ |
2628 | |
2629 | // Master write, workers read |
2630 | // -------------------------------------------------------------------------- |
2631 | KMP_ALIGN_CACHE void **t_argv; |
2632 | int t_argc; |
2633 | int t_nproc; // number of threads in team |
2634 | microtask_t t_pkfn; |
2635 | launch_t t_invoke; // procedure to launch the microtask |
2636 | |
2637 | #if OMPT_SUPPORT |
2638 | ompt_team_info_t ompt_team_info; |
2639 | ompt_lw_taskteam_t *ompt_serialized_team_info; |
2640 | #endif |
2641 | |
2642 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
2643 | kmp_int8 t_fp_control_saved; |
2644 | kmp_int8 t_pad2b; |
2645 | kmp_int16 t_x87_fpu_control_word; // FP control regs |
2646 | kmp_uint32 t_mxcsr; |
2647 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
2648 | |
2649 | void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES]; |
2650 | |
2651 | KMP_ALIGN_CACHE kmp_info_t **t_threads; |
2652 | kmp_taskdata_t |
2653 | *t_implicit_task_taskdata; // Taskdata for the thread's implicit task |
2654 | int t_level; // nested parallel level |
2655 | |
2656 | KMP_ALIGN_CACHE int t_max_argc; |
2657 | int t_max_nproc; // max threads this team can handle (dynamically expandable) |
2658 | int t_serialized; // levels deep of serialized teams |
2659 | dispatch_shared_info_t *t_disp_buffer; // buffers for dispatch system |
2660 | int t_id; // team's id, assigned by debugger. |
2661 | int t_active_level; // nested active parallel level |
2662 | kmp_r_sched_t t_sched; // run-time schedule for the team |
2663 | #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED |
2664 | int t_first_place; // first & last place in parent thread's partition. |
2665 | int t_last_place; // Restore these values to master after par region. |
2666 | #endif // OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED |
2667 | int t_size_changed; // team size was changed?: 0: no, 1: yes, -1: changed via |
2668 | // omp_set_num_threads() call |
2669 | #if OMP_50_ENABLED |
2670 | void *const *t_def_allocator; /* per implicit task default allocator */ |
2671 | #endif |
2672 | |
2673 | // Read/write by workers as well |
2674 | #if (KMP_ARCH_X86 || KMP_ARCH_X86_64) |
2675 | // Using CACHE_LINE=64 reduces memory footprint, but causes a big perf |
2676 | // regression of epcc 'parallel' and 'barrier' on fxe256lin01. This extra |
2677 | // padding serves to fix the performance of epcc 'parallel' and 'barrier' when |
2678 | // CACHE_LINE=64. TODO: investigate more and get rid of this padding. |
2679 | char dummy_padding[1024]; |
2680 | #endif |
2681 | // Internal control stack for additional nested teams. |
2682 | KMP_ALIGN_CACHE__attribute__((aligned(64))) kmp_internal_control_t *t_control_stack_top; |
2683 | // for SERIALIZED teams nested 2 or more levels deep |
2684 | #if OMP_40_ENABLED(50 >= 40) |
2685 | // typed flag to store request state of cancellation |
2686 | std::atomic<kmp_int32> t_cancel_request; |
2687 | #endif |
2688 | int t_master_active; // save on fork, restore on join |
2689 | kmp_taskq_t t_taskq; // this team's task queue |
2690 | void *t_copypriv_data; // team specific pointer to copyprivate data array |
2691 | #if KMP_OS_WINDOWS0 |
2692 | std::atomic<kmp_uint32> t_copyin_counter; |
2693 | #endif |
2694 | #if USE_ITT_BUILD1 |
2695 | void *t_stack_id; // team specific stack stitching id (for ittnotify) |
2696 | #endif /* USE_ITT_BUILD */ |
2697 | } kmp_base_team_t; |

union KMP_ALIGN_CACHE kmp_team {
  kmp_base_team_t t;
  double t_align; /* use worst case alignment */
  char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
};
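
// Editorial note: the union above is the runtime's pad-to-cache-line idiom.
// KMP_PAD(type, sz) evaluates to sizeof(type) rounded up to a multiple of sz,
// so consecutive kmp_team_t objects never share a cache line, and the double
// member forces worst-case scalar alignment. A minimal illustrative sketch
// (not part of the original header; PadTo is a hypothetical name):
#if 0
#include <cstddef>
constexpr std::size_t PadTo(std::size_t s, std::size_t line) {
  // Same arithmetic as KMP_PAD: round s up to the next multiple of line.
  return s + (line - ((s - 1) % line) - 1);
}
static_assert(PadTo(65, 64) == 128, "65 bytes occupy two full cache lines");
static_assert(PadTo(64, 64) == 64, "an exact multiple is left unchanged");
#endif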

typedef union KMP_ALIGN_CACHE kmp_time_global {
  double dt_align; /* use worst case alignment */
  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
  kmp_base_data_t dt;
} kmp_time_global_t;

typedef struct kmp_base_global {
  /* cache-aligned */
  kmp_time_global_t g_time;

  /* non cache-aligned */
  volatile int g_abort;
  volatile int g_done;

  int g_dynamic;
  enum dynamic_mode g_dynamic_mode;
} kmp_base_global_t;

typedef union KMP_ALIGN_CACHE kmp_global {
  kmp_base_global_t g;
  double g_align; /* use worst case alignment */
  char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
} kmp_global_t;

typedef struct kmp_base_root {
  // TODO: GEH - combine r_active with r_in_parallel then r_active ==
  // (r_in_parallel >= 0)
  // TODO: GEH - then replace r_active with t_active_levels if we can to reduce
  // the synch overhead of keeping r_active
  volatile int r_active; /* TRUE if some region in a nest has > 1 thread */
  // GEH: This is misnamed, should be r_in_parallel
  volatile int r_nested; // TODO: GEH - This is unused, just remove it entirely.
  // keeps a count of active parallel regions per root
  std::atomic<int> r_in_parallel;
  // GEH: This is misnamed, should be r_active_levels
  kmp_team_t *r_root_team;
  kmp_team_t *r_hot_team;
  kmp_info_t *r_uber_thread;
  kmp_lock_t r_begin_lock;
  volatile int r_begin;
  int r_blocktime; /* blocktime for this root and descendants */
  int r_cg_nthreads; // count of active threads in a contention group
} kmp_base_root_t;

typedef union KMP_ALIGN_CACHE kmp_root {
  kmp_base_root_t r;
  double r_align; /* use worst case alignment */
  char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
} kmp_root_t;

struct fortran_inx_info {
  kmp_int32 data;
};

/* ------------------------------------------------------------------------ */

extern int __kmp_settings;
extern int __kmp_duplicate_library_ok;
#if USE_ITT_BUILD
extern int __kmp_forkjoin_frames;
extern int __kmp_forkjoin_frames_mode;
#endif
extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
extern int __kmp_determ_red;

#ifdef KMP_DEBUG
extern int kmp_a_debug;
extern int kmp_b_debug;
extern int kmp_c_debug;
extern int kmp_d_debug;
extern int kmp_e_debug;
extern int kmp_f_debug;
#endif /* KMP_DEBUG */

/* For debug information logging using rotating buffer */
#define KMP_DEBUG_BUF_LINES_INIT 512
#define KMP_DEBUG_BUF_LINES_MIN 1

#define KMP_DEBUG_BUF_CHARS_INIT 128
#define KMP_DEBUG_BUF_CHARS_MIN 2

extern int
    __kmp_debug_buf; /* TRUE means use buffer, FALSE means print to stderr */
extern int __kmp_debug_buf_lines; /* How many lines of debug stored in buffer */
extern int
    __kmp_debug_buf_chars; /* How many characters allowed per line in buffer */
extern int __kmp_debug_buf_atomic; /* TRUE means use atomic update of buffer
                                      entry pointer */

extern char *__kmp_debug_buffer; /* Debug buffer itself */
extern std::atomic<int> __kmp_debug_count; /* Counter for number of lines
                                              printed in buffer so far */
extern int __kmp_debug_buf_warn_chars; /* Keep track of char increase
                                          recommended in warnings */
/* end rotating debug buffer */
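
// Editorial note: the variables above describe a fixed-size rotating log:
// writers claim a line slot by bumping __kmp_debug_count and wrap modulo
// __kmp_debug_buf_lines. A hedged sketch of the slot computation (the real
// logic lives elsewhere in the runtime; this is an illustration only):
#if 0
static char *__kmp_debug_buf_slot_sketch() {
  // Atomic post-increment gives each writer a unique sequence number.
  int seq = __kmp_debug_count.fetch_add(1, std::memory_order_relaxed);
  // Map the sequence number onto one of the fixed-width line slots.
  return __kmp_debug_buffer +
         (seq % __kmp_debug_buf_lines) * __kmp_debug_buf_chars;
}
#endif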

#ifdef KMP_DEBUG
extern int __kmp_par_range; /* +1 => only go par for constructs in range */

#define KMP_PAR_RANGE_ROUTINE_LEN 1024
extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
#define KMP_PAR_RANGE_FILENAME_LEN 1024
extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
extern int __kmp_par_range_lb;
extern int __kmp_par_range_ub;
#endif

/* For printing out dynamic storage map for threads and teams */
extern int
    __kmp_storage_map; /* True means print storage map for threads and teams */
extern int __kmp_storage_map_verbose; /* True means storage map includes
                                         placement info */
extern int __kmp_storage_map_verbose_specified;

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern kmp_cpuinfo_t __kmp_cpuinfo;
#endif

extern volatile int __kmp_init_serial;
extern volatile int __kmp_init_gtid;
extern volatile int __kmp_init_common;
extern volatile int __kmp_init_middle;
extern volatile int __kmp_init_parallel;
#if KMP_USE_MONITOR
extern volatile int __kmp_init_monitor;
#endif
extern volatile int __kmp_init_user_locks;
extern int __kmp_init_counter;
extern int __kmp_root_counter;
extern int __kmp_version;

/* list of addresses of allocated caches for commons */
extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;

/* Barrier algorithm types and options */
extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
extern kmp_uint32 __kmp_barrier_release_bb_dflt;
extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
extern char const *__kmp_barrier_type_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_name[bp_last_bar];

/* Global Locks */
extern kmp_bootstrap_lock_t __kmp_initz_lock; /* control initialization */
extern kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
extern kmp_bootstrap_lock_t __kmp_task_team_lock;
extern kmp_bootstrap_lock_t
    __kmp_exit_lock; /* exit() is not always thread-safe */
#if KMP_USE_MONITOR
extern kmp_bootstrap_lock_t
    __kmp_monitor_lock; /* control monitor thread creation */
#endif
extern kmp_bootstrap_lock_t
    __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and
                             __kmp_threads expansion to co-exist */

extern kmp_lock_t __kmp_global_lock; /* control OS/global access */
extern kmp_queuing_lock_t __kmp_dispatch_lock; /* control dispatch access */
extern kmp_lock_t __kmp_debug_lock; /* control I/O access for KMP_DEBUG */

/* used for yielding spin-waits */
extern unsigned int __kmp_init_wait; /* initial number of spin-tests */
extern unsigned int __kmp_next_wait; /* subsequent number of spin-tests */

extern enum library_type __kmp_library;

extern enum sched_type __kmp_sched; /* default runtime scheduling */
extern enum sched_type __kmp_static; /* default static scheduling method */
extern enum sched_type __kmp_guided; /* default guided scheduling method */
extern enum sched_type __kmp_auto; /* default auto scheduling method */
extern int __kmp_chunk; /* default runtime chunk size */

extern size_t __kmp_stksize; /* stack size per thread */
#if KMP_USE_MONITOR
extern size_t __kmp_monitor_stksize; /* stack size for monitor thread */
#endif
extern size_t __kmp_stkoffset; /* stack offset per thread */
extern int __kmp_stkpadding; /* Should we pad root thread(s) stack */

extern size_t
    __kmp_malloc_pool_incr; /* incremental size of pool for kmp_malloc() */
extern int __kmp_env_stksize; /* was KMP_STACKSIZE specified? */
extern int __kmp_env_blocktime; /* was KMP_BLOCKTIME specified? */
extern int __kmp_env_checks; /* was KMP_CHECKS specified? */
extern int __kmp_env_consistency_check; // was KMP_CONSISTENCY_CHECK specified?
extern int __kmp_generate_warnings; /* should we issue warnings? */
extern int __kmp_reserve_warn; /* have we issued reserve_threads warning? */

#ifdef DEBUG_SUSPEND
extern int __kmp_suspend_count; /* count inside __kmp_suspend_template() */
#endif

extern kmp_uint32 __kmp_yield_init;
extern kmp_uint32 __kmp_yield_next;

#if KMP_USE_MONITOR
extern kmp_uint32 __kmp_yielding_on;
#endif
extern kmp_uint32 __kmp_yield_cycle;
extern kmp_int32 __kmp_yield_on_count;
extern kmp_int32 __kmp_yield_off_count;

/* ------------------------------------------------------------------------- */
extern int __kmp_allThreadsSpecified;

extern size_t __kmp_align_alloc;
/* following data protected by initialization routines */
extern int __kmp_xproc; /* number of processors in the system */
extern int __kmp_avail_proc; /* number of processors available to the process */
extern size_t __kmp_sys_min_stksize; /* system-defined minimum stack size */
extern int __kmp_sys_max_nth; /* system-imposed maximum number of threads */
// maximum total number of concurrently-existing threads on device
extern int __kmp_max_nth;
// maximum total number of concurrently-existing threads in a contention group
extern int __kmp_cg_max_nth;
extern int __kmp_teams_max_nth; // max threads used in a teams construct
extern int __kmp_threads_capacity; /* capacity of the arrays __kmp_threads and
                                      __kmp_root */
extern int __kmp_dflt_team_nth; /* default number of threads in a parallel
                                   region a la OMP_NUM_THREADS */
extern int __kmp_dflt_team_nth_ub; /* upper bound on "" determined at serial
                                      initialization */
extern int __kmp_tp_capacity; /* capacity of __kmp_threads if threadprivate is
                                 used (fixed) */
extern int __kmp_tp_cached; /* whether threadprivate cache has been created
                               (__kmpc_threadprivate_cached()) */
extern int __kmp_dflt_nested; /* nested parallelism enabled by default a la
                                 OMP_NESTED */
extern int __kmp_dflt_blocktime; /* number of milliseconds to wait before
                                    blocking (env setting) */
#if KMP_USE_MONITOR
extern int
    __kmp_monitor_wakeups; /* number of times monitor wakes up per second */
extern int __kmp_bt_intervals; /* number of monitor timestamp intervals before
                                  blocking */
#endif
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt; /* whether blocktime has been forced to zero */
#endif /* KMP_ADJUST_BLOCKTIME */
#ifdef KMP_DFLT_NTH_CORES
extern int __kmp_ncores; /* Total number of cores for threads placement */
#endif
/* Number of milliseconds to delay on abort for Intel(R) VTune(TM) tools */
extern int __kmp_abort_delay;

extern int __kmp_need_register_atfork_specified;
extern int
    __kmp_need_register_atfork; /* At initialization, call pthread_atfork to
                                   install fork handler */
extern int __kmp_gtid_mode; /* Method of getting gtid, values:
                               0 - not set, will be set at runtime
                               1 - using stack search
                               2 - dynamic TLS (pthread_getspecific(Linux* OS/OS
                                   X*) or TlsGetValue(Windows* OS))
                               3 - static TLS (__declspec(thread) __kmp_gtid),
                                   Linux* OS .so only. */
extern int
    __kmp_adjust_gtid_mode; /* If true, adjust method based on #threads */
#ifdef KMP_TDATA_GTID
extern KMP_THREAD_LOCAL int __kmp_gtid;
#endif
extern int __kmp_tls_gtid_min; /* #threads below which use sp search for gtid */
extern int __kmp_foreign_tp; // If true, separate TP var for each foreign thread
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern int __kmp_inherit_fp_control; // copy fp creg(s) parent->workers at fork
extern kmp_int16 __kmp_init_x87_fpu_control_word; // init thread's FP ctrl reg
extern kmp_uint32 __kmp_init_mxcsr; /* init thread's mxcsr */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

extern int __kmp_dflt_max_active_levels; /* max_active_levels for nested
                                            parallelism enabled by default via
                                            OMP_MAX_ACTIVE_LEVELS */
extern int __kmp_dispatch_num_buffers; /* max possible dynamic loops in
                                          concurrent execution per team */
#if KMP_NESTED_HOT_TEAMS
extern int __kmp_hot_teams_mode;
extern int __kmp_hot_teams_max_level;
#endif

#if KMP_OS_LINUX
extern enum clock_function_type __kmp_clock_function;
extern int __kmp_clock_function_param;
#endif /* KMP_OS_LINUX */

#if KMP_MIC_SUPPORTED
extern enum mic_type __kmp_mic_type;
#endif

#ifdef USE_LOAD_BALANCE
extern double __kmp_load_balance_interval; // load balance algorithm interval
#endif /* USE_LOAD_BALANCE */

// OpenMP 3.1 - Nested num threads array
typedef struct kmp_nested_nthreads_t {
  int *nth;
  int size;
  int used;
} kmp_nested_nthreads_t;

extern kmp_nested_nthreads_t __kmp_nested_nth;

#if KMP_USE_ADAPTIVE_LOCKS

// Parameters for the speculative lock backoff system.
struct kmp_adaptive_backoff_params_t {
  // Number of soft retries before it counts as a hard retry.
  kmp_uint32 max_soft_retries;
  // Badness is a bit mask: 0, 1, 3, 7, 15, ... on each hard failure we move
  // one to the right.
  kmp_uint32 max_badness;
};

extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;

#if KMP_DEBUG_ADAPTIVE_LOCKS
extern const char *__kmp_speculative_statsfile;
#endif

#endif // KMP_USE_ADAPTIVE_LOCKS

#if OMP_40_ENABLED
extern int __kmp_display_env; /* TRUE or FALSE */
extern int __kmp_display_env_verbose; /* TRUE if OMP_DISPLAY_ENV=VERBOSE */
extern int __kmp_omp_cancellation; /* TRUE or FALSE */
#endif

/* ------------------------------------------------------------------------- */

/* the following are protected by the fork/join lock */
/* write: lock  read: anytime */
extern kmp_info_t **__kmp_threads; /* Descriptors for the threads */
/* read/write: lock */
extern volatile kmp_team_t *__kmp_team_pool;
extern volatile kmp_info_t *__kmp_thread_pool;
extern kmp_info_t *__kmp_thread_pool_insert_pt;

// total num threads reachable from some root thread including all root threads
extern volatile int __kmp_nth;
/* total number of threads reachable from some root thread including all root
   threads, and those in the thread pool */
extern volatile int __kmp_all_nth;
extern int __kmp_thread_pool_nth;
extern std::atomic<int> __kmp_thread_pool_active_nth;

extern kmp_root_t **__kmp_root; /* root of thread hierarchy */
/* end data protected by fork/join lock */
/* ------------------------------------------------------------------------- */

#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))

// AT: Which way is correct?
// AT: 1. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team -> t.t_nproc;
// AT: 2. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team_nproc;
#define __kmp_get_team_num_threads(gtid)                                       \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)

static inline bool KMP_UBER_GTID(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
          __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}

static inline int __kmp_tid_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}

static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  KMP_DEBUG_ASSERT(tid >= 0 && team);
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}

static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  KMP_DEBUG_ASSERT(thr);
  return thr->th.th_info.ds.ds_gtid;
}

static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid];
}

static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_team;
}
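
// Editorial note: the inline accessors above all key off the global thread id
// (gtid). A hedged usage sketch, assuming the caller already holds a valid,
// registered gtid (illustration only):
#if 0
void example_accessors(int gtid) {
  kmp_info_t *thr = __kmp_thread_from_gtid(gtid); // descriptor for this thread
  kmp_team_t *team = __kmp_team_from_gtid(gtid); // team it currently runs in
  int tid = __kmp_tid_from_gtid(gtid); // team-local id of this thread
  // Round-trip: a team-local id plus its team recovers the global id.
  KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, team) == gtid);
}
#endif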

/* ------------------------------------------------------------------------- */

extern kmp_global_t __kmp_global; /* global status */

extern kmp_info_t __kmp_monitor;
// For Debugging Support Library
extern std::atomic<kmp_uint32> __kmp_team_counter;
// For Debugging Support Library
extern std::atomic<kmp_uint32> __kmp_task_counter;

#if USE_DEBUGGER
#define _KMP_GEN_ID(counter)                                                   \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#else
#define _KMP_GEN_ID(counter) (~0)
#endif /* USE_DEBUGGER */

#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)

/* ------------------------------------------------------------------------ */

extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
                                         size_t size, char const *format, ...);

extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);

extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_fini(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);

extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);

extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);

extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);

extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);

#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif

extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);

extern void __kmp_set_num_threads(int new_nth, int gtid);

// Returns current thread (pointer to kmp_info_t). Current thread *must* be
// registered.
static inline kmp_info_t *__kmp_entry_thread() {
  int gtid = __kmp_entry_gtid();

  return __kmp_threads[gtid];
}

extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);

extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);

extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);

extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
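
// Editorial note: the KMP_SRC_LOC_DECL / KMP_SRC_LOC_CURR pair threads the
// caller's source location through the allocator. In the analyzed build the
// macro expansions show KMP_SRC_LOC_DECL appending
// ", char const *_file_, int _line_" to each signature and KMP_SRC_LOC_CURR
// appending the current file name and line at each call site; in other builds
// both presumably expand to nothing. A hedged usage sketch (illustration
// only):
#if 0
void example_tracked_alloc() {
  // In debug builds this records the __FILE__/__LINE__ of this call site.
  void *buf = __kmp_allocate(256);
  __kmp_free(buf);
}
#endif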

#if USE_FAST_MEMORY
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size)                                    \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr)                                         \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#endif

extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                                  size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                                   size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size)                                          \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize)                                 \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size)                                    \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr)                                             \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)

#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))

extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);

#if OMP_40_ENABLED
extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
                                 kmp_proc_bind_t proc_bind);
extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
                                 int num_threads);
#endif

extern void __kmp_yield(int cond);

extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int32 lb,
                                   kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint32 lb,
                                    kmp_uint32 ub, kmp_int32 st,
                                    kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int64 lb,
                                   kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint64 lb,
                                    kmp_uint64 ub, kmp_int64 st,
                                    kmp_int64 chunk);

extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int32 *p_lb,
                                  kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint32 *p_lb,
                                   kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int64 *p_lb,
                                  kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint64 *p_lb,
                                   kmp_uint64 *p_ub, kmp_int64 *p_st);

extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
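
// Editorial note: the dispatch_init/next/fini triple is the codegen target
// for dynamically scheduled loops. A hedged sketch of the canonical call
// sequence for a 32-bit signed loop (loc/gtid assumed already obtained; not
// normative codegen, just an illustration):
#if 0
void example_dynamic_loop(ident_t *loc, kmp_int32 gtid) {
  kmp_int32 lb = 0, ub = 999, st = 1, last;
  __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked, lb, ub, st, 8);
  // Each successful next() hands this thread one chunk [lb, ub].
  while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
    for (kmp_int32 i = lb; i <= ub; i += st) {
      /* loop body */
    }
  }
}
#endif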

#ifdef KMP_GOMP_COMPAT

extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int32 lb,
                                      kmp_int32 ub, kmp_int32 st,
                                      kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint32 lb,
                                       kmp_uint32 ub, kmp_int32 st,
                                       kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int64 lb,
                                      kmp_int64 ub, kmp_int64 st,
                                      kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint64 lb,
                                       kmp_uint64 ub, kmp_int64 st,
                                       kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);

#endif /* KMP_GOMP_COMPAT */

extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_yield_4(kmp_uint32 volatile *spinner,
                                     kmp_uint32 checker,
                                     kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
                                     void *obj);
extern void __kmp_wait_yield_4_ptr(void *spinner, kmp_uint32 checker,
                                   kmp_uint32 (*pred)(void *, kmp_uint32),
                                   void *obj);
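
// Editorial note: __kmp_wait_yield_4 spins until pred(*spinner, checker)
// holds, yielding according to the runtime's spin/yield settings. A hedged
// usage sketch with the equality predicate declared above (illustration
// only):
#if 0
volatile kmp_uint32 go_flag = 0;
void example_wait_for_release() {
  // Blocks (spin/yield) until another thread stores 1 into go_flag.
  __kmp_wait_yield_4(&go_flag, 1, __kmp_eq_4, NULL);
}
#endif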

class kmp_flag_32;
class kmp_flag_64;
class kmp_flag_oncore;
extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64 *flag,
                          int final_spin
#if USE_ITT_BUILD
                          ,
                          void *itt_sync_obj
#endif
                          );
extern void __kmp_release_64(kmp_flag_64 *flag);

extern void __kmp_infinite_loop(void);

extern void __kmp_cleanup(void);

#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif

extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);

extern void __kmp_check_stack_overlap(kmp_info_t *thr);

extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern void
    __kmp_initialize_system_tick(void); /* Initialize timer tick value */
#endif

extern void
    __kmp_runtime_initialize(void); /* machine specific initialization */
extern void __kmp_runtime_destroy(void);

#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(void);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(
    int gtid, int isa_root); /* set affinity according to KMP_AFFINITY */
#if OMP_40_ENABLED
extern void __kmp_affinity_set_place(int gtid);
#endif
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_OS_LINUX
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
#endif /* KMP_AFFINITY_SUPPORTED */

extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);

#if KMP_USE_FUTEX

extern int __kmp_futex_determine_capable(void);

#endif // KMP_USE_FUTEX

extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);

extern double __kmp_read_cpu_time(void);

extern int __kmp_read_system_info(struct kmp_sys_info *info);

#if KMP_USE_MONITOR
extern void __kmp_create_monitor(kmp_info_t *th);
#endif

extern void *__kmp_launch_thread(kmp_info_t *thr);

extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);

#if KMP_OS_WINDOWS
extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);
#endif

#if KMP_USE_MONITOR
extern void __kmp_reap_monitor(kmp_info_t *th);
#endif
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);

extern void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag);
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
extern void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag);
extern void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);

extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);

extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);

extern void __kmp_thread_sleep(int millis);

extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);

#if KMP_OS_UNIX
extern void __kmp_register_atfork(void);
#endif
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);

extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                         int tid);
#if OMP_40_ENABLED
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
                    ompt_data_t ompt_parallel_data,
#endif
                    kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
#else
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
                    ompt_id_t ompt_parallel_id,
#endif
                    kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
#endif // OMP_40_ENABLED
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *,
                            kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);

/* ------------------------------------------------------------------------ */

extern void __kmp_initialize_bget(kmp_info_t *th);
extern void __kmp_finalize_bget(kmp_info_t *th);

KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);

/* declarations for internal use */

extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                         size_t reduce_size, void *reduce_data,
                         void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);

/*!
 * Tell the fork call which compiler generated the fork call, and therefore how
 * to deal with the call.
 */
enum fork_context_e {
  fork_context_gnu, /**< Called from GNU generated code, so must not invoke the
                       microtask internally. */
  fork_context_intel, /**< Called from Intel generated code. */
  fork_context_last
};
extern int __kmp_fork_call(ident_t *loc, int gtid,
                           enum fork_context_e fork_context, kmp_int32 argc,
                           microtask_t microtask, launch_t invoker,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                           va_list *ap
#else
                           va_list ap
#endif
                           );

extern void __kmp_join_call(ident_t *loc, int gtid
#if OMPT_SUPPORT
                            ,
                            enum fork_context_e fork_context
#endif
#if OMP_40_ENABLED
                            ,
                            int exit_teams = 0
#endif
                            );

extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
                                          kmp_info_t *this_thr,
                                          kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
                                         kmp_info_t *this_thr,
                                         kmp_team_t *team);

// should never have been exported
KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
#if OMP_40_ENABLED
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
#endif
extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, int len);

/* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);

extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
                                              kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_tasking_flags_t *flags,
                                    size_t sizeof_kmp_task_t,
                                    size_t sizeof_shareds,
                                    kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                                     kmp_team_t *team, int tid,
                                     int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_32 *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                           kmp_int32 is_constrained);
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_64 *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                           kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
                               kmp_flag_oncore *flag, int final_spin,
                               int *thread_finished,
#if USE_ITT_BUILD
                               void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
                               kmp_int32 is_constrained);

extern void __kmp_free_task_team(kmp_info_t *thread,
                                 kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
                                  int always);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
#if USE_ITT_BUILD
                                 ,
                                 void *itt_sync_obj
#endif /* USE_ITT_BUILD */
                                 ,
                                 int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
                                  int gtid);

extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);

#if KMP_OS_UNIX
extern int __kmp_read_from_file(char const *path, char const *format, ...);
#endif

/* ------------------------------------------------------------------------ */
//
// Assembly routines that have no compiler intrinsic replacement
//

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);

#define __kmp_load_mxcsr(p) _mm_setcsr(*(p))
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }

extern void __kmp_load_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_clear_x87_fpu_status_word();
#define KMP_X86_MXCSR_MASK 0xffffffc0 /* ignore status flags (6 lsb) */

#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
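
// Editorial note: these x86-only helpers let the fork path capture the
// master's x87 control word and MXCSR and replicate them in workers (see
// __kmp_inherit_fp_control above). A hedged save/restore sketch; masking with
// KMP_X86_MXCSR_MASK discards the sticky status flags in the low 6 bits
// (illustration only):
#if 0
void example_fp_state_roundtrip() {
  kmp_int16 x87_cw;
  kmp_uint32 mxcsr;
  __kmp_store_x87_fpu_control_word(&x87_cw); // save current x87 control word
  __kmp_store_mxcsr(&mxcsr); // save current SSE control/status register
  mxcsr &= KMP_X86_MXCSR_MASK; // keep control bits only
  __kmp_load_x87_fpu_control_word(&x87_cw); // restore (e.g. in a worker)
  __kmp_load_mxcsr(&mxcsr);
}
#endif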

extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
                                  void *argv[]
#if OMPT_SUPPORT
                                  ,
                                  void **exit_frame_ptr
#endif
                                  );

/* ------------------------------------------------------------------------ */

KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags);
KMP_EXPORT void __kmpc_end(ident_t *);

KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
                                                  kmpc_ctor_vec ctor,
                                                  kmpc_cctor_vec cctor,
                                                  kmpc_dtor_vec dtor,
                                                  size_t vector_length);
KMP_EXPORT void __kmpc_threadprivate_register(ident_t *, void *data,
                                              kmpc_ctor ctor, kmpc_cctor cctor,
                                              kmpc_dtor dtor);
KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
                                      void *data, size_t size);

KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *);

KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *);
KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs,
                                 kmpc_micro microtask, ...);

KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid);

KMP_EXPORT void __kmpc_flush(ident_t *);
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
                                kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
                                    kmp_critical_name *);
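
// Editorial note: a critical construct lowers to a paired enter/exit around
// the protected region, with one kmp_critical_name lock object emitted per
// construct. A hedged codegen sketch (illustration only):
#if 0
static kmp_critical_name crit_lock; // compiler-emitted, zero-initialized
void example_critical(ident_t *loc, kmp_int32 gtid) {
  __kmpc_critical(loc, gtid, &crit_lock);
  /* protected region */
  __kmpc_end_critical(loc, gtid, &crit_lock);
}
#endif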

#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
                                          kmp_critical_name *, uint32_t hint);
#endif

KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid);

KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *,
                                                  kmp_int32 global_tid);

KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
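
// Editorial note: __kmpc_single returns nonzero only for the one thread that
// claims the construct; only that thread calls __kmpc_end_single. A hedged
// codegen sketch (illustration only):
#if 0
void example_single(ident_t *loc, kmp_int32 gtid) {
  if (__kmpc_single(loc, gtid)) {
    /* single region, executed by exactly one thread */
    __kmpc_end_single(loc, gtid);
  }
  // A team barrier typically follows unless the construct is nowait.
}
#endif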

KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
                                     kmp_int32 schedtype, kmp_int32 *plastiter,
                                     kmp_int *plower, kmp_int *pupper,
                                     kmp_int *pstride, kmp_int incr,
                                     kmp_int chunk);

KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);

KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
                                   size_t cpy_size, void *cpy_data,
                                   void (*cpy_func)(void *, void *),
                                   kmp_int32 didit);

extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);

/* Taskq interface routines */
KMP_EXPORT kmpc_thunk_t *__kmpc_taskq(ident_t *loc, kmp_int32 global_tid,
                                      kmpc_task_t taskq_task,
                                      size_t sizeof_thunk,
                                      size_t sizeof_shareds, kmp_int32 flags,
                                      kmpc_shared_vars_t **shareds);
KMP_EXPORT void __kmpc_end_taskq(ident_t *loc, kmp_int32 global_tid,
                                 kmpc_thunk_t *thunk);
KMP_EXPORT kmp_int32 __kmpc_task(ident_t *loc, kmp_int32 global_tid,
                                 kmpc_thunk_t *thunk);
KMP_EXPORT void __kmpc_taskq_task(ident_t *loc, kmp_int32 global_tid,
                                  kmpc_thunk_t *thunk, kmp_int32 status);
KMP_EXPORT void __kmpc_end_taskq_task(ident_t *loc, kmp_int32 global_tid,
                                      kmpc_thunk_t *thunk);
KMP_EXPORT kmpc_thunk_t *__kmpc_task_buffer(ident_t *loc, kmp_int32 global_tid,
                                            kmpc_thunk_t *taskq_thunk,
                                            kmpc_task_t task);

/* OMP 3.0 tasking interface routines */
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_int32 flags,
                                             size_t sizeof_kmp_task_t,
                                             size_t sizeof_shareds,
                                             kmp_routine_entry_t task_entry);
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                                          kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
                                          int end_part);
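
// Editorial note: explicit tasks are allocated and populated via task_alloc,
// enqueued via omp_task, and joined with taskwait. A hedged sketch of the
// typical call sequence (task_body is a hypothetical entry routine, and the
// flags value is illustrative; the real bit layout lives in
// kmp_tasking_flags_t):
#if 0
kmp_int32 task_body(kmp_int32 gtid, void *task) { /* task work */ return 0; }
void example_task(ident_t *loc, kmp_int32 gtid) {
  kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, /*flags=*/1,
                                        sizeof(kmp_task_t), /*shareds=*/0,
                                        (kmp_routine_entry_t)task_body);
  __kmpc_omp_task(loc, gtid, t); // enqueue (or execute immediately)
  __kmpc_omp_taskwait(loc, gtid); // wait for child tasks to complete
}
#endif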

#if TASK_UNUSED
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task);
#endif // TASK_UNUSED

/* ------------------------------------------------------------------------ */

#if OMP_40_ENABLED

KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
    kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    kmp_depend_info_t *noalias_dep_list);
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_int32 ndeps,
                                     kmp_depend_info_t *dep_list,
                                     kmp_int32 ndeps_noalias,
                                     kmp_depend_info_t *noalias_dep_list);

extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
                                bool serialize_immediate);

KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                              kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);

#if OMP_45_ENABLED

KMP_EXPORT void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask);
KMP_EXPORT void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask);
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
                                kmp_int32 if_val, kmp_uint64 *lb,
                                kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
                                kmp_int32 sched, kmp_uint64 grainsize,
                                void *task_dup);
#endif
#if OMP_50_ENABLED
KMP_EXPORT void *__kmpc_task_reduction_init(int gtid, int num_data, void *data);
KMP_EXPORT void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d);
#endif

#endif

/* Lock interface routines (fast versions with gtid passed in) */
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
                                 void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
                                      void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
                                    void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
                                         void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
                                  void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
                                       void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);

#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                           void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                                void **user_lock,
                                                uintptr_t hint);
#endif
3805 | |
3806 | /* Interface to fast scalable reduce methods routines */ |
3807 | |
3808 | KMP_EXPORT kmp_int32 __kmpc_reduce_nowait( |
3809 | ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, |
3810 | void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data), |
3811 | kmp_critical_name *lck); |
3812 | KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid, |
3813 | kmp_critical_name *lck); |
3814 | KMP_EXPORT kmp_int32 __kmpc_reduce( |
3815 | ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, |
3816 | void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data), |
3817 | kmp_critical_name *lck); |
3818 | KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, |
3819 | kmp_critical_name *lck); |
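// Editor's note: a sketch of the conventional emission pattern for a nowait
// reduction. The return-value convention assumed here (1: this thread runs
// the combine code and must call the matching end entry; 2: combine via
// atomics; 0: nothing to do) is the usual one, and `sum`, `reduce_func` and
// `crit_name` are illustrative locals.
#if 0
  switch (__kmpc_reduce_nowait(loc, gtid, /*num_vars=*/1, sizeof(sum), &sum,
                               reduce_func, &crit_name)) {
  case 1: /* combine privates into the shared result */
    __kmpc_end_reduce_nowait(loc, gtid, &crit_name);
    break;
  case 2: /* combine using atomic operations */
    break;
  }
#endif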
3820 | |
3821 | /* Internal fast reduction routines */ |
3822 | |
3823 | extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method( |
3824 | ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, |
3825 | void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data), |
3826 | kmp_critical_name *lck); |
3827 | |
3828 | // this function is for testing set/get/determine reduce method |
3829 | KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void); |
3830 | |
3831 | KMP_EXPORT kmp_uint64 __kmpc_get_taskid(); |
3832 | KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid(); |
3833 | |
3834 | // C++ port |
3835 | // missing 'extern "C"' declarations |
3836 | |
3837 | KMP_EXPORT kmp_int32 __kmpc_in_parallel(ident_t *loc); |
3838 | KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid); |
3839 | KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, |
3840 | kmp_int32 num_threads); |
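// Editor's note: a sketch of a `num_threads(n)` clause, conventionally
// emitted as a push immediately before the parallel fork entry
// (__kmpc_fork_call, declared elsewhere in this header); the outlined
// function and its argument are illustrative.
#if 0
  __kmpc_push_num_threads(loc, gtid, n);
  __kmpc_fork_call(loc, /*argc=*/1, outlined_fn, &shared);
#endif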
3841 | |
3842 | #if OMP_40_ENABLED |
3843 | KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, |
3844 | int proc_bind); |
3845 | KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, |
3846 | kmp_int32 num_teams, |
3847 | kmp_int32 num_threads); |
3848 | KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, |
3849 | kmpc_micro microtask, ...); |
3850 | #endif |
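// Editor's note: a sketch of `#pragma omp teams num_teams(t) thread_limit(l)`
// using the two entries above; the outlined function and its shared-data
// argument are illustrative.
#if 0
  __kmpc_push_num_teams(loc, gtid, t, l);
  __kmpc_fork_teams(loc, /*argc=*/1, teams_outlined_fn, &shared);
#endif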
3851 | #if OMP_45_ENABLED |
3852 | struct kmp_dim { // loop bounds info cast to kmp_int64 |
3853 | kmp_int64 lo; // lower |
3854 | kmp_int64 up; // upper |
3855 | kmp_int64 st; // stride |
3856 | }; |
3857 | KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, |
3858 | kmp_int32 num_dims, |
3859 | const struct kmp_dim *dims); |
3860 | KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, |
3861 | const kmp_int64 *vec); |
3862 | KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, |
3863 | const kmp_int64 *vec); |
3864 | KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid); |
3865 | #endif |
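// Editor's note: a sketch of a one-dimensional `ordered(1)` doacross loop
// lowered onto the four entries above; the bounds and dependence vectors
// are illustrative.
#if 0
  struct kmp_dim dims = {/*lo=*/0, /*up=*/n - 1, /*st=*/1};
  __kmpc_doacross_init(loc, gtid, /*num_dims=*/1, &dims);
  for (kmp_int64 i = 0; i < n; ++i) {
    kmp_int64 vec = i - 1;
    if (i > 0)
      __kmpc_doacross_wait(loc, gtid, &vec); // depend(sink: i - 1)
    /* ... ordered body ... */
    vec = i;
    __kmpc_doacross_post(loc, gtid, &vec);   // depend(source)
  }
  __kmpc_doacross_fini(loc, gtid);
#endif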
3866 | |
3867 | KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid, |
3868 | void *data, size_t size, |
3869 | void ***cache); |
3870 | |
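// Editor's note: this entry is the hot path for the threadprivate support
// implemented in kmp_threadprivate.cpp, the file this report analyzes. A
// sketch of the per-reference lowering, with `x` a threadprivate global and
// `x_cache` a compiler-generated cache slot (both names assumed):
#if 0
  static void **x_cache; // zero-initialized; grown by the runtime
  int *x_priv = (int *)__kmpc_threadprivate_cached(loc, gtid, &x,
                                                   sizeof(x), &x_cache);
  *x_priv = 42; // touches only this thread's copy
#endif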
3871 | // Symbols for MS mutual detection. |
3872 | extern int _You_must_link_with_exactly_one_OpenMP_library; |
3873 | extern int _You_must_link_with_Intel_OpenMP_library; |
3874 | #if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4) |
3875 | extern int _You_must_link_with_Microsoft_OpenMP_library; |
3876 | #endif |
3877 | |
3878 | // The routines below are not exported. |
3879 | // Consider making them 'static' in corresponding source files. |
3880 | void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr, |
3881 | void *data_addr, size_t pc_size); |
3882 | struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr, |
3883 | void *data_addr, |
3884 | size_t pc_size); |
3885 | void __kmp_threadprivate_resize_cache(int newCapacity); |
3886 | void __kmp_cleanup_threadprivate_caches(); |
3887 | |
3888 | // ompc_, kmpc_ entries moved from omp.h. |
3889 | #if KMP_OS_WINDOWS |
3890 | #define KMPC_CONVENTION __cdecl |
3891 | #else |
3892 | #define KMPC_CONVENTION |
3893 | #endif |
3894 | |
3895 | #ifndef __OMP_H |
3896 | typedef enum omp_sched_t { |
3897 | omp_sched_static = 1, |
3898 | omp_sched_dynamic = 2, |
3899 | omp_sched_guided = 3, |
3900 | omp_sched_auto = 4 |
3901 | } omp_sched_t; |
3902 | typedef void *kmp_affinity_mask_t; |
3903 | #endif |
3904 | |
3905 | KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int); |
3906 | KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int); |
3907 | KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int); |
3908 | KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int); |
3909 | KMP_EXPORT int KMPC_CONVENTION |
3910 | kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *); |
3911 | KMP_EXPORT int KMPC_CONVENTION |
3912 | kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *); |
3913 | KMP_EXPORT int KMPC_CONVENTION |
3914 | kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *); |
3915 | |
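// Editor's note: these ompc_/kmpc_ entries sit behind the user-visible
// omp_*/kmp_* API; for instance, a call to omp_set_schedule(omp_sched_guided, 4)
// plausibly lands on the entry declared above as:
#if 0
  ompc_set_schedule(omp_sched_guided, /*chunk=*/4);
#endif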
3916 | KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int); |
3917 | KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t); |
3918 | KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int); |
3919 | KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *); |
3920 | KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int); |
3921 | |
3922 | #if OMP_50_ENABLED |
3923 | enum kmp_target_offload_kind { |
3924 | tgt_disabled = 0, |
3925 | tgt_default = 1, |
3926 | tgt_mandatory = 2 |
3927 | }; |
3928 | typedef enum kmp_target_offload_kind kmp_target_offload_kind_t; |
3929 | // Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise |
3930 | extern kmp_target_offload_kind_t __kmp_target_offload; |
3931 | extern int __kmpc_get_target_offload(); |
3932 | #endif |
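// Editor's note: a sketch of how an offload library might consult the
// accessor above; the comparison uses the kmp_target_offload_kind enum
// defined just above, and the error handling is illustrative.
#if 0
  if (__kmpc_get_target_offload() == (int)tgt_mandatory) {
    /* a missing device must be treated as a hard error */
  }
#endif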
3933 | |
3934 | #ifdef __cplusplus |
3935 | } |
3936 | #endif |
3937 | |
3938 | #endif /* KMP_H */ |