Bug Summary

File: build/source/openmp/runtime/src/kmp_sched.cpp
Warning: line 810, column 38
Division by zero
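
The flagged path enters through __kmpc_team_static_init_8u (steps 1-3) and reaches the trip-count computation in __kmp_team_static_init with incr == 0 (step 12). The zero-increment consistency check at line 777 only reports the error through __kmp_error_construct, which the analyzer assumes returns, so the path continues to line 810, where the divisor (-incr) is zero. A minimal sketch of the flawed computation (an illustrative model with a hypothetical name, not the runtime code):

    #include <cstdio>
    using UT = unsigned long long; // stands in for traits_t<T>::unsigned_t

    static UT trip_count_sketch(long long lower, long long upper, long long incr) {
      if (incr == 1)
        return (UT)(upper - lower) + 1;
      if (incr == -1)
        return (UT)(lower - upper) + 1;
      if (incr > 0)
        return (UT)(upper - lower) / (UT)incr + 1;
      // Every incr <= 0 lands here; for incr == 0, -incr is also 0, so the
      // division below divides by zero, exactly as the analyzer warns.
      return (UT)(lower - upper) / (UT)(-incr) + 1;
    }

    int main() {
      std::printf("%llu\n", trip_count_sketch(0, 10, 0)); // incr == 0, as in step 12
    }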

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name kmp_sched.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D omp_EXPORTS -I projects/openmp/runtime/src -I /build/source/openmp/runtime/src -I include -I /build/source/llvm/include -I /build/source/openmp/runtime/src/i18n -I /build/source/openmp/runtime/src/include -I /build/source/openmp/runtime/src/thirdparty/ittnotify -D _FORTIFY_SOURCE=2 -D NDEBUG -D _GNU_SOURCE -D _REENTRANT -D _FORTIFY_SOURCE=2 -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1668078801 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -Wno-enum-constexpr-conversion -Wno-extra -Wno-pedantic -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-covered-switch-default -Wno-frame-address -Wno-strict-aliasing -Wno-stringop-truncation -Wno-switch -Wno-uninitialized -Wno-return-type-c-linkage -Wno-cast-qual -Wno-int-to-void-pointer-cast -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-11-10-135928-647445-1 -x c++ /build/source/openmp/runtime/src/kmp_sched.cpp
1/*
2 * kmp_sched.cpp -- static scheduling -- iteration initialization
3 */
4
5//===----------------------------------------------------------------------===//
6//
7// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8// See https://llvm.org/LICENSE.txt for license information.
9// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10//
11//===----------------------------------------------------------------------===//
12
13/* Static scheduling initialization.
14
15 NOTE: team->t.t_nproc is a constant inside of any dispatch loop, however
16 it may change values between parallel regions. __kmp_max_nth
17 is the largest value __kmp_nth may take, 1 is the smallest. */
18
19#include "kmp.h"
20#include "kmp_error.h"
21#include "kmp_i18n.h"
22#include "kmp_itt.h"
23#include "kmp_stats.h"
24#include "kmp_str.h"
25
26#if OMPT_SUPPORT
27#include "ompt-specific.h"
28#endif
29
30#ifdef KMP_DEBUG
31//-------------------------------------------------------------------------
32// template for debug prints specification ( d, u, lld, llu )
33char const *traits_t<int>::spec = "d";
34char const *traits_t<unsigned int>::spec = "u";
35char const *traits_t<long long>::spec = "lld";
36char const *traits_t<unsigned long long>::spec = "llu";
37char const *traits_t<long>::spec = "ld";
38//-------------------------------------------------------------------------
39#endif
40
41#if KMP_STATS_ENABLED
42#define KMP_STATS_LOOP_END(stat) \
43 { \
44 kmp_int64 t; \
45 kmp_int64 u = (kmp_int64)(*pupper); \
46 kmp_int64 l = (kmp_int64)(*plower); \
47 kmp_int64 i = (kmp_int64)incr; \
48 if (i == 1) { \
49 t = u - l + 1; \
50 } else if (i == -1) { \
51 t = l - u + 1; \
52 } else if (i > 0) { \
53 t = (u - l) / i + 1; \
54 } else { \
55 t = (l - u) / (-i) + 1; \
56 } \
57 KMP_COUNT_VALUE(stat, t); \
58 KMP_POP_PARTITIONED_TIMER(); \
59 }
60#else
61#define KMP_STATS_LOOP_END(stat) /* Nothing */
62#endif
63
64static ident_t loc_stub = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
65static inline void check_loc(ident_t *&loc) {
66 if (loc == NULL)
67 loc = &loc_stub; // may need to report location info to ittnotify
68}
69
70template <typename T>
71static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
72 kmp_int32 schedtype, kmp_int32 *plastiter,
73 T *plower, T *pupper,
74 typename traits_t<T>::signed_t *pstride,
75 typename traits_t<T>::signed_t incr,
76 typename traits_t<T>::signed_t chunk
77#if OMPT_SUPPORT && OMPT_OPTIONAL
78 ,
79 void *codeptr
80#endif
81) {
82 KMP_COUNT_BLOCK(OMP_LOOP_STATIC);
83 KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
84 KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);
85
86 // Clear monotonic/nonmonotonic bits (ignore it)
87 schedtype = SCHEDULE_WITHOUT_MODIFIERS(schedtype);
88
89 typedef typename traits_t<T>::unsigned_t UT;
90 typedef typename traits_t<T>::signed_t ST;
91 /* this all has to be changed back to TID and such.. */
92 kmp_int32 gtid = global_tid;
93 kmp_uint32 tid;
94 kmp_uint32 nth;
95 UT trip_count;
96 kmp_team_t *team;
97 __kmp_assert_valid_gtid(gtid);
98 kmp_info_t *th = __kmp_threads[gtid];
99
100#if OMPT_SUPPORT && OMPT_OPTIONAL
101 ompt_team_info_t *team_info = NULL;
102 ompt_task_info_t *task_info = NULL;
103 ompt_work_t ompt_work_type = ompt_work_loop;
104
105 static kmp_int8 warn = 0;
106
107 if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
108 // Only fully initialize variables needed by OMPT if OMPT is enabled.
109 team_info = __ompt_get_teaminfo(0, NULL);
110 task_info = __ompt_get_task_info_object(0);
111 // Determine workshare type
112 if (loc != NULL) {
113 if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
114 ompt_work_type = ompt_work_loop;
115 } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
116 ompt_work_type = ompt_work_sections;
117 } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
118 ompt_work_type = ompt_work_distribute;
119 } else {
120 kmp_int8 bool_res =
121 KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
122 if (bool_res)
123 KMP_WARNING(OmptOutdatedWorkshare);
124 }
125 KMP_DEBUG_ASSERT(ompt_work_type);
126 }
127 }
128#endif
129
130 KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
131 KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
132#ifdef KMP_DEBUG
133 {
134 char *buff;
135 // create format specifiers before the debug output
136 buff = __kmp_str_format(
137 "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
138 " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
139 traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
140 traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
141 KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
142 *pstride, incr, chunk));
143 __kmp_str_free(&buff);
144 }
145#endif
146
147 if (__kmp_env_consistency_check) {
148 __kmp_push_workshare(global_tid, ct_pdo, loc);
149 if (incr == 0) {
150 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
151 loc);
152 }
153 }
154 /* special handling for zero-trip loops */
155 if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
156 if (plastiter != NULL)
157 *plastiter = FALSE;
158 /* leave pupper and plower set to entire iteration space */
159 *pstride = incr; /* value should never be used */
160// *plower = *pupper - incr;
161// let compiler bypass the illegal loop (like for(i=1;i<10;i--))
162// THE LINE COMMENTED ABOVE CAUSED shape2F/h_tests_1.f TO HAVE A FAILURE
163// ON A ZERO-TRIP LOOP (lower=1, upper=0,stride=1) - JPH June 23, 2009.
164#ifdef KMP_DEBUG
165 {
166 char *buff;
167 // create format specifiers before the debug output
168 buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
169 "lower=%%%s upper=%%%s stride = %%%s "
170 "signed?<%s>, loc = %%s\n",
171 traits_t<T>::spec, traits_t<T>::spec,
172 traits_t<ST>::spec, traits_t<T>::spec);
173 check_loc(loc);
174 KD_TRACE(100,
175 (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
176 __kmp_str_free(&buff);
177 }
178#endif
179 KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
180
181#if OMPT_SUPPORT && OMPT_OPTIONAL
182 if (ompt_enabled.ompt_callback_work) {
183 ompt_callbacks.ompt_callback(ompt_callback_work)(
184 ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
185 &(task_info->task_data), 0, codeptr);
186 }
187#endif
188 KMP_STATS_LOOP_END(OMP_loop_static_iterations);
189 return;
190 }
191
192 // Although there are schedule enumerations above kmp_ord_upper which are not
193 // schedules for "distribute", the only ones which are useful are dynamic, so
194 // cannot be seen here, since this codepath is only executed for static
195 // schedules.
196 if (schedtype > kmp_ord_upper) {
197 // we are in DISTRIBUTE construct
198 schedtype += kmp_sch_static -
199 kmp_distribute_static; // AC: convert to usual schedule type
200 if (th->th.th_team->t.t_serialized > 1) {
201 tid = 0;
202 team = th->th.th_team;
203 } else {
204 tid = th->th.th_team->t.t_master_tid;
205 team = th->th.th_team->t.t_parent;
206 }
207 } else {
208 tid = __kmp_tid_from_gtid(global_tid);
209 team = th->th.th_team;
210 }
211
212 /* determine if "for" loop is an active worksharing construct */
213 if (team->t.t_serialized) {
214 /* serialized parallel, each thread executes whole iteration space */
215 if (plastiter != NULL)
216 *plastiter = TRUE;
217 /* leave pupper and plower set to entire iteration space */
218 *pstride =
219 (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
220
221#ifdef KMP_DEBUG
222 {
223 char *buff;
224 // create format specifiers before the debug output
225 buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
226 "lower=%%%s upper=%%%s stride = %%%s\n",
227 traits_t<T>::spec, traits_t<T>::spec,
228 traits_t<ST>::spec);
229 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
230 __kmp_str_free(&buff);
231 }
232#endif
233 KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
234
235#if OMPT_SUPPORT && OMPT_OPTIONAL
236 if (ompt_enabled.ompt_callback_work) {
237 ompt_callbacks.ompt_callback(ompt_callback_work)(
238 ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
239 &(task_info->task_data), *pstride, codeptr);
240 }
241#endif
242 KMP_STATS_LOOP_END(OMP_loop_static_iterations);
243 return;
244 }
245 nth = team->t.t_nproc;
246 if (nth == 1) {
247 if (plastiter != NULL)
248 *plastiter = TRUE;
249 *pstride =
250 (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
251#ifdef KMP_DEBUG
252 {
253 char *buff;
254 // create format specifiers before the debug output
255 buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
256 "lower=%%%s upper=%%%s stride = %%%s\n",
257 traits_t<T>::spec, traits_t<T>::spec,
258 traits_t<ST>::spec);
259 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
260 __kmp_str_free(&buff);
261 }
262#endif
263 KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
264
265#if OMPT_SUPPORT && OMPT_OPTIONAL
266 if (ompt_enabled.ompt_callback_work) {
267 ompt_callbacks.ompt_callback(ompt_callback_work)(
268 ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
269 &(task_info->task_data), *pstride, codeptr);
270 }
271#endif
272 KMP_STATS_LOOP_END(OMP_loop_static_iterations);
273 return;
274 }
275
276 /* compute trip count */
277 if (incr == 1) {
278 trip_count = *pupper - *plower + 1;
279 } else if (incr == -1) {
280 trip_count = *plower - *pupper + 1;
281 } else if (incr > 0) {
282 // upper-lower can exceed the limit of signed type
283 trip_count = (UT)(*pupper - *plower) / incr + 1;
284 } else {
285 trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
286 }
287
288#if KMP_STATS_ENABLED
289 if (KMP_MASTER_GTID(gtid)) {
290 KMP_COUNT_VALUE(OMP_loop_static_total_iterations, trip_count);
291 }
292#endif
293
294 if (__kmp_env_consistency_check) {
295 /* tripcount overflow? */
296 if (trip_count == 0 && *pupper != *plower) {
297 __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
298 loc);
299 }
300 }
301
302 /* compute remaining parameters */
303 switch (schedtype) {
304 case kmp_sch_static: {
305 if (trip_count < nth) {
306 KMP_DEBUG_ASSERT(
307 __kmp_static == kmp_sch_static_greedy ||
308 __kmp_static ==
309 kmp_sch_static_balanced); // Unknown static scheduling type.
310 if (tid < trip_count) {
311 *pupper = *plower = *plower + tid * incr;
312 } else {
313 // set bounds so non-active threads execute no iterations
314 *plower = *pupper + (incr > 0 ? 1 : -1);
315 }
316 if (plastiter != NULL)
317 *plastiter = (tid == trip_count - 1);
318 } else {
319 if (__kmp_static == kmp_sch_static_balanced) {
320 UT small_chunk = trip_count / nth;
321 UT extras = trip_count % nth;
322 *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
323 *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
324 if (plastiter != NULL)
325 *plastiter = (tid == nth - 1);
326 } else {
327 T big_chunk_inc_count =
328 (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
329 T old_upper = *pupper;
330
331 KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
332 // Unknown static scheduling type.
333
334 *plower += tid * big_chunk_inc_count;
335 *pupper = *plower + big_chunk_inc_count - incr;
336 if (incr > 0) {
337 if (*pupper < *plower)
338 *pupper = traits_t<T>::max_value;
339 if (plastiter != NULL)
340 *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
341 if (*pupper > old_upper)
342 *pupper = old_upper; // tracker C73258
343 } else {
344 if (*pupper > *plower)
345 *pupper = traits_t<T>::min_value;
346 if (plastiter != NULL)
347 *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
348 if (*pupper < old_upper)
349 *pupper = old_upper; // tracker C73258
350 }
351 }
352 }
353 *pstride = trip_count;
354 break;
355 }
356 case kmp_sch_static_chunked: {
357 ST span;
358 UT nchunks;
359 if (chunk < 1)
360 chunk = 1;
361 else if ((UT)chunk > trip_count)
362 chunk = trip_count;
363 nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
364 span = chunk * incr;
365 if (nchunks < nth) {
366 *pstride = span * nchunks;
367 if (tid < nchunks) {
368 *plower = *plower + (span * tid);
369 *pupper = *plower + span - incr;
370 } else {
371 *plower = *pupper + (incr > 0 ? 1 : -1);
372 }
373 } else {
374 *pstride = span * nth;
375 *plower = *plower + (span * tid);
376 *pupper = *plower + span - incr;
377 }
378 if (plastiter != NULL)
379 *plastiter = (tid == (nchunks - 1) % nth);
380 break;
381 }
382 case kmp_sch_static_balanced_chunked: {
383 T old_upper = *pupper;
384 // round up to make sure the chunk is enough to cover all iterations
385 UT span = (trip_count + nth - 1) / nth;
386
387 // perform chunk adjustment
388 chunk = (span + chunk - 1) & ~(chunk - 1);
389
390 span = chunk * incr;
391 *plower = *plower + (span * tid);
392 *pupper = *plower + span - incr;
393 if (incr > 0) {
394 if (*pupper > old_upper)
395 *pupper = old_upper;
396 } else if (*pupper < old_upper)
397 *pupper = old_upper;
398
399 if (plastiter != NULL)
400 *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
401 break;
402 }
403 default:
404 KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
405 break;
406 }
407
408#if USE_ITT_BUILD
409 // Report loop metadata
410 if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
411 __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
412 team->t.t_active_level == 1) {
413 kmp_uint64 cur_chunk = chunk;
414 check_loc(loc);
415 // Calculate chunk in case it was not specified; it is specified for
416 // kmp_sch_static_chunked
417 if (schedtype == kmp_sch_static) {
418 cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
419 }
420 // 0 - "static" schedule
421 __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
422 }
423#endif
424#ifdef KMP_DEBUG
425 {
426 char *buff;
427 // create format specifiers before the debug output
428 buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
429 "upper=%%%s stride = %%%s signed?<%s>\n",
430 traits_t<T>::spec, traits_t<T>::spec,
431 traits_t<ST>::spec, traits_t<T>::spec);
432 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
433 __kmp_str_free(&buff);
434 }
435#endif
436 KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));
437
438#if OMPT_SUPPORT && OMPT_OPTIONAL
439 if (ompt_enabled.ompt_callback_work) {
440 ompt_callbacks.ompt_callback(ompt_callback_work)(
441 ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
442 &(task_info->task_data), trip_count, codeptr);
443 }
444 if (ompt_enabled.ompt_callback_dispatch) {
445 ompt_dispatch_t dispatch_type;
446 ompt_data_t instance = ompt_data_none;
447 ompt_dispatch_chunk_t dispatch_chunk;
448 if (ompt_work_type == ompt_work_sections) {
449 dispatch_type = ompt_dispatch_section;
450 instance.ptr = codeptr;
451 } else {
452 OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupper, incr);
453 dispatch_type = (ompt_work_type == ompt_work_distribute)
454 ? ompt_dispatch_distribute_chunk
455 : ompt_dispatch_ws_loop_chunk;
456 instance.ptr = &dispatch_chunk;
457 }
458 ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
459 &(team_info->parallel_data), &(task_info->task_data), dispatch_type,
460 instance);
461 }
462#endif
463
464 KMP_STATS_LOOP_END(OMP_loop_static_iterations);
465 return;
466}
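
The kmp_sch_static_balanced branch above (lines 319-325) gives the first trip_count % nth threads one extra iteration. A standalone sketch of that split with made-up numbers (an assumed model, not runtime code):

    #include <cstdio>

    int main() {
      unsigned trip_count = 10, nth = 4;
      unsigned small_chunk = trip_count / nth; // 2
      unsigned extras = trip_count % nth; // 2 threads get one extra iteration
      for (unsigned tid = 0; tid < nth; ++tid) {
        // Mirrors the *plower/*pupper updates above, with incr == 1, lower == 0.
        unsigned lo = tid * small_chunk + (tid < extras ? tid : extras);
        unsigned hi = lo + small_chunk + (tid < extras ? 1 : 0) - 1;
        std::printf("tid %u: [%u, %u]\n", tid, lo, hi);
      }
      // Prints: tid 0: [0, 2], tid 1: [3, 5], tid 2: [6, 7], tid 3: [8, 9]
    }
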
467
468template <typename T>
469static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
470 kmp_int32 schedule, kmp_int32 *plastiter,
471 T *plower, T *pupper, T *pupperDist,
472 typename traits_t<T>::signed_t *pstride,
473 typename traits_t<T>::signed_t incr,
474 typename traits_t<T>::signed_t chunk
475#if OMPT_SUPPORT && OMPT_OPTIONAL
476 ,
477 void *codeptr
478#endif
479) {
480 KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
481 KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
482 KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
483 typedef typename traits_t<T>::unsigned_t UT;
484 typedef typename traits_t<T>::signed_t ST;
485 kmp_uint32 tid;
486 kmp_uint32 nth;
487 kmp_uint32 team_id;
488 kmp_uint32 nteams;
489 UT trip_count;
490 kmp_team_t *team;
491 kmp_info_t *th;
492
493 KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
494 KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
495 __kmp_assert_valid_gtid(gtid);
496#ifdef KMP_DEBUG
497 {
498 char *buff;
499 // create format specifiers before the debug output
500 buff = __kmp_str_format(
501 "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
502 "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
503 traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
504 traits_t<ST>::spec, traits_t<T>::spec);
505 KD_TRACE(100,
506 (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
507 __kmp_str_free(&buff);
508 }
509#endif
510
511 if (__kmp_env_consistency_check) {
512 __kmp_push_workshare(gtid, ct_pdo, loc);
513 if (incr == 0) {
514 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
515 loc);
516 }
517 if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
518 // The loop is illegal.
519 // Some zero-trip loops maintained by compiler, e.g.:
520 // for(i=10;i<0;++i) // lower >= upper - run-time check
521 // for(i=0;i>10;--i) // lower <= upper - run-time check
522 // for(i=0;i>10;++i) // incr > 0 - compile-time check
523 // for(i=10;i<0;--i) // incr < 0 - compile-time check
524 // Compiler does not check the following illegal loops:
525 // for(i=0;i<10;i+=incr) // where incr<0
526 // for(i=10;i>0;i-=incr) // where incr<0
527 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
528 }
529 }
530 tid = __kmp_tid_from_gtid(gtid);
531 th = __kmp_threads[gtid];
532 nth = th->th.th_team_nproc;
533 team = th->th.th_team;
534 KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
535 nteams = th->th.th_teams_size.nteams;
536 team_id = team->t.t_master_tid;
537 KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
538
539 // compute global trip count
540 if (incr == 1) {
541 trip_count = *pupper - *plower + 1;
542 } else if (incr == -1) {
543 trip_count = *plower - *pupper + 1;
544 } else if (incr > 0) {
545 // upper-lower can exceed the limit of signed type
546 trip_count = (UT)(*pupper - *plower) / incr + 1;
547 } else {
548 trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
549 }
550
551 *pstride = *pupper - *plower; // just in case (can be unused)
552 if (trip_count <= nteams) {
553 KMP_DEBUG_ASSERT(
554 __kmp_static == kmp_sch_static_greedy ||
555 __kmp_static ==
556 kmp_sch_static_balanced); // Unknown static scheduling type.
557 // only primary threads of some teams get single iteration, other threads
558 // get nothing
559 if (team_id < trip_count && tid == 0) {
560 *pupper = *pupperDist = *plower = *plower + team_id * incr;
561 } else {
562 *pupperDist = *pupper;
563 *plower = *pupper + incr; // compiler should skip loop body
564 }
565 if (plastiter != NULL)
566 *plastiter = (tid == 0 && team_id == trip_count - 1);
567 } else {
568 // Get the team's chunk first (each team gets at most one chunk)
569 if (__kmp_static == kmp_sch_static_balanced) {
570 UT chunkD = trip_count / nteams;
571 UT extras = trip_count % nteams;
572 *plower +=
573 incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
574 *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
575 if (plastiter != NULL)
576 *plastiter = (team_id == nteams - 1);
577 } else {
578 T chunk_inc_count =
579 (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
580 T upper = *pupper;
581 KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
582 // Unknown static scheduling type.
583 *plower += team_id * chunk_inc_count;
584 *pupperDist = *plower + chunk_inc_count - incr;
585 // Check/correct bounds if needed
586 if (incr > 0) {
587 if (*pupperDist < *plower)
588 *pupperDist = traits_t<T>::max_value;
589 if (plastiter != NULL)
590 *plastiter = *plower <= upper && *pupperDist > upper - incr;
591 if (*pupperDist > upper)
592 *pupperDist = upper; // tracker C73258
593 if (*plower > *pupperDist) {
594 *pupper = *pupperDist; // no iterations available for the team
595 goto end;
596 }
597 } else {
598 if (*pupperDist > *plower)
599 *pupperDist = traits_t<T>::min_value;
600 if (plastiter != NULL)
601 *plastiter = *plower >= upper && *pupperDist < upper - incr;
602 if (*pupperDist < upper)
603 *pupperDist = upper; // tracker C73258
604 if (*plower < *pupperDist) {
605 *pupper = *pupperDist; // no iterations available for the team
606 goto end;
607 }
608 }
609 }
610 // Get the parallel loop chunk now (for thread)
611 // compute trip count for team's chunk
612 if (incr == 1) {
613 trip_count = *pupperDist - *plower + 1;
614 } else if (incr == -1) {
615 trip_count = *plower - *pupperDist + 1;
616 } else if (incr > 1) {
617 // upper-lower can exceed the limit of signed type
618 trip_count = (UT)(*pupperDist - *plower) / incr + 1;
619 } else {
620 trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
621 }
622 KMP_DEBUG_ASSERT(trip_count);
623 switch (schedule) {
624 case kmp_sch_static: {
625 if (trip_count <= nth) {
626 KMP_DEBUG_ASSERT(
627 __kmp_static == kmp_sch_static_greedy ||
628 __kmp_static ==
629 kmp_sch_static_balanced); // Unknown static scheduling type.
630 if (tid < trip_count)
631 *pupper = *plower = *plower + tid * incr;
632 else
633 *plower = *pupper + incr; // no iterations available
634 if (plastiter != NULL)
635 if (*plastiter != 0 && !(tid == trip_count - 1))
636 *plastiter = 0;
637 } else {
638 if (__kmp_static == kmp_sch_static_balanced) {
639 UT chunkL = trip_count / nth;
640 UT extras = trip_count % nth;
641 *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
642 *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
643 if (plastiter != NULL)
644 if (*plastiter != 0 && !(tid == nth - 1))
645 *plastiter = 0;
646 } else {
647 T chunk_inc_count =
648 (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
649 T upper = *pupperDist;
650 KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
651 // Unknown static scheduling type.
652 *plower += tid * chunk_inc_count;
653 *pupper = *plower + chunk_inc_count - incr;
654 if (incr > 0) {
655 if (*pupper < *plower)
656 *pupper = traits_t<T>::max_value;
657 if (plastiter != NULL)
658 if (*plastiter != 0 &&
659 !(*plower <= upper && *pupper > upper - incr))
660 *plastiter = 0;
661 if (*pupper > upper)
662 *pupper = upper; // tracker C73258
663 } else {
664 if (*pupper > *plower)
665 *pupper = traits_t<T>::min_value;
666 if (plastiter != NULL)
667 if (*plastiter != 0 &&
668 !(*plower >= upper && *pupper < upper - incr))
669 *plastiter = 0;
670 if (*pupper < upper)
671 *pupper = upper; // tracker C73258
672 }
673 }
674 }
675 break;
676 }
677 case kmp_sch_static_chunked: {
678 ST span;
679 if (chunk < 1)
680 chunk = 1;
681 span = chunk * incr;
682 *pstride = span * nth;
683 *plower = *plower + (span * tid);
684 *pupper = *plower + span - incr;
685 if (plastiter != NULL)
686 if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
687 *plastiter = 0;
688 break;
689 }
690 default:
691 KMP_ASSERT2(0,
692 "__kmpc_dist_for_static_init: unknown loop scheduling type");
693 break;
694 }
695 }
696end:;
697#ifdef KMP_DEBUG
698 {
699 char *buff;
700 // create format specifiers before the debug output
701 buff = __kmp_str_format(
702 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
703 "stride=%%%s signed?<%s>\n",
704 traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
705 traits_t<ST>::spec, traits_t<T>::spec);
706 KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
707 __kmp_str_free(&buff);
708 }
709#endif
710 KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
711#if OMPT_SUPPORT && OMPT_OPTIONAL
712 if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
713 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
714 ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
715 if (ompt_enabled.ompt_callback_work) {
716 ompt_callbacks.ompt_callback(ompt_callback_work)(
717 ompt_work_distribute, ompt_scope_begin, &(team_info->parallel_data),
718 &(task_info->task_data), 0, codeptr);
719 }
720 if (ompt_enabled.ompt_callback_dispatch) {
721 ompt_data_t instance = ompt_data_none;
722 ompt_dispatch_chunk_t dispatch_chunk;
723 OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupperDist, incr);
724 instance.ptr = &dispatch_chunk;
725 ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
726 &(team_info->parallel_data), &(task_info->task_data),
727 ompt_dispatch_distribute_chunk, instance);
728 }
729 }
730#endif // OMPT_SUPPORT && OMPT_OPTIONAL
731 KMP_STATS_LOOP_END(OMP_distribute_iterations);
732 return;
733}
734
735template <typename T>
736static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
737 kmp_int32 *p_last, T *p_lb, T *p_ub,
738 typename traits_t<T>::signed_t *p_st,
739 typename traits_t<T>::signed_t incr,
740 typename traits_t<T>::signed_t chunk) {
741 // The routine returns the first chunk distributed to the team and
742 // stride for next chunks calculation.
743 // Last iteration flag set for the team that will execute
744 // the last iteration of the loop.
745 // The routine is called for dist_schedule(static,chunk) only.
746 typedef typename traits_t<T>::unsigned_t UT;
747 typedef typename traits_t<T>::signed_t ST;
748 kmp_uint32 team_id;
749 kmp_uint32 nteams;
750 UT trip_count;
751 T lower;
752 T upper;
753 ST span;
754 kmp_team_t *team;
755 kmp_info_t *th;
756
757 KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
4: Assuming 'p_last' is null
5: Taking true branch
758 KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
6: Assuming 'kmp_e_debug' is < 10
7: Taking false branch
759 __kmp_assert_valid_gtid(gtid);
760#ifdef KMP_DEBUG
761 {
762 char *buff;
763 // create format specifiers before the debug output
764 buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
765 "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
766 traits_t<T>::spec, traits_t<T>::spec,
767 traits_t<ST>::spec, traits_t<ST>::spec,
768 traits_t<T>::spec);
769 KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
8: Assuming 'kmp_d_debug' is < 100
9: Taking false branch
770 __kmp_str_free(&buff);
771 }
772#endif
773
774 lower = *p_lb;
775 upper = *p_ub;
776 if (__kmp_env_consistency_check) {
10: Assuming '__kmp_env_consistency_check' is not equal to 0
11: Taking true branch
777 if (incr == 0) {
12: Assuming 'incr' is equal to 0
13: Taking true branch
778 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
779 loc);
780 }
781 if (incr > 0 ? (upper < lower) : (lower < upper)) {
13.1: 'incr' is <= 0
14: '?' condition is false
15: Assuming 'lower' is >= 'upper'
16: Taking false branch
782 // The loop is illegal.
783 // Some zero-trip loops maintained by compiler, e.g.:
784 // for(i=10;i<0;++i) // lower >= upper - run-time check
785 // for(i=0;i>10;--i) // lower <= upper - run-time check
786 // for(i=0;i>10;++i) // incr > 0 - compile-time check
787 // for(i=10;i<0;--i) // incr < 0 - compile-time check
788 // Compiler does not check the following illegal loops:
789 // for(i=0;i<10;i+=incr) // where incr<0
790 // for(i=10;i>0;i-=incr) // where incr<0
791 __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
792 }
793 }
794 th = __kmp_threads[gtid];
795 team = th->th.th_team;
796 KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
17: Assuming field 'th_teams_microtask' is non-null
18: Taking false branch
797 nteams = th->th.th_teams_size.nteams;
798 team_id = team->t.t_master_tid;
799 KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
19: Assuming 'nteams' is equal to field 't_nproc'
20: Taking false branch
800
801 // compute trip count
802 if (incr == 1) {
20.1: 'incr' is not equal to 1
21: Taking false branch
803 trip_count = upper - lower + 1;
804 } else if (incr == -1) {
22: Taking false branch
805 trip_count = lower - upper + 1;
806 } else if (incr > 0) {
22.1: 'incr' is <= 0
23: Taking false branch
807 // upper-lower can exceed the limit of signed type
808 trip_count = (UT)(upper - lower) / incr + 1;
809 } else {
810 trip_count = (UT)(lower - upper) / (-incr) + 1;
24: Division by zero
811 }
812 if (chunk < 1)
813 chunk = 1;
814 span = chunk * incr;
815 *p_st = span * nteams;
816 *p_lb = lower + (span * team_id);
817 *p_ub = *p_lb + span - incr;
818 if (p_last != NULL)
819 *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
820 // Correct upper bound if needed
821 if (incr > 0) {
822 if (*p_ub < *p_lb) // overflow?
823 *p_ub = traits_t<T>::max_value;
824 if (*p_ub > upper)
825 *p_ub = upper; // tracker C73258
826 } else { // incr < 0
827 if (*p_ub > *p_lb)
828 *p_ub = traits_t<T>::min_value;
829 if (*p_ub < upper)
830 *p_ub = upper; // tracker C73258
831 }
832#ifdef KMP_DEBUG
833 {
834 char *buff;
835 // create format specifiers before the debug output
836 buff =
837 __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
838 "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
839 traits_t<T>::spec, traits_t<T>::spec,
840 traits_t<ST>::spec, traits_t<ST>::spec);
841 KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
842 __kmp_str_free(&buff);
843 }
844#endif
845}
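
The division at line 810 is reachable whenever the incr == 0 report at line 778 does not terminate execution, and there is no guard at all when __kmp_env_consistency_check is off. One illustrative hardening, shown only as a sketch under those assumptions and not as the upstream patch, is to return before the trip count is computed:

    // Sketch only: reject a zero increment regardless of whether
    // __kmp_env_consistency_check is enabled, so (-incr) below is never 0.
    if (incr == 0) {
      if (__kmp_env_consistency_check)
        __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc);
      return;
    }
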
846
847//------------------------------------------------------------------------------
848extern "C" {
849/*!
850@ingroup WORK_SHARING
851@param loc Source code location
852@param gtid Global thread id of this thread
853@param schedtype Scheduling type
854@param plastiter Pointer to the "last iteration" flag
855@param plower Pointer to the lower bound
856@param pupper Pointer to the upper bound
857@param pstride Pointer to the stride
858@param incr Loop increment
859@param chunk The chunk size
860
861Each of the four functions here is identical apart from the argument types.
862
863The functions compute the upper and lower bounds and stride to be used for the
864set of iterations to be executed by the current thread from the statically
865scheduled loop that is described by the initial values of the bounds, stride,
866increment and chunk size.
867
868@{
869*/
870void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype,
871 kmp_int32 *plastiter, kmp_int32 *plower,
872 kmp_int32 *pupper, kmp_int32 *pstride,
873 kmp_int32 incr, kmp_int32 chunk) {
874 __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
875 pupper, pstride, incr, chunk
876#if OMPT_SUPPORT && OMPT_OPTIONAL
877 ,
878 OMPT_GET_RETURN_ADDRESS(0)
879#endif
880 );
881}
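
For context, this entry point is normally invoked by compiler-generated code rather than by hand. A hedged sketch of the typical lowering of #pragma omp for schedule(static) (the loc_ref/gtid setup, n, and body are assumptions supplied by the surrounding codegen; exact lowering varies by compiler):

    kmp_int32 last = 0, lb = 0, ub = n - 1, stride = 1;
    __kmpc_for_static_init_4(loc_ref, gtid, kmp_sch_static, &last,
                             &lb, &ub, &stride, /*incr=*/1, /*chunk=*/1);
    for (kmp_int32 i = lb; i <= ub; ++i)
      body(i); // each thread visits only its [lb, ub] slice
    __kmpc_for_static_fini(loc_ref, gtid);
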
882
883/*!
884 See @ref __kmpc_for_static_init_4
885 */
886void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
887 kmp_int32 schedtype, kmp_int32 *plastiter,
888 kmp_uint32 *plower, kmp_uint32 *pupper,
889 kmp_int32 *pstride, kmp_int32 incr,
890 kmp_int32 chunk) {
891 __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
892 pupper, pstride, incr, chunk
893#if OMPT_SUPPORT && OMPT_OPTIONAL
894 ,
895 OMPT_GET_RETURN_ADDRESS(0)
896#endif
897 );
898}
899
900/*!
901 See @ref __kmpc_for_static_init_4
902 */
903void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype,
904 kmp_int32 *plastiter, kmp_int64 *plower,
905 kmp_int64 *pupper, kmp_int64 *pstride,
906 kmp_int64 incr, kmp_int64 chunk) {
907 __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
908 pupper, pstride, incr, chunk
909#if OMPT_SUPPORT && OMPT_OPTIONAL
910 ,
911 OMPT_GET_RETURN_ADDRESS(0)
912#endif
913 );
914}
915
916/*!
917 See @ref __kmpc_for_static_init_4
918 */
919void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
920 kmp_int32 schedtype, kmp_int32 *plastiter,
921 kmp_uint64 *plower, kmp_uint64 *pupper,
922 kmp_int64 *pstride, kmp_int64 incr,
923 kmp_int64 chunk) {
924 __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
925 pupper, pstride, incr, chunk
926#if OMPT_SUPPORT && OMPT_OPTIONAL
927 ,
928 OMPT_GET_RETURN_ADDRESS(0)
929#endif
930 );
931}
932/*!
933@}
934*/
935
936#if OMPT_SUPPORT && OMPT_OPTIONAL
937#define OMPT_CODEPTR_ARG , OMPT_GET_RETURN_ADDRESS(0)
938#else
939#define OMPT_CODEPTR_ARG
940#endif
941
942/*!
943@ingroup WORK_SHARING
944@param loc Source code location
945@param gtid Global thread id of this thread
946@param schedule Scheduling type for the parallel loop
947@param plastiter Pointer to the "last iteration" flag
948@param plower Pointer to the lower bound
949@param pupper Pointer to the upper bound of loop chunk
950@param pupperD Pointer to the upper bound of dist_chunk
951@param pstride Pointer to the stride for parallel loop
952@param incr Loop increment
953@param chunk The chunk size for the parallel loop
954
955Each of the four functions here is identical apart from the argument types.
956
957The functions compute the upper and lower bounds and strides to be used for the
958set of iterations to be executed by the current thread from the statically
959scheduled loop that is described by the initial values of the bounds, strides,
960increment and chunks for parallel loop and distribute constructs.
961
962@{
963*/
964void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
965 kmp_int32 schedule, kmp_int32 *plastiter,
966 kmp_int32 *plower, kmp_int32 *pupper,
967 kmp_int32 *pupperD, kmp_int32 *pstride,
968 kmp_int32 incr, kmp_int32 chunk) {
969 __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
970 pupper, pupperD, pstride, incr,
971 chunk OMPT_CODEPTR_ARG);
972}
973
974/*!
975 See @ref __kmpc_dist_for_static_init_4
976 */
977void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
978 kmp_int32 schedule, kmp_int32 *plastiter,
979 kmp_uint32 *plower, kmp_uint32 *pupper,
980 kmp_uint32 *pupperD, kmp_int32 *pstride,
981 kmp_int32 incr, kmp_int32 chunk) {
982 __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
983 pupper, pupperD, pstride, incr,
984 chunk OMPT_CODEPTR_ARG);
985}
986
987/*!
988 See @ref __kmpc_dist_for_static_init_4
989 */
990void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
991 kmp_int32 schedule, kmp_int32 *plastiter,
992 kmp_int64 *plower, kmp_int64 *pupper,
993 kmp_int64 *pupperD, kmp_int64 *pstride,
994 kmp_int64 incr, kmp_int64 chunk) {
995 __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
996 pupper, pupperD, pstride, incr,
997 chunk OMPT_CODEPTR_ARG);
998}
999
1000/*!
1001 See @ref __kmpc_dist_for_static_init_4
1002 */
1003void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
1004 kmp_int32 schedule, kmp_int32 *plastiter,
1005 kmp_uint64 *plower, kmp_uint64 *pupper,
1006 kmp_uint64 *pupperD, kmp_int64 *pstride,
1007 kmp_int64 incr, kmp_int64 chunk) {
1008 __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
1009 pupper, pupperD, pstride, incr,
1010 chunk OMPT_CODEPTR_ARG);
1011}
1012/*!
1013@}
1014*/
1015
1016//------------------------------------------------------------------------------
1017// Auxiliary routines for Distribute Parallel Loop construct implementation
1018// Transfer call to template< type T >
1019// __kmp_team_static_init( ident_t *loc, int gtid,
1020// int *p_last, T *lb, T *ub, ST *st, ST incr, ST chunk )
1021
1022/*!
1023@ingroup WORK_SHARING
1024@{
1025@param loc Source location
1026@param gtid Global thread id
1027@param p_last pointer to last iteration flag
1028@param p_lb pointer to Lower bound
1029@param p_ub pointer to Upper bound
1030@param p_st Step (or increment if you prefer)
1031@param incr Loop increment
1032@param chunk The chunk size to block with
1033
1034The functions compute the upper and lower bounds and stride to be used for the
1035set of iterations to be executed by the current team from the statically
1036scheduled loop that is described by the initial values of the bounds, stride,
1037increment and chunk for the distribute construct as part of composite distribute
1038parallel loop construct. These functions are all identical apart from the types
1039of the arguments.
1040*/
1041
1042void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
1043 kmp_int32 *p_lb, kmp_int32 *p_ub,
1044 kmp_int32 *p_st, kmp_int32 incr,
1045 kmp_int32 chunk) {
1046 KMP_DEBUG_ASSERT(__kmp_init_serial);
1047 __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
1048 chunk);
1049}
1050
1051/*!
1052 See @ref __kmpc_team_static_init_4
1053 */
1054void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
1055 kmp_uint32 *p_lb, kmp_uint32 *p_ub,
1056 kmp_int32 *p_st, kmp_int32 incr,
1057 kmp_int32 chunk) {
1058 KMP_DEBUG_ASSERT(__kmp_init_serial);
1059 __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
1060 chunk);
1061}
1062
1063/*!
1064 See @ref __kmpc_team_static_init_4
1065 */
1066void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
1067 kmp_int64 *p_lb, kmp_int64 *p_ub,
1068 kmp_int64 *p_st, kmp_int64 incr,
1069 kmp_int64 chunk) {
1070 KMP_DEBUG_ASSERT(__kmp_init_serial);
1071 __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
1072 chunk);
1073}
1074
1075/*!
1076 See @ref __kmpc_team_static_init_4
1077 */
1078void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
1079 kmp_uint64 *p_lb, kmp_uint64 *p_ub,
1080 kmp_int64 *p_st, kmp_int64 incr,
1081 kmp_int64 chunk) {
1082 KMP_DEBUG_ASSERT(__kmp_init_serial);
1: Assuming '__kmp_init_serial' is not equal to 0
2: Taking false branch
1083 __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
3: Calling '__kmp_team_static_init<unsigned long long>'
1084 chunk);
1085}
1086/*!
1087@}
1088*/
1089
1090} // extern "C"