File: build/source/openmp/runtime/src/kmp_affinity.cpp
Warning: line 3645, column 5: Division by zero
/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
#include <ctype.h>

// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}
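
// Usage sketch: the catalog string is the localized, human-readable name
// used in runtime messages. Assuming the default en_US message catalog,
// __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true) resolves to
// "Cores" and __kmp_hw_get_catalog_string(KMP_HW_SOCKET, false) to "Socket".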

const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}
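
// Usage sketch: unlike the localized catalog strings above, these keywords
// are the stable spellings understood in environment variables, e.g.
// KMP_AFFINITY=granularity=core, and they are what the KMP_HW_SUBSET parser
// matches against (a value such as KMP_HW_SUBSET=2s,4c,2t abbreviates the
// "sockets", "cores", and "threads" keywords).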

const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  return "unknown";
}

#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
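
// Expansion sketch: with affinity support compiled in, a call such as
//   KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric, "tile");
// emits the warning only when verbose output is on, or when warnings are
// enabled and the affinity type is not "none"; otherwise it is silent.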

////////////////////////////////////////////////////////////////////////////////
// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
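
// Usage sketch (assuming a populated __kmp_topology): the comparator is
// qsort-compatible and orders hardware threads lexicographically by their
// per-level ids, breaking ties with the OS proc id, e.g.:
//   qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
//         kmp_hw_thread_t::compare_ids);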

#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
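
// Worked example: with depth = 3 (socket x core x thread) and compact = 1,
// the first loop compares only the innermost level (j = 2, threads) and the
// second loop then compares sub_ids[0] (sockets) and sub_ids[1] (cores), so
// the last `compact` levels become the most significant sort keys. This is
// the reordering behind the compact-style affinity placements.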

void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}

////////////////////////////////////////////////////////////////////////////////
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent)
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer
  // If new layer is equal to another layer then put the new layer above
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer. And put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
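
// Worked example: for a core x thread topology over four hardware threads
// with core ids {0, 0, 1, 1}, inserting a layer with ids = {0, 0, 0, 0}
// (say, a single NUMA domain) hits the "strictly above" case at layer 0:
// the core id changes from 0 to 1 while the new id repeats, so the layer
// is inserted at target_layer = 0, giving numa x core x thread.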

#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif

// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the indexes for the second (deeper) layer are the same,
      // e.g., all are zero, then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id from
      // the hw threads and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
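
// Worked example: on a machine detected as socket x L3 x core x thread
// where each socket contains exactly one L3, the (socket, L3) pair is
// radix-1. preference[KMP_HW_SOCKET] (110) beats preference[KMP_HW_L3] (70),
// so the L3 layer is removed and recorded as equivalent to socket; a later
// query through get_equivalent_type(KMP_HW_L3) still resolves.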

void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}

// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to the maximum efficiency,
            // the number of efficiencies is the maximum efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
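
// Worked example: for 2 sockets x 2 cores/socket x 2 threads/core this pass
// produces count = {2, 4, 8} (running totals per layer) and ratio = {2, 2, 2}
// (maximum children per parent). For one socket with 4 cores where only two
// cores are 2-way SMT (6 hw threads total), count = {1, 4, 6} while
// ratio = {1, 4, 2}.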

int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}

// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
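
// Worked example: the topology is uniform exactly when the product of the
// per-layer ratios reproduces the hardware thread total. For 2 x 2 x 2
// above, 2 * 2 * 2 == 8 == count[2], so uniform; for the 6-thread case,
// 1 * 4 * 2 == 8 != 6 == count[2], so the topology is non-uniform.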

// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
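
// Worked example: sub_ids renumber each layer contiguously from 0 relative
// to its parent, independent of the raw ids. With sorted core/thread ids of
// {3,0}, {3,1}, {7,0}, {7,1}, the four hardware threads receive
// sub_ids (core, thread) = (0,0), (0,1), (1,0), (1,1).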

void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}

kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
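
// Usage sketch (illustrative values): detection code describes the layer
// types from outermost to innermost, allocates, then fills in one
// kmp_hw_thread_t per OS processor before canonicalize() runs, e.g.:
//   kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
//   __kmp_topology = kmp_topology_t::allocate(nproc, 3, types);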

void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}

bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}

void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}

void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/socket
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS)
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}

#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = affinity.env_var;
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif
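
// Worked example: gran_levels counts how many innermost topology levels an
// affinity mask is allowed to span. With depth = 3 (socket, core, thread)
// and granularity=core, the loop takes one step (past the thread level)
// before reaching the core level, so gran_levels = 1 and masks are widened
// to cover whole cores.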

void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually Add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}

// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}
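
// Worked example (illustrative values): canonicalize(2, 4, 2, 8) on a
// machine with __kmp_xproc == 16 yields count = {2, 8, 16} and
// ratio = {2, 4, 2}; _discover_uniformity() then checks 2 * 4 * 2 == 16,
// so the explicit topology is uniform.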

// Represents running sub IDs for a single core attribute where
// attribute values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int last_level; // last level in topology to consider for sub_ids
  int sub_id[SIZE]; // The sub ID for a given attribute value
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
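
// Usage sketch (hypothetical indexer; the KMP_HW_SUBSET filtering below
// uses the same pattern): update() is called once per hardware thread, in
// topology order, and get_sub_id() then reports that thread's running index
// among threads sharing the same attribute value, e.g.:
//   struct core_type_indexer {
//     int operator()(const kmp_hw_thread_t &t) const {
//       return (int)t.attrs.get_core_type();
//     }
//   };
//   kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer>
//       core_type_sub_ids(core_level);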

static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
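
// Example outputs: for a core-type attribute this formats along the lines
// of "Intel Atom(R) processor Cores"; for an efficiency-only attribute,
// "Cores eff=0" (taking the catalog string for KMP_HW_CORE to be
// "Core"/"Cores"). These strings feed the KMP_HW_SUBSET warnings below.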

// Apply the KMP_HW_SUBSET environment variable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
1121 | __kmp_str_buf_free(&buf); | |||
1122 | return false; | |||
1123 | } | |||
1124 | } | |||
1125 | } | |||
1126 | } | |||
1127 | } | |||
1128 | } | |||
1129 | ||||
1130 | struct core_type_indexer { | |||
1131 | int operator()(const kmp_hw_thread_t &t) const { | |||
1132 | switch (t.attrs.get_core_type()) { | |||
1133 | #if KMP_ARCH_X860 || KMP_ARCH_X86_641 | |||
1134 | case KMP_HW_CORE_TYPE_ATOM: | |||
1135 | return 1; | |||
1136 | case KMP_HW_CORE_TYPE_CORE: | |||
1137 | return 2; | |||
1138 | #endif | |||
1139 | case KMP_HW_CORE_TYPE_UNKNOWN: | |||
1140 | return 0; | |||
1141 | } | |||
1142 | KMP_ASSERT(0)if (!(0)) { __kmp_debug_assert("0", "openmp/runtime/src/kmp_affinity.cpp" , 1142); }; | |||
1143 | return 0; | |||
1144 | } | |||
1145 | }; | |||
1146 | struct core_eff_indexer { | |||
1147 | int operator()(const kmp_hw_thread_t &t) const { | |||
1148 | return t.attrs.get_core_eff(); | |||
1149 | } | |||
1150 | }; | |||
1151 | ||||
1152 | kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids( | |||
1153 | core_level); | |||
1154 | kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS8, core_eff_indexer> core_eff_sub_ids( | |||
1155 | core_level); | |||
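// A self-contained sketch (hypothetical names, not the runtime's actual
// kmp_sub_ids_t API) of the idea behind the two counters declared above:
// walking hardware threads in topology-sorted order, every core receives a
// running index within its attribute bucket (core type or efficiency), so
// per-bucket num/offset requests from KMP_HW_SUBSET can be checked.
#include <vector>

struct thread_sketch {
  int core_id; // id of the owning core at the core level
  int bucket;  // attribute bucket, e.g. 0=unknown, 1=atom-like, 2=core-like
};

struct sub_id_counter_sketch {
  std::vector<int> seen; // cores assigned so far, per bucket
  int last_core = -1;
  int last_sub_id = -1;
  explicit sub_id_counter_sketch(int nbuckets) : seen(nbuckets, 0) {}
  // Call once per hardware thread, in topology-sorted order. Returns the
  // owning core's index among cores in the same attribute bucket.
  int update(const thread_sketch &t) {
    if (t.core_id != last_core) {     // first thread of a new core
      last_core = t.core_id;
      last_sub_id = seen[t.bucket]++; // hand out the bucket's next index
    }
    return last_sub_id; // sibling threads reuse their core's sub id
  }
};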
1156 | ||||
1157 | // Determine which hardware threads should be filtered. | |||
1158 | int num_filtered = 0; | |||
1159 | bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads)___kmp_allocate((sizeof(bool) * num_hw_threads), "openmp/runtime/src/kmp_affinity.cpp" , 1159); | |||
1160 | for (int i = 0; i < num_hw_threads; ++i) { | |||
1161 | kmp_hw_thread_t &hw_thread = hw_threads[i]; | |||
1162 | // Update type_sub_id | |||
1163 | if (using_core_types) | |||
1164 | core_type_sub_ids.update(hw_thread); | |||
1165 | if (using_core_effs) | |||
1166 | core_eff_sub_ids.update(hw_thread); | |||
1167 | ||||
1168 | // Check to see if this hardware thread should be filtered | |||
1169 | bool should_be_filtered = false; | |||
1170 | for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth; | |||
1171 | ++hw_subset_index) { | |||
1172 | const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index); | |||
1173 | int level = topology_levels[hw_subset_index]; | |||
1174 | if (level == -1) | |||
1175 | continue; | |||
1176 | if ((using_core_effs || using_core_types) && level == core_level) { | |||
1177 | // Look for the core attribute in KMP_HW_SUBSET which corresponds | |||
1178 | // to this hardware thread's core attribute. Use this num,offset plus | |||
1179 | // the running sub_id for the particular core attribute of this hardware | |||
1180 | // thread to determine if the hardware thread should be filtered or not. | |||
1181 | int attr_idx; | |||
1182 | kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type(); | |||
1183 | int core_eff = hw_thread.attrs.get_core_eff(); | |||
1184 | for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) { | |||
1185 | if (using_core_types && | |||
1186 | hw_subset_item.attr[attr_idx].get_core_type() == core_type) | |||
1187 | break; | |||
1188 | if (using_core_effs && | |||
1189 | hw_subset_item.attr[attr_idx].get_core_eff() == core_eff) | |||
1190 | break; | |||
1191 | } | |||
1192 | // This core attribute isn't in the KMP_HW_SUBSET so always filter it. | |||
1193 | if (attr_idx == hw_subset_item.num_attrs) { | |||
1194 | should_be_filtered = true; | |||
1195 | break; | |||
1196 | } | |||
1197 | int sub_id; | |||
1198 | int num = hw_subset_item.num[attr_idx]; | |||
1199 | int offset = hw_subset_item.offset[attr_idx]; | |||
1200 | if (using_core_types) | |||
1201 | sub_id = core_type_sub_ids.get_sub_id(hw_thread); | |||
1202 | else | |||
1203 | sub_id = core_eff_sub_ids.get_sub_id(hw_thread); | |||
1204 | if (sub_id < offset || | |||
1205 | (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) { | |||
1206 | should_be_filtered = true; | |||
1207 | break; | |||
1208 | } | |||
1209 | } else { | |||
1210 | int num = hw_subset_item.num[0]; | |||
1211 | int offset = hw_subset_item.offset[0]; | |||
1212 | if (hw_thread.sub_ids[level] < offset || | |||
1213 | (num != kmp_hw_subset_t::USE_ALL && | |||
1214 | hw_thread.sub_ids[level] >= offset + num)) { | |||
1215 | should_be_filtered = true; | |||
1216 | break; | |||
1217 | } | |||
1218 | } | |||
1219 | } | |||
1220 | // Collect filtering information | |||
1221 | filtered[i] = should_be_filtered; | |||
1222 | if (should_be_filtered) | |||
1223 | num_filtered++; | |||
1224 | } | |||
1225 | ||||
1226 | // One last check: we shouldn't allow filtering out the entire machine | |||
1227 | if (num_filtered == num_hw_threads) { | |||
1228 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered)if (__kmp_affinity.flags.verbose || (__kmp_affinity.flags.warnings && (__kmp_affinity.type != affinity_none))) { __kmp_msg (kmp_ms_warning, __kmp_msg_format(kmp_i18n_msg_AffHWSubsetAllFiltered ), __kmp_msg_null); }; | |||
1229 | __kmp_free(filtered)___kmp_free((filtered), "openmp/runtime/src/kmp_affinity.cpp" , 1229); | |||
1230 | return false; | |||
1231 | } | |||
1232 | ||||
1233 | // Apply the filter | |||
1234 | int new_index = 0; | |||
1235 | for (int i = 0; i < num_hw_threads; ++i) { | |||
1236 | if (!filtered[i]) { | |||
1237 | if (i != new_index) | |||
1238 | hw_threads[new_index] = hw_threads[i]; | |||
1239 | new_index++; | |||
1240 | } else { | |||
1241 | #if KMP_AFFINITY_SUPPORTED1 | |||
1242 | KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask)(__kmp_affin_fullMask)->clear(hw_threads[i].os_id); | |||
1243 | #endif | |||
1244 | __kmp_avail_proc--; | |||
1245 | } | |||
1246 | } | |||
1247 | ||||
1248 | KMP_DEBUG_ASSERT(new_index <= num_hw_threads)if (!(new_index <= num_hw_threads)) { __kmp_debug_assert("new_index <= num_hw_threads" , "openmp/runtime/src/kmp_affinity.cpp", 1248); }; | |||
1249 | num_hw_threads = new_index; | |||
1250 | ||||
1251 | // Post hardware subset canonicalization | |||
1252 | _gather_enumeration_information(); | |||
1253 | _discover_uniformity(); | |||
1254 | _set_globals(); | |||
1255 | _set_last_level_cache(); | |||
1256 | __kmp_free(filtered)___kmp_free((filtered), "openmp/runtime/src/kmp_affinity.cpp" , 1256); | |||
1257 | return true; | |||
1258 | } | |||
1259 | ||||
1260 | bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const { | |||
1261 | if (hw_level >= depth) | |||
1262 | return true; | |||
1263 | bool retval = true; | |||
1264 | const kmp_hw_thread_t &t1 = hw_threads[hwt1]; | |||
1265 | const kmp_hw_thread_t &t2 = hw_threads[hwt2]; | |||
1266 | for (int i = 0; i < (depth - hw_level); ++i) { | |||
1267 | if (t1.ids[i] != t2.ids[i]) | |||
1268 | return false; | |||
1269 | } | |||
1270 | return retval; | |||
1271 | } | |||
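// Worked example: with depth == 3 and levels (socket, core, thread),
// is_close(a, b, 1) compares ids[0..1], so two hardware threads are close
// iff they share both socket and core; is_close(a, b, 2) only requires the
// same socket, and any hw_level >= depth is trivially close.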
1272 | ||||
1273 | //////////////////////////////////////////////////////////////////////////////// | |||
1274 | ||||
1275 | #if KMP_AFFINITY_SUPPORTED1 | |||
1276 | class kmp_affinity_raii_t { | |||
1277 | kmp_affin_mask_t *mask; | |||
1278 | bool restored; | |||
1279 | ||||
1280 | public: | |||
1281 | kmp_affinity_raii_t() : restored(false) { | |||
1282 | KMP_CPU_ALLOC(mask)(mask = __kmp_affinity_dispatch->allocate_mask()); | |||
1283 | KMP_ASSERT(mask != NULL)if (!(mask != __null)) { __kmp_debug_assert("mask != NULL", "openmp/runtime/src/kmp_affinity.cpp" , 1283); }; | |||
1284 | __kmp_get_system_affinity(mask, TRUE)(mask)->get_system_affinity((!0)); | |||
1285 | } | |||
1286 | void restore() { | |||
1287 | __kmp_set_system_affinity(mask, TRUE)(mask)->set_system_affinity((!0)); | |||
1288 | KMP_CPU_FREE(mask)__kmp_affinity_dispatch->deallocate_mask(mask); | |||
1289 | restored = true; | |||
1290 | } | |||
1291 | ~kmp_affinity_raii_t() { | |||
1292 | if (!restored) { | |||
1293 | __kmp_set_system_affinity(mask, TRUE)(mask)->set_system_affinity((!0)); | |||
1294 | KMP_CPU_FREE(mask)__kmp_affinity_dispatch->deallocate_mask(mask); | |||
1295 | } | |||
1296 | } | |||
1297 | }; | |||
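// Usage sketch (this is how the apicid mapper later in this file uses the
// guard): the constructor snapshots the calling thread's system affinity;
// restore() reinstates it explicitly on the normal path, and the
// destructor covers every early-return path that skips restore().
//
//   kmp_affinity_raii_t previous_affinity; // snapshot on entry
//   // ... temporarily bind this thread to each proc and probe it ...
//   previous_affinity.restore(); // put the original mask back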
1298 | ||||
1299 | bool KMPAffinity::picked_api = false; | |||
1300 | ||||
1301 | void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n)___kmp_allocate((n), "openmp/runtime/src/kmp_affinity.cpp", 1301 ); } | |||
1302 | void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n)___kmp_allocate((n), "openmp/runtime/src/kmp_affinity.cpp", 1302 ); } | |||
1303 | void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p)___kmp_free((p), "openmp/runtime/src/kmp_affinity.cpp", 1303); } | |||
1304 | void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p)___kmp_free((p), "openmp/runtime/src/kmp_affinity.cpp", 1304); } | |||
1305 | void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n)___kmp_allocate((n), "openmp/runtime/src/kmp_affinity.cpp", 1305 ); } | |||
1306 | void KMPAffinity::operator delete(void *p) { __kmp_free(p)___kmp_free((p), "openmp/runtime/src/kmp_affinity.cpp", 1306); } | |||
1307 | ||||
1308 | void KMPAffinity::pick_api() { | |||
1309 | KMPAffinity *affinity_dispatch; | |||
1310 | if (picked_api) | |||
1311 | return; | |||
1312 | #if KMP_USE_HWLOC0 | |||
1313 | // Only use Hwloc if affinity isn't explicitly disabled and | |||
1314 | // user requests Hwloc topology method | |||
1315 | if (__kmp_affinity_top_method == affinity_top_method_hwloc && | |||
1316 | __kmp_affinity.type != affinity_disabled) { | |||
1317 | affinity_dispatch = new KMPHwlocAffinity(); | |||
1318 | } else | |||
1319 | #endif | |||
1320 | { | |||
1321 | affinity_dispatch = new KMPNativeAffinity(); | |||
1322 | } | |||
1323 | __kmp_affinity_dispatch = affinity_dispatch; | |||
1324 | picked_api = true; | |||
1325 | } | |||
1326 | ||||
1327 | void KMPAffinity::destroy_api() { | |||
1328 | if (__kmp_affinity_dispatch != NULL__null) { | |||
1329 | delete __kmp_affinity_dispatch; | |||
1330 | __kmp_affinity_dispatch = NULL__null; | |||
1331 | picked_api = false; | |||
1332 | } | |||
1333 | } | |||
1334 | ||||
1335 | #define KMP_ADVANCE_SCAN(scan) \ | |||
1336 | while (*scan != '\0') { \ | |||
1337 | scan++; \ | |||
1338 | } | |||
1339 | ||||
1340 | // Print the affinity mask to the character array in a pretty format. | |||
1341 | // The format is a comma separated list of non-negative integers or integer | |||
1342 | // ranges: e.g., 1,2,3-5,7,9-15 | |||
1343 | // The format can also be the string "{<empty>}" if no bits are set in mask | |||
1344 | char *__kmp_affinity_print_mask(char *buf, int buf_len, | |||
1345 | kmp_affin_mask_t *mask) { | |||
1346 | int start = 0, finish = 0, previous = 0; | |||
1347 | bool first_range; | |||
1348 | KMP_ASSERT(buf)if (!(buf)) { __kmp_debug_assert("buf", "openmp/runtime/src/kmp_affinity.cpp" , 1348); }; | |||
1349 | KMP_ASSERT(buf_len >= 40)if (!(buf_len >= 40)) { __kmp_debug_assert("buf_len >= 40" , "openmp/runtime/src/kmp_affinity.cpp", 1349); }; | |||
1350 | KMP_ASSERT(mask)if (!(mask)) { __kmp_debug_assert("mask", "openmp/runtime/src/kmp_affinity.cpp" , 1350); }; | |||
1351 | char *scan = buf; | |||
1352 | char *end = buf + buf_len - 1; | |||
1353 | ||||
1354 | // Check for empty set. | |||
1355 | if (mask->begin() == mask->end()) { | |||
1356 | KMP_SNPRINTFsnprintf(scan, end - scan + 1, "{<empty>}"); | |||
1357 | KMP_ADVANCE_SCAN(scan); | |||
1358 | KMP_ASSERT(scan <= end)if (!(scan <= end)) { __kmp_debug_assert("scan <= end", "openmp/runtime/src/kmp_affinity.cpp", 1358); }; | |||
1359 | return buf; | |||
1360 | } | |||
1361 | ||||
1362 | first_range = true; | |||
1363 | start = mask->begin(); | |||
1364 | while (1) { | |||
1365 | // Find next range | |||
1366 | // [start, previous] is inclusive range of contiguous bits in mask | |||
1367 | for (finish = mask->next(start), previous = start; | |||
1368 | finish == previous + 1 && finish != mask->end(); | |||
1369 | finish = mask->next(finish)) { | |||
1370 | previous = finish; | |||
1371 | } | |||
1372 | ||||
1373 | // The first range does not need a comma printed before it, but the rest | |||
1374 | // of the ranges do need a comma beforehand | |||
1375 | if (!first_range) { | |||
1376 | KMP_SNPRINTFsnprintf(scan, end - scan + 1, "%s", ","); | |||
1377 | KMP_ADVANCE_SCAN(scan); | |||
1378 | } else { | |||
1379 | first_range = false; | |||
1380 | } | |||
1381 | // Range with three or more contiguous bits in the affinity mask | |||
1382 | if (previous - start > 1) { | |||
1383 | KMP_SNPRINTFsnprintf(scan, end - scan + 1, "%u-%u", start, previous); | |||
1384 | } else { | |||
1385 | // Range with one or two contiguous bits in the affinity mask | |||
1386 | KMP_SNPRINTFsnprintf(scan, end - scan + 1, "%u", start); | |||
1387 | KMP_ADVANCE_SCAN(scan); | |||
1388 | if (previous - start > 0) { | |||
1389 | KMP_SNPRINTFsnprintf(scan, end - scan + 1, ",%u", previous); | |||
1390 | } | |||
1391 | } | |||
1392 | KMP_ADVANCE_SCAN(scan); | |||
1393 | // Start over with new start point | |||
1394 | start = finish; | |||
1395 | if (start == mask->end()) | |||
1396 | break; | |||
1397 | // Check for overflow | |||
1398 | if (end - scan < 2) | |||
1399 | break; | |||
1400 | } | |||
1401 | ||||
1402 | // Check for overflow | |||
1403 | KMP_ASSERT(scan <= end)if (!(scan <= end)) { __kmp_debug_assert("scan <= end", "openmp/runtime/src/kmp_affinity.cpp", 1403); }; | |||
1404 | return buf; | |||
1405 | } | |||
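// Illustrative output: for a mask with bits {0,1,2,4,6,7} set the buffer
// receives "0-2,4,6,7". Runs of three or more contiguous bits collapse to
// "lo-hi"; one- and two-bit runs are printed as individual numbers.
//   char buf[64]; // buf_len must be at least 40
//   __kmp_affinity_print_mask(buf, sizeof(buf), mask); // -> "0-2,4,6,7"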
1406 | #undef KMP_ADVANCE_SCAN | |||
1407 | ||||
1408 | // Print the affinity mask to the string buffer object in a pretty format | |||
1409 | // The format is a comma separated list of non-negative integers or integer | |||
1410 | // ranges: e.g., 1,2,3-5,7,9-15 | |||
1411 | // The format can also be the string "{<empty>}" if no bits are set in mask | |||
1412 | kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf, | |||
1413 | kmp_affin_mask_t *mask) { | |||
1414 | int start = 0, finish = 0, previous = 0; | |||
1415 | bool first_range; | |||
1416 | KMP_ASSERT(buf)if (!(buf)) { __kmp_debug_assert("buf", "openmp/runtime/src/kmp_affinity.cpp" , 1416); }; | |||
1417 | KMP_ASSERT(mask)if (!(mask)) { __kmp_debug_assert("mask", "openmp/runtime/src/kmp_affinity.cpp" , 1417); }; | |||
1418 | ||||
1419 | __kmp_str_buf_clear(buf); | |||
1420 | ||||
1421 | // Check for empty set. | |||
1422 | if (mask->begin() == mask->end()) { | |||
1423 | __kmp_str_buf_print(buf, "%s", "{<empty>}"); | |||
1424 | return buf; | |||
1425 | } | |||
1426 | ||||
1427 | first_range = true; | |||
1428 | start = mask->begin(); | |||
1429 | while (1) { | |||
1430 | // Find next range | |||
1431 | // [start, previous] is inclusive range of contiguous bits in mask | |||
1432 | for (finish = mask->next(start), previous = start; | |||
1433 | finish == previous + 1 && finish != mask->end(); | |||
1434 | finish = mask->next(finish)) { | |||
1435 | previous = finish; | |||
1436 | } | |||
1437 | ||||
1438 | // The first range does not need a comma printed before it, but the rest | |||
1439 | // of the ranges do need a comma beforehand | |||
1440 | if (!first_range) { | |||
1441 | __kmp_str_buf_print(buf, "%s", ","); | |||
1442 | } else { | |||
1443 | first_range = false; | |||
1444 | } | |||
1445 | // Range with three or more contiguous bits in the affinity mask | |||
1446 | if (previous - start > 1) { | |||
1447 | __kmp_str_buf_print(buf, "%u-%u", start, previous); | |||
1448 | } else { | |||
1449 | // Range with one or two contiguous bits in the affinity mask | |||
1450 | __kmp_str_buf_print(buf, "%u", start); | |||
1451 | if (previous - start > 0) { | |||
1452 | __kmp_str_buf_print(buf, ",%u", previous); | |||
1453 | } | |||
1454 | } | |||
1455 | // Start over with new start point | |||
1456 | start = finish; | |||
1457 | if (start == mask->end()) | |||
1458 | break; | |||
1459 | } | |||
1460 | return buf; | |||
1461 | } | |||
1462 | ||||
1463 | // Return (possibly empty) affinity mask representing the offline CPUs | |||
1464 | // Caller must free the mask | |||
1465 | kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() { | |||
1466 | kmp_affin_mask_t *offline; | |||
1467 | KMP_CPU_ALLOC(offline)(offline = __kmp_affinity_dispatch->allocate_mask()); | |||
1468 | KMP_CPU_ZERO(offline)(offline)->zero(); | |||
1469 | #if KMP_OS_LINUX1 | |||
1470 | int n, begin_cpu, end_cpu; | |||
1471 | kmp_safe_raii_file_t offline_file; | |||
1472 | auto skip_ws = [](FILE *f) { | |||
1473 | int c; | |||
1474 | do { | |||
1475 | c = fgetc(f); | |||
1476 | } while (isspace(c)); | |||
1477 | if (c != EOF(-1)) | |||
1478 | ungetc(c, f); | |||
1479 | }; | |||
1480 | // File contains CSV of integer ranges representing the offline CPUs | |||
1481 | // e.g., 1,2,4-7,9,11-15 | |||
1482 | int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r"); | |||
1483 | if (status != 0) | |||
1484 | return offline; | |||
1485 | while (!feof(offline_file)) { | |||
1486 | skip_ws(offline_file); | |||
1487 | n = fscanf(offline_file, "%d", &begin_cpu); | |||
1488 | if (n != 1) | |||
1489 | break; | |||
1490 | skip_ws(offline_file); | |||
1491 | int c = fgetc(offline_file); | |||
1492 | if (c == EOF(-1) || c == ',') { | |||
1493 | // Just single CPU | |||
1494 | end_cpu = begin_cpu; | |||
1495 | } else if (c == '-') { | |||
1496 | // Range of CPUs | |||
1497 | skip_ws(offline_file); | |||
1498 | n = fscanf(offline_file, "%d", &end_cpu); | |||
1499 | if (n != 1) | |||
1500 | break; | |||
1501 | skip_ws(offline_file); | |||
1502 | c = fgetc(offline_file); // skip ',' | |||
1503 | } else { | |||
1504 | // Syntax problem | |||
1505 | break; | |||
1506 | } | |||
1507 | // Ensure a valid range of CPUs | |||
1508 | if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 || | |||
1509 | end_cpu >= __kmp_xproc || begin_cpu > end_cpu) { | |||
1510 | continue; | |||
1511 | } | |||
1512 | // Insert [begin_cpu, end_cpu] into offline mask | |||
1513 | for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) { | |||
1514 | KMP_CPU_SET(cpu, offline)(offline)->set(cpu); | |||
1515 | } | |||
1516 | } | |||
1517 | #endif | |||
1518 | return offline; | |||
1519 | } | |||
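// Illustrative trace (assumed file contents): if the file holds "2,4-5\n"
// on a machine with __kmp_xproc == 8, the first fscanf reads begin_cpu=2,
// the following fgetc sees ',' so it is a single CPU and bit 2 is set;
// the next pass reads 4, sees '-', reads end_cpu=5, and sets bits 4 and 5.
// The returned mask is {2,4,5}; out-of-range entries are skipped silently.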
1520 | ||||
1521 | // Return the number of available procs | |||
1522 | int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) { | |||
1523 | int avail_proc = 0; | |||
1524 | KMP_CPU_ZERO(mask)(mask)->zero(); | |||
1525 | ||||
1526 | #if KMP_GROUP_AFFINITY0 | |||
1527 | ||||
1528 | if (__kmp_num_proc_groups > 1) { | |||
1529 | int group; | |||
1530 | KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL)if (!(__kmp_GetActiveProcessorCount != __null)) { __kmp_debug_assert ("__kmp_GetActiveProcessorCount != __null", "openmp/runtime/src/kmp_affinity.cpp" , 1530); }; | |||
1531 | for (group = 0; group < __kmp_num_proc_groups; group++) { | |||
1532 | int i; | |||
1533 | int num = __kmp_GetActiveProcessorCount(group); | |||
1534 | for (i = 0; i < num; i++) { | |||
1535 | KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask)(mask)->set(i + group * (8 * sizeof(DWORD_PTR))); | |||
1536 | avail_proc++; | |||
1537 | } | |||
1538 | } | |||
1539 | } else | |||
1540 | ||||
1541 | #endif /* KMP_GROUP_AFFINITY */ | |||
1542 | ||||
1543 | { | |||
1544 | int proc; | |||
1545 | kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus(); | |||
1546 | for (proc = 0; proc < __kmp_xproc; proc++) { | |||
1547 | // Skip offline CPUs | |||
1548 | if (KMP_CPU_ISSET(proc, offline_cpus)(offline_cpus)->is_set(proc)) | |||
1549 | continue; | |||
1550 | KMP_CPU_SET(proc, mask)(mask)->set(proc); | |||
1551 | avail_proc++; | |||
1552 | } | |||
1553 | KMP_CPU_FREE(offline_cpus)__kmp_affinity_dispatch->deallocate_mask(offline_cpus); | |||
1554 | } | |||
1555 | ||||
1556 | return avail_proc; | |||
1557 | } | |||
1558 | ||||
1559 | // All of the __kmp_affinity_create_*_map() routines should allocate the | |||
1560 | // internal topology object and set the layer ids for it. Each routine | |||
1561 | // returns a boolean on whether it was successful at doing so. | |||
1562 | kmp_affin_mask_t *__kmp_affin_fullMask = NULL__null; | |||
1563 | // Original mask is a subset of full mask in multiple processor groups topology | |||
1564 | kmp_affin_mask_t *__kmp_affin_origMask = NULL__null; | |||
1565 | ||||
1566 | #if KMP_USE_HWLOC0 | |||
1567 | static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) { | |||
1568 | #if HWLOC_API_VERSION >= 0x00020000 | |||
1569 | return hwloc_obj_type_is_cache(obj->type); | |||
1570 | #else | |||
1571 | return obj->type == HWLOC_OBJ_CACHE; | |||
1572 | #endif | |||
1573 | } | |||
1574 | ||||
1575 | // Returns KMP_HW_* type derived from HWLOC_* type | |||
1576 | static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) { | |||
1577 | ||||
1578 | if (__kmp_hwloc_is_cache_type(obj)) { | |||
1579 | if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION) | |||
1580 | return KMP_HW_UNKNOWN; | |||
1581 | switch (obj->attr->cache.depth) { | |||
1582 | case 1: | |||
1583 | return KMP_HW_L1; | |||
1584 | case 2: | |||
1585 | #if KMP_MIC_SUPPORTED((0 || 1) && (1 || 0)) | |||
1586 | if (__kmp_mic_type == mic3) { | |||
1587 | return KMP_HW_TILE; | |||
1588 | } | |||
1589 | #endif | |||
1590 | return KMP_HW_L2; | |||
1591 | case 3: | |||
1592 | return KMP_HW_L3; | |||
1593 | } | |||
1594 | return KMP_HW_UNKNOWN; | |||
1595 | } | |||
1596 | ||||
1597 | switch (obj->type) { | |||
1598 | case HWLOC_OBJ_PACKAGE: | |||
1599 | return KMP_HW_SOCKET; | |||
1600 | case HWLOC_OBJ_NUMANODE: | |||
1601 | return KMP_HW_NUMA; | |||
1602 | case HWLOC_OBJ_CORE: | |||
1603 | return KMP_HW_CORE; | |||
1604 | case HWLOC_OBJ_PU: | |||
1605 | return KMP_HW_THREAD; | |||
1606 | case HWLOC_OBJ_GROUP: | |||
1607 | #if HWLOC_API_VERSION >= 0x00020000 | |||
1608 | if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE) | |||
1609 | return KMP_HW_DIE; | |||
1610 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE) | |||
1611 | return KMP_HW_TILE; | |||
1612 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE) | |||
1613 | return KMP_HW_MODULE; | |||
1614 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP) | |||
1615 | return KMP_HW_PROC_GROUP; | |||
1616 | #endif | |||
1617 | return KMP_HW_UNKNOWN; | |||
1618 | #if HWLOC_API_VERSION >= 0x00020100 | |||
1619 | case HWLOC_OBJ_DIE: | |||
1620 | return KMP_HW_DIE; | |||
1621 | #endif | |||
1622 | } | |||
1623 | return KMP_HW_UNKNOWN; | |||
1624 | } | |||
1625 | ||||
1626 | // Returns the number of objects of type 'type' below 'obj' within the topology | |||
1627 | // tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is | |||
1628 | // HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET | |||
1629 | // object. | |||
1630 | static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj, | |||
1631 | hwloc_obj_type_t type) { | |||
1632 | int retval = 0; | |||
1633 | hwloc_obj_t first; | |||
1634 | for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type, | |||
1635 | obj->logical_index, type, 0); | |||
1636 | first != NULL__null && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, | |||
1637 | obj->type, first) == obj; | |||
1638 | first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type, | |||
1639 | first)) { | |||
1640 | ++retval; | |||
1641 | } | |||
1642 | return retval; | |||
1643 | } | |||
1644 | ||||
1645 | // This gets the sub_id for a lower object under a higher object in the | |||
1646 | // topology tree | |||
1647 | static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher, | |||
1648 | hwloc_obj_t lower) { | |||
1649 | hwloc_obj_t obj; | |||
1650 | hwloc_obj_type_t ltype = lower->type; | |||
1651 | int lindex = lower->logical_index - 1; | |||
1652 | int sub_id = 0; | |||
1653 | // Get the previous lower object | |||
1654 | obj = hwloc_get_obj_by_type(t, ltype, lindex); | |||
1655 | while (obj && lindex >= 0 && | |||
1656 | hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) { | |||
1657 | if (obj->userdata) { | |||
1658 | sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata)reinterpret_cast<kmp_intptr_t>(obj->userdata)); | |||
1659 | break; | |||
1660 | } | |||
1661 | sub_id++; | |||
1662 | lindex--; | |||
1663 | obj = hwloc_get_obj_by_type(t, ltype, lindex); | |||
1664 | } | |||
1665 | // store sub_id + 1 so that a sub_id of 0 is distinguishable from NULL | |||
1666 | lower->userdata = RCAST(void *, sub_id + 1)reinterpret_cast<void *>(sub_id + 1); | |||
1667 | return sub_id; | |||
1668 | } | |||
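// Note on the memoization above: a sibling's sub id s is cached in
// userdata as (void *)(s + 1) so that a legitimate sub id of 0 stays
// distinguishable from the NULL "not computed" state. Because objects are
// visited in logical order, the immediate predecessor is normally the one
// that hits the cache, and its stored value (s + 1) is exactly the current
// object's sub id, which is why the loop can assign it directly and break.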
1669 | ||||
1670 | static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) { | |||
1671 | kmp_hw_t type; | |||
1672 | int hw_thread_index, sub_id; | |||
1673 | int depth; | |||
1674 | hwloc_obj_t pu, obj, root, prev; | |||
1675 | kmp_hw_t types[KMP_HW_LAST]; | |||
1676 | hwloc_obj_type_t hwloc_types[KMP_HW_LAST]; | |||
1677 | ||||
1678 | hwloc_topology_t tp = __kmp_hwloc_topology; | |||
1679 | *msg_id = kmp_i18n_null; | |||
1680 | if (__kmp_affinity.flags.verbose) { | |||
1681 | KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY")__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffUsingHwloc , "KMP_AFFINITY"), __kmp_msg_null); | |||
1682 | } | |||
1683 | ||||
1684 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
1685 | // Hack to try and infer the machine topology using only the data | |||
1686 | // available from hwloc on the current thread, and __kmp_xproc. | |||
1687 | KMP_ASSERT(__kmp_affinity.type == affinity_none)if (!(__kmp_affinity.type == affinity_none)) { __kmp_debug_assert ("__kmp_affinity.type == affinity_none", "openmp/runtime/src/kmp_affinity.cpp" , 1687); }; | |||
1688 | // hwloc only guarantees existence of PU object, so check PACKAGE and CORE | |||
1689 | hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0); | |||
1690 | if (o != NULL__null) | |||
1691 | nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE); | |||
1692 | else | |||
1693 | nCoresPerPkg = 1; // no PACKAGE found | |||
1694 | o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0); | |||
1695 | if (o != NULL__null) | |||
1696 | __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU); | |||
1697 | else | |||
1698 | __kmp_nThreadsPerCore = 1; // no CORE found | |||
1699 | __kmp_ncores = __kmp_xproc / (__kmp_nThreadsPerCore ? __kmp_nThreadsPerCore : 1); // guard against a zero count | |||
1700 | if (nCoresPerPkg == 0) | |||
1701 | nCoresPerPkg = 1; // to prevent possible division by 0 | |||
1702 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; | |||
1703 | return true; | |||
1704 | } | |||
1705 | ||||
1706 | #if HWLOC_API_VERSION >= 0x00020400 | |||
1707 | // Handle multiple types of cores if they exist on the system | |||
1708 | int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0); | |||
1709 | ||||
1710 | typedef struct kmp_hwloc_cpukinds_info_t { | |||
1711 | int efficiency; | |||
1712 | kmp_hw_core_type_t core_type; | |||
1713 | hwloc_bitmap_t mask; | |||
1714 | } kmp_hwloc_cpukinds_info_t; | |||
1715 | kmp_hwloc_cpukinds_info_t *cpukinds = nullptr; | |||
1716 | ||||
1717 | if (nr_cpu_kinds > 0) { | |||
1718 | unsigned nr_infos; | |||
1719 | struct hwloc_info_s *infos; | |||
1720 | cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(___kmp_allocate((sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds ), "openmp/runtime/src/kmp_affinity.cpp", 1721) | |||
1721 | sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds)___kmp_allocate((sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds ), "openmp/runtime/src/kmp_affinity.cpp", 1721); | |||
1722 | for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) { | |||
1723 | cpukinds[idx].efficiency = -1; | |||
1724 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN; | |||
1725 | cpukinds[idx].mask = hwloc_bitmap_alloc(); | |||
1726 | if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask, | |||
1727 | &cpukinds[idx].efficiency, &nr_infos, &infos, | |||
1728 | 0) == 0) { | |||
1729 | for (unsigned i = 0; i < nr_infos; ++i) { | |||
1730 | if (__kmp_str_match("CoreType", 8, infos[i].name)) { | |||
1731 | #if KMP_ARCH_X860 || KMP_ARCH_X86_641 | |||
1732 | if (__kmp_str_match("IntelAtom", 9, infos[i].value)) { | |||
1733 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM; | |||
1734 | break; | |||
1735 | } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) { | |||
1736 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE; | |||
1737 | break; | |||
1738 | } | |||
1739 | #endif | |||
1740 | } | |||
1741 | } | |||
1742 | } | |||
1743 | } | |||
1744 | } | |||
1745 | #endif | |||
1746 | ||||
1747 | root = hwloc_get_root_obj(tp); | |||
1748 | ||||
1749 | // Figure out the depth and types in the topology | |||
1750 | depth = 0; | |||
1751 | pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin()); | |||
1752 | KMP_ASSERT(pu)if (!(pu)) { __kmp_debug_assert("pu", "openmp/runtime/src/kmp_affinity.cpp" , 1752); }; | |||
1753 | obj = pu; | |||
1754 | types[depth] = KMP_HW_THREAD; | |||
1755 | hwloc_types[depth] = obj->type; | |||
1756 | depth++; | |||
1757 | while (obj != root && obj != NULL__null) { | |||
1758 | obj = obj->parent; | |||
1759 | #if HWLOC_API_VERSION >= 0x00020000 | |||
1760 | if (obj->memory_arity) { | |||
1761 | hwloc_obj_t memory; | |||
1762 | for (memory = obj->memory_first_child; memory; | |||
1763 | memory = hwloc_get_next_child(tp, obj, memory)) { | |||
1764 | if (memory->type == HWLOC_OBJ_NUMANODE) | |||
1765 | break; | |||
1766 | } | |||
1767 | if (memory && memory->type == HWLOC_OBJ_NUMANODE) { | |||
1768 | types[depth] = KMP_HW_NUMA; | |||
1769 | hwloc_types[depth] = memory->type; | |||
1770 | depth++; | |||
1771 | } | |||
1772 | } | |||
1773 | #endif | |||
1774 | type = __kmp_hwloc_type_2_topology_type(obj); | |||
1775 | if (type != KMP_HW_UNKNOWN) { | |||
1776 | types[depth] = type; | |||
1777 | hwloc_types[depth] = obj->type; | |||
1778 | depth++; | |||
1779 | } | |||
1780 | } | |||
1781 | KMP_ASSERT(depth > 0)if (!(depth > 0)) { __kmp_debug_assert("depth > 0", "openmp/runtime/src/kmp_affinity.cpp" , 1781); }; | |||
1782 | ||||
1783 | // Get the order for the types correct | |||
1784 | for (int i = 0, j = depth - 1; i < j; ++i, --j) { | |||
1785 | hwloc_obj_type_t hwloc_temp = hwloc_types[i]; | |||
1786 | kmp_hw_t temp = types[i]; | |||
1787 | types[i] = types[j]; | |||
1788 | types[j] = temp; | |||
1789 | hwloc_types[i] = hwloc_types[j]; | |||
1790 | hwloc_types[j] = hwloc_temp; | |||
1791 | } | |||
1792 | ||||
1793 | // Allocate the data structure to be returned. | |||
1794 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); | |||
1795 | ||||
1796 | hw_thread_index = 0; | |||
1797 | pu = NULL__null; | |||
1798 | while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) { | |||
1799 | int index = depth - 1; | |||
1800 | bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(pu->os_index); | |||
1801 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index); | |||
1802 | if (included) { | |||
1803 | hw_thread.clear(); | |||
1804 | hw_thread.ids[index] = pu->logical_index; | |||
1805 | hw_thread.os_id = pu->os_index; | |||
1806 | // If multiple core types, then set that attribute for the hardware thread | |||
1807 | #if HWLOC_API_VERSION >= 0x00020400 | |||
1808 | if (cpukinds) { | |||
1809 | int cpukind_index = -1; | |||
1810 | for (int i = 0; i < nr_cpu_kinds; ++i) { | |||
1811 | if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) { | |||
1812 | cpukind_index = i; | |||
1813 | break; | |||
1814 | } | |||
1815 | } | |||
1816 | if (cpukind_index >= 0) { | |||
1817 | hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type); | |||
1818 | hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency); | |||
1819 | } | |||
1820 | } | |||
1821 | #endif | |||
1822 | index--; | |||
1823 | } | |||
1824 | obj = pu; | |||
1825 | prev = obj; | |||
1826 | while (obj != root && obj != NULL__null) { | |||
1827 | obj = obj->parent; | |||
1828 | #if HWLOC_API_VERSION >= 0x00020000 | |||
1829 | // NUMA Nodes are handled differently since they are not within the | |||
1830 | // parent/child structure anymore. They are separate children | |||
1831 | // of obj (memory_first_child points to first memory child) | |||
1832 | if (obj->memory_arity) { | |||
1833 | hwloc_obj_t memory; | |||
1834 | for (memory = obj->memory_first_child; memory; | |||
1835 | memory = hwloc_get_next_child(tp, obj, memory)) { | |||
1836 | if (memory->type == HWLOC_OBJ_NUMANODE) | |||
1837 | break; | |||
1838 | } | |||
1839 | if (memory && memory->type == HWLOC_OBJ_NUMANODE) { | |||
1840 | sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev); | |||
1841 | if (included) { | |||
1842 | hw_thread.ids[index] = memory->logical_index; | |||
1843 | hw_thread.ids[index + 1] = sub_id; | |||
1844 | index--; | |||
1845 | } | |||
1846 | prev = memory; | |||
1847 | } | |||
1848 | prev = obj; | |||
1849 | } | |||
1850 | #endif | |||
1851 | type = __kmp_hwloc_type_2_topology_type(obj); | |||
1852 | if (type != KMP_HW_UNKNOWN) { | |||
1853 | sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev); | |||
1854 | if (included) { | |||
1855 | hw_thread.ids[index] = obj->logical_index; | |||
1856 | hw_thread.ids[index + 1] = sub_id; | |||
1857 | index--; | |||
1858 | } | |||
1859 | prev = obj; | |||
1860 | } | |||
1861 | } | |||
1862 | if (included) | |||
1863 | hw_thread_index++; | |||
1864 | } | |||
1865 | ||||
1866 | #if HWLOC_API_VERSION >= 0x00020400 | |||
1867 | // Free the core types information | |||
1868 | if (cpukinds) { | |||
1869 | for (int idx = 0; idx < nr_cpu_kinds; ++idx) | |||
1870 | hwloc_bitmap_free(cpukinds[idx].mask); | |||
1871 | __kmp_free(cpukinds)___kmp_free((cpukinds), "openmp/runtime/src/kmp_affinity.cpp" , 1871); | |||
1872 | } | |||
1873 | #endif | |||
1874 | __kmp_topology->sort_ids(); | |||
1875 | return true; | |||
1876 | } | |||
1877 | #endif // KMP_USE_HWLOC | |||
1878 | ||||
1879 | // If we don't know how to retrieve the machine's processor topology, or | |||
1880 | // encounter an error in doing so, this routine is called to form a "flat" | |||
1881 | // mapping of os thread id's <-> processor id's. | |||
1882 | static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) { | |||
1883 | *msg_id = kmp_i18n_null; | |||
1884 | int depth = 3; | |||
1885 | kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD}; | |||
1886 | ||||
1887 | if (__kmp_affinity.flags.verbose) { | |||
1888 | KMP_INFORM(UsingFlatOS, "KMP_AFFINITY")__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_UsingFlatOS , "KMP_AFFINITY"), __kmp_msg_null); | |||
1889 | } | |||
1890 | ||||
1891 | // Even if __kmp_affinity.type == affinity_none, this routine might still | |||
1892 | // be called to set __kmp_ncores, as well as | |||
1893 | // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. | |||
1894 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
1895 | KMP_ASSERT(__kmp_affinity.type == affinity_none)if (!(__kmp_affinity.type == affinity_none)) { __kmp_debug_assert ("__kmp_affinity.type == affinity_none", "openmp/runtime/src/kmp_affinity.cpp" , 1895); }; | |||
1896 | __kmp_ncores = nPackages = __kmp_xproc; | |||
1897 | __kmp_nThreadsPerCore = nCoresPerPkg = 1; | |||
1898 | return true; | |||
1899 | } | |||
1900 | ||||
1901 | // When affinity is off, this routine will still be called to set | |||
1902 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. | |||
1903 | // Make sure all these vars are set correctly, and return now if affinity is | |||
1904 | // not enabled. | |||
1905 | __kmp_ncores = nPackages = __kmp_avail_proc; | |||
1906 | __kmp_nThreadsPerCore = nCoresPerPkg = 1; | |||
1907 | ||||
1908 | // Construct the data structure to be returned. | |||
1909 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); | |||
1910 | int avail_ct = 0; | |||
1911 | int i; | |||
1912 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask)for (i = (__kmp_affin_fullMask)->begin(); (int)i != (__kmp_affin_fullMask )->end(); i = (__kmp_affin_fullMask)->next(i)) { | |||
1913 | // Skip this proc if it is not included in the machine model. | |||
1914 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(i)) { | |||
1915 | continue; | |||
1916 | } | |||
1917 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct); | |||
1918 | hw_thread.clear(); | |||
1919 | hw_thread.os_id = i; | |||
1920 | hw_thread.ids[0] = i; | |||
1921 | hw_thread.ids[1] = 0; | |||
1922 | hw_thread.ids[2] = 0; | |||
1923 | avail_ct++; | |||
1924 | } | |||
1925 | if (__kmp_affinity.flags.verbose) { | |||
1926 | KMP_INFORM(OSProcToPackage, "KMP_AFFINITY")__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_OSProcToPackage , "KMP_AFFINITY"), __kmp_msg_null); | |||
1927 | } | |||
1928 | return true; | |||
1929 | } | |||
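// Resulting shape, illustratively: on a machine with 4 available procs the
// flat map is 4 "sockets" x 1 core x 1 thread, one entry per proc i with
// ids == (i, 0, 0) and os_id == i.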
1930 | ||||
1931 | #if KMP_GROUP_AFFINITY0 | |||
1932 | // If multiple Windows* OS processor groups exist, we can create a 2-level | |||
1933 | // topology map with the groups at level 0 and the individual procs at level 1. | |||
1934 | // This facilitates letting the threads float among all procs in a group, | |||
1935 | // if granularity=group (the default when there are multiple groups). | |||
1936 | static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) { | |||
1937 | *msg_id = kmp_i18n_null; | |||
1938 | int depth = 3; | |||
1939 | kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD}; | |||
1940 | const static size_t BITS_PER_GROUP = CHAR_BIT8 * sizeof(DWORD_PTR); | |||
1941 | ||||
1942 | if (__kmp_affinity.flags.verbose) { | |||
1943 | KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY")__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffWindowsProcGroupMap , "KMP_AFFINITY"), __kmp_msg_null); | |||
1944 | } | |||
1945 | ||||
1946 | // If we aren't affinity capable, then use flat topology | |||
1947 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
1948 | KMP_ASSERT(__kmp_affinity.type == affinity_none)if (!(__kmp_affinity.type == affinity_none)) { __kmp_debug_assert ("__kmp_affinity.type == affinity_none", "openmp/runtime/src/kmp_affinity.cpp" , 1948); }; | |||
1949 | nPackages = __kmp_num_proc_groups; | |||
1950 | __kmp_nThreadsPerCore = 1; | |||
1951 | __kmp_ncores = __kmp_xproc; | |||
1952 | nCoresPerPkg = __kmp_ncores / nPackages; // cores per package, not packages per core | |||
1953 | return true; | |||
1954 | } | |||
1955 | ||||
1956 | // Construct the data structure to be returned. | |||
1957 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); | |||
1958 | int avail_ct = 0; | |||
1959 | int i; | |||
1960 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask)for (i = (__kmp_affin_fullMask)->begin(); (int)i != (__kmp_affin_fullMask )->end(); i = (__kmp_affin_fullMask)->next(i)) { | |||
1961 | // Skip this proc if it is not included in the machine model. | |||
1962 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(i)) { | |||
1963 | continue; | |||
1964 | } | |||
1965 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++); | |||
1966 | hw_thread.clear(); | |||
1967 | hw_thread.os_id = i; | |||
1968 | hw_thread.ids[0] = i / BITS_PER_GROUP; | |||
1969 | hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP; | |||
1970 | } | |||
1971 | return true; | |||
1972 | } | |||
1973 | #endif /* KMP_GROUP_AFFINITY */ | |||
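// Worked example: with 64-bit DWORD_PTR, BITS_PER_GROUP == 8 * 8 == 64,
// so OS proc 70 lands in processor group 70 / 64 == 1 with in-group index
// 70 % 64 == 6; the group id becomes ids[0] and the in-group index fills
// both the core and thread levels of this 3-level map.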
1974 | ||||
1975 | #if KMP_ARCH_X860 || KMP_ARCH_X86_641 | |||
1976 | ||||
1977 | template <kmp_uint32 LSB, kmp_uint32 MSB> | |||
1978 | static inline unsigned __kmp_extract_bits(kmp_uint32 v) { | |||
1979 | const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB; | |||
1980 | const kmp_uint32 SHIFT_RIGHT = LSB; | |||
1981 | kmp_uint32 retval = v; | |||
1982 | retval <<= SHIFT_LEFT; | |||
1983 | retval >>= (SHIFT_LEFT + SHIFT_RIGHT); | |||
1984 | return retval; | |||
1985 | } | |||
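// Worked example for the extractor: __kmp_extract_bits<5, 7>(v) keeps bits
// 5..7 inclusive. For v == 0xE0 (binary 11100000): SHIFT_LEFT == 32-1-7
// == 24 discards everything above bit 7, and the combined right shift of
// 24 + 5 drops the bits below bit 5, leaving 0b111 == 7.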
1986 | ||||
1987 | static int __kmp_cpuid_mask_width(int count) { | |||
1988 | int r = 0; | |||
1989 | ||||
1990 | while ((1 << r) < count) | |||
1991 | ++r; | |||
1992 | return r; | |||
1993 | } | |||
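// __kmp_cpuid_mask_width(n) returns the smallest r with (1 << r) >= n,
// i.e. the number of APIC-id bits needed to encode n distinct ids:
//   width(1) == 0, width(2) == 1, width(6) == 3, width(8) == 3.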
1994 | ||||
1995 | class apicThreadInfo { | |||
1996 | public: | |||
1997 | unsigned osId; // param to __kmp_affinity_bind_thread | |||
1998 | unsigned apicId; // from cpuid after binding | |||
1999 | unsigned maxCoresPerPkg; // "" | |||
2000 | unsigned maxThreadsPerPkg; // "" | |||
2001 | unsigned pkgId; // inferred from above values | |||
2002 | unsigned coreId; // "" | |||
2003 | unsigned threadId; // "" | |||
2004 | }; | |||
2005 | ||||
2006 | static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a, | |||
2007 | const void *b) { | |||
2008 | const apicThreadInfo *aa = (const apicThreadInfo *)a; | |||
2009 | const apicThreadInfo *bb = (const apicThreadInfo *)b; | |||
2010 | if (aa->pkgId < bb->pkgId) | |||
2011 | return -1; | |||
2012 | if (aa->pkgId > bb->pkgId) | |||
2013 | return 1; | |||
2014 | if (aa->coreId < bb->coreId) | |||
2015 | return -1; | |||
2016 | if (aa->coreId > bb->coreId) | |||
2017 | return 1; | |||
2018 | if (aa->threadId < bb->threadId) | |||
2019 | return -1; | |||
2020 | if (aa->threadId > bb->threadId) | |||
2021 | return 1; | |||
2022 | return 0; | |||
2023 | } | |||
2024 | ||||
2025 | class kmp_cache_info_t { | |||
2026 | public: | |||
2027 | struct info_t { | |||
2028 | unsigned level, mask; | |||
2029 | }; | |||
2030 | kmp_cache_info_t() : depth(0) { get_leaf4_levels(); } | |||
2031 | size_t get_depth() const { return depth; } | |||
2032 | info_t &operator[](size_t index) { return table[index]; } | |||
2033 | const info_t &operator[](size_t index) const { return table[index]; } | |||
2034 | ||||
2035 | static kmp_hw_t get_topology_type(unsigned level) { | |||
2036 | KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL)if (!(level >= 1 && level <= MAX_CACHE_LEVEL)) { __kmp_debug_assert("level >= 1 && level <= MAX_CACHE_LEVEL" , "openmp/runtime/src/kmp_affinity.cpp", 2036); }; | |||
2037 | switch (level) { | |||
2038 | case 1: | |||
2039 | return KMP_HW_L1; | |||
2040 | case 2: | |||
2041 | return KMP_HW_L2; | |||
2042 | case 3: | |||
2043 | return KMP_HW_L3; | |||
2044 | } | |||
2045 | return KMP_HW_UNKNOWN; | |||
2046 | } | |||
2047 | ||||
2048 | private: | |||
2049 | static const int MAX_CACHE_LEVEL = 3; | |||
2050 | ||||
2051 | size_t depth; | |||
2052 | info_t table[MAX_CACHE_LEVEL]; | |||
2053 | ||||
2054 | void get_leaf4_levels() { | |||
2055 | unsigned level = 0; | |||
2056 | while (depth < MAX_CACHE_LEVEL) { | |||
2057 | unsigned cache_type, max_threads_sharing; | |||
2058 | unsigned cache_level, cache_mask_width; | |||
2059 | kmp_cpuid buf2; | |||
2060 | __kmp_x86_cpuid(4, level, &buf2); | |||
2061 | cache_type = __kmp_extract_bits<0, 4>(buf2.eax); | |||
2062 | if (!cache_type) | |||
2063 | break; | |||
2064 | // Skip instruction caches | |||
2065 | if (cache_type == 2) { | |||
2066 | level++; | |||
2067 | continue; | |||
2068 | } | |||
2069 | max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1; | |||
2070 | cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing); | |||
2071 | cache_level = __kmp_extract_bits<5, 7>(buf2.eax); | |||
2072 | table[depth].level = cache_level; | |||
2073 | table[depth].mask = ~((1u << cache_mask_width) - 1); // same bits as (-1) << width, without shifting a negative value | |||
2074 | depth++; | |||
2075 | level++; | |||
2076 | } | |||
2077 | } | |||
2078 | }; | |||
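// Worked example for the table built above: if cpuid(4) reports
// max_threads_sharing == 2 for the L1 entry, cache_mask_width == 1 and
// the stored mask is ~1 (every bit except bit 0), so two APIC ids that
// differ only in the lowest bit share that L1; an L3 shared by up to 16
// threads would get mask ~0xF instead.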
2079 | ||||
2080 | // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use | |||
2081 | // an algorithm which cycles through the available os threads, binding | |||
2082 | // the current thread to each of them in turn, and then retrieves | |||
2083 | // the Apic Id for each thread context using the cpuid instruction. | |||
2084 | static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) { | |||
2085 | kmp_cpuid buf; | |||
2086 | *msg_id = kmp_i18n_null; | |||
2087 | ||||
2088 | if (__kmp_affinity.flags.verbose) { | |||
2089 | KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC))__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffInfoStr , "KMP_AFFINITY", __kmp_i18n_catgets(kmp_i18n_str_DecodingLegacyAPIC )), __kmp_msg_null); | |||
2090 | } | |||
2091 | ||||
2092 | // Check if cpuid leaf 4 is supported. | |||
2093 | __kmp_x86_cpuid(0, 0, &buf); | |||
2094 | if (buf.eax < 4) { | |||
2095 | *msg_id = kmp_i18n_str_NoLeaf4Support; | |||
2096 | return false; | |||
2097 | } | |||
2098 | ||||
2099 | // The algorithm used starts by setting the affinity to each available thread | |||
2100 | // and retrieving info from the cpuid instruction, so if we are not capable of | |||
2101 | // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we | |||
2102 | // need to do something else - use the defaults that we calculated from | |||
2103 | // issuing cpuid without binding to each proc. | |||
2104 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
2105 | // Hack to try and infer the machine topology using only the data | |||
2106 | // available from cpuid on the current thread, and __kmp_xproc. | |||
2107 | KMP_ASSERT(__kmp_affinity.type == affinity_none)if (!(__kmp_affinity.type == affinity_none)) { __kmp_debug_assert ("__kmp_affinity.type == affinity_none", "openmp/runtime/src/kmp_affinity.cpp" , 2107); }; | |||
2108 | ||||
2109 | // Get an upper bound on the number of threads per package using cpuid(1). | |||
2110 | // On some OS/chip combinations where HT is supported by the chip but is | |||
2111 | // disabled, this value will be 2 on a single core chip. Usually, it will be | |||
2112 | // 2 if HT is enabled and 1 if HT is disabled. | |||
2113 | __kmp_x86_cpuid(1, 0, &buf); | |||
2114 | int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff; | |||
2115 | if (maxThreadsPerPkg == 0) { | |||
2116 | maxThreadsPerPkg = 1; | |||
2117 | } | |||
2118 | ||||
2119 | // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded | |||
2120 | // value. | |||
2121 | // | |||
2122 | // The author of cpu_count.cpp treated this as only an upper bound on the | |||
2123 | // number of cores, but I haven't seen any cases where it was greater than | |||
2124 | // the actual number of cores, so we will treat it as exact in this block of | |||
2125 | // code. | |||
2126 | // | |||
2127 | // First, we need to check if cpuid(4) is supported on this chip. To see if | |||
2128 | // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or | |||
2129 | // greater. | |||
2130 | __kmp_x86_cpuid(0, 0, &buf); | |||
2131 | if (buf.eax >= 4) { | |||
2132 | __kmp_x86_cpuid(4, 0, &buf); | |||
2133 | nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1; | |||
2134 | } else { | |||
2135 | nCoresPerPkg = 1; | |||
2136 | } | |||
2137 | ||||
2138 | // There is no way to reliably tell if HT is enabled without issuing the | |||
2139 | // cpuid instruction from every thread and correlating the cpuid info, so | |||
2140 | // if the machine is not affinity capable, we assume that HT is off. We have | |||
2141 | // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine | |||
2142 | // does not support HT. | |||
2143 | // | |||
2144 | // - Older OSes are usually found on machines with older chips, which do not | |||
2145 | // support HT. | |||
2146 | // - The performance penalty for mistakenly identifying a machine as HT when | |||
2147 | // it isn't (which results in blocktime being incorrectly set to 0) is | |||
2148 | // greater than the penalty for mistakenly identifying a machine as | |||
2149 | // being 1 thread/core when it is really HT enabled (which results in | |||
2150 | // blocktime being incorrectly set to a positive value). | |||
2151 | __kmp_ncores = __kmp_xproc; | |||
2152 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; | |||
2153 | __kmp_nThreadsPerCore = 1; | |||
2154 | return true; | |||
2155 | } | |||
2156 | ||||
2157 | // From here on, we can assume that it is safe to call | |||
2158 | // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if | |||
2159 | // __kmp_affinity.type = affinity_none. | |||
2160 | ||||
2161 | // Save the affinity mask for the current thread. | |||
2162 | kmp_affinity_raii_t previous_affinity; | |||
2163 | ||||
2164 | // Run through each of the available contexts, binding the current thread | |||
2165 | // to it, and obtaining the pertinent information using the cpuid instr. | |||
2166 | // | |||
2167 | // The relevant information is: | |||
2168 | // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context | |||
2169 | // has a unique Apic Id, which is of the form pkg# : core# : thread#. | |||
2170 | // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value | |||
2171 | // of this field determines the width of the core# + thread# fields in the | |||
2172 | // Apic Id. It is also an upper bound on the number of threads per | |||
2173 | // package, but it has been verified that situations happen where it is not | |||
2174 | // exact. In particular, on certain OS/chip combinations where Intel(R) | |||
2175 | // Hyper-Threading Technology is supported by the chip but has been | |||
2176 | // disabled, the value of this field will be 2 (for a single core chip). | |||
2177 | // On other OS/chip combinations supporting Intel(R) Hyper-Threading | |||
2178 | // Technology, the value of this field will be 1 when Intel(R) | |||
2179 | // Hyper-Threading Technology is disabled and 2 when it is enabled. | |||
2180 | // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value | |||
2181 | // of this field (+1) determines the width of the core# field in the Apic | |||
2182 | // Id. The comments in "cpucount.cpp" say that this value is an upper | |||
2183 | // bound, but the IA-32 architecture manual says that it is exactly the | |||
2184 | // number of cores per package, and I haven't seen any case where it | |||
2185 | // wasn't. | |||
2186 | // | |||
2187 | // From this information, deduce the package Id, core Id, and thread Id, | |||
2188 | // and set the corresponding fields in the apicThreadInfo struct. | |||
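// Worked example of the decomposition described above: with
// maxThreadsPerPkg == 16 (widthCT == 4) and maxCoresPerPkg == 8
// (widthC == 3, hence widthT == 1), an apicId of 0x2D (binary 101101)
// splits as:
//   pkgId    = 0x2D >> 4       == 2
//   coreId   = (0x2D >> 1) & 7 == 6
//   threadId = 0x2D & 1        == 1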
2189 | unsigned i; | |||
2190 | apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(___kmp_allocate((__kmp_avail_proc * sizeof(apicThreadInfo)), "openmp/runtime/src/kmp_affinity.cpp" , 2191) | |||
2191 | __kmp_avail_proc * sizeof(apicThreadInfo))___kmp_allocate((__kmp_avail_proc * sizeof(apicThreadInfo)), "openmp/runtime/src/kmp_affinity.cpp" , 2191); | |||
2192 | unsigned nApics = 0; | |||
2193 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask)for (i = (__kmp_affin_fullMask)->begin(); (int)i != (__kmp_affin_fullMask )->end(); i = (__kmp_affin_fullMask)->next(i)) { | |||
2194 | // Skip this proc if it is not included in the machine model. | |||
2195 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(i)) { | |||
2196 | continue; | |||
2197 | } | |||
2198 | KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc)if (!((int)nApics < __kmp_avail_proc)) { __kmp_debug_assert ("(int)nApics < __kmp_avail_proc", "openmp/runtime/src/kmp_affinity.cpp" , 2198); }; | |||
2199 | ||||
2200 | __kmp_affinity_dispatch->bind_thread(i); | |||
2201 | threadInfo[nApics].osId = i; | |||
2202 | ||||
2203 | // The apic id and max threads per pkg come from cpuid(1). | |||
2204 | __kmp_x86_cpuid(1, 0, &buf); | |||
2205 | if (((buf.edx >> 9) & 1) == 0) { | |||
2206 | __kmp_free(threadInfo)___kmp_free((threadInfo), "openmp/runtime/src/kmp_affinity.cpp" , 2206); | |||
2207 | *msg_id = kmp_i18n_str_ApicNotPresent; | |||
2208 | return false; | |||
2209 | } | |||
2210 | threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff; | |||
2211 | threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff; | |||
2212 | if (threadInfo[nApics].maxThreadsPerPkg == 0) { | |||
2213 | threadInfo[nApics].maxThreadsPerPkg = 1; | |||
2214 | } | |||
2215 | ||||
2216 | // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded | |||
2217 | // value. | |||
2218 | // | |||
2219 | // First, we need to check if cpuid(4) is supported on this chip. To see if | |||
2220 | // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n | |||
2221 | // or greater. | |||
2222 | __kmp_x86_cpuid(0, 0, &buf); | |||
2223 | if (buf.eax >= 4) { | |||
2224 | __kmp_x86_cpuid(4, 0, &buf); | |||
2225 | threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1; | |||
2226 | } else { | |||
2227 | threadInfo[nApics].maxCoresPerPkg = 1; | |||
2228 | } | |||
2229 | ||||
2230 | // Infer the pkgId / coreId / threadId using only the info obtained locally. | |||
2231 | int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg); | |||
2232 | threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT; | |||
2233 | ||||
2234 | int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg); | |||
2235 | int widthT = widthCT - widthC; | |||
2236 | if (widthT < 0) { | |||
2237 | // I've never seen this one happen, but I suppose it could, if the cpuid | |||
2238 | // instruction on a chip was really screwed up. Make sure to restore the | |||
2239 | // affinity mask before the tail call. | |||
2240 | __kmp_free(threadInfo)___kmp_free((threadInfo), "openmp/runtime/src/kmp_affinity.cpp" , 2240); | |||
2241 | *msg_id = kmp_i18n_str_InvalidCpuidInfo; | |||
2242 | return false; | |||
2243 | } | |||
2244 | ||||
2245 | int maskC = (1 << widthC) - 1; | |||
2246 | threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC; | |||
2247 | ||||
2248 | int maskT = (1 << widthT) - 1; | |||
2249 | threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT; | |||
2250 | ||||
2251 | nApics++; | |||
2252 | } | |||
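| // Worked example of this decode (illustrative values, not from any real | |||
| // chip): suppose cpuid(1) gives apicId = 0x1B and maxThreadsPerPkg = 8, | |||
| // and cpuid(4) gives maxCoresPerPkg = 4. Then: | |||
| //   widthCT = mask_width(8) = 3, so pkgId = 0x1B >> 3 = 3 | |||
| //   widthC = mask_width(4) = 2, widthT = 3 - 2 = 1, maskC = 0x3, maskT = 0x1 | |||
| //   coreId = (0x1B >> 1) & 0x3 = 1, threadId = 0x1B & 0x1 = 1 | |||
| // i.e. the proc with APIC id 0x1B is thread 1 of core 1 of package 3. | |||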
2253 | ||||
2254 | // We've collected all the info we need. | |||
2255 | // Restore the old affinity mask for this thread. | |||
2256 | previous_affinity.restore(); | |||
2257 | ||||
2258 | // Sort the threadInfo table by physical Id. | |||
2259 | qsort(threadInfo, nApics, sizeof(*threadInfo), | |||
2260 | __kmp_affinity_cmp_apicThreadInfo_phys_id); | |||
2261 | ||||
2262 | // The table is now sorted by pkgId / coreId / threadId, but we really don't | |||
2263 | // know the radix of any of the fields. pkgId's may be sparsely assigned among | |||
2264 | // the chips on a system. Although coreId's are usually assigned | |||
2265 | // [0 .. coresPerPkg-1] and threadId's are usually assigned | |||
2266 | // [0..threadsPerCore-1], we don't want to make any such assumptions. | |||
2267 | // | |||
2268 | // For that matter, we don't know what coresPerPkg and threadsPerCore (or the | |||
2269 | // total # packages) are at this point - we want to determine that now. We | |||
2270 | // only have an upper bound on the first two figures. | |||
2271 | // | |||
2272 | // We also perform a consistency check at this point: the values returned by | |||
2273 | // the cpuid instruction for any thread bound to a given package had better | |||
2274 | // return the same info for maxThreadsPerPkg and maxCoresPerPkg. | |||
2275 | nPackages = 1; | |||
2276 | nCoresPerPkg = 1; | |||
2277 | __kmp_nThreadsPerCore = 1; | |||
2278 | unsigned nCores = 1; | |||
2279 | ||||
2280 | unsigned pkgCt = 1; // to determine radii | |||
2281 | unsigned lastPkgId = threadInfo[0].pkgId; | |||
2282 | unsigned coreCt = 1; | |||
2283 | unsigned lastCoreId = threadInfo[0].coreId; | |||
2284 | unsigned threadCt = 1; | |||
2285 | unsigned lastThreadId = threadInfo[0].threadId; | |||
2286 | ||||
2287 | // intra-pkg consistency checks | |||
2288 | unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg; | |||
2289 | unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg; | |||
2290 | ||||
2291 | for (i = 1; i < nApics; i++) { | |||
2292 | if (threadInfo[i].pkgId != lastPkgId) { | |||
2293 | nCores++; | |||
2294 | pkgCt++; | |||
2295 | lastPkgId = threadInfo[i].pkgId; | |||
2296 | if ((int)coreCt > nCoresPerPkg) | |||
2297 | nCoresPerPkg = coreCt; | |||
2298 | coreCt = 1; | |||
2299 | lastCoreId = threadInfo[i].coreId; | |||
2300 | if ((int)threadCt > __kmp_nThreadsPerCore) | |||
2301 | __kmp_nThreadsPerCore = threadCt; | |||
2302 | threadCt = 1; | |||
2303 | lastThreadId = threadInfo[i].threadId; | |||
2304 | ||||
2305 | // This is a different package, so go on to the next iteration without | |||
2306 | // doing any consistency checks. Reset the consistency check vars, though. | |||
2307 | prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg; | |||
2308 | prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg; | |||
2309 | continue; | |||
2310 | } | |||
2311 | ||||
2312 | if (threadInfo[i].coreId != lastCoreId) { | |||
2313 | nCores++; | |||
2314 | coreCt++; | |||
2315 | lastCoreId = threadInfo[i].coreId; | |||
2316 | if ((int)threadCt > __kmp_nThreadsPerCore) | |||
2317 | __kmp_nThreadsPerCore = threadCt; | |||
2318 | threadCt = 1; | |||
2319 | lastThreadId = threadInfo[i].threadId; | |||
2320 | } else if (threadInfo[i].threadId != lastThreadId) { | |||
2321 | threadCt++; | |||
2322 | lastThreadId = threadInfo[i].threadId; | |||
2323 | } else { | |||
2324 | __kmp_free(threadInfo); | |||
2325 | *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique; | |||
2326 | return false; | |||
2327 | } | |||
2328 | ||||
2329 | // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg | |||
2330 | // fields agree for all the threads bound to a given package. | |||
2331 | if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) || | |||
2332 | (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) { | |||
2333 | __kmp_free(threadInfo); | |||
2334 | *msg_id = kmp_i18n_str_InconsistentCpuidInfo; | |||
2335 | return false; | |||
2336 | } | |||
2337 | } | |||
2338 | // When affinity is off, this routine will still be called to set | |||
2339 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. | |||
2340 | // Make sure all these vars are set correctly | |||
2341 | nPackages = pkgCt; | |||
2342 | if ((int)coreCt > nCoresPerPkg) | |||
2343 | nCoresPerPkg = coreCt; | |||
2344 | if ((int)threadCt > __kmp_nThreadsPerCore) | |||
2345 | __kmp_nThreadsPerCore = threadCt; | |||
2346 | __kmp_ncores = nCores; | |||
2347 | KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc); | |||
2348 | ||||
2349 | // Now that we've determined the number of packages, the number of cores per | |||
2350 | // package, and the number of threads per core, we can construct the data | |||
2351 | // structure that is to be returned. | |||
2352 | int idx = 0; | |||
2353 | int pkgLevel = 0; | |||
2354 | int coreLevel = 1; | |||
2355 | int threadLevel = 2; | |||
2356 | //(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1); | |||
2357 | int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0); | |||
2358 | kmp_hw_t types[3]; | |||
2359 | if (pkgLevel >= 0) | |||
2360 | types[idx++] = KMP_HW_SOCKET; | |||
2361 | if (coreLevel >= 0) | |||
2362 | types[idx++] = KMP_HW_CORE; | |||
2363 | if (threadLevel >= 0) | |||
2364 | types[idx++] = KMP_HW_THREAD; | |||
2365 | ||||
2366 | KMP_ASSERT(depth > 0); | |||
2367 | __kmp_topology = kmp_topology_t::allocate(nApics, depth, types); | |||
2368 | ||||
2369 | for (i = 0; i < nApics; ++i) { | |||
2370 | idx = 0; | |||
2371 | unsigned os = threadInfo[i].osId; | |||
2372 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(i); | |||
2373 | hw_thread.clear(); | |||
2374 | ||||
2375 | if (pkgLevel >= 0) { | |||
2376 | hw_thread.ids[idx++] = threadInfo[i].pkgId; | |||
2377 | } | |||
2378 | if (coreLevel >= 0) { | |||
2379 | hw_thread.ids[idx++] = threadInfo[i].coreId; | |||
2380 | } | |||
2381 | if (threadLevel >= 0) { | |||
2382 | hw_thread.ids[idx++] = threadInfo[i].threadId; | |||
2383 | } | |||
2384 | hw_thread.os_id = os; | |||
2385 | } | |||
2386 | ||||
2387 | __kmp_free(threadInfo); | |||
2388 | __kmp_topology->sort_ids(); | |||
2389 | if (!__kmp_topology->check_ids()) { | |||
2390 | kmp_topology_t::deallocate(__kmp_topology); | |||
2391 | __kmp_topology = nullptr; | |||
2392 | *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique; | |||
2393 | return false; | |||
2394 | } | |||
2395 | return true; | |||
2396 | } | |||
2397 | ||||
2398 | // Hybrid cpu detection using CPUID.1A | |||
2399 | // Thread should be pinned to processor already | |||
2400 | static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency, | |||
2401 | unsigned *native_model_id) { | |||
2402 | kmp_cpuid buf; | |||
2403 | __kmp_x86_cpuid(0x1a, 0, &buf); | |||
2404 | *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax); | |||
2405 | switch (*type) { | |||
2406 | case KMP_HW_CORE_TYPE_ATOM: | |||
2407 | *efficiency = 0; | |||
2408 | break; | |||
2409 | case KMP_HW_CORE_TYPE_CORE: | |||
2410 | *efficiency = 1; | |||
2411 | break; | |||
2412 | default: | |||
2413 | *efficiency = 0; | |||
2414 | } | |||
2415 | *native_model_id = __kmp_extract_bits<0, 23>(buf.eax); | |||
2416 | } | |||
2417 | ||||
2418 | // Intel(R) microarchitecture code name Nehalem, Dunnington and later | |||
2419 | // architectures support a newer interface for specifying the x2APIC Ids, | |||
2420 | // based on CPUID.B or CPUID.1F | |||
2421 | /* | |||
2422 | * CPUID.B or 1F, Input ECX (sub leaf # aka level number) | |||
2423 | Bits Bits Bits Bits | |||
2424 | 31-16 15-8 7-5 4-0 | |||
2425 | ---+-----------+--------------+-------------+-----------------+ | |||
2426 | EAX| reserved | reserved | reserved | Bits to Shift | | |||
2427 | ---+-----------+--------------+-------------+-----------------+ | |||
2428 | EBX| reserved | Num logical processors at level (16 bits) | | |||
2429 | ---+-----------+--------------+-------------------------------+ | |||
2430 | ECX| reserved | Level Type | Level Number (8 bits) | | |||
2431 | ---+-----------+--------------+-------------------------------+ | |||
2432 | EDX| X2APIC ID (32 bits) | | |||
2433 | ---+----------------------------------------------------------+ | |||
2434 | */ | |||
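| // A minimal sketch of reading one subleaf per the table above, using the | |||
| // same helpers this file uses below (the 0x0B leaf and "level" subleaf | |||
| // number are illustrative): | |||
| //   kmp_cpuid buf; | |||
| //   __kmp_x86_cpuid(0x0B, level, &buf); | |||
| //   unsigned shift = __kmp_extract_bits<0, 4>(buf.eax); // bits to shift | |||
| //   unsigned nlogical = __kmp_extract_bits<0, 15>(buf.ebx); // procs at level | |||
| //   unsigned level_type = __kmp_extract_bits<8, 15>(buf.ecx); // SMT/core/... | |||
| //   unsigned x2apic_id = buf.edx; | |||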
2435 | ||||
2436 | enum { | |||
2437 | INTEL_LEVEL_TYPE_INVALID = 0, // Package level | |||
2438 | INTEL_LEVEL_TYPE_SMT = 1, | |||
2439 | INTEL_LEVEL_TYPE_CORE = 2, | |||
2440 | INTEL_LEVEL_TYPE_MODULE = 3, | |||
2441 | INTEL_LEVEL_TYPE_TILE = 4, | |||
2442 | INTEL_LEVEL_TYPE_DIE = 5, | |||
2443 | INTEL_LEVEL_TYPE_LAST = 6, | |||
2444 | }; | |||
2445 | ||||
2446 | struct cpuid_level_info_t { | |||
2447 | unsigned level_type, mask, mask_width, nitems, cache_mask; | |||
2448 | }; | |||
2449 | ||||
2450 | static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) { | |||
2451 | switch (intel_type) { | |||
2452 | case INTEL_LEVEL_TYPE_INVALID: | |||
2453 | return KMP_HW_SOCKET; | |||
2454 | case INTEL_LEVEL_TYPE_SMT: | |||
2455 | return KMP_HW_THREAD; | |||
2456 | case INTEL_LEVEL_TYPE_CORE: | |||
2457 | return KMP_HW_CORE; | |||
2458 | case INTEL_LEVEL_TYPE_TILE: | |||
2459 | return KMP_HW_TILE; | |||
2460 | case INTEL_LEVEL_TYPE_MODULE: | |||
2461 | return KMP_HW_MODULE; | |||
2462 | case INTEL_LEVEL_TYPE_DIE: | |||
2463 | return KMP_HW_DIE; | |||
2464 | } | |||
2465 | return KMP_HW_UNKNOWN; | |||
2466 | } | |||
2467 | ||||
2468 | // This function takes the topology leaf, a levels array to store the levels | |||
2469 | // detected and a bitmap of the known levels. | |||
2470 | // Returns the number of levels in the topology | |||
2471 | static unsigned | |||
2472 | __kmp_x2apicid_get_levels(int leaf, | |||
2473 | cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST], | |||
2474 | kmp_uint64 known_levels) { | |||
2475 | unsigned level, levels_index; | |||
2476 | unsigned level_type, mask_width, nitems; | |||
2477 | kmp_cpuid buf; | |||
2478 | ||||
2479 | // The new algorithm lets each known topology layer absorb any unknown | |||
2480 | // topology layers directly above it, when unknown layers exist. | |||
2481 | // e.g., suppose the layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> | |||
2482 | // <Z> are unknown topology layers; then SMT takes the characteristics of | |||
2483 | // (SMT x <X>) and CORE takes the characteristics of (CORE x <Y> x <Z>). | |||
2484 | // This eliminates unknown portions of the topology while still keeping the | |||
2485 | // correct structure. | |||
2486 | level = levels_index = 0; | |||
2487 | do { | |||
2488 | __kmp_x86_cpuid(leaf, level, &buf); | |||
2489 | level_type = __kmp_extract_bits<8, 15>(buf.ecx); | |||
2490 | mask_width = __kmp_extract_bits<0, 4>(buf.eax); | |||
2491 | nitems = __kmp_extract_bits<0, 15>(buf.ebx); | |||
2492 | if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0) | |||
2493 | return 0; | |||
2494 | ||||
2495 | if (known_levels & (1ull << level_type)) { | |||
2496 | // Add a new level to the topology | |||
2497 | KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST); | |||
2498 | levels[levels_index].level_type = level_type; | |||
2499 | levels[levels_index].mask_width = mask_width; | |||
2500 | levels[levels_index].nitems = nitems; | |||
2501 | levels_index++; | |||
2502 | } else { | |||
2503 | // If it is an unknown level, then logically move the previous layer up | |||
2504 | if (levels_index > 0) { | |||
2505 | levels[levels_index - 1].mask_width = mask_width; | |||
2506 | levels[levels_index - 1].nitems = nitems; | |||
2507 | } | |||
2508 | } | |||
2509 | level++; | |||
2510 | } while (level_type != INTEL_LEVEL_TYPE_INVALID); | |||
2511 | ||||
2512 | // Set the masks to & with apicid | |||
2513 | for (unsigned i = 0; i < levels_index; ++i) { | |||
2514 | if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) { | |||
2515 | levels[i].mask = ~((-1) << levels[i].mask_width); | |||
2516 | levels[i].cache_mask = (-1) << levels[i].mask_width; | |||
2517 | for (unsigned j = 0; j < i; ++j) | |||
2518 | levels[i].mask ^= levels[j].mask; | |||
2519 | } else { | |||
2520 | KMP_DEBUG_ASSERT(levels_index > 0); | |||
2521 | levels[i].mask = (-1) << levels[i - 1].mask_width; | |||
2522 | levels[i].cache_mask = 0; | |||
2523 | } | |||
2524 | } | |||
2525 | return levels_index; | |||
2526 | } | |||
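| // Worked example of the mask setup above (illustrative widths): with known | |||
| // levels SMT (mask_width = 1) and CORE (mask_width = 6), followed by the | |||
| // terminating INVALID (package) record: | |||
| //   SMT:  mask = ~(-1 << 1) = 0x01,        cache_mask = 0xfffffffe | |||
| //   CORE: mask = ~(-1 << 6) ^ 0x01 = 0x3e, cache_mask = 0xffffffc0 | |||
| //   PKG:  mask = (-1 << 6) = 0xffffffc0 (all bits above CORE), cache_mask = 0 | |||
| // so (apic_id & mask) isolates each level's id bits, as used further down. | |||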
2527 | ||||
2528 | static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) { | |||
2529 | ||||
2530 | cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST]; | |||
2531 | kmp_hw_t types[INTEL_LEVEL_TYPE_LAST]; | |||
2532 | unsigned levels_index; | |||
2533 | kmp_cpuid buf; | |||
2534 | kmp_uint64 known_levels; | |||
2535 | int topology_leaf, highest_leaf, apic_id; | |||
2536 | int num_leaves; | |||
2537 | static int leaves[] = {0, 0}; | |||
2538 | ||||
2539 | kmp_i18n_id_t leaf_message_id; | |||
2540 | ||||
2541 | KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST); | |||
2542 | ||||
2543 | *msg_id = kmp_i18n_null; | |||
2544 | if (__kmp_affinity.flags.verbose) { | |||
2545 | KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC)); | |||
2546 | } | |||
2547 | ||||
2548 | // Figure out the known topology levels | |||
2549 | known_levels = 0ull; | |||
2550 | for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) { | |||
2551 | if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) { | |||
2552 | known_levels |= (1ull << i); | |||
2553 | } | |||
2554 | } | |||
2555 | ||||
2556 | // Get the highest cpuid leaf supported | |||
2557 | __kmp_x86_cpuid(0, 0, &buf); | |||
2558 | highest_leaf = buf.eax; | |||
2559 | ||||
2560 | // If a specific topology method was requested, only allow that specific | |||
2561 | // leaf; otherwise, try leaves 31 and 11, in that order. | |||
2562 | num_leaves = 0; | |||
2563 | if (__kmp_affinity_top_method == affinity_top_method_x2apicid) { | |||
2564 | num_leaves = 1; | |||
2565 | leaves[0] = 11; | |||
2566 | leaf_message_id = kmp_i18n_str_NoLeaf11Support; | |||
2567 | } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) { | |||
2568 | num_leaves = 1; | |||
2569 | leaves[0] = 31; | |||
2570 | leaf_message_id = kmp_i18n_str_NoLeaf31Support; | |||
2571 | } else { | |||
2572 | num_leaves = 2; | |||
2573 | leaves[0] = 31; | |||
2574 | leaves[1] = 11; | |||
2575 | leaf_message_id = kmp_i18n_str_NoLeaf11Support; | |||
2576 | } | |||
2577 | ||||
2578 | // Check to see if cpuid leaf 31 or 11 is supported. | |||
2579 | __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1; | |||
2580 | topology_leaf = -1; | |||
2581 | for (int i = 0; i < num_leaves; ++i) { | |||
2582 | int leaf = leaves[i]; | |||
2583 | if (highest_leaf < leaf) | |||
2584 | continue; | |||
2585 | __kmp_x86_cpuid(leaf, 0, &buf); | |||
2586 | if (buf.ebx == 0) | |||
2587 | continue; | |||
2588 | topology_leaf = leaf; | |||
2589 | levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels); | |||
2590 | if (levels_index == 0) | |||
2591 | continue; | |||
2592 | break; | |||
2593 | } | |||
2594 | if (topology_leaf == -1 || levels_index == 0) { | |||
2595 | *msg_id = leaf_message_id; | |||
2596 | return false; | |||
2597 | } | |||
2598 | KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST); | |||
2599 | ||||
2600 | // The algorithm used starts by setting the affinity to each available thread | |||
2601 | // and retrieving info from the cpuid instruction, so if we are not capable of | |||
2602 | // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then | |||
2603 | // we need to do something else - use the defaults that we calculated from | |||
2604 | // issuing cpuid without binding to each proc. | |||
2605 | if (!KMP_AFFINITY_CAPABLE()) { | |||
2606 | // Hack to try and infer the machine topology using only the data | |||
2607 | // available from cpuid on the current thread, and __kmp_xproc. | |||
2608 | KMP_ASSERT(__kmp_affinity.type == affinity_none); | |||
2609 | for (unsigned i = 0; i < levels_index; ++i) { | |||
2610 | if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) { | |||
2611 | __kmp_nThreadsPerCore = levels[i].nitems; | |||
2612 | } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) { | |||
2613 | nCoresPerPkg = levels[i].nitems; | |||
2614 | } | |||
2615 | } | |||
2616 | __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore; | |||
2617 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; | |||
2618 | return true; | |||
2619 | } | |||
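| // Note: __kmp_nThreadsPerCore and nCoresPerPkg were initialized to 1 above, | |||
| // so neither division can be by zero, and the nPackages expression is a | |||
| // ceiling divide: e.g. (hypothetical) __kmp_xproc = 12 with nCoresPerPkg = 8 | |||
| // gives nPackages = (12 + 7) / 8 = 2, counting a partially used package. | |||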
2620 | ||||
2621 | // Allocate the data structure to be returned. | |||
2622 | int depth = levels_index; | |||
2623 | for (int i = depth - 1, j = 0; i >= 0; --i, ++j) | |||
2624 | types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type); | |||
2625 | __kmp_topology = | |||
2626 | kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types); | |||
2627 | ||||
2628 | // Insert equivalent cache types if they exist | |||
2629 | kmp_cache_info_t cache_info; | |||
2630 | for (size_t i = 0; i < cache_info.get_depth(); ++i) { | |||
2631 | const kmp_cache_info_t::info_t &info = cache_info[i]; | |||
2632 | unsigned cache_mask = info.mask; | |||
2633 | unsigned cache_level = info.level; | |||
2634 | for (unsigned j = 0; j < levels_index; ++j) { | |||
2635 | unsigned hw_cache_mask = levels[j].cache_mask; | |||
2636 | kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level); | |||
2637 | if (hw_cache_mask == cache_mask && j < levels_index - 1) { | |||
2638 | kmp_hw_t type = | |||
2639 | __kmp_intel_type_2_topology_type(levels[j + 1].level_type); | |||
2640 | __kmp_topology->set_equivalent_type(cache_type, type); | |||
2641 | } | |||
2642 | } | |||
2643 | } | |||
2644 | ||||
2645 | // From here on, we can assume that it is safe to call | |||
2646 | // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if | |||
2647 | // __kmp_affinity.type = affinity_none. | |||
2648 | ||||
2649 | // Save the affinity mask for the current thread. | |||
2650 | kmp_affinity_raii_t previous_affinity; | |||
2651 | ||||
2652 | // Run through each of the available contexts, binding the current thread | |||
2653 | // to it, and obtaining the pertinent information using the cpuid instr. | |||
2654 | unsigned int proc; | |||
2655 | int hw_thread_index = 0; | |||
2656 | KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) { | |||
2657 | cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST]; | |||
2658 | unsigned my_levels_index; | |||
2659 | ||||
2660 | // Skip this proc if it is not included in the machine model. | |||
2661 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { | |||
2662 | continue; | |||
2663 | } | |||
2664 | KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc); | |||
2665 | ||||
2666 | __kmp_affinity_dispatch->bind_thread(proc); | |||
2667 | ||||
2668 | // New algorithm | |||
2669 | __kmp_x86_cpuid(topology_leaf, 0, &buf); | |||
2670 | apic_id = buf.edx; | |||
2671 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index); | |||
2672 | my_levels_index = | |||
2673 | __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels); | |||
2674 | if (my_levels_index == 0 || my_levels_index != levels_index) { | |||
2675 | *msg_id = kmp_i18n_str_InvalidCpuidInfo; | |||
2676 | return false; | |||
2677 | } | |||
2678 | hw_thread.clear(); | |||
2679 | hw_thread.os_id = proc; | |||
2680 | // Put in topology information | |||
2681 | for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) { | |||
2682 | hw_thread.ids[idx] = apic_id & my_levels[j].mask; | |||
2683 | if (j > 0) { | |||
2684 | hw_thread.ids[idx] >>= my_levels[j - 1].mask_width; | |||
2685 | } | |||
2686 | } | |||
2687 | // Hybrid information | |||
2688 | if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) { | |||
2689 | kmp_hw_core_type_t type; | |||
2690 | unsigned native_model_id; | |||
2691 | int efficiency; | |||
2692 | __kmp_get_hybrid_info(&type, &efficiency, &native_model_id); | |||
2693 | hw_thread.attrs.set_core_type(type); | |||
2694 | hw_thread.attrs.set_core_eff(efficiency); | |||
2695 | } | |||
2696 | hw_thread_index++; | |||
2697 | } | |||
2698 | KMP_ASSERT(hw_thread_index > 0); | |||
2699 | __kmp_topology->sort_ids(); | |||
2700 | if (!__kmp_topology->check_ids()) { | |||
2701 | kmp_topology_t::deallocate(__kmp_topology); | |||
2702 | __kmp_topology = nullptr; | |||
2703 | *msg_id = kmp_i18n_str_x2ApicIDsNotUnique; | |||
2704 | return false; | |||
2705 | } | |||
2706 | return true; | |||
2707 | } | |||
2708 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ | |||
2709 | ||||
2710 | #define osIdIndex 0 | |||
2711 | #define threadIdIndex 1 | |||
2712 | #define coreIdIndex 2 | |||
2713 | #define pkgIdIndex 3 | |||
2714 | #define nodeIdIndex 4 | |||
2715 | ||||
2716 | typedef unsigned *ProcCpuInfo; | |||
2717 | static unsigned maxIndex = pkgIdIndex; | |||
2718 | ||||
2719 | static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a, | |||
2720 | const void *b) { | |||
2721 | unsigned i; | |||
2722 | const unsigned *aa = *(unsigned *const *)a; | |||
2723 | const unsigned *bb = *(unsigned *const *)b; | |||
2724 | for (i = maxIndex;; i--) { | |||
2725 | if (aa[i] < bb[i]) | |||
2726 | return -1; | |||
2727 | if (aa[i] > bb[i]) | |||
2728 | return 1; | |||
2729 | if (i == osIdIndex) | |||
2730 | break; | |||
2731 | } | |||
2732 | return 0; | |||
2733 | } | |||
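| // The comparator scans from the most significant field (highest node level, | |||
| // then pkgId, coreId, threadId) down to osIdIndex, so qsort yields pkg-major | |||
| // ordering. E.g., with hypothetical rows laid out as | |||
| // [osId, threadId, coreId, pkgId], {5, 1, 0, 0} sorts before {2, 0, 1, 0}: | |||
| // pkgId ties at 0, and coreId 0 < 1 decides before osId is ever compared. | |||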
2734 | ||||
2735 | #if KMP_USE_HIER_SCHED | |||
2736 | // Set the array sizes for the hierarchy layers | |||
2737 | static void __kmp_dispatch_set_hierarchy_values() { | |||
2738 | // Set the maximum number of L1's to the number of cores. | |||
2739 | // Set the maximum number of L2's to either the number of cores / 2 for | |||
2740 | // the Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing, | |||
2741 | // or the number of cores for Intel(R) Xeon(R) processors. | |||
2742 | // Set the maximum number of NUMA nodes and L3's to the number of packages. | |||
2743 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] = | |||
2744 | nPackages * nCoresPerPkg * __kmp_nThreadsPerCore; | |||
2745 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores; | |||
2746 | #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \ | |||
2747 | KMP_MIC_SUPPORTED | |||
2748 | if (__kmp_mic_type >= mic3) | |||
2749 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2; | |||
2750 | else | |||
2751 | #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) | |||
2752 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores; | |||
2753 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages; | |||
2754 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages; | |||
2755 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1; | |||
2756 | // Set the number of threads per unit | |||
2757 | // Number of hardware threads per L1/L2/L3/NUMA/LOOP | |||
2758 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1; | |||
2759 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] = | |||
2760 | __kmp_nThreadsPerCore; | |||
2761 | #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \ | |||
2762 | KMP_MIC_SUPPORTED | |||
2763 | if (__kmp_mic_type >= mic3) | |||
2764 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] = | |||
2765 | 2 * __kmp_nThreadsPerCore; | |||
2766 | else | |||
2767 | #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) | |||
2768 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] = | |||
2769 | __kmp_nThreadsPerCore; | |||
2770 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] = | |||
2771 | nCoresPerPkg * __kmp_nThreadsPerCore; | |||
2772 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] = | |||
2773 | nCoresPerPkg * __kmp_nThreadsPerCore; | |||
2774 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] = | |||
2775 | nPackages * nCoresPerPkg * __kmp_nThreadsPerCore; | |||
2776 | } | |||
2777 | ||||
2778 | // Return the index into the hierarchy for this tid and layer type (L1, L2, etc) | |||
2779 | // i.e., this thread's L1 or this thread's L2, etc. | |||
2780 | int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) { | |||
2781 | int index = type + 1; | |||
2782 | int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1]; | |||
2783 | KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST); | |||
2784 | if (type == kmp_hier_layer_e::LAYER_THREAD) | |||
2785 | return tid; | |||
2786 | else if (type == kmp_hier_layer_e::LAYER_LOOP) | |||
2787 | return 0; | |||
2788 | KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0); | |||
2789 | if (tid >= num_hw_threads) | |||
2790 | tid = tid % num_hw_threads; | |||
2791 | return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index]; | |||
2792 | } | |||
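| // Usage sketch (hypothetical machine: 2 packages x 8 cores x 2 threads, so | |||
| // threads_per[LAYER_L1 + 1] = 2 and max_units[LAYER_L1 + 1] = __kmp_ncores | |||
| // = 16): | |||
| //   __kmp_dispatch_get_index(13, kmp_hier_layer_e::LAYER_L1) | |||
| //     = (13 / 2) % 16 = 6, i.e. tid 13 shares an L1 (core) with tid 12. | |||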
2793 | ||||
2794 | // Return the number of t1's per t2 | |||
2795 | int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) { | |||
2796 | int i1 = t1 + 1; | |||
2797 | int i2 = t2 + 1; | |||
2798 | KMP_DEBUG_ASSERT(i1 <= i2); | |||
2799 | KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST); | |||
2800 | KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST); | |||
2801 | KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0); | |||
2802 | // (nthreads/t2) / (nthreads/t1) = t1 / t2 | |||
2803 | return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1]; | |||
2804 | } | |||
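| // Worked example on the same hypothetical 2 x 8 x 2 machine: | |||
| //   threads_per[LAYER_L1 + 1] = 2, threads_per[LAYER_L3 + 1] = 8 * 2 = 16, | |||
| //   so get_t1_per_t2(LAYER_L1, LAYER_L3) = 16 / 2 = 8 L1s per L3. | |||
| // Note the divisor is guarded only by KMP_DEBUG_ASSERT, which compiles out | |||
| // of release builds; a zero __kmp_hier_threads_per[i1] there would be an | |||
| // unchecked division by zero. | |||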
2805 | #endif // KMP_USE_HIER_SCHED | |||
2806 | ||||
2807 | static inline const char *__kmp_cpuinfo_get_filename() { | |||
2808 | const char *filename; | |||
2809 | if (__kmp_cpuinfo_file != nullptr) | |||
2810 | filename = __kmp_cpuinfo_file; | |||
2811 | else | |||
2812 | filename = "/proc/cpuinfo"; | |||
2813 | return filename; | |||
2814 | } | |||
2815 | ||||
2816 | static inline const char *__kmp_cpuinfo_get_envvar() { | |||
2817 | const char *envvar = nullptr; | |||
2818 | if (__kmp_cpuinfo_file != nullptr) | |||
2819 | envvar = "KMP_CPUINFO_FILE"; | |||
2820 | return envvar; | |||
2821 | } | |||
2822 | ||||
2823 | // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the | |||
2824 | // affinity map. | |||
2825 | static bool __kmp_affinity_create_cpuinfo_map(int *line, | |||
2826 | kmp_i18n_id_t *const msg_id) { | |||
2827 | const char *filename = __kmp_cpuinfo_get_filename(); | |||
2828 | const char *envvar = __kmp_cpuinfo_get_envvar(); | |||
2829 | *msg_id = kmp_i18n_null; | |||
2830 | ||||
2831 | if (__kmp_affinity.flags.verbose) { | |||
2832 | KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename); | |||
2833 | } | |||
2834 | ||||
2835 | kmp_safe_raii_file_t f(filename, "r", envvar); | |||
2836 | ||||
2837 | // Scan the file once, counting the number of "processor" (osId) fields | |||
2838 | // and finding the highest value of <n> for any node_<n> field. | |||
2839 | char buf[256]; | |||
2840 | unsigned num_records = 0; | |||
2841 | while (!feof(f)) { | |||
2842 | buf[sizeof(buf) - 1] = 1; | |||
2843 | if (!fgets(buf, sizeof(buf), f)) { | |||
2844 | // Read errors presumably because of EOF | |||
2845 | break; | |||
2846 | } | |||
2847 | ||||
2848 | char s1[] = "processor"; | |||
2849 | if (strncmp(buf, s1, sizeof(s1) - 1) == 0) { | |||
2850 | num_records++; | |||
2851 | continue; | |||
2852 | } | |||
2853 | ||||
2854 | // FIXME - this will match "node_<n> <garbage>" | |||
2855 | unsigned level; | |||
2856 | if (KMP_SSCANF(buf, "node_%u id", &level) == 1) { | |||
2857 | // validate the input first: | |||
2858 | if (level > (unsigned)__kmp_xproc) { // level is too big | |||
2859 | level = __kmp_xproc; | |||
2860 | } | |||
2861 | if (nodeIdIndex + level >= maxIndex) { | |||
2862 | maxIndex = nodeIdIndex + level; | |||
2863 | } | |||
2864 | continue; | |||
2865 | } | |||
2866 | } | |||
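| // For reference, a minimal record this scanner counts looks like the | |||
| // following (illustrative x86 excerpt; the exact field set varies by | |||
| // architecture and kernel version, and "node_<n> id" lines are typically | |||
| // seen only in alternate KMP_CPUINFO_FILE inputs): | |||
| //   processor   : 0 | |||
| //   physical id : 0 | |||
| //   core id     : 0 | |||
| //   ... | |||
| //   <blank line terminates the record> | |||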
2867 | ||||
2868 | // Check for empty file / no valid processor records, or too many. The number | |||
2869 | // of records can't exceed the number of valid bits in the affinity mask. | |||
2870 | if (num_records == 0) { | |||
2871 | *msg_id = kmp_i18n_str_NoProcRecords; | |||
2872 | return false; | |||
2873 | } | |||
2874 | if (num_records > (unsigned)__kmp_xproc) { | |||
2875 | *msg_id = kmp_i18n_str_TooManyProcRecords; | |||
2876 | return false; | |||
2877 | } | |||
2878 | ||||
2879 | // Set the file pointer back to the beginning, so that we can scan the file | |||
2880 | // again, this time performing a full parse of the data. Allocate a vector of | |||
2881 | // ProcCpuInfo objects, where we will place the data. Adding an extra element | |||
2882 | // at the end allows us to remove a lot of extra checks for termination | |||
2883 | // conditions. | |||
2884 | if (fseek(f, 0, SEEK_SET) != 0) { | |||
2885 | *msg_id = kmp_i18n_str_CantRewindCpuinfo; | |||
2886 | return false; | |||
2887 | } | |||
2888 | ||||
2889 | // Allocate the array of records to store the proc info in. The dummy | |||
2890 | // element at the end makes the logic in filling them out easier to code. | |||
2891 | unsigned **threadInfo = | |||
2892 | (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *)); | |||
2893 | unsigned i; | |||
2894 | for (i = 0; i <= num_records; i++) { | |||
2895 | threadInfo[i] = | |||
2896 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); | |||
2897 | } | |||
2898 | ||||
2899 | #define CLEANUP_THREAD_INFO \ | |||
2900 | for (i = 0; i <= num_records; i++) { \ | |||
2901 | __kmp_free(threadInfo[i]); \ | |||
2902 | } \ | |||
2903 | __kmp_free(threadInfo); | |||
2904 | ||||
2905 | // A value of UINT_MAX means that we didn't find the field | |||
2906 | unsigned __index; | |||
2907 | ||||
2908 | #define INIT_PROC_INFO(p) \ | |||
2909 | for (__index = 0; __index <= maxIndex; __index++) { \ | |||
2910 | (p)[__index] = UINT_MAX; \ | |||
2911 | } | |||
2912 | ||||
2913 | for (i = 0; i <= num_records; i++) { | |||
2914 | INIT_PROC_INFO(threadInfo[i]); | |||
2915 | } | |||
2916 | ||||
2917 | unsigned num_avail = 0; | |||
2918 | *line = 0; | |||
2919 | while (!feof(f)) { | |||
2920 | // Create an inner scoping level, so that all the goto targets at the end of | |||
2921 | // the loop appear in an outer scoping level. This avoids warnings about | |||
2922 | // jumping past an initialization to a target in the same block. | |||
2923 | { | |||
2924 | buf[sizeof(buf) - 1] = 1; | |||
2925 | bool long_line = false; | |||
2926 | if (!fgets(buf, sizeof(buf), f)) { | |||
2927 | // Read errors presumably because of EOF | |||
2928 | // If there is valid data in threadInfo[num_avail], then fake | |||
2929 | // a blank line to ensure that the last record gets parsed. | |||
2930 | bool valid = false; | |||
2931 | for (i = 0; i <= maxIndex; i++) { | |||
2932 | if (threadInfo[num_avail][i] != UINT_MAX) { | |||
2933 | valid = true; | |||
2934 | } | |||
2935 | } | |||
2936 | if (!valid) { | |||
2937 | break; | |||
2938 | } | |||
2939 | buf[0] = 0; | |||
2940 | } else if (!buf[sizeof(buf) - 1]) { | |||
2941 | // The line is longer than the buffer. Set a flag and don't | |||
2942 | // emit an error if we were going to ignore the line, anyway. | |||
2943 | long_line = true; | |||
2944 | ||||
2945 | #define CHECK_LINE \ | |||
2946 | if (long_line) { \ | |||
2947 | CLEANUP_THREAD_INFO; \ | |||
2948 | *msg_id = kmp_i18n_str_LongLineCpuinfo; \ | |||
2949 | return false; \ | |||
2950 | } | |||
2951 | } | |||
2952 | (*line)++; | |||
2953 | ||||
2954 | #if KMP_ARCH_LOONGARCH64 | |||
2955 | // The parsing logic of /proc/cpuinfo in this function highly depends on | |||
2956 | // the blank lines between each processor info block. But on LoongArch a | |||
2957 | // blank line exists before the first processor info block (i.e. after the | |||
2958 | // "system type" line). This blank line was added because the "system | |||
2959 | // type" line is unrelated to any of the CPUs. We must skip this line so | |||
2960 | // that the original logic works on LoongArch. | |||
2961 | if (*buf == '\n' && *line == 2) | |||
2962 | continue; | |||
2963 | #endif | |||
2964 | ||||
2965 | char s1[] = "processor"; | |||
2966 | if (strncmp(buf, s1, sizeof(s1) - 1) == 0) { | |||
2967 | CHECK_LINE; | |||
2968 | char *p = strchr(buf + sizeof(s1) - 1, ':'); | |||
2969 | unsigned val; | |||
2970 | if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) | |||
2971 | goto no_val; | |||
2972 | if (threadInfo[num_avail][osIdIndex] != UINT_MAX) | |||
2973 | #if KMP_ARCH_AARCH64 | |||
2974 | // Handle the old AArch64 /proc/cpuinfo layout differently, | |||
2975 | // it contains all of the 'processor' entries listed in a | |||
2976 | // single 'Processor' section, therefore the normal looking | |||
2977 | // for duplicates in that section will always fail. | |||
2978 | num_avail++; | |||
2979 | #else | |||
2980 | goto dup_field; | |||
2981 | #endif | |||
2982 | threadInfo[num_avail][osIdIndex] = val; | |||
2983 | #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64) | |||
2984 | char path[256]; | |||
2985 | KMP_SNPRINTF( | |||
2986 | path, sizeof(path), | |||
2987 | "/sys/devices/system/cpu/cpu%u/topology/physical_package_id", | |||
2988 | threadInfo[num_avail][osIdIndex]); | |||
2989 | __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]); | |||
2990 | ||||
2991 | KMP_SNPRINTF(path, sizeof(path), | |||
2992 | "/sys/devices/system/cpu/cpu%u/topology/core_id", | |||
2993 | threadInfo[num_avail][osIdIndex]); | |||
2994 | __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]); | |||
2995 | continue; | |||
2996 | #else | |||
2997 | } | |||
2998 | char s2[] = "physical id"; | |||
2999 | if (strncmp(buf, s2, sizeof(s2) - 1) == 0) { | |||
3000 | CHECK_LINE; | |||
3001 | char *p = strchr(buf + sizeof(s2) - 1, ':'); | |||
3002 | unsigned val; | |||
3003 | if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) | |||
3004 | goto no_val; | |||
3005 | if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX) | |||
3006 | goto dup_field; | |||
3007 | threadInfo[num_avail][pkgIdIndex] = val; | |||
3008 | continue; | |||
3009 | } | |||
3010 | char s3[] = "core id"; | |||
3011 | if (strncmp(buf, s3, sizeof(s3) - 1) == 0) { | |||
3012 | CHECK_LINE; | |||
3013 | char *p = strchr(buf + sizeof(s3) - 1, ':'); | |||
3014 | unsigned val; | |||
3015 | if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) | |||
3016 | goto no_val; | |||
3017 | if (threadInfo[num_avail][coreIdIndex] != UINT_MAX) | |||
3018 | goto dup_field; | |||
3019 | threadInfo[num_avail][coreIdIndex] = val; | |||
3020 | continue; | |||
3021 | #endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64) | |||
3022 | } | |||
3023 | char s4[] = "thread id"; | |||
3024 | if (strncmp(buf, s4, sizeof(s4) - 1) == 0) { | |||
3025 | CHECK_LINE; | |||
3026 | char *p = strchr(buf + sizeof(s4) - 1, ':'); | |||
3027 | unsigned val; | |||
3028 | if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) | |||
3029 | goto no_val; | |||
3030 | if (threadInfo[num_avail][threadIdIndex] != UINT_MAX) | |||
3031 | goto dup_field; | |||
3032 | threadInfo[num_avail][threadIdIndex] = val; | |||
3033 | continue; | |||
3034 | } | |||
3035 | unsigned level; | |||
3036 | if (KMP_SSCANF(buf, "node_%u id", &level) == 1) { | |||
3037 | CHECK_LINE; | |||
3038 | char *p = strchr(buf, ':'); // the "node_<n> id" prefix has variable width | |||
3039 | unsigned val; | |||
3040 | if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1)) | |||
3041 | goto no_val; | |||
3042 | // validate the input before using level: | |||
3043 | if (level > (unsigned)__kmp_xproc) { // level is too big | |||
3044 | level = __kmp_xproc; | |||
3045 | } | |||
3046 | if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX) | |||
3047 | goto dup_field; | |||
3048 | threadInfo[num_avail][nodeIdIndex + level] = val; | |||
3049 | continue; | |||
3050 | } | |||
3051 | ||||
3052 | // We didn't recognize the leading token on the line. There are lots of | |||
3053 | // leading tokens that we don't recognize - if the line isn't empty, go on | |||
3054 | // to the next line. | |||
3055 | if ((*buf != 0) && (*buf != '\n')) { | |||
3056 | // If the line is longer than the buffer, read characters | |||
3057 | // until we find a newline. | |||
3058 | if (long_line) { | |||
3059 | int ch; | |||
3060 | while (((ch = fgetc(f)) != EOF) && (ch != '\n')) | |||
3061 | ; | |||
3062 | } | |||
3063 | continue; | |||
3064 | } | |||
3065 | ||||
3066 | // A newline has signalled the end of the processor record. | |||
3067 | // Check that there aren't too many procs specified. | |||
3068 | if ((int)num_avail == __kmp_xproc) { | |||
3069 | CLEANUP_THREAD_INFO; | |||
3070 | *msg_id = kmp_i18n_str_TooManyEntries; | |||
3071 | return false; | |||
3072 | } | |||
3073 | ||||
3074 | // Check for missing fields. The osId field must be there, and we | |||
3075 | // currently require that the physical id field is specified, also. | |||
3076 | if (threadInfo[num_avail][osIdIndex] == UINT_MAX) { | |||
3077 | CLEANUP_THREAD_INFO; | |||
3078 | *msg_id = kmp_i18n_str_MissingProcField; | |||
3079 | return false; | |||
3080 | } | |||
3081 | if (threadInfo[0][pkgIdIndex] == UINT_MAX) { | |||
3082 | CLEANUP_THREAD_INFO; | |||
3083 | *msg_id = kmp_i18n_str_MissingPhysicalIDField; | |||
3084 | return false; | |||
3085 | } | |||
3086 | ||||
3087 | // Skip this proc if it is not included in the machine model. | |||
3088 | if (KMP_AFFINITY_CAPABLE() && | |||
3089 | !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex], | |||
3090 | __kmp_affin_fullMask)) { | |||
3091 | INIT_PROC_INFO(threadInfo[num_avail]); | |||
3092 | continue; | |||
3093 | } | |||
3094 | ||||
3095 | // We have a successful parse of this proc's info. | |||
3096 | // Increment the counter, and prepare for the next proc. | |||
3097 | num_avail++; | |||
3098 | KMP_ASSERT(num_avail <= num_records); | |||
3099 | INIT_PROC_INFO(threadInfo[num_avail]); | |||
3100 | } | |||
3101 | continue; | |||
3102 | ||||
3103 | no_val: | |||
3104 | CLEANUP_THREAD_INFO; | |||
3105 | *msg_id = kmp_i18n_str_MissingValCpuinfo; | |||
3106 | return false; | |||
3107 | ||||
3108 | dup_field: | |||
3109 | CLEANUP_THREAD_INFO; | |||
3110 | *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo; | |||
3111 | return false; | |||
3112 | } | |||
3113 | *line = 0; | |||
3114 | ||||
3115 | #if KMP_MIC && REDUCE_TEAM_SIZE | |||
3116 | unsigned teamSize = 0; | |||
3117 | #endif // KMP_MIC && REDUCE_TEAM_SIZE | |||
3118 | ||||
3119 | // check for num_records == __kmp_xproc ??? | |||
3120 | ||||
3121 | // If it is configured to omit the package level when there is only a single | |||
3122 | // package, the logic at the end of this routine won't work if there is only | |||
3123 | // a single thread. | |||
3124 | KMP_ASSERT(num_avail > 0); | |||
3125 | KMP_ASSERT(num_avail <= num_records); | |||
3126 | ||||
3127 | // Sort the threadInfo table by physical Id. | |||
3128 | qsort(threadInfo, num_avail, sizeof(*threadInfo), | |||
3129 | __kmp_affinity_cmp_ProcCpuInfo_phys_id); | |||
3130 | ||||
3131 | // The table is now sorted by pkgId / coreId / threadId, but we really don't | |||
3132 | // know the radix of any of the fields. pkgId's may be sparsely assigned among | |||
3133 | // the chips on a system. Although coreId's are usually assigned | |||
3134 | // [0 .. coresPerPkg-1] and threadId's are usually assigned | |||
3135 | // [0..threadsPerCore-1], we don't want to make any such assumptions. | |||
3136 | // | |||
3137 | // For that matter, we don't know what coresPerPkg and threadsPerCore (or the | |||
3138 | // total # packages) are at this point - we want to determine that now. We | |||
3139 | // only have an upper bound on the first two figures. | |||
3140 | unsigned *counts = | |||
3141 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); | |||
3142 | unsigned *maxCt = | |||
3143 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); | |||
3144 | unsigned *totals = | |||
3145 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); | |||
3146 | unsigned *lastId = | |||
3147 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); | |||
3148 | ||||
3149 | bool assign_thread_ids = false; | |||
3150 | unsigned threadIdCt; | |||
3151 | unsigned index; | |||
3152 | ||||
3153 | restart_radix_check: | |||
3154 | threadIdCt = 0; | |||
3155 | ||||
3156 | // Initialize the counter arrays with data from threadInfo[0]. | |||
3157 | if (assign_thread_ids) { | |||
3158 | if (threadInfo[0][threadIdIndex] == UINT_MAX) { | |||
3159 | threadInfo[0][threadIdIndex] = threadIdCt++; | |||
3160 | } else if (threadIdCt <= threadInfo[0][threadIdIndex]) { | |||
3161 | threadIdCt = threadInfo[0][threadIdIndex] + 1; | |||
3162 | } | |||
3163 | } | |||
3164 | for (index = 0; index <= maxIndex; index++) { | |||
3165 | counts[index] = 1; | |||
3166 | maxCt[index] = 1; | |||
3167 | totals[index] = 1; | |||
3168 | lastId[index] = threadInfo[0][index]; | |||
3170 | } | |||
3171 | ||||
3172 | // Run through the rest of the OS procs. | |||
3173 | for (i = 1; i < num_avail; i++) { | |||
3174 | // Find the most significant index whose id differs from the id for the | |||
3175 | // previous OS proc. | |||
3176 | for (index = maxIndex; index >= threadIdIndex; index--) { | |||
3177 | if (assign_thread_ids && (index == threadIdIndex)) { | |||
3178 | // Auto-assign the thread id field if it wasn't specified. | |||
3179 | if (threadInfo[i][threadIdIndex] == UINT_MAX) { | |||
3180 | threadInfo[i][threadIdIndex] = threadIdCt++; | |||
3181 | } | |||
3182 | // Apparently the thread id field was specified for some entries and not | |||
3183 | // others. Start the thread id counter off at the next higher thread id. | |||
3184 | else if (threadIdCt <= threadInfo[i][threadIdIndex]) { | |||
3185 | threadIdCt = threadInfo[i][threadIdIndex] + 1; | |||
3186 | } | |||
3187 | } | |||
3188 | if (threadInfo[i][index] != lastId[index]) { | |||
3189 | // Run through all indices which are less significant, and reset the | |||
3190 | // counts to 1. At all levels up to and including index, we need to | |||
3191 | // increment the totals and record the last id. | |||
3192 | unsigned index2; | |||
3193 | for (index2 = threadIdIndex; index2 < index; index2++) { | |||
3194 | totals[index2]++; | |||
3195 | if (counts[index2] > maxCt[index2]) { | |||
3196 | maxCt[index2] = counts[index2]; | |||
3197 | } | |||
3198 | counts[index2] = 1; | |||
3199 | lastId[index2] = threadInfo[i][index2]; | |||
3200 | } | |||
3201 | counts[index]++; | |||
3202 | totals[index]++; | |||
3203 | lastId[index] = threadInfo[i][index]; | |||
3204 | ||||
3205 | if (assign_thread_ids && (index > threadIdIndex)) { | |||
3206 | ||||
3207 | #if KMP_MIC && REDUCE_TEAM_SIZE | |||
3208 | // The default team size is the total #threads in the machine | |||
3209 | // minus 1 thread for every core that has 3 or more threads. | |||
3210 | teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1); | |||
3211 | #endif // KMP_MIC && REDUCE_TEAM_SIZE | |||
3212 | ||||
3213 | // Restart the thread counter, as we are on a new core. | |||
3214 | threadIdCt = 0; | |||
3215 | ||||
3216 | // Auto-assign the thread id field if it wasn't specified. | |||
3217 | if (threadInfo[i][threadIdIndex] == UINT_MAX) { | |||
3218 | threadInfo[i][threadIdIndex] = threadIdCt++; | |||
3219 | } | |||
3220 | ||||
3221 | // Apparently the thread id field was specified for some entries and | |||
3222 | // not others. Start the thread id counter off at the next higher | |||
3223 | // thread id. | |||
3224 | else if (threadIdCt <= threadInfo[i][threadIdIndex]) { | |||
3225 | threadIdCt = threadInfo[i][threadIdIndex] + 1; | |||
3226 | } | |||
3227 | } | |||
3228 | break; | |||
3229 | } | |||
3230 | } | |||
3231 | if (index < threadIdIndex) { | |||
3232 | // If thread ids were specified, it is an error if they are not unique. | |||
3233 | // Also, check that we haven't already restarted the loop (to be safe - | |||
3234 | // shouldn't need to). | |||
3235 | if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) { | |||
3236 | __kmp_free(lastId); | |||
3237 | __kmp_free(totals); | |||
3238 | __kmp_free(maxCt); | |||
3239 | __kmp_free(counts); | |||
3240 | CLEANUP_THREAD_INFO; | |||
3241 | *msg_id = kmp_i18n_str_PhysicalIDsNotUnique; | |||
3242 | return false; | |||
3243 | } | |||
3244 | ||||
3245 | // If the thread ids were not specified and we see entries that are | |||
3246 | // duplicates, start the loop over and assign the thread ids manually. | |||
3247 | assign_thread_ids = true; | |||
3248 | goto restart_radix_check; | |||
3249 | } | |||
3250 | } | |||
3251 | ||||
3252 | #if KMP_MIC && REDUCE_TEAM_SIZE | |||
3253 | // The default team size is the total #threads in the machine | |||
3254 | // minus 1 thread for every core that has 3 or more threads. | |||
3255 | teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1); | |||
3256 | #endif // KMP_MIC && REDUCE_TEAM_SIZE | |||
3257 | ||||
3258 | for (index = threadIdIndex; index <= maxIndex; index++) { | |||
3259 | if (counts[index] > maxCt[index]) { | |||
3260 | maxCt[index] = counts[index]; | |||
3261 | } | |||
3262 | } | |||
3263 | ||||
3264 | __kmp_nThreadsPerCore = maxCt[threadIdIndex]; | |||
3265 | nCoresPerPkg = maxCt[coreIdIndex]; | |||
3266 | nPackages = totals[pkgIdIndex]; | |||
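| // Worked example (hypothetical 2-package, 2-core, 2-thread box, 8 records): | |||
| // the radix scan leaves maxCt[threadIdIndex] = 2, maxCt[coreIdIndex] = 2, | |||
| // totals[pkgIdIndex] = 2, and totals[coreIdIndex] = 4, giving 2 threads per | |||
| // core, 2 cores per package, 2 packages, and __kmp_ncores = 4 just below. | |||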
3267 | ||||
3268 | // When affinity is off, this routine will still be called to set | |||
3269 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. | |||
3270 | // Make sure all these vars are set correctly, and return now if affinity is | |||
3271 | // not enabled. | |||
3272 | __kmp_ncores = totals[coreIdIndex]; | |||
3273 | if (!KMP_AFFINITY_CAPABLE()) { | |||
3274 | KMP_ASSERT(__kmp_affinity.type == affinity_none); | |||
3275 | return true; | |||
3276 | } | |||
3277 | ||||
3278 | #if KMP_MIC && REDUCE_TEAM_SIZE | |||
3279 | // Set the default team size. | |||
3280 | if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) { | |||
3281 | __kmp_dflt_team_nth = teamSize; | |||
3282 | KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting " | |||
3283 | "__kmp_dflt_team_nth = %d\n", | |||
3284 | __kmp_dflt_team_nth)); | |||
3285 | } | |||
3286 | #endif // KMP_MIC && REDUCE_TEAM_SIZE | |||
3287 | ||||
3288 | KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc); | |||
3289 | ||||
3290 | // Count the number of levels which have more nodes at that level than at the | |||
3291 | // parent's level (with there being an implicit root node of the top level). | |||
3292 | // This is equivalent to saying that there is at least one node at this level | |||
3293 | // which has a sibling. These levels are in the map, and the package level is | |||
3294 | // always in the map. | |||
3295 | bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool)); | |||
3296 | for (index = threadIdIndex; index < maxIndex; index++) { | |||
3297 | KMP_ASSERT(totals[index] >= totals[index + 1]); | |||
3298 | inMap[index] = (totals[index] > totals[index + 1]); | |||
3299 | } | |||
3300 | inMap[maxIndex] = (totals[maxIndex] > 1); | |||
3301 | inMap[pkgIdIndex] = true; | |||
3302 | inMap[coreIdIndex] = true; | |||
3303 | inMap[threadIdIndex] = true; | |||
3304 | ||||
3305 | int depth = 0; | |||
3306 | int idx = 0; | |||
3307 | kmp_hw_t types[KMP_HW_LAST]; | |||
3308 | int pkgLevel = -1; | |||
3309 | int coreLevel = -1; | |||
3310 | int threadLevel = -1; | |||
3311 | for (index = threadIdIndex; index <= maxIndex; index++) { | |||
3312 | if (inMap[index]) { | |||
3313 | depth++; | |||
3314 | } | |||
3315 | } | |||
3316 | if (inMap[pkgIdIndex]) { | |||
3317 | pkgLevel = idx; | |||
3318 | types[idx++] = KMP_HW_SOCKET; | |||
3319 | } | |||
3320 | if (inMap[coreIdIndex]) { | |||
3321 | coreLevel = idx; | |||
3322 | types[idx++] = KMP_HW_CORE; | |||
3323 | } | |||
3324 | if (inMap[threadIdIndex]) { | |||
3325 | threadLevel = idx; | |||
3326 | types[idx++] = KMP_HW_THREAD; | |||
3327 | } | |||
3328 | KMP_ASSERT(depth > 0)if (!(depth > 0)) { __kmp_debug_assert("depth > 0", "openmp/runtime/src/kmp_affinity.cpp" , 3328); }; | |||
3329 | ||||
3330 | // Construct the data structure that is to be returned. | |||
3331 | __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types); | |||
3332 | ||||
3333 | for (i = 0; i < num_avail; ++i) { | |||
3334 | unsigned os = threadInfo[i][osIdIndex0]; | |||
3335 | int src_index; | |||
3336 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(i); | |||
3337 | hw_thread.clear(); | |||
3338 | hw_thread.os_id = os; | |||
3339 | ||||
3340 | idx = 0; | |||
3341 | for (src_index = maxIndex; src_index >= threadIdIndex1; src_index--) { | |||
3342 | if (!inMap[src_index]) { | |||
3343 | continue; | |||
3344 | } | |||
3345 | if (src_index == pkgIdIndex3) { | |||
3346 | hw_thread.ids[pkgLevel] = threadInfo[i][src_index]; | |||
3347 | } else if (src_index == coreIdIndex2) { | |||
3348 | hw_thread.ids[coreLevel] = threadInfo[i][src_index]; | |||
3349 | } else if (src_index == threadIdIndex1) { | |||
3350 | hw_thread.ids[threadLevel] = threadInfo[i][src_index]; | |||
3351 | } | |||
3352 | } | |||
3353 | } | |||
3354 | ||||
3355 | __kmp_free(inMap)___kmp_free((inMap), "openmp/runtime/src/kmp_affinity.cpp", 3355 ); | |||
3356 | __kmp_free(lastId)___kmp_free((lastId), "openmp/runtime/src/kmp_affinity.cpp", 3356 ); | |||
3357 | __kmp_free(totals)___kmp_free((totals), "openmp/runtime/src/kmp_affinity.cpp", 3357 ); | |||
3358 | __kmp_free(maxCt)___kmp_free((maxCt), "openmp/runtime/src/kmp_affinity.cpp", 3358 ); | |||
3359 | __kmp_free(counts)___kmp_free((counts), "openmp/runtime/src/kmp_affinity.cpp", 3359 ); | |||
3360 | CLEANUP_THREAD_INFOfor (i = 0; i <= num_records; i++) { ___kmp_free((threadInfo [i]), "openmp/runtime/src/kmp_affinity.cpp", 3360); } ___kmp_free ((threadInfo), "openmp/runtime/src/kmp_affinity.cpp", 3360);; | |||
3361 | __kmp_topology->sort_ids(); | |||
3362 | if (!__kmp_topology->check_ids()) { | |||
3363 | kmp_topology_t::deallocate(__kmp_topology); | |||
3364 | __kmp_topology = nullptr; | |||
3365 | *msg_id = kmp_i18n_str_PhysicalIDsNotUnique; | |||
3366 | return false; | |||
3367 | } | |||
3368 | return true; | |||
3369 | } | |||
3370 | ||||
3371 | // Create and return a table of affinity masks, indexed by OS thread ID. | |||
3372 | // This routine handles OR'ing together all the affinity masks of threads | |||
3373 | // that are sufficiently close, if granularity > fine. | |||
3374 | static void __kmp_create_os_id_masks(unsigned *numUnique, | |||
3375 | kmp_affinity_t &affinity) { | |||
3376 | // First form a table of affinity masks in order of OS thread id. | |||
3377 | int maxOsId; | |||
3378 | int i; | |||
3379 | int numAddrs = __kmp_topology->get_num_hw_threads(); | |||
3380 | int depth = __kmp_topology->get_depth(); | |||
3381 | const char *env_var = affinity.env_var; | |||
3382 | KMP_ASSERT(numAddrs)if (!(numAddrs)) { __kmp_debug_assert("numAddrs", "openmp/runtime/src/kmp_affinity.cpp" , 3382); }; | |||
3383 | KMP_ASSERT(depth)if (!(depth)) { __kmp_debug_assert("depth", "openmp/runtime/src/kmp_affinity.cpp" , 3383); }; | |||
3384 | ||||
3385 | maxOsId = 0; | |||
3386 | for (i = numAddrs - 1;; --i) { | |||
3387 | int osId = __kmp_topology->at(i).os_id; | |||
3388 | if (osId > maxOsId) { | |||
3389 | maxOsId = osId; | |||
3390 | } | |||
3391 | if (i == 0) | |||
3392 | break; | |||
3393 | } | |||
3394 | affinity.num_os_id_masks = maxOsId + 1; | |||
3395 | KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks)(affinity.os_id_masks = __kmp_affinity_dispatch->allocate_mask_array (affinity.num_os_id_masks)); | |||
3396 | KMP_ASSERT(affinity.gran_levels >= 0)if (!(affinity.gran_levels >= 0)) { __kmp_debug_assert("affinity.gran_levels >= 0" , "openmp/runtime/src/kmp_affinity.cpp", 3396); }; | |||
3397 | if (affinity.flags.verbose && (affinity.gran_levels > 0)) { | |||
3398 | KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels)__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_ThreadsMigrate , env_var, affinity.gran_levels), __kmp_msg_null); | |||
3399 | } | |||
3400 | if (affinity.gran_levels >= (int)depth) { | |||
3401 | KMP_AFF_WARNING(affinity, AffThreadsMayMigrate)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffThreadsMayMigrate), __kmp_msg_null ); }; | |||
3402 | } | |||
3403 | ||||
3404 | // Run through the table, forming the masks for all threads on each core. | |||
3405 | // Threads on the same core will have identical kmp_hw_thread_t objects, not | |||
3406 | // considering the last level, which must be the thread id. All threads on a | |||
3407 | // core will appear consecutively. | |||
3408 | int unique = 0; | |||
3409 | int j = 0; // index of 1st thread on core | |||
3410 | int leader = 0; | |||
3411 | kmp_affin_mask_t *sum; | |||
3412 | KMP_CPU_ALLOC_ON_STACK(sum)(sum = __kmp_affinity_dispatch->allocate_mask()); | |||
3413 | KMP_CPU_ZERO(sum)(sum)->zero(); | |||
3414 | KMP_CPU_SET(__kmp_topology->at(0).os_id, sum)(sum)->set(__kmp_topology->at(0).os_id); | |||
3415 | for (i = 1; i < numAddrs; i++) { | |||
3416 | // If this thread is sufficiently close to the leader (within the | |||
3417 | // granularity setting), then set the bit for this os thread in the | |||
3418 | // affinity mask for this group, and go on to the next thread. | |||
3419 | if (__kmp_topology->is_close(leader, i, affinity.gran_levels)) { | |||
3420 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum)(sum)->set(__kmp_topology->at(i).os_id); | |||
3421 | continue; | |||
3422 | } | |||
3423 | ||||
3424 | // For every thread in this group, copy the mask to the thread's entry in | |||
3425 | // the OS Id mask table. Mark the first address as a leader. | |||
3426 | for (; j < i; j++) { | |||
3427 | int osId = __kmp_topology->at(j).os_id; | |||
3428 | KMP_DEBUG_ASSERT(osId <= maxOsId)if (!(osId <= maxOsId)) { __kmp_debug_assert("osId <= maxOsId" , "openmp/runtime/src/kmp_affinity.cpp", 3428); }; | |||
3429 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId)__kmp_affinity_dispatch->index_mask_array(affinity.os_id_masks , osId); | |||
3430 | KMP_CPU_COPY(mask, sum)(mask)->copy(sum); | |||
3431 | __kmp_topology->at(j).leader = (j == leader); | |||
3432 | } | |||
3433 | unique++; | |||
3434 | ||||
3435 | // Start a new mask. | |||
3436 | leader = i; | |||
3437 | KMP_CPU_ZERO(sum)(sum)->zero(); | |||
3438 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum)(sum)->set(__kmp_topology->at(i).os_id); | |||
3439 | } | |||
3440 | ||||
3441 | // For every thread in last group, copy the mask to the thread's | |||
3442 | // entry in the OS Id mask table. | |||
3443 | for (; j < i; j++) { | |||
3444 | int osId = __kmp_topology->at(j).os_id; | |||
3445 | KMP_DEBUG_ASSERT(osId <= maxOsId)if (!(osId <= maxOsId)) { __kmp_debug_assert("osId <= maxOsId" , "openmp/runtime/src/kmp_affinity.cpp", 3445); }; | |||
3446 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId)__kmp_affinity_dispatch->index_mask_array(affinity.os_id_masks , osId); | |||
3447 | KMP_CPU_COPY(mask, sum)(mask)->copy(sum); | |||
3448 | __kmp_topology->at(j).leader = (j == leader); | |||
3449 | } | |||
3450 | unique++; | |||
3451 | KMP_CPU_FREE_FROM_STACK(sum)__kmp_affinity_dispatch->deallocate_mask(sum); | |||
3452 | ||||
3453 | *numUnique = unique; | |||
3454 | } | |||
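// Worked example (hypothetical 4-core machine, 2 hw threads per core,
// granularity=core, i.e. affinity.gran_levels == 1): is_close() treats the
// two hw threads of each core as one group because they differ only at the
// last (thread) level, so the loop forms one mask per core with two OS-id
// bits set, each of a core's two os_id_masks entries receives a copy of
// that shared mask, and *numUnique comes back as 4.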
3455 | ||||
3456 | // Stuff for the affinity proclist parsers. It's easier to declare these vars | |||
3457 | // as file-static than to try to pass them through the calling sequence of | |||
3458 | // the recursive-descent OMP_PLACES parser. | |||
3459 | static kmp_affin_mask_t *newMasks; | |||
3460 | static int numNewMasks; | |||
3461 | static int nextNewMask; | |||
3462 | ||||
3463 | #define ADD_MASK(_mask) \ | |||
3464 | { \ | |||
3465 | if (nextNewMask >= numNewMasks) { \ | |||
3466 | int i; \ | |||
3467 | numNewMasks *= 2; \ | |||
3468 | kmp_affin_mask_t *temp; \ | |||
3469 | KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks)(temp = __kmp_affinity_dispatch->allocate_mask_array(numNewMasks )); \ | |||
3470 | for (i = 0; i < numNewMasks / 2; i++) { \ | |||
3471 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i)__kmp_affinity_dispatch->index_mask_array(newMasks, i); \ | |||
3472 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i)__kmp_affinity_dispatch->index_mask_array(temp, i); \ | |||
3473 | KMP_CPU_COPY(dest, src)(dest)->copy(src); \ | |||
3474 | } \ | |||
3475 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2)__kmp_affinity_dispatch->deallocate_mask_array(newMasks); \ | |||
3476 | newMasks = temp; \ | |||
3477 | } \ | |||
3478 | KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask))(__kmp_affinity_dispatch->index_mask_array(newMasks, nextNewMask ))->copy((_mask)); \ | |||
3479 | nextNewMask++; \ | |||
3480 | } | |||
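// Capacity note (a sketch of the growth policy above, not extra behavior):
// numNewMasks starts at 2 and doubles whenever nextNewMask catches up, so
// accumulating n masks performs O(log n) reallocations and copies fewer
// than 2n masks in total across all doubling steps.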
3481 | ||||
3482 | #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \ | |||
3483 | { \ | |||
3484 | if (((_osId) > _maxOsId) || \ | |||
3485 | (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId)))(__kmp_affinity_dispatch->index_mask_array((_osId2Mask), ( _osId)))->is_set((_osId)))) { \ | |||
3486 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, _osId ), __kmp_msg_null); }; \ | |||
3487 | } else { \ | |||
3488 | ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))__kmp_affinity_dispatch->index_mask_array(_osId2Mask, (_osId ))); \ | |||
3489 | } \ | |||
3490 | } | |||
3491 | ||||
3492 | // Re-parse the proclist (for the explicit affinity type), and form the list | |||
3493 | // of affinity newMasks indexed by gtid. | |||
3494 | static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) { | |||
3495 | int i; | |||
3496 | kmp_affin_mask_t **out_masks = &affinity.masks; | |||
3497 | unsigned *out_numMasks = &affinity.num_masks; | |||
3498 | const char *proclist = affinity.proclist; | |||
3499 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; | |||
3500 | int maxOsId = affinity.num_os_id_masks - 1; | |||
3501 | const char *scan = proclist; | |||
3502 | const char *next = proclist; | |||
3503 | ||||
3504 | // We use malloc() for the temporary mask vector, so that we can use | |||
3505 | // realloc() to extend it. | |||
3506 | numNewMasks = 2; | |||
3507 | KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks)(newMasks = __kmp_affinity_dispatch->allocate_mask_array(numNewMasks )); | |||
3508 | nextNewMask = 0; | |||
3509 | kmp_affin_mask_t *sumMask; | |||
3510 | KMP_CPU_ALLOC(sumMask)(sumMask = __kmp_affinity_dispatch->allocate_mask()); | |||
3511 | int setSize = 0; | |||
3512 | ||||
3513 | for (;;) { | |||
3514 | int start, end, stride; | |||
3515 | ||||
3516 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3517 | next = scan; | |||
3518 | if (*next == '\0') { | |||
3519 | break; | |||
3520 | } | |||
3521 | ||||
3522 | if (*next == '{') { | |||
3523 | int num; | |||
3524 | setSize = 0; | |||
3525 | next++; // skip '{' | |||
3526 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3527 | scan = next; | |||
3528 | ||||
3529 | // Read the first integer in the set. | |||
3530 | KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist")if (!((*next >= '0') && (*next <= '9'))) { __kmp_debug_assert (("bad proclist"), "openmp/runtime/src/kmp_affinity.cpp", 3530 ); }; | |||
3531 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3532 | num = __kmp_str_to_int(scan, *next); | |||
3533 | KMP_ASSERT2(num >= 0, "bad explicit proc list")if (!(num >= 0)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3533); }; | |||
3534 | ||||
3535 | // Copy the mask for that osId to the sum (union) mask. | |||
3536 | if ((num > maxOsId) || | |||
3537 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, num) )->is_set(num))) { | |||
3538 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, num), __kmp_msg_null); }; | |||
3539 | KMP_CPU_ZERO(sumMask)(sumMask)->zero(); | |||
3540 | } else { | |||
3541 | KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num))(sumMask)->copy(__kmp_affinity_dispatch->index_mask_array (osId2Mask, num)); | |||
3542 | setSize = 1; | |||
3543 | } | |||
3544 | ||||
3545 | for (;;) { | |||
3546 | // Check for end of set. | |||
3547 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3548 | if (*next == '}') { | |||
3549 | next++; // skip '}' | |||
3550 | break; | |||
3551 | } | |||
3552 | ||||
3553 | // Skip optional comma. | |||
3554 | if (*next == ',') { | |||
3555 | next++; | |||
3556 | } | |||
3557 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3558 | ||||
3559 | // Read the next integer in the set. | |||
3560 | scan = next; | |||
3561 | KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list")if (!((*next >= '0') && (*next <= '9'))) { __kmp_debug_assert (("bad explicit proc list"), "openmp/runtime/src/kmp_affinity.cpp" , 3561); }; | |||
3562 | ||||
3563 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3564 | num = __kmp_str_to_int(scan, *next); | |||
3565 | KMP_ASSERT2(num >= 0, "bad explicit proc list")if (!(num >= 0)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3565); }; | |||
3566 | ||||
3567 | // Add the mask for that osId to the sum mask. | |||
3568 | if ((num > maxOsId) || | |||
3569 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, num) )->is_set(num))) { | |||
3570 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, num), __kmp_msg_null); }; | |||
3571 | } else { | |||
3572 | KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num))(sumMask)->bitwise_or(__kmp_affinity_dispatch->index_mask_array (osId2Mask, num)); | |||
3573 | setSize++; | |||
3574 | } | |||
3575 | } | |||
3576 | if (setSize > 0) { | |||
3577 | ADD_MASK(sumMask); | |||
3578 | } | |||
3579 | ||||
3580 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3581 | if (*next == ',') { | |||
3582 | next++; | |||
3583 | } | |||
3584 | scan = next; | |||
3585 | continue; | |||
3586 | } | |||
3587 | ||||
3588 | // Read the first integer. | |||
3589 | KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list")if (!((*next >= '0') && (*next <= '9'))) { __kmp_debug_assert (("bad explicit proc list"), "openmp/runtime/src/kmp_affinity.cpp" , 3589); }; | |||
3590 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3591 | start = __kmp_str_to_int(scan, *next); | |||
3592 | KMP_ASSERT2(start >= 0, "bad explicit proc list")if (!(start >= 0)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3592); }; | |||
3593 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3594 | ||||
3595 | // If this isn't a range, then add a mask to the list and go on. | |||
3596 | if (*next != '-') { | |||
3597 | ADD_MASK_OSID(start, osId2Mask, maxOsId); | |||
3598 | ||||
3599 | // Skip optional comma. | |||
3600 | if (*next == ',') { | |||
3601 | next++; | |||
3602 | } | |||
3603 | scan = next; | |||
3604 | continue; | |||
3605 | } | |||
3606 | ||||
3607 | // This is a range. Skip over the '-' and read in the 2nd int. | |||
3608 | next++; // skip '-' | |||
3609 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3610 | scan = next; | |||
3611 | KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list")if (!((*next >= '0') && (*next <= '9'))) { __kmp_debug_assert (("bad explicit proc list"), "openmp/runtime/src/kmp_affinity.cpp" , 3611); }; | |||
3612 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3613 | end = __kmp_str_to_int(scan, *next); | |||
3614 | KMP_ASSERT2(end >= 0, "bad explicit proc list")if (!(end >= 0)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3614); }; | |||
3615 | ||||
3616 | // Check for a stride parameter | |||
3617 | stride = 1; | |||
3618 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3619 | if (*next == ':') { | |||
3620 | // A stride is specified. Skip over the ':' and read the 3rd int. | |||
3621 | int sign = +1; | |||
3622 | next++; // skip ':' | |||
3623 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3624 | scan = next; | |||
3625 | if (*next == '-') { | |||
3626 | sign = -1; | |||
3627 | next++; | |||
3628 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3629 | scan = next; | |||
3630 | } | |||
3631 | KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list")if (!((*next >= '0') && (*next <= '9'))) { __kmp_debug_assert (("bad explicit proc list"), "openmp/runtime/src/kmp_affinity.cpp" , 3631); }; | |||
3632 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3633 | stride = __kmp_str_to_int(scan, *next); | |||
3634 | KMP_ASSERT2(stride >= 0, "bad explicit proc list")if (!(stride >= 0)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3634); }; | |||
3635 | stride *= sign; | |||
3636 | } | |||
3637 | ||||
3638 | // Do some range checks. | |||
3639 | KMP_ASSERT2(stride != 0, "bad explicit proc list")if (!(stride != 0)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3639); }; | |||
3640 | if (stride > 0) { | |||
3641 | KMP_ASSERT2(start <= end, "bad explicit proc list")if (!(start <= end)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3641); }; | |||
3642 | } else { | |||
3643 | KMP_ASSERT2(start >= end, "bad explicit proc list")if (!(start >= end)) { __kmp_debug_assert(("bad explicit proc list" ), "openmp/runtime/src/kmp_affinity.cpp", 3643); }; | |||
3644 | } | |||
3645 | KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list")if (!((end - start) / stride <= 65536)) { __kmp_debug_assert (("bad explicit proc list"), "openmp/runtime/src/kmp_affinity.cpp" , 3645); }; | |||
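// Worked check (illustrative): "0-15:2" parses to start = 0, end = 15,
// stride = 2, and (15 - 0) / 2 = 7, comfortably under the 65536 cap on the
// number of steps one range may take. The quotient relies on the
// stride != 0 assertion just above; in a build where that assertion is
// compiled out or non-fatal, a stride of 0 would reach this division
// unchecked.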
3646 | ||||
3647 | // Add the mask for each OS proc # to the list. | |||
3648 | if (stride > 0) { | |||
3649 | do { | |||
3650 | ADD_MASK_OSID(start, osId2Mask, maxOsId); | |||
3651 | start += stride; | |||
3652 | } while (start <= end); | |||
3653 | } else { | |||
3654 | do { | |||
3655 | ADD_MASK_OSID(start, osId2Mask, maxOsId); | |||
3656 | start += stride; | |||
3657 | } while (start >= end); | |||
3658 | } | |||
3659 | ||||
3660 | // Skip optional comma. | |||
3661 | SKIP_WS(next){ while (*(next) == ' ' || *(next) == '\t') (next)++; }; | |||
3662 | if (*next == ',') { | |||
3663 | next++; | |||
3664 | } | |||
3665 | scan = next; | |||
3666 | } | |||
3667 | ||||
3668 | *out_numMasks = nextNewMask; | |||
3669 | if (nextNewMask == 0) { | |||
3670 | *out_masks = NULL__null; | |||
3671 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks)__kmp_affinity_dispatch->deallocate_mask_array(newMasks); | |||
3672 | return; | |||
3673 | } | |||
3674 | KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask)((*out_masks) = __kmp_affinity_dispatch->allocate_mask_array (nextNewMask)); | |||
3675 | for (i = 0; i < nextNewMask; i++) { | |||
3676 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i)__kmp_affinity_dispatch->index_mask_array(newMasks, i); | |||
3677 | kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i)__kmp_affinity_dispatch->index_mask_array((*out_masks), i); | |||
3678 | KMP_CPU_COPY(dest, src)(dest)->copy(src); | |||
3679 | } | |||
3680 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks)__kmp_affinity_dispatch->deallocate_mask_array(newMasks); | |||
3681 | KMP_CPU_FREE(sumMask)__kmp_affinity_dispatch->deallocate_mask(sumMask); | |||
3682 | } | |||
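// A minimal standalone sketch of the proclist shapes accepted above
// (illustrative only: expand_proclist and read_int are hypothetical helpers
// that use none of the kmp_* types, validation, or warning machinery). It
// highlights the one asymmetry worth remembering: a "{...}" set is OR'd
// into a single mask, while a bare number or an "a-b:s" range contributes
// one mask per listed proc.
#include <cstdio>
#include <cstdlib>
#include <vector>

static int read_int(const char **s) { // parse a decimal int and advance *s
  char *end;
  long v = strtol(*s, &end, 10);
  *s = end;
  return (int)v;
}

static std::vector<std::vector<int>> expand_proclist(const char *s) {
  std::vector<std::vector<int>> masks;
  while (*s) {
    if (*s == '{') { // explicit set {n,n,...}: all procs share ONE mask
      std::vector<int> mask;
      ++s; // skip '{'
      while (*s && *s != '}') {
        if (*s == ',' || *s == ' ') { ++s; continue; }
        mask.push_back(read_int(&s));
      }
      if (*s == '}') ++s; // skip '}'
      masks.push_back(mask);
    } else { // num | num-num | num-num:stride: one mask PER proc
      int start = read_int(&s), end = start, stride = 1;
      if (*s == '-') { ++s; end = read_int(&s); }
      if (*s == ':') { ++s; stride = read_int(&s); }
      for (int p = start; stride > 0 ? p <= end : p >= end; p += stride)
        masks.push_back({p});
    }
    if (*s == ',') ++s;
  }
  return masks;
}

int main() {
  // "0-7:2,{9,10},12" expands to {0} {2} {4} {6} {9,10} {12}
  for (const auto &m : expand_proclist("0-7:2,{9,10},12")) {
    printf("{");
    for (size_t i = 0; i < m.size(); ++i)
      printf(i ? ",%d" : "%d", m[i]);
    printf("} ");
  }
  printf("\n");
  return 0;
}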
3683 | ||||
3684 | /*----------------------------------------------------------------------------- | |||
3685 | Re-parse the OMP_PLACES proc id list, forming the newMasks for the different | |||
3686 | places. Again, here is the grammar: | |||
3687 | ||||
3688 | place_list := place | |||
3689 | place_list := place , place_list | |||
3690 | place := num | |||
3691 | place := place : num | |||
3692 | place := place : num : signed | |||
3693 | place := { subplace_list } | |||
3694 | place := ! place // (lowest priority) | |||
3695 | subplace_list := subplace | |||
3696 | subplace_list := subplace , subplace_list | |||
3697 | subplace := num | |||
3698 | subplace := num : num | |||
3699 | subplace := num : num : signed | |||
3700 | signed := num | |||
3701 | signed := + signed | |||
3702 | signed := - signed | |||
3703 | -----------------------------------------------------------------------------*/ | |||
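// For example (illustrative values, not from this build), under the grammar
// above:
//   "{0,1},{2,3}"   two explicit places of two procs each
//   "{0}:4:2"       place {0} repeated 4 times at stride 2, i.e. the
//                   places {0},{2},{4},{6}
//   "!{0,1}"        the complement of the place {0,1}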
3704 | static void __kmp_process_subplace_list(const char **scan, | |||
3705 | kmp_affinity_t &affinity, int maxOsId, | |||
3706 | kmp_affin_mask_t *tempMask, | |||
3707 | int *setSize) { | |||
3708 | const char *next; | |||
3709 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; | |||
3710 | ||||
3711 | for (;;) { | |||
3712 | int start, count, stride, i; | |||
3713 | ||||
3714 | // Read in the starting proc id | |||
3715 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3716 | KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list")if (!((**scan >= '0') && (**scan <= '9'))) { __kmp_debug_assert (("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3716); }; | |||
3717 | next = *scan; | |||
3718 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3719 | start = __kmp_str_to_int(*scan, *next); | |||
3720 | KMP_ASSERT(start >= 0)if (!(start >= 0)) { __kmp_debug_assert("start >= 0", "openmp/runtime/src/kmp_affinity.cpp" , 3720); }; | |||
3721 | *scan = next; | |||
3722 | ||||
3723 | // valid follow sets are ',' ':' and '}' | |||
3724 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3725 | if (**scan == '}' || **scan == ',') { | |||
3726 | if ((start > maxOsId) || | |||
3727 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, start ))->is_set(start))) { | |||
3728 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, start ), __kmp_msg_null); }; | |||
3729 | } else { | |||
3730 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start))(tempMask)->bitwise_or(__kmp_affinity_dispatch->index_mask_array (osId2Mask, start)); | |||
3731 | (*setSize)++; | |||
3732 | } | |||
3733 | if (**scan == '}') { | |||
3734 | break; | |||
3735 | } | |||
3736 | (*scan)++; // skip ',' | |||
3737 | continue; | |||
3738 | } | |||
3739 | KMP_ASSERT2(**scan == ':', "bad explicit places list")if (!(**scan == ':')) { __kmp_debug_assert(("bad explicit places list" ), "openmp/runtime/src/kmp_affinity.cpp", 3739); }; | |||
3740 | (*scan)++; // skip ':' | |||
3741 | ||||
3742 | // Read count parameter | |||
3743 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3744 | KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list")if (!((**scan >= '0') && (**scan <= '9'))) { __kmp_debug_assert (("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3744); }; | |||
3745 | next = *scan; | |||
3746 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3747 | count = __kmp_str_to_int(*scan, *next); | |||
3748 | KMP_ASSERT(count >= 0)if (!(count >= 0)) { __kmp_debug_assert("count >= 0", "openmp/runtime/src/kmp_affinity.cpp" , 3748); }; | |||
3749 | *scan = next; | |||
3750 | ||||
3751 | // valid follow sets are ',' ':' and '}' | |||
3752 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3753 | if (**scan == '}' || **scan == ',') { | |||
3754 | for (i = 0; i < count; i++) { | |||
3755 | if ((start > maxOsId) || | |||
3756 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, start ))->is_set(start))) { | |||
3757 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, start ), __kmp_msg_null); }; | |||
3758 | break; // don't proliferate warnings for large count | |||
3759 | } else { | |||
3760 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start))(tempMask)->bitwise_or(__kmp_affinity_dispatch->index_mask_array (osId2Mask, start)); | |||
3761 | start++; | |||
3762 | (*setSize)++; | |||
3763 | } | |||
3764 | } | |||
3765 | if (**scan == '}') { | |||
3766 | break; | |||
3767 | } | |||
3768 | (*scan)++; // skip ',' | |||
3769 | continue; | |||
3770 | } | |||
3771 | KMP_ASSERT2(**scan == ':', "bad explicit places list")if (!(**scan == ':')) { __kmp_debug_assert(("bad explicit places list" ), "openmp/runtime/src/kmp_affinity.cpp", 3771); }; | |||
3772 | (*scan)++; // skip ':' | |||
3773 | ||||
3774 | // Read stride parameter | |||
3775 | int sign = +1; | |||
3776 | for (;;) { | |||
3777 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3778 | if (**scan == '+') { | |||
3779 | (*scan)++; // skip '+' | |||
3780 | continue; | |||
3781 | } | |||
3782 | if (**scan == '-') { | |||
3783 | sign *= -1; | |||
3784 | (*scan)++; // skip '-' | |||
3785 | continue; | |||
3786 | } | |||
3787 | break; | |||
3788 | } | |||
3789 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3790 | KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list")if (!((**scan >= '0') && (**scan <= '9'))) { __kmp_debug_assert (("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3790); }; | |||
3791 | next = *scan; | |||
3792 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3793 | stride = __kmp_str_to_int(*scan, *next); | |||
3794 | KMP_ASSERT(stride >= 0)if (!(stride >= 0)) { __kmp_debug_assert("stride >= 0", "openmp/runtime/src/kmp_affinity.cpp", 3794); }; | |||
3795 | *scan = next; | |||
3796 | stride *= sign; | |||
3797 | ||||
3798 | // valid follow sets are ',' and '}' | |||
3799 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3800 | if (**scan == '}' || **scan == ',') { | |||
3801 | for (i = 0; i < count; i++) { | |||
3802 | if ((start > maxOsId) || | |||
3803 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, start ))->is_set(start))) { | |||
3804 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, start ), __kmp_msg_null); }; | |||
3805 | break; // don't proliferate warnings for large count | |||
3806 | } else { | |||
3807 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start))(tempMask)->bitwise_or(__kmp_affinity_dispatch->index_mask_array (osId2Mask, start)); | |||
3808 | start += stride; | |||
3809 | (*setSize)++; | |||
3810 | } | |||
3811 | } | |||
3812 | if (**scan == '}') { | |||
3813 | break; | |||
3814 | } | |||
3815 | (*scan)++; // skip ',' | |||
3816 | continue; | |||
3817 | } | |||
3818 | ||||
3819 | KMP_ASSERT2(0, "bad explicit places list")if (!(0)) { __kmp_debug_assert(("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3819); }; | |||
3820 | } | |||
3821 | } | |||
3822 | ||||
3823 | static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity, | |||
3824 | int maxOsId, kmp_affin_mask_t *tempMask, | |||
3825 | int *setSize) { | |||
3826 | const char *next; | |||
3827 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; | |||
3828 | ||||
3829 | // valid follow sets are '{' '!' and num | |||
3830 | SKIP_WS(*scan){ while (*(*scan) == ' ' || *(*scan) == '\t') (*scan)++; }; | |||
3831 | if (**scan == '{') { | |||
3832 | (*scan)++; // skip '{' | |||
3833 | __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize); | |||
3834 | KMP_ASSERT2(**scan == '}', "bad explicit places list")if (!(**scan == '}')) { __kmp_debug_assert(("bad explicit places list" ), "openmp/runtime/src/kmp_affinity.cpp", 3834); }; | |||
3835 | (*scan)++; // skip '}' | |||
3836 | } else if (**scan == '!') { | |||
3837 | (*scan)++; // skip '!' | |||
3838 | __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize); | |||
3839 | KMP_CPU_COMPLEMENT(maxOsId, tempMask)(tempMask)->bitwise_not(); | |||
3840 | } else if ((**scan >= '0') && (**scan <= '9')) { | |||
3841 | next = *scan; | |||
3842 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3843 | int num = __kmp_str_to_int(*scan, *next); | |||
3844 | KMP_ASSERT(num >= 0)if (!(num >= 0)) { __kmp_debug_assert("num >= 0", "openmp/runtime/src/kmp_affinity.cpp" , 3844); }; | |||
3845 | if ((num > maxOsId) || | |||
3846 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, num) )->is_set(num))) { | |||
3847 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, num), __kmp_msg_null); }; | |||
3848 | } else { | |||
3849 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num))(tempMask)->bitwise_or(__kmp_affinity_dispatch->index_mask_array (osId2Mask, num)); | |||
3850 | (*setSize)++; | |||
3851 | } | |||
3852 | *scan = next; // skip num | |||
3853 | } else { | |||
3854 | KMP_ASSERT2(0, "bad explicit places list")if (!(0)) { __kmp_debug_assert(("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3854); }; | |||
3855 | } | |||
3856 | } | |||
3857 | ||||
3858 | // static void | |||
3859 | void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) { | |||
3860 | int i, j, count, stride, sign; | |||
3861 | kmp_affin_mask_t **out_masks = &affinity.masks; | |||
3862 | unsigned *out_numMasks = &affinity.num_masks; | |||
3863 | const char *placelist = affinity.proclist; | |||
3864 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; | |||
3865 | int maxOsId = affinity.num_os_id_masks - 1; | |||
3866 | const char *scan = placelist; | |||
3867 | const char *next = placelist; | |||
3868 | ||||
3869 | numNewMasks = 2; | |||
3870 | KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks)(newMasks = __kmp_affinity_dispatch->allocate_mask_array(numNewMasks )); | |||
3871 | nextNewMask = 0; | |||
3872 | ||||
3873 | // tempMask is modified based on the previous or initial | |||
3874 | // place to form the current place | |||
3875 | // previousMask contains the previous place | |||
3876 | kmp_affin_mask_t *tempMask; | |||
3877 | kmp_affin_mask_t *previousMask; | |||
3878 | KMP_CPU_ALLOC(tempMask)(tempMask = __kmp_affinity_dispatch->allocate_mask()); | |||
3879 | KMP_CPU_ZERO(tempMask)(tempMask)->zero(); | |||
3880 | KMP_CPU_ALLOC(previousMask)(previousMask = __kmp_affinity_dispatch->allocate_mask()); | |||
3881 | KMP_CPU_ZERO(previousMask)(previousMask)->zero(); | |||
3882 | int setSize = 0; | |||
3883 | ||||
3884 | for (;;) { | |||
3885 | __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize); | |||
3886 | ||||
3887 | // valid follow sets are ',' ':' and EOL | |||
3888 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3889 | if (*scan == '\0' || *scan == ',') { | |||
3890 | if (setSize > 0) { | |||
3891 | ADD_MASK(tempMask); | |||
3892 | } | |||
3893 | KMP_CPU_ZERO(tempMask)(tempMask)->zero(); | |||
3894 | setSize = 0; | |||
3895 | if (*scan == '\0') { | |||
3896 | break; | |||
3897 | } | |||
3898 | scan++; // skip ',' | |||
3899 | continue; | |||
3900 | } | |||
3901 | ||||
3902 | KMP_ASSERT2(*scan == ':', "bad explicit places list")if (!(*scan == ':')) { __kmp_debug_assert(("bad explicit places list" ), "openmp/runtime/src/kmp_affinity.cpp", 3902); }; | |||
3903 | scan++; // skip ':' | |||
3904 | ||||
3905 | // Read count parameter | |||
3906 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3907 | KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list")if (!((*scan >= '0') && (*scan <= '9'))) { __kmp_debug_assert (("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3907); }; | |||
3908 | next = scan; | |||
3909 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3910 | count = __kmp_str_to_int(scan, *next); | |||
3911 | KMP_ASSERT(count >= 0)if (!(count >= 0)) { __kmp_debug_assert("count >= 0", "openmp/runtime/src/kmp_affinity.cpp" , 3911); }; | |||
3912 | scan = next; | |||
3913 | ||||
3914 | // valid follow sets are ',' ':' and EOL | |||
3915 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3916 | if (*scan == '\0' || *scan == ',') { | |||
3917 | stride = +1; | |||
3918 | } else { | |||
3919 | KMP_ASSERT2(*scan == ':', "bad explicit places list")if (!(*scan == ':')) { __kmp_debug_assert(("bad explicit places list" ), "openmp/runtime/src/kmp_affinity.cpp", 3919); }; | |||
3920 | scan++; // skip ':' | |||
3921 | ||||
3922 | // Read stride parameter | |||
3923 | sign = +1; | |||
3924 | for (;;) { | |||
3925 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3926 | if (*scan == '+') { | |||
3927 | scan++; // skip '+' | |||
3928 | continue; | |||
3929 | } | |||
3930 | if (*scan == '-') { | |||
3931 | sign *= -1; | |||
3932 | scan++; // skip '-' | |||
3933 | continue; | |||
3934 | } | |||
3935 | break; | |||
3936 | } | |||
3937 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3938 | KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list")if (!((*scan >= '0') && (*scan <= '9'))) { __kmp_debug_assert (("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3938); }; | |||
3939 | next = scan; | |||
3940 | SKIP_DIGITS(next){ while (*(next) >= '0' && *(next) <= '9') (next )++; }; | |||
3941 | stride = __kmp_str_to_int(scan, *next); | |||
3942 | KMP_DEBUG_ASSERT(stride >= 0)if (!(stride >= 0)) { __kmp_debug_assert("stride >= 0", "openmp/runtime/src/kmp_affinity.cpp", 3942); }; | |||
3943 | scan = next; | |||
3944 | stride *= sign; | |||
3945 | } | |||
3946 | ||||
3947 | // Add places determined by initial_place : count : stride | |||
3948 | for (i = 0; i < count; i++) { | |||
3949 | if (setSize == 0) { | |||
3950 | break; | |||
3951 | } | |||
3952 | // Add the current place, then build the next place (tempMask) from that | |||
3953 | KMP_CPU_COPY(previousMask, tempMask)(previousMask)->copy(tempMask); | |||
3954 | ADD_MASK(previousMask); | |||
3955 | KMP_CPU_ZERO(tempMask)(tempMask)->zero(); | |||
3956 | setSize = 0; | |||
3957 | KMP_CPU_SET_ITERATE(j, previousMask)for (j = (previousMask)->begin(); (int)j != (previousMask) ->end(); j = (previousMask)->next(j)) { | |||
3958 | if (!KMP_CPU_ISSET(j, previousMask)(previousMask)->is_set(j)) { | |||
3959 | continue; | |||
3960 | } | |||
3961 | if ((j + stride > maxOsId) || (j + stride < 0) || | |||
3962 | (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(j)) || | |||
3963 | (!KMP_CPU_ISSET(j + stride,(__kmp_affinity_dispatch->index_mask_array(osId2Mask, j + stride ))->is_set(j + stride) | |||
3964 | KMP_CPU_INDEX(osId2Mask, j + stride))(__kmp_affinity_dispatch->index_mask_array(osId2Mask, j + stride ))->is_set(j + stride))) { | |||
3965 | if (i < count - 1) { | |||
3966 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffIgnoreInvalidProcID, j + stride ), __kmp_msg_null); }; | |||
3967 | } | |||
3968 | continue; | |||
3969 | } | |||
3970 | KMP_CPU_SET(j + stride, tempMask)(tempMask)->set(j + stride); | |||
3971 | setSize++; | |||
3972 | } | |||
3973 | } | |||
3974 | KMP_CPU_ZERO(tempMask)(tempMask)->zero(); | |||
3975 | setSize = 0; | |||
3976 | ||||
3977 | // valid follow sets are ',' and EOL | |||
3978 | SKIP_WS(scan){ while (*(scan) == ' ' || *(scan) == '\t') (scan)++; }; | |||
3979 | if (*scan == '\0') { | |||
3980 | break; | |||
3981 | } | |||
3982 | if (*scan == ',') { | |||
3983 | scan++; // skip ',' | |||
3984 | continue; | |||
3985 | } | |||
3986 | ||||
3987 | KMP_ASSERT2(0, "bad explicit places list")if (!(0)) { __kmp_debug_assert(("bad explicit places list"), "openmp/runtime/src/kmp_affinity.cpp" , 3987); }; | |||
3988 | } | |||
3989 | ||||
3990 | *out_numMasks = nextNewMask; | |||
3991 | if (nextNewMask == 0) { | |||
3992 | *out_masks = NULL__null; | |||
3993 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks)__kmp_affinity_dispatch->deallocate_mask_array(newMasks); | |||
3994 | return; | |||
3995 | } | |||
3996 | KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask)((*out_masks) = __kmp_affinity_dispatch->allocate_mask_array (nextNewMask)); | |||
3997 | KMP_CPU_FREE(tempMask)__kmp_affinity_dispatch->deallocate_mask(tempMask); | |||
3998 | KMP_CPU_FREE(previousMask)__kmp_affinity_dispatch->deallocate_mask(previousMask); | |||
3999 | for (i = 0; i < nextNewMask; i++) { | |||
4000 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i)__kmp_affinity_dispatch->index_mask_array(newMasks, i); | |||
4001 | kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i)__kmp_affinity_dispatch->index_mask_array((*out_masks), i); | |||
4002 | KMP_CPU_COPY(dest, src)(dest)->copy(src); | |||
4003 | } | |||
4004 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks)__kmp_affinity_dispatch->deallocate_mask_array(newMasks); | |||
4005 | } | |||
4006 | ||||
4007 | #undef ADD_MASK | |||
4008 | #undef ADD_MASK_OSID | |||
4009 | ||||
4010 | // This function figures out the deepest level at which there is at least one | |||
4011 | // cluster/core with more than one processing unit bound to it. | |||
4012 | static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) { | |||
4013 | int core_level = 0; | |||
4014 | ||||
4015 | for (int i = 0; i < nprocs; i++) { | |||
4016 | const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i); | |||
4017 | for (int j = bottom_level; j > 0; j--) { | |||
4018 | if (hw_thread.ids[j] > 0) { | |||
4019 | if (core_level < (j - 1)) { | |||
4020 | core_level = j - 1; | |||
4021 | } | |||
4022 | } | |||
4023 | } | |||
4024 | } | |||
4025 | return core_level; | |||
4026 | } | |||
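// Illustrative example (hypothetical ids of the form {package, core,
// thread}, so bottom_level == 2): if any hw thread has ids[2] > 0 (a second
// thread on some core), core_level is raised to 1, the core level itself;
// if every core holds a single thread but some package shows ids[1] > 0 (a
// second core), only the j == 1 test fires and core_level stays 0, the
// package level.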
4027 | ||||
4028 | // This function counts the number of clusters/cores at a given level. | |||
4029 | static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level, | |||
4030 | int core_level) { | |||
4031 | return __kmp_topology->get_count(core_level); | |||
4032 | } | |||
4033 | // This function finds the cluster/core to which a given processing unit is bound. | |||
4034 | static int __kmp_affinity_find_core(int proc, int bottom_level, | |||
4035 | int core_level) { | |||
4036 | int core = 0; | |||
4037 | KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads())if (!(proc >= 0 && proc < __kmp_topology->get_num_hw_threads ())) { __kmp_debug_assert("proc >= 0 && proc < __kmp_topology->get_num_hw_threads()" , "openmp/runtime/src/kmp_affinity.cpp", 4037); }; | |||
4038 | for (int i = 0; i <= proc; ++i) { | |||
4039 | if (i + 1 <= proc) { | |||
4040 | for (int j = 0; j <= core_level; ++j) { | |||
4041 | if (__kmp_topology->at(i + 1).sub_ids[j] != | |||
4042 | __kmp_topology->at(i).sub_ids[j]) { | |||
4043 | core++; | |||
4044 | break; | |||
4045 | } | |||
4046 | } | |||
4047 | } | |||
4048 | } | |||
4049 | return core; | |||
4050 | } | |||
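// Worked example (hypothetical, 2 hw threads per core, core_level naming
// the core layer): procs 0-1, 2-3, and 4-5 share sub_ids down to
// core_level, so the counter bumps only at the 1->2 and 3->4 transitions,
// and __kmp_affinity_find_core(5, ...) returns 2: proc 5 lives on the
// third core (index 2).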
4051 | ||||
4052 | // This function finds the maximal number of processing units bound to a | |||
4053 | // cluster/core at a given level. | |||
4054 | static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level, | |||
4055 | int core_level) { | |||
4056 | if (core_level >= bottom_level) | |||
4057 | return 1; | |||
4058 | int thread_level = __kmp_topology->get_level(KMP_HW_THREAD); | |||
4059 | return __kmp_topology->calculate_ratio(thread_level, core_level); | |||
4060 | } | |||
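// e.g. (hypothetical): with 2 hw threads per core and core_level naming the
// core layer, calculate_ratio(thread_level, core_level) yields 2; when
// core_level is already at or deeper than bottom_level, the early return
// pins the answer to 1.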
4061 | ||||
4062 | static int *procarr = NULL__null; | |||
4063 | static int __kmp_aff_depth = 0; | |||
4064 | static int *__kmp_osid_to_hwthread_map = NULL__null; | |||
4065 | ||||
4066 | static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask, | |||
4067 | kmp_affinity_ids_t &ids, | |||
4068 | kmp_affinity_attrs_t &attrs) { | |||
4069 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) | |||
4070 | return; | |||
4071 | ||||
4072 | // Initialize ids and attrs thread data | |||
4073 | for (int i = 0; i < KMP_HW_LAST; ++i) | |||
4074 | ids[i] = kmp_hw_thread_t::UNKNOWN_ID; | |||
4075 | attrs = KMP_AFFINITY_ATTRS_UNKNOWN{ KMP_HW_CORE_TYPE_UNKNOWN, kmp_hw_attr_t::UNKNOWN_CORE_EFF, 0 , 0 }; | |||
4076 | ||||
4077 | // Iterate through each os id within the mask and determine | |||
4078 | // the topology id and attribute information | |||
4079 | int cpu; | |||
4080 | int depth = __kmp_topology->get_depth(); | |||
4081 | KMP_CPU_SET_ITERATE(cpu, mask)for (cpu = (mask)->begin(); (int)cpu != (mask)->end(); cpu = (mask)->next(cpu)) { | |||
4082 | int osid_idx = __kmp_osid_to_hwthread_map[cpu]; | |||
4083 | const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx); | |||
4084 | for (int level = 0; level < depth; ++level) { | |||
4085 | kmp_hw_t type = __kmp_topology->get_type(level); | |||
4086 | int id = hw_thread.sub_ids[level]; | |||
4087 | if (ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids[type] == id) { | |||
4088 | ids[type] = id; | |||
4089 | } else { | |||
4090 | // This mask spans across multiple topology units, set it as such | |||
4091 | // and mark every level below as such as well. | |||
4092 | ids[type] = kmp_hw_thread_t::MULTIPLE_ID; | |||
4093 | for (; level < depth; ++level) { | |||
4094 | kmp_hw_t type = __kmp_topology->get_type(level); | |||
4095 | ids[type] = kmp_hw_thread_t::MULTIPLE_ID; | |||
4096 | } | |||
4097 | } | |||
4098 | } | |||
4099 | if (!attrs.valid) { | |||
4100 | attrs.core_type = hw_thread.attrs.get_core_type(); | |||
4101 | attrs.core_eff = hw_thread.attrs.get_core_eff(); | |||
4102 | attrs.valid = 1; | |||
4103 | } else { | |||
4104 | // This mask spans across multiple attributes, set it as such | |||
4105 | if (attrs.core_type != hw_thread.attrs.get_core_type()) | |||
4106 | attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN; | |||
4107 | if (attrs.core_eff != hw_thread.attrs.get_core_eff()) | |||
4108 | attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF; | |||
4109 | } | |||
4110 | } | |||
4111 | } | |||
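// For example (hypothetical mask contents): a mask holding the two hw
// threads of one core resolves to {socket s, core c, MULTIPLE_ID}, since
// only the thread level differs within the mask; a mask spanning two cores
// of one socket marks both the core and thread levels MULTIPLE_ID, because
// once a level disagrees every level below it is marked as well. attrs
// behaves analogously: mixed core types collapse back to
// KMP_HW_CORE_TYPE_UNKNOWN.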
4112 | ||||
4113 | static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) { | |||
4114 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) | |||
4115 | return; | |||
4116 | const kmp_affin_mask_t *mask = th->th.th_affin_mask; | |||
4117 | kmp_affinity_ids_t &ids = th->th.th_topology_ids; | |||
4118 | kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs; | |||
4119 | __kmp_affinity_get_mask_topology_info(mask, ids, attrs); | |||
4120 | } | |||
4121 | ||||
4122 | // Assign the topology information to each place in the place list. | |||
4123 | // A thread can then grab not only its affinity mask, but also the topology | |||
4124 | // information associated with that mask, e.g., which socket a thread is on. | |||
4125 | static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) { | |||
4126 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) | |||
4127 | return; | |||
4128 | if (affinity.type != affinity_none) { | |||
4129 | KMP_ASSERT(affinity.num_os_id_masks)if (!(affinity.num_os_id_masks)) { __kmp_debug_assert("affinity.num_os_id_masks" , "openmp/runtime/src/kmp_affinity.cpp", 4129); }; | |||
4130 | KMP_ASSERT(affinity.os_id_masks)if (!(affinity.os_id_masks)) { __kmp_debug_assert("affinity.os_id_masks" , "openmp/runtime/src/kmp_affinity.cpp", 4130); }; | |||
4131 | } | |||
4132 | KMP_ASSERT(affinity.num_masks)if (!(affinity.num_masks)) { __kmp_debug_assert("affinity.num_masks" , "openmp/runtime/src/kmp_affinity.cpp", 4132); }; | |||
4133 | KMP_ASSERT(affinity.masks)if (!(affinity.masks)) { __kmp_debug_assert("affinity.masks", "openmp/runtime/src/kmp_affinity.cpp", 4133); }; | |||
4134 | KMP_ASSERT(__kmp_affin_fullMask)if (!(__kmp_affin_fullMask)) { __kmp_debug_assert("__kmp_affin_fullMask" , "openmp/runtime/src/kmp_affinity.cpp", 4134); }; | |||
4135 | ||||
4136 | int max_cpu = __kmp_affin_fullMask->get_max_cpu(); | |||
4137 | int num_hw_threads = __kmp_topology->get_num_hw_threads(); | |||
4138 | ||||
4139 | // Allocate thread topology information | |||
4140 | if (!affinity.ids) { | |||
4141 | affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(___kmp_allocate((sizeof(kmp_affinity_ids_t) * affinity.num_masks ), "openmp/runtime/src/kmp_affinity.cpp", 4142) | |||
4142 | sizeof(kmp_affinity_ids_t) * affinity.num_masks)___kmp_allocate((sizeof(kmp_affinity_ids_t) * affinity.num_masks ), "openmp/runtime/src/kmp_affinity.cpp", 4142); | |||
4143 | } | |||
4144 | if (!affinity.attrs) { | |||
4145 | affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(___kmp_allocate((sizeof(kmp_affinity_attrs_t) * affinity.num_masks ), "openmp/runtime/src/kmp_affinity.cpp", 4146) | |||
4146 | sizeof(kmp_affinity_attrs_t) * affinity.num_masks)___kmp_allocate((sizeof(kmp_affinity_attrs_t) * affinity.num_masks ), "openmp/runtime/src/kmp_affinity.cpp", 4146); | |||
4147 | } | |||
4148 | if (!__kmp_osid_to_hwthread_map) { | |||
4149 | // Want the +1 because max_cpu should be a valid index into the map | |||
4150 | __kmp_osid_to_hwthread_map = | |||
4151 | (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1))___kmp_allocate((sizeof(int) * (max_cpu + 1)), "openmp/runtime/src/kmp_affinity.cpp" , 4151); | |||
4152 | } | |||
4153 | ||||
4154 | // Create the OS proc to hardware thread map | |||
4155 | for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) | |||
4156 | __kmp_osid_to_hwthread_map[__kmp_topology->at(hw_thread).os_id] = hw_thread; | |||
4157 | ||||
4158 | for (unsigned i = 0; i < affinity.num_masks; ++i) { | |||
4159 | kmp_affinity_ids_t &ids = affinity.ids[i]; | |||
4160 | kmp_affinity_attrs_t &attrs = affinity.attrs[i]; | |||
4161 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i)__kmp_affinity_dispatch->index_mask_array(affinity.masks, i ); | |||
4162 | __kmp_affinity_get_mask_topology_info(mask, ids, attrs); | |||
4163 | } | |||
4164 | } | |||
4165 | ||||
4166 | // Create a one element mask array (set of places) which only contains the | |||
4167 | // initial process's affinity mask | |||
4168 | static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) { | |||
4169 | KMP_ASSERT(__kmp_affin_fullMask != NULL)if (!(__kmp_affin_fullMask != __null)) { __kmp_debug_assert("__kmp_affin_fullMask != NULL" , "openmp/runtime/src/kmp_affinity.cpp", 4169); }; | |||
4170 | KMP_ASSERT(affinity.type == affinity_none)if (!(affinity.type == affinity_none)) { __kmp_debug_assert("affinity.type == affinity_none" , "openmp/runtime/src/kmp_affinity.cpp", 4170); }; | |||
4171 | affinity.num_masks = 1; | |||
4172 | KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks)(affinity.masks = __kmp_affinity_dispatch->allocate_mask_array (affinity.num_masks)); | |||
4173 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0)__kmp_affinity_dispatch->index_mask_array(affinity.masks, 0 ); | |||
4174 | KMP_CPU_COPY(dest, __kmp_affin_fullMask)(dest)->copy(__kmp_affin_fullMask); | |||
4175 | __kmp_affinity_get_topology_info(affinity); | |||
4176 | } | |||
4177 | ||||
4178 | static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) { | |||
4179 | // Create the "full" mask - this defines all of the processors that we | |||
4180 | // consider to be in the machine model. If respect is set, then it is the | |||
4181 | // initialization thread's affinity mask. Otherwise, it is all processors that | |||
4182 | // we know about on the machine. | |||
4183 | int verbose = affinity.flags.verbose; | |||
4184 | const char *env_var = affinity.env_var; | |||
4185 | ||||
4186 | // Already initialized | |||
4187 | if (__kmp_affin_fullMask && __kmp_affin_origMask) | |||
4188 | return; | |||
4189 | ||||
4190 | if (__kmp_affin_fullMask == NULL__null) { | |||
4191 | KMP_CPU_ALLOC(__kmp_affin_fullMask)(__kmp_affin_fullMask = __kmp_affinity_dispatch->allocate_mask ()); | |||
4192 | } | |||
4193 | if (__kmp_affin_origMask == NULL__null) { | |||
4194 | KMP_CPU_ALLOC(__kmp_affin_origMask)(__kmp_affin_origMask = __kmp_affinity_dispatch->allocate_mask ()); | |||
4195 | } | |||
4196 | if (KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
4197 | __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE)(__kmp_affin_fullMask)->get_system_affinity((!0)); | |||
4198 | // Make a copy before possible expanding to the entire machine mask | |||
4199 | __kmp_affin_origMask->copy(__kmp_affin_fullMask); | |||
4200 | if (affinity.flags.respect) { | |||
4201 | // Count the number of available processors. | |||
4202 | unsigned i; | |||
4203 | __kmp_avail_proc = 0; | |||
4204 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask)for (i = (__kmp_affin_fullMask)->begin(); (int)i != (__kmp_affin_fullMask )->end(); i = (__kmp_affin_fullMask)->next(i)) { | |||
4205 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(i)) { | |||
4206 | continue; | |||
4207 | } | |||
4208 | __kmp_avail_proc++; | |||
4209 | } | |||
4210 | if (__kmp_avail_proc > __kmp_xproc) { | |||
4211 | KMP_AFF_WARNING(affinity, ErrorInitializeAffinity)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_ErrorInitializeAffinity), __kmp_msg_null ); }; | |||
4212 | affinity.type = affinity_none; | |||
4213 | KMP_AFFINITY_DISABLE()(__kmp_affin_mask_size = 0); | |||
4214 | return; | |||
4215 | } | |||
4216 | ||||
4217 | if (verbose) { | |||
4218 | char buf[KMP_AFFIN_MASK_PRINT_LEN1024]; | |||
4219 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN1024, | |||
4220 | __kmp_affin_fullMask); | |||
4221 | KMP_INFORM(InitOSProcSetRespect, env_var, buf)__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_InitOSProcSetRespect , env_var, buf), __kmp_msg_null); | |||
4222 | } | |||
4223 | } else { | |||
4224 | if (verbose) { | |||
4225 | char buf[KMP_AFFIN_MASK_PRINT_LEN1024]; | |||
4226 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN1024, | |||
4227 | __kmp_affin_fullMask); | |||
4228 | KMP_INFORM(InitOSProcSetNotRespect, env_var, buf)__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_InitOSProcSetNotRespect , env_var, buf), __kmp_msg_null); | |||
4229 | } | |||
4230 | __kmp_avail_proc = | |||
4231 | __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask); | |||
4232 | #if KMP_OS_WINDOWS0 | |||
4233 | if (__kmp_num_proc_groups <= 1) { | |||
4234 | // Copy expanded full mask if topology has single processor group | |||
4235 | __kmp_affin_origMask->copy(__kmp_affin_fullMask); | |||
4236 | } | |||
4237 | // Set the process affinity mask since threads' affinity | |||
4238 | // masks must be a subset of the process mask on Windows* OS | |||
4239 | __kmp_affin_fullMask->set_process_affinity(true); | |||
4240 | #endif | |||
4241 | } | |||
4242 | } | |||
4243 | } | |||
4244 | ||||
4245 | static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) { | |||
4246 | bool success = false; | |||
4247 | const char *env_var = affinity.env_var; | |||
4248 | kmp_i18n_id_t msg_id = kmp_i18n_null; | |||
4249 | int verbose = affinity.flags.verbose; | |||
4250 | ||||
4251 | // For backward compatibility, setting KMP_CPUINFO_FILE => | |||
4252 | // KMP_TOPOLOGY_METHOD=cpuinfo | |||
4253 | if ((__kmp_cpuinfo_file != NULL__null) && | |||
4254 | (__kmp_affinity_top_method == affinity_top_method_all)) { | |||
4255 | __kmp_affinity_top_method = affinity_top_method_cpuinfo; | |||
4256 | } | |||
4257 | ||||
4258 | if (__kmp_affinity_top_method == affinity_top_method_all) { | |||
4259 | // In the default code path, errors are not fatal - we just try using | |||
4260 | // another method. We only emit a warning message if affinity is on or the | |||
4261 | // verbose flag is set, and the nowarnings flag was not set. | |||
4262 | #if KMP_USE_HWLOC0 | |||
4263 | if (!success && | |||
4264 | __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) { | |||
4265 | if (!__kmp_hwloc_error) { | |||
4266 | success = __kmp_affinity_create_hwloc_map(&msg_id); | |||
4267 | if (!success && verbose) { | |||
4268 | KMP_INFORM(AffIgnoringHwloc, env_var)__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffIgnoringHwloc , env_var), __kmp_msg_null); | |||
4269 | } | |||
4270 | } else if (verbose) { | |||
4271 | KMP_INFORM(AffIgnoringHwloc, env_var)__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffIgnoringHwloc , env_var), __kmp_msg_null); | |||
4272 | } | |||
4273 | } | |||
4274 | #endif | |||
4275 | ||||
4276 | #if KMP_ARCH_X860 || KMP_ARCH_X86_641 | |||
4277 | if (!success) { | |||
4278 | success = __kmp_affinity_create_x2apicid_map(&msg_id); | |||
4279 | if (!success && verbose && msg_id != kmp_i18n_null) { | |||
4280 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id))__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffInfoStr , env_var, __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4281 | } | |||
4282 | } | |||
4283 | if (!success) { | |||
4284 | success = __kmp_affinity_create_apicid_map(&msg_id); | |||
4285 | if (!success && verbose && msg_id != kmp_i18n_null) { | |||
4286 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id))__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffInfoStr , env_var, __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4287 | } | |||
4288 | } | |||
4289 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ | |||
4290 | ||||
4291 | #if KMP_OS_LINUX1 | |||
4292 | if (!success) { | |||
4293 | int line = 0; | |||
4294 | success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id); | |||
4295 | if (!success && verbose && msg_id != kmp_i18n_null) { | |||
4296 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id))__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffInfoStr , env_var, __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4297 | } | |||
4298 | } | |||
4299 | #endif /* KMP_OS_LINUX */ | |||
4300 | ||||
4301 | #if KMP_GROUP_AFFINITY0 | |||
4302 | if (!success && (__kmp_num_proc_groups > 1)) { | |||
4303 | success = __kmp_affinity_create_proc_group_map(&msg_id); | |||
4304 | if (!success && verbose && msg_id != kmp_i18n_null) { | |||
4305 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id))__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffInfoStr , env_var, __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4306 | } | |||
4307 | } | |||
4308 | #endif /* KMP_GROUP_AFFINITY */ | |||
4309 | ||||
4310 | if (!success) { | |||
4311 | success = __kmp_affinity_create_flat_map(&msg_id); | |||
4312 | if (!success && verbose && msg_id != kmp_i18n_null) { | |||
4313 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id))__kmp_msg(kmp_ms_inform, __kmp_msg_format(kmp_i18n_msg_AffInfoStr , env_var, __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4314 | } | |||
4315 | KMP_ASSERT(success)if (!(success)) { __kmp_debug_assert("success", "openmp/runtime/src/kmp_affinity.cpp" , 4315); }; | |||
4316 | } | |||
4317 | } | |||
4318 | ||||
4319 | // If the user has specified that a particular topology discovery method is to be | |||
4320 | // used, then we abort if that method fails. The exception is group affinity, | |||
4321 | // which might have been implicitly set. | |||
4322 | #if KMP_USE_HWLOC0 | |||
4323 | else if (__kmp_affinity_top_method == affinity_top_method_hwloc) { | |||
4324 | KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC)if (!(__kmp_affinity_dispatch->get_api_type() == KMPAffinity ::HWLOC)) { __kmp_debug_assert("__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC" , "openmp/runtime/src/kmp_affinity.cpp", 4324); }; | |||
4325 | success = __kmp_affinity_create_hwloc_map(&msg_id); | |||
4326 | if (!success) { | |||
4327 | KMP_ASSERT(msg_id != kmp_i18n_null)if (!(msg_id != kmp_i18n_null)) { __kmp_debug_assert("msg_id != kmp_i18n_null" , "openmp/runtime/src/kmp_affinity.cpp", 4327); }; | |||
4328 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id))__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_MsgExiting, __kmp_i18n_catgets (msg_id)), __kmp_msg_null); | |||
4329 | } | |||
4330 | } | |||
4331 | #endif // KMP_USE_HWLOC | |||
4332 | ||||
4333 | #if KMP_ARCH_X860 || KMP_ARCH_X86_641 | |||
4334 | else if (__kmp_affinity_top_method == affinity_top_method_x2apicid || | |||
4335 | __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) { | |||
4336 | success = __kmp_affinity_create_x2apicid_map(&msg_id); | |||
4337 | if (!success) { | |||
4338 | KMP_ASSERT(msg_id != kmp_i18n_null)if (!(msg_id != kmp_i18n_null)) { __kmp_debug_assert("msg_id != kmp_i18n_null" , "openmp/runtime/src/kmp_affinity.cpp", 4338); }; | |||
4339 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id))__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_MsgExiting, __kmp_i18n_catgets (msg_id)), __kmp_msg_null); | |||
4340 | } | |||
4341 | } else if (__kmp_affinity_top_method == affinity_top_method_apicid) { | |||
4342 | success = __kmp_affinity_create_apicid_map(&msg_id); | |||
4343 | if (!success) { | |||
4344 | KMP_ASSERT(msg_id != kmp_i18n_null)if (!(msg_id != kmp_i18n_null)) { __kmp_debug_assert("msg_id != kmp_i18n_null" , "openmp/runtime/src/kmp_affinity.cpp", 4344); }; | |||
4345 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id))__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_MsgExiting, __kmp_i18n_catgets (msg_id)), __kmp_msg_null); | |||
4346 | } | |||
4347 | } | |||
4348 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ | |||
4349 | ||||
4350 | else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) { | |||
4351 | int line = 0; | |||
4352 | success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id); | |||
4353 | if (!success) { | |||
4354 | KMP_ASSERT(msg_id != kmp_i18n_null)if (!(msg_id != kmp_i18n_null)) { __kmp_debug_assert("msg_id != kmp_i18n_null" , "openmp/runtime/src/kmp_affinity.cpp", 4354); }; | |||
4355 | const char *filename = __kmp_cpuinfo_get_filename(); | |||
4356 | if (line > 0) { | |||
4357 | KMP_FATAL(FileLineMsgExiting, filename, line,__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_FileLineMsgExiting, filename, line, __kmp_i18n_catgets(msg_id)), __kmp_msg_null) | |||
4358 | __kmp_i18n_catgets(msg_id))__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_FileLineMsgExiting, filename, line, __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4359 | } else { | |||
4360 | KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id))__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_FileMsgExiting, filename , __kmp_i18n_catgets(msg_id)), __kmp_msg_null); | |||
4361 | } | |||
4362 | } | |||
4363 | } | |||
4364 | ||||
4365 | #if KMP_GROUP_AFFINITY0 | |||
4366 | else if (__kmp_affinity_top_method == affinity_top_method_group) { | |||
4367 | success = __kmp_affinity_create_proc_group_map(&msg_id); | |||
4368 | KMP_ASSERT(success)if (!(success)) { __kmp_debug_assert("success", "openmp/runtime/src/kmp_affinity.cpp" , 4368); }; | |||
4369 | if (!success) { | |||
4370 | KMP_ASSERT(msg_id != kmp_i18n_null)if (!(msg_id != kmp_i18n_null)) { __kmp_debug_assert("msg_id != kmp_i18n_null" , "openmp/runtime/src/kmp_affinity.cpp", 4370); }; | |||
4371 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id))__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_MsgExiting, __kmp_i18n_catgets (msg_id)), __kmp_msg_null); | |||
4372 | } | |||
4373 | } | |||
4374 | #endif /* KMP_GROUP_AFFINITY */ | |||
4375 | ||||
4376 | else if (__kmp_affinity_top_method == affinity_top_method_flat) { | |||
4377 | success = __kmp_affinity_create_flat_map(&msg_id); | |||
4378 | // should not fail | |||
4379 | KMP_ASSERT(success)if (!(success)) { __kmp_debug_assert("success", "openmp/runtime/src/kmp_affinity.cpp" , 4379); }; | |||
4380 | } | |||
4381 | ||||
4382 | // Early exit if topology could not be created | |||
4383 | if (!__kmp_topology) { | |||
4384 | if (KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
4385 | KMP_AFF_WARNING(affinity, ErrorInitializeAffinity)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_ErrorInitializeAffinity), __kmp_msg_null ); }; | |||
4386 | } | |||
4387 | if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 && | |||
4388 | __kmp_ncores > 0) { | |||
4389 | __kmp_topology = kmp_topology_t::allocate(0, 0, NULL__null); | |||
4390 | __kmp_topology->canonicalize(nPackages, nCoresPerPkg, | |||
4391 | __kmp_nThreadsPerCore, __kmp_ncores); | |||
4392 | if (verbose) { | |||
4393 | __kmp_topology->print(env_var); | |||
4394 | } | |||
4395 | } | |||
4396 | return false; | |||
4397 | } | |||
4398 | ||||
4399 | // Canonicalize, print (if requested), apply KMP_HW_SUBSET | |||
4400 | __kmp_topology->canonicalize(); | |||
4401 | if (verbose) | |||
4402 | __kmp_topology->print(env_var); | |||
4403 | bool filtered = __kmp_topology->filter_hw_subset(); | |||
4404 | if (filtered) { | |||
4405 | #if KMP_OS_WINDOWS0 | |||
4406 | // Copy filtered full mask if topology has single processor group | |||
4407 | if (__kmp_num_proc_groups <= 1) | |||
4408 | #endif | |||
4409 | __kmp_affin_origMask->copy(__kmp_affin_fullMask); | |||
4410 | } | |||
4411 | if (filtered && verbose) | |||
4412 | __kmp_topology->print("KMP_HW_SUBSET"); | |||
4413 | return success; | |||
4414 | } | |||
4415 | ||||
4416 | static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) { | |||
4417 | bool is_regular_affinity = (&affinity == &__kmp_affinity); | |||
4418 | bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity); | |||
4419 | const char *env_var = affinity.env_var; | |||
4420 | ||||
4421 | if (affinity.flags.initialized) { | |||
4422 | KMP_ASSERT(__kmp_affin_fullMask != NULL)if (!(__kmp_affin_fullMask != __null)) { __kmp_debug_assert("__kmp_affin_fullMask != NULL" , "openmp/runtime/src/kmp_affinity.cpp", 4422); }; | |||
4423 | return; | |||
4424 | } | |||
4425 | ||||
4426 | if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask)) | |||
4427 | __kmp_aux_affinity_initialize_masks(affinity); | |||
4428 | ||||
4429 | if (is_regular_affinity && !__kmp_topology) { | |||
4430 | bool success = __kmp_aux_affinity_initialize_topology(affinity); | |||
4431 | if (success) { | |||
4432 | // Initialize other data structures which depend on the topology | |||
4433 | machine_hierarchy.init(__kmp_topology->get_num_hw_threads()); | |||
4434 | KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads())if (!(__kmp_avail_proc == __kmp_topology->get_num_hw_threads ())) { __kmp_debug_assert("__kmp_avail_proc == __kmp_topology->get_num_hw_threads()" , "openmp/runtime/src/kmp_affinity.cpp", 4434); }; | |||
4435 | } else { | |||
4436 | affinity.type = affinity_none; | |||
4437 | KMP_AFFINITY_DISABLE()(__kmp_affin_mask_size = 0); | |||
4438 | } | |||
4439 | } | |||
4440 | ||||
4441 | // If KMP_AFFINITY=none, then only create the single "none" place | |||
4442 | // which is the process's initial affinity mask or the number of | |||
4443 | // hardware threads depending on respect,norespect | |||
4444 | if (affinity.type == affinity_none) { | |||
4445 | __kmp_create_affinity_none_places(affinity); | |||
4446 | #if KMP_USE_HIER_SCHED0 | |||
4447 | __kmp_dispatch_set_hierarchy_values(); | |||
4448 | #endif | |||
4449 | affinity.flags.initialized = TRUE(!0); | |||
4450 | return; | |||
4451 | } | |||
4452 | ||||
4453 | __kmp_topology->set_granularity(affinity); | |||
4454 | int depth = __kmp_topology->get_depth(); | |||
4455 | ||||
4456 | // Create the table of masks, indexed by thread Id. | |||
4457 | unsigned numUnique; | |||
4458 | __kmp_create_os_id_masks(&numUnique, affinity); | |||
4459 | if (affinity.gran_levels == 0) { | |||
4460 | KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc)if (!((int)numUnique == __kmp_avail_proc)) { __kmp_debug_assert ("(int)numUnique == __kmp_avail_proc", "openmp/runtime/src/kmp_affinity.cpp" , 4460); }; | |||
4461 | } | |||
4462 | ||||
4463 | switch (affinity.type) { | |||
4464 | ||||
4465 | case affinity_explicit: | |||
4466 | KMP_DEBUG_ASSERT(affinity.proclist != NULL)if (!(affinity.proclist != __null)) { __kmp_debug_assert("affinity.proclist != __null" , "openmp/runtime/src/kmp_affinity.cpp", 4466); }; | |||
4467 | if (is_hidden_helper_affinity || | |||
4468 | __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) { | |||
4469 | __kmp_affinity_process_proclist(affinity); | |||
4470 | } else { | |||
4471 | __kmp_affinity_process_placelist(affinity); | |||
4472 | } | |||
4473 | if (affinity.num_masks == 0) { | |||
4474 | KMP_AFF_WARNING(affinity, AffNoValidProcID)if (affinity.flags.verbose || (affinity.flags.warnings && (affinity.type != affinity_none))) { __kmp_msg(kmp_ms_warning , __kmp_msg_format(kmp_i18n_msg_AffNoValidProcID), __kmp_msg_null ); }; | |||
4475 | affinity.type = affinity_none; | |||
4476 | __kmp_create_affinity_none_places(affinity); | |||
4477 | affinity.flags.initialized = TRUE(!0); | |||
4478 | return; | |||
4479 | } | |||
4480 | break; | |||
4481 | ||||
4482 | // The other affinity types rely on sorting the hardware threads according to | |||
4483 | // some permutation of the machine topology tree. Set affinity.compact | |||
4484 | // and affinity.offset appropriately, then jump to a common code | |||
4485 | // fragment to do the sort and create the array of affinity masks. | |||
4486 | case affinity_logical: | |||
4487 | affinity.compact = 0; | |||
4488 | if (affinity.offset) { | |||
4489 | affinity.offset = | |||
4490 | __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc; | |||
4491 | } | |||
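    // For example (illustrative numbers): with __kmp_nThreadsPerCore == 2 and
    // __kmp_avail_proc == 8, an offset of 3 is rescaled to (2 * 3) % 8 == 6,
    // i.e. a per-core offset is converted to hardware threads and wrapped
    // around the number of available processors.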
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      affinity.compact = depth - 1 - affinity.compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
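    // For example (illustrative numbers): with depth == 3 (socket, core,
    // thread), a user value of 0 stays 0 for affinity_compact (clamped to at
    // most depth - 1 == 2), while affinity_scatter above inverts it to
    // depth - 1 - 0 == 2, so scatter and compact sort the topology tree from
    // opposite ends.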
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1 || is_hidden_helper_affinity) {
      KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
        affinity.type = affinity_none;
        __kmp_create_affinity_none_places(affinity);
        affinity.flags.initialized = TRUE;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
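      // For example (illustrative numbers): with ncores == 2 and
      // maxprocpercore == 2, procarr has 4 slots laid out as
      // [core0:proc0, core0:proc1, core1:proc0, core1:proc1]; slots that a
      // smaller core leaves unfilled keep the initial value -1.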
    }
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (affinity.flags.dups) {
      affinity.num_masks = __kmp_avail_proc;
    } else {
      affinity.num_masks = numUnique;
    }
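    // For example (illustrative numbers): on 8 available hardware threads
    // with core granularity on a 2-way SMT machine, numUnique == 4, so either
    // 8 masks (two identical ones per core, when dups is set) or only the 4
    // unique per-core masks are allocated.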

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
        !is_hidden_helper_affinity) {
      affinity.num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);

    // Sort the topology table according to the current setting of
    // affinity.compact, then fill out affinity.masks.
    __kmp_topology->sort_compact(affinity);
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        if (++j >= affinity.num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == affinity.num_masks);
    }
    // Sort the topology back using ids
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }
  __kmp_affinity_get_topology_info(affinity);
  affinity.flags.initialized = TRUE;
}

void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
  // Much of the code above was written assuming that if a machine was not
  // affinity capable, then affinity type == affinity_none.
  // We now explicitly represent this as affinity type == affinity_disabled.
  // There are too many checks for affinity type == affinity_none in this code.
  // Instead of trying to change them all, check if
  // affinity type == affinity_disabled, and if so, slam it with affinity_none,
  // call the real initialization routine, then restore affinity type to
  // affinity_disabled.
  int disabled = (affinity.type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE())
    KMP_ASSERT(disabled);
  if (disabled)
    affinity.type = affinity_none;
  __kmp_aux_affinity_initialize(affinity);
  if (disabled)
    affinity.type = affinity_disabled;
}

void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      __kmp_free(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_places = 0;
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}

static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    // The first gtid is the regular primary thread; the second gtid is the
    // main thread of the hidden team, which does not participate in task
    // execution.
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
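
// For example (illustrative numbers), in the routine above: gtid 2 (the
// first hidden helper that executes tasks) gives mask_idx == 0; with
// offset == 0 and num_masks == 4 it lands on place 0 and gtid 3 on place 1,
// wrapping modulo num_masks thereafter.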

// This function initializes the per-thread data concerning affinity including
// the mask and topology information
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  // Set the thread topology information to default of unknown
  for (int id = 0; id < KMP_HW_LAST; ++id)
    th->th.th_topology_ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e.
  // one that has all of the OS proc ids set, or if
  // __kmp_affinity.flags.respect is set, then the full mask is the
  // same as the mask of the initialization thread.
  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = affinity->env_var;

  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = 0;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }
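  // For example: with OMP_PROC_BIND active, only a root thread is pinned to a
  // specific place here; workers start on the full mask (i == KMP_PLACE_ALL)
  // and are moved onto their place later by __kmp_affinity_set_place().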

  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a Non-OMP_PROC_BIND affinity method,
    // set all threads' place-partition-var to the entire place list
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // Copy topology information associated with the place
  if (i >= 0) {
    th->th.th_topology_ids = __kmp_affinity.ids[i];
    th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
                   gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);

  /* to avoid duplicate printing (will be correctly printed on barrier) */
  if (affinity->flags.verbose &&
      (affinity->type == affinity_none ||
       (i != KMP_PLACE_ALL && affinity->type != affinity_balanced)) &&
      !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
               gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the user
  // didn't request affinity and this call fails, just continue silently.
  // See CQ171393.
  if (affinity->type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

void __kmp_affinity_set_place(int gtid) {
  // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND
  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;
  // Copy topology information associated with the place
  th->th.th_topology_ids = __kmp_affinity.ids[th->th.th_new_place];
  th->th.th_topology_attrs = __kmp_affinity.attrs[th->th.th_new_place];

  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}

int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;

  // Turn off 4.0 affinity for the current thread at this parallel level.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
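
// A minimal user-side sketch of how the entry point above is typically
// reached (illustrative only; it assumes the documented kmp_* affinity
// extensions declared in omp.h, which forward to __kmp_aux_set_affinity):
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   if (kmp_set_affinity_mask_proc(3, &mask) == 0 &&
//       kmp_set_affinity(&mask) == 0) {
//     // the calling thread now runs only on OS proc 3
//   }
//   kmp_destroy_affinity_mask(&mask);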

int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
            buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS

  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
            buf);
      });
  return retval;

#else
  (void)retval;

  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;

#endif /* KMP_OS_WINDOWS */
}

int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
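
// For example, in the routine above: on Windows with 2 processor groups and a
// 64-bit DWORD_PTR, the maximum is 2 * 8 * CHAR_BIT == 128 procs; with a
// single group (or on non-Windows) the machine-wide __kmp_xproc is returned.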
5003 | ||||
5004 | int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) { | |||
5005 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
5006 | return -1; | |||
5007 | } | |||
5008 | ||||
5009 | KA_TRACE(if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5010 | 1000, (""); {if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5011 | int gtid = __kmp_entry_gtid();if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5012 | char buf[KMP_AFFIN_MASK_PRINT_LEN];if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5013 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5014 | (kmp_affin_mask_t *)(*mask));if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5015 | __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5016 | "affinity mask for thread %d = %s\n",if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5017 | proc, gtid, buf);if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5018 | })if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_set_affinity_mask_proc: setting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; }; | |||
5019 | ||||
5020 | if (__kmp_env_consistency_check) { | |||
5021 | if ((mask == NULL__null) || (*mask == NULL__null)) { | |||
5022 | KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc")__kmp_fatal(__kmp_msg_format(kmp_i18n_msg_AffinityInvalidMask , "kmp_set_affinity_mask_proc"), __kmp_msg_null); | |||
5023 | } | |||
5024 | } | |||
5025 | ||||
5026 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { | |||
5027 | return -1; | |||
5028 | } | |||
5029 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)(__kmp_affin_fullMask)->is_set(proc)) { | |||
5030 | return -2; | |||
5031 | } | |||
5032 | ||||
5033 | KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask))((kmp_affin_mask_t *)(*mask))->set(proc); | |||
5034 | return 0; | |||
5035 | } | |||
5036 | ||||
5037 | int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) { | |||
5038 | if (!KMP_AFFINITY_CAPABLE()(__kmp_affin_mask_size > 0)) { | |||
5039 | return -1; | |||
5040 | } | |||
5041 | ||||
5042 | KA_TRACE(if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5043 | 1000, (""); {if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5044 | int gtid = __kmp_entry_gtid();if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5045 | char buf[KMP_AFFIN_MASK_PRINT_LEN];if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5046 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5047 | (kmp_affin_mask_t *)(*mask));if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5048 | __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5049 | "affinity mask for thread %d = %s\n",if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5050 | proc, gtid, buf);if (kmp_a_debug >= 1000) { __kmp_debug_printf (""); { int gtid = __kmp_get_global_thread_id_reg(); char buf[1024]; __kmp_affinity_print_mask (buf, 1024, (kmp_affin_mask_t *)(*mask)); __kmp_debug_printf( "kmp_unset_affinity_mask_proc: unsetting proc %d in " "affinity mask for thread %d = %s\n" , proc, gtid, buf); }; } | |||
5051 |       });
5052 | 
5053 |   if (__kmp_env_consistency_check) {
5054 |     if ((mask == NULL) || (*mask == NULL)) {
5055 |       KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
5056 |     }
5057 |   }
5058 | 
5059 |   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5060 |     return -1;
5061 |   }
5062 |   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5063 |     return -2;
5064 |   }
5065 | 
5066 |   KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
5067 |   return 0;
5068 | }
5069 | 
5070 | int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
5071 |   if (!KMP_AFFINITY_CAPABLE()) {
5072 |     return -1;
5073 |   }
5074 | 
5075 |   KA_TRACE(
5076 |       1000, (""); {
5077 |         int gtid = __kmp_entry_gtid();
5078 |         char buf[KMP_AFFIN_MASK_PRINT_LEN];
5079 |         __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5080 |                                   (kmp_affin_mask_t *)(*mask));
5081 |         __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
5082 |                            "affinity mask for thread %d = %s\n",
5083 |                            proc, gtid, buf);
5084 |       });
5085 | 
5086 |   if (__kmp_env_consistency_check) {
5087 |     if ((mask == NULL) || (*mask == NULL)) {
5088 |       KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
5089 |     }
5090 |   }
5091 | 
5092 |   if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5093 |     return -1;
5094 |   }
5095 |   if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5096 |     return 0;
5097 |   }
5098 | 
5099 |   return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
5100 | }
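// The tri-state returns implemented above (-1 for an out-of-range proc; -2
// from set/unset, or 0 from get, when proc is outside the machine's full
// mask) surface through the public kmp_* affinity extensions declared in the
// LLVM runtime's omp.h. A minimal caller sketch; it assumes affinity is
// enabled in the runtime, otherwise these calls simply return -1:
//
//   #include <omp.h>
//   #include <stdio.h>
//
//   int main() {
//     kmp_affinity_mask_t mask;
//     kmp_create_affinity_mask(&mask);
//     if (kmp_set_affinity_mask_proc(0, &mask) != 0)  // -1/-2 on a bad proc
//       return 1;
//     printf("proc 0 set: %d\n",
//            kmp_get_affinity_mask_proc(0, &mask));   // 1 if set, 0 if not
//     kmp_unset_affinity_mask_proc(0, &mask);         // clears the bit again
//     kmp_destroy_affinity_mask(&mask);
//     return 0;
//   }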
5101 | 
5102 | // Dynamic affinity settings - Affinity balanced
5103 | void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
5104 |   KMP_DEBUG_ASSERT(th);
5105 |   bool fine_gran = true;
5106 |   int tid = th->th.th_info.ds.ds_tid;
5107 |   const char *env_var = "KMP_AFFINITY";
5108 | 
5109 |   // Do not perform balanced affinity for the hidden helper threads
5110 |   if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
5111 |     return;
5112 | 
5113 |   switch (__kmp_affinity.gran) {
5114 |   case KMP_HW_THREAD:
5115 |     break;
5116 |   case KMP_HW_CORE:
5117 |     if (__kmp_nThreadsPerCore > 1) {
5118 |       fine_gran = false;
5119 |     }
5120 |     break;
5121 |   case KMP_HW_SOCKET:
5122 |     if (nCoresPerPkg > 1) {
5123 |       fine_gran = false;
5124 |     }
5125 |     break;
5126 |   default:
5127 |     fine_gran = false;
5128 |   }
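  // In practice this function implements KMP_AFFINITY=balanced; the switch
  // above only drops to a coarse mask when the requested level really holds
  // more than one hardware thread. Illustrative settings on an SMT machine
  // (__kmp_nThreadsPerCore > 1):
  //
  //   KMP_AFFINITY=balanced,granularity=fine   # pin to a single context
  //   KMP_AFFINITY=balanced,granularity=core   # mask spans the whole core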
5129 | 
5130 |   if (__kmp_topology->is_uniform()) {
5131 |     int coreID;
5132 |     int threadID;
5133 |     // Number of hyper threads per core in HT machine
5134 |     int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
5135 |     // Number of cores
5136 |     int ncores = __kmp_ncores;
5137 |     if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
5138 |       __kmp_nth_per_core = __kmp_avail_proc / nPackages;
5139 |       ncores = nPackages;
5140 |     }
5141 |     // How many threads will be bound to each core
5142 |     int chunk = nthreads / ncores;
5143 |     // How many cores will have an additional thread bound to them - "big cores"
5144 |     int big_cores = nthreads % ncores;
5145 |     // Number of threads on the big cores
5146 |     int big_nth = (chunk + 1) * big_cores;
5147 |     if (tid < big_nth) {
5148 |       coreID = tid / (chunk + 1);
5149 |       threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
5150 |     } else { // tid >= big_nth
5151 |       coreID = (tid - big_cores) / chunk;
5152 |       threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
5153 |     }
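    // A worked example of the split above, with illustrative numbers
    // __kmp_avail_proc = 8, __kmp_ncores = 4 (so __kmp_nth_per_core = 2) and
    // nthreads = 10:
    //   chunk = 10 / 4 = 2, big_cores = 10 % 4 = 2, big_nth = 3 * 2 = 6
    //   tid 0..5 land on the big cores:   coreID = tid / 3       -> 0,0,0,1,1,1
    //   tid 6..9 land on the small cores: coreID = (tid - 2) / 2 -> 2,2,3,3
    // so the first big_cores cores run chunk + 1 threads, the rest run chunk,
    // and threadID folds each core's share back into its 2 contexts.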
5154 |     KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
5155 |                       "Illegal set affinity operation when not capable");
5156 | 
5157 |     kmp_affin_mask_t *mask = th->th.th_affin_mask;
5158 |     KMP_CPU_ZERO(mask);
5159 | 
5160 |     if (fine_gran) {
5161 |       int osID =
5162 |           __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
5163 |       KMP_CPU_SET(osID, mask);
5164 |     } else {
5165 |       for (int i = 0; i < __kmp_nth_per_core; i++) {
5166 |         int osID;
5167 |         osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
5168 |         KMP_CPU_SET(osID, mask);
5169 |       }
5170 |     }
5171 |     if (__kmp_affinity.flags.verbose) {
5172 |       char buf[KMP_AFFIN_MASK_PRINT_LEN];
5173 |       __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5174 |       KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
5175 |                  tid, buf);
5176 |     }
5177 |     __kmp_affinity_get_thread_topology_info(th);
5178 |     __kmp_set_system_affinity(mask, TRUE);
5179 |   } else { // Non-uniform topology
5180 | 
5181 |     kmp_affin_mask_t *mask = th->th.th_affin_mask;
5182 |     KMP_CPU_ZERO(mask);
5183 | 
5184 |     int core_level =
5185 |         __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
5186 |     int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
5187 |                                                __kmp_aff_depth - 1, core_level);
5188 |     int nth_per_core = __kmp_affinity_max_proc_per_core(
5189 |         __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
5190 | 
5191 |     // For performance gain consider the special case nthreads ==
5192 |     // __kmp_avail_proc
5193 |     if (nthreads == __kmp_avail_proc) {
5194 |       if (fine_gran) {
5195 |         int osID = __kmp_topology->at(tid).os_id;
5196 |         KMP_CPU_SET(osID, mask);
5197 |       } else {
5198 |         int core =
5199 |             __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
5200 |         for (int i = 0; i < __kmp_avail_proc; i++) {
5201 |           int osID = __kmp_topology->at(i).os_id;
5202 |           if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
5203 |               core) {
5204 |             KMP_CPU_SET(osID, mask);
5205 |           }
5206 |         }
5207 |       }
5208 |     } else if (nthreads <= ncores) {
5209 | 
5210 |       int core = 0;
5211 |       for (int i = 0; i < ncores; i++) {
5212 |         // Check if this core from procarr[] is in the mask
5213 |         int in_mask = 0;
5214 |         for (int j = 0; j < nth_per_core; j++) {
5215 |           if (procarr[i * nth_per_core + j] != -1) {
5216 |             in_mask = 1;
5217 |             break;
5218 |           }
5219 |         }
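        // procarr is filled in earlier in this file by the balanced-affinity
        // setup: ncores x nth_per_core OS proc ids, with -1 marking an
        // unavailable context. E.g. {0, 1, 2, -1} describes two 2-way cores
        // where core 1 kept only OS proc 2, so "core" below only counts
        // cores that still own at least one context.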
5220 |         if (in_mask) {
5221 |           if (tid == core) {
5222 |             for (int j = 0; j < nth_per_core; j++) {
5223 |               int osID = procarr[i * nth_per_core + j];
5224 |               if (osID != -1) {
5225 |                 KMP_CPU_SET(osID, mask);
5226 |                 // For fine granularity it is enough to set the first available
5227 |                 // osID for this core
5228 |                 if (fine_gran) {
5229 |                   break;
5230 |                 }
5231 |               }
5232 |             }
5233 |             break;
5234 |           } else {
5235 |             core++;
5236 |           }
5237 |         }
5238 |       }
5239 |     } else { // nthreads > ncores
5240 |       // Array to save the number of processors at each core
5241 |       int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5242 |       // Array to save the number of cores with exactly "x" available processors
5243 |       int *ncores_with_x_procs =
5244 |           (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5245 |       // Array to save the number of cores with at least "x" available processors
5246 |       int *ncores_with_x_to_max_procs =
5247 |           (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5248 | 
5249 |       for (int i = 0; i <= nth_per_core; i++) {
5250 |         ncores_with_x_procs[i] = 0;
5251 |         ncores_with_x_to_max_procs[i] = 0;
5252 |       }
5253 | 
5254 |       for (int i = 0; i < ncores; i++) {
5255 |         int cnt = 0;
5256 |         for (int j = 0; j < nth_per_core; j++) {
5257 |           if (procarr[i * nth_per_core + j] != -1) {
5258 |             cnt++;
5259 |           }
5260 |         }
5261 |         nproc_at_core[i] = cnt;
5262 |         ncores_with_x_procs[cnt]++;
5263 |       }
5264 | 
5265 |       for (int i = 0; i <= nth_per_core; i++) {
5266 |         for (int j = i; j <= nth_per_core; j++) {
5267 |           ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5268 |         }
5269 |       }
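      // Example, assuming ncores = 3, nth_per_core = 2 and one core with a
      // single usable context, i.e. nproc_at_core = {2, 1, 2}:
      //   ncores_with_x_procs        = {0, 1, 2}  // exactly x contexts
      //   ncores_with_x_to_max_procs = {3, 3, 2}  // at least x contexts
      // Pass j of the placement loop below can therefore make at most
      // ncores_with_x_to_max_procs[j] placements before every core owning
      // at least j contexts has been visited.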
5270 | 
5271 |       // Max number of processors
5272 |       int nproc = nth_per_core * ncores;
5273 |       // An array to keep the number of threads assigned to each context
5274 |       int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5275 |       for (int i = 0; i < nproc; i++) {
5276 |         newarr[i] = 0;
5277 |       }
5278 | 
5279 |       int nth = nthreads;
5280 |       int flag = 0;
5281 |       while (nth > 0) {
5282 |         for (int j = 1; j <= nth_per_core; j++) {
5283 |           int cnt = ncores_with_x_to_max_procs[j];
5284 |           for (int i = 0; i < ncores; i++) {
5285 |             // Skip any core with 0 available processors
5286 |             if (nproc_at_core[i] == 0) {
5287 |               continue;
5288 |             }
5289 |             for (int k = 0; k < nth_per_core; k++) {
5290 |               if (procarr[i * nth_per_core + k] != -1) {
5291 |                 if (newarr[i * nth_per_core + k] == 0) {
5292 |                   newarr[i * nth_per_core + k] = 1;
5293 |                   cnt--;
5294 |                   nth--;
5295 |                   break;
5296 |                 } else {
5297 |                   if (flag != 0) {
5298 |                     newarr[i * nth_per_core + k]++;
5299 |                     cnt--;
5300 |                     nth--;
5301 |                     break;
5302 |                   }
5303 |                 }
5304 |               }
5305 |             }
5306 |             if (cnt == 0 || nth == 0) {
5307 |               break;
5308 |             }
5309 |           }
5310 |           if (nth == 0) {
5311 |             break;
5312 |           }
5313 |         }
5314 |         flag = 1;
5315 |       }
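      // Example: 2 cores x 2 contexts, all available, nthreads = 5. The
      // first sweep (flag == 0) places at most one thread per context,
      // breadth-first across cores: newarr = {1, 1, 1, 1}, one thread left.
      // With flag == 1 the next sweep may stack threads, so the final
      // distribution is newarr = {2, 1, 1, 1}; the prefix-sum scan below
      // then maps tid 0..1 to context 0 and tid 2..4 to contexts 1..3.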
5316 |       int sum = 0;
5317 |       for (int i = 0; i < nproc; i++) {
5318 |         sum += newarr[i];
5319 |         if (sum > tid) {
5320 |           if (fine_gran) {
5321 |             int osID = procarr[i];
5322 |             KMP_CPU_SET(osID, mask);
5323 |           } else {
5324 |             int coreID = i / nth_per_core;
5325 |             for (int ii = 0; ii < nth_per_core; ii++) {
5326 |               int osID = procarr[coreID * nth_per_core + ii];
5327 |               if (osID != -1) {
5328 |                 KMP_CPU_SET(osID, mask);
5329 |               }
5330 |             }
5331 |           }
5332 |           break;
5333 |         }
5334 |       }
5335 |       __kmp_free(newarr);
5336 |     }
5337 | 
5338 |     if (__kmp_affinity.flags.verbose) {
5339 |       char buf[KMP_AFFIN_MASK_PRINT_LEN];
5340 |       __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5341 |       KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
5342 |                  tid, buf);
5343 |     }
5344 |     __kmp_affinity_get_thread_topology_info(th);
5345 |     __kmp_set_system_affinity(mask, TRUE);
5346 |   }
5347 | }
5348 | 
5349 | #if KMP_OS_LINUX || KMP_OS_FREEBSD
5350 | // We don't need this entry point on Windows because there is the
5351 | // GetProcessAffinityMask() API.
5352 | //
5353 | // The intended usage is indicated by these steps:
5354 | // 1) The user gets the current affinity mask
5355 | // 2) Then sets the affinity by calling this function
5356 | // 3) Error check the return value
5357 | // 4) Use non-OpenMP parallelization
5358 | // 5) Reset the affinity to what was stored in step 1) (see the sketch below)
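// A sketch of those five steps from a caller's perspective. The helper
// launch_non_openmp_threads() is hypothetical, and the sketch assumes a
// glibc/Linux build where sched_getaffinity()/sched_setaffinity() and this
// exported entry point are available:
//
//   #include <sched.h>
//
//   extern "C" int kmp_set_thread_affinity_mask_initial();
//
//   void run_external_threads() {
//     cpu_set_t saved;
//     sched_getaffinity(0, sizeof(saved), &saved);      // 1) save the mask
//     int rc = kmp_set_thread_affinity_mask_initial();  // 2) widen to initial
//     if (rc == 0)                                      // 3) check the result
//       launch_non_openmp_threads();                    // 4) external work
//     sched_setaffinity(0, sizeof(saved), &saved);      // 5) restore the mask
//   }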
5359 | #ifdef __cplusplus
5360 | extern "C"
5361 | #endif
5362 |     int
5363 |     kmp_set_thread_affinity_mask_initial()
5364 | // The function returns 0 on success,
5365 | //   -1 if we cannot bind the thread,
5366 | //   >0 (errno) if an error happened during binding
5367 | {
5368 |   int gtid = __kmp_get_gtid();
5369 |   if (gtid < 0) {
5370 |     // Do not touch non-omp threads
5371 |     KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "