#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
kmp_topology_t *__kmp_topology = nullptr;
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need
  // to init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}
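
// Example: __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true) returns
// the localized catalog string for "Cores"; any type without a case above
// falls through to the "Unknown" string.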
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
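
// compare_ids() is a qsort()-style comparator: topology ids are compared from
// the outermost layer inward, with os_id as the final tie-breaker, e.g.:
//   qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
//         kmp_hw_thread_t::compare_ids);
// which is, in effect, what kmp_topology_t::sort_ids() does.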
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
  KMP_DEBUG_ASSERT(__kmp_affinity_compact <= depth);
  for (i = 0; i < __kmp_affinity_compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - __kmp_affinity_compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
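
// compare_compact() sorts the deepest __kmp_affinity_compact layers first,
// then the remaining outer layers. For example, with depth == 3 (socket,
// core, thread) and __kmp_affinity_compact == 1, hardware threads are ordered
// by sub_ids[2] (thread), then sub_ids[0] (socket), then sub_ids[1] (core).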
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  printf("\n");
}
// Remove layers which contain no extra information (i.e., have a 1:1
// relationship with the layer immediately below them).
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associated with each layer
  preference[KMP_HW_PROC_GROUP] = 110;
  preference[KMP_HW_SOCKET] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads)
    // to be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the indexes for the second (deeper) layer are the same,
      // then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove the layer by setting the equivalence, removing the id from
      // the hw threads, and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
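
// Illustration: on a machine enumerated as socket x L3 x core x thread where
// every socket contains exactly one L3, the L3 layer is radix-1; the loop
// above keeps the higher-preference socket layer, records the L3 type as its
// equivalent via set_equivalent_type(), and shifts the remaining layers up.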
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tiles not detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
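
// Example: for a uniform 2 sockets x 2 cores/socket x 2 threads/core machine,
// this pass computes count = {2, 4, 8} (total objects per layer) and
// ratio = {2, 2, 2} (maximum number of children per parent object).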
// Determine if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
// Set the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
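
// After this pass, sub_ids[j] is the index of the hardware thread's ancestor
// at layer j relative to its siblings under the same parent, e.g., with
// 2 sockets x 2 cores the four entries get sub_ids {0,0}, {0,1}, {1,0}, {1,1}.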
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * ndepth * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr = (int *)(bytes + sizeof(kmp_topology_t) +
                     sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + ndepth;
  retval->count = arr + 2 * ndepth;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
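
// allocate() carves everything out of a single __kmp_allocate() block laid
// out as [kmp_topology_t | nproc kmp_hw_thread_t | 3 * ndepth ints], with the
// trailing ints split into the types, ratio, and count arrays; deallocate()
// can therefore release the whole topology with one __kmp_free().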
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
// Check to see if the hardware threads have unique ids
bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
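
// The quick-topology line built above reads, e.g.,
// "2 Sockets x 12 Cores/Socket x 2 Threads/Core": the first term uses the
// "%d %s" form, and each later term the " x %d %s/%s" form against the
// previous (denominator) layer.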
void kmp_topology_t::canonicalize() {
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually Add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }

#if KMP_AFFINITY_SUPPORTED
  // Set the number of affinity granularity levels
  if (__kmp_affinity_gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(__kmp_affinity_gran);
    // Check if the granularity level type exists in the detected topology
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (__kmp_topology->get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_WARNING(AffGranularityBad, "KMP_AFFINITY",
                  __kmp_hw_get_catalog_string(__kmp_affinity_gran),
                  __kmp_hw_get_catalog_string(gran_type));
      __kmp_affinity_gran = gran_type;
    }
    __kmp_affinity_gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      __kmp_affinity_gran_levels++;
  }
#endif // KMP_AFFINITY_SUPPORTED
}
// Canonicalize an explicit packages x cores/pkg x threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  _discover_uniformity();
}
// Apply the KMP_HW_SUBSET envirable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    int num = __kmp_hw_subset->at(i).num;
    int offset = __kmp_hw_subset->at(i).offset;
    kmp_hw_t type = __kmp_hw_subset->at(i).type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_WARNING(AffHWSubsetNotExistGeneric,
                  __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been specified
    // either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_WARNING(AffHWSubsetEqvLayers, __kmp_hw_get_catalog_string(type),
                  __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if layers are in order
    if (i + 1 < hw_subset_depth) {
      kmp_hw_t next_type = get_equivalent_type(__kmp_hw_subset->at(i + 1).type);
      if (next_type == KMP_HW_UNKNOWN) {
        KMP_WARNING(
            AffHWSubsetNotExistGeneric,
            __kmp_hw_get_catalog_string(__kmp_hw_subset->at(i + 1).type));
        return false;
      }
      int next_topology_level = get_level(next_type);
      if (level > next_topology_level) {
        KMP_WARNING(AffHWSubsetOutOfOrder, __kmp_hw_get_catalog_string(type),
                    __kmp_hw_get_catalog_string(next_type));
        return false;
      }
    }

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 || num + offset > max_count) {
      bool plural = (num > 1);
      KMP_WARNING(AffHWSubsetManyGeneric,
                  __kmp_hw_get_catalog_string(type, plural));
      return false;
    }
  }

  // Apply the filtered hardware subset
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int level = 0, hw_subset_index = 0;
         level < depth && hw_subset_index < hw_subset_depth; ++level) {
      kmp_hw_t topology_type = types[level];
      auto hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      kmp_hw_t hw_subset_type = hw_subset_item.type;
      if (topology_type != hw_subset_type)
        continue;
      int num = hw_subset_item.num;
      int offset = hw_subset_item.offset;
      hw_subset_index++;
      if (hw_thread.sub_ids[level] < offset ||
          hw_thread.sub_ids[level] >= offset + num) {
        should_be_filtered = true;
        break;
      }
    }
    if (!should_be_filtered) {
      if (i != new_index)
        hw_threads[new_index] = hw_thread;
      new_index++;
    } else {
#if KMP_AFFINITY_SUPPORTED
      KMP_CPU_CLR(hw_thread.os_id, __kmp_affin_fullMask);
#endif
      __kmp_avail_proc--;
    }
  }
  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  _gather_enumeration_information();
  _discover_uniformity();
  _set_globals();
  _set_last_level_cache();
  return true;
}
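
// Example: KMP_HW_SUBSET=2s,4c,1t keeps only hardware threads whose socket
// sub_id is in [0,2), core sub_id in [0,4), and thread sub_id in [0,1); a
// per-layer offset (e.g., "4c@2") shifts the retained window for that layer.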
bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
  if (hw_level >= depth)
    return true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return true;
}
#if KMP_AFFINITY_SUPPORTED
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  bool restored;

public:
  kmp_affinity_raii_t() : restored(false) {
    KMP_CPU_ALLOC(mask);
    KMP_ASSERT(mask != NULL);
    __kmp_get_system_affinity(mask, TRUE);
  }
  void restore() {
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE(mask);
    restored = true;
  }
  ~kmp_affinity_raii_t() {
    if (!restored) {
      __kmp_set_system_affinity(mask, TRUE);
      KMP_CPU_FREE(mask);
    }
  }
};

bool KMPAffinity::picked_api = false;
void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity_type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }
// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
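
// Example output: a mask with bits {0,1,2,3,8} set prints as "0-3,8"; runs of
// three or more contiguous bits are collapsed to "start-end", shorter runs
// are printed as individual ids.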
// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
void __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    for (proc = 0; proc < __kmp_xproc; proc++) {
      KMP_CPU_SET(proc, mask);
    }
  }
}

// The affinity mask for the full machine / initial process.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}
// Translate a hwloc object into its KMP_HW_ topology type equivalent
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' within the
// topology tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and
// type is HWLOC_OBJ_PU, then this will return the number of PU's under the
// SOCKET object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type,
                                                       first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}
// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // store sub_id + 1 so that 0 is differentiated from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  KMP_ASSERT(pu);
  obj = pu;
  types[depth] = KMP_HW_THREAD;
  hwloc_types[depth] = obj->type;
  depth++;
  while (obj != root && obj != NULL) {
    obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
  }
  KMP_ASSERT(depth > 0);

  // Get the order for the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu)) != NULL) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA Nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children of obj
      // (memory_first_child points to the first memory child).
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }
  __kmp_topology->sort_ids();
  return true;
}
#endif // KMP_USE_HWLOC
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity_verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity_type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, &
  // nPackages. Make sure all these vars are set correctly.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity_verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
1318 #if KMP_GROUP_AFFINITY
1323 static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *
const msg_id) {
1324 *msg_id = kmp_i18n_null;
1326 kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
1327 const static size_t BITS_PER_GROUP = CHAR_BIT *
sizeof(DWORD_PTR);
1329 if (__kmp_affinity_verbose) {
1330 KMP_INFORM(AffWindowsProcGroupMap,
"KMP_AFFINITY");
1334 if (!KMP_AFFINITY_CAPABLE()) {
1335 KMP_ASSERT(__kmp_affinity_type == affinity_none);
1336 nPackages = __kmp_num_proc_groups;
1337 __kmp_nThreadsPerCore = 1;
1338 __kmp_ncores = __kmp_xproc;
1339 nCoresPerPkg = nPackages / __kmp_ncores;
1344 __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1347 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1349 if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1352 kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
1354 hw_thread.os_id = i;
1355 hw_thread.ids[0] = i / BITS_PER_GROUP;
1356 hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
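
// Worked examples: __kmp_extract_bits<8, 15>(v) is equivalent to
// (v >> 8) & 0xff, and __kmp_cpuid_mask_width(6) returns 3, since 2^3 = 8 is
// the smallest power of two >= 6.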
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; //      ""
  unsigned maxThreadsPerPkg; //      ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; //      ""
  unsigned threadId; //      ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class kmp_cache_info_t {
public:
  struct info_t {
    unsigned level, mask;
  };
  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

private:
  static const int MAX_CACHE_LEVEL = 3;

  size_t depth;
  info_t table[MAX_CACHE_LEVEL];

  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
};
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use an
// algorithm which cycles through the available os threads, and interrogates
// the apic id for each one. It uses the apic id and the max number of threads
// per package to construct the topology map.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // The algorithm used starts by setting the affinity to each available
  // thread and retrieving info from the cpuid instruction, so if we are not
  // capable of binding, use the defaults calculated from issuing cpuid on the
  // current thread alone.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4). 1 must be added to the
    // encoded value. First check that cpuid(4) is supported on this chip: to
    // see if cpuid(n) is supported, issue cpuid(0) and check if eax has the
    // value n or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no reliable way to tell if HT is enabled without binding to
    // each thread, so if the machine is not affinity capable, assume HT is
    // off.
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity_type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  // The apic id gives the package, core, and thread ids; the max threads and
  // max cores per package give the bit widths of the core and thread fields.
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
    // value, and cpuid(4) support must be checked via cpuid(0) first.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained
    // locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // I've never seen this one happen, but I suppose it could, if the cpuid
      // instruction on a chip was really screwed up.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // Determine nPackages, nCoresPerPkg, and __kmp_nThreadsPerCore from the
  // sorted table; we only have upper bounds so far. Also perform a
  // consistency check: every thread in a package must report the same
  // maxCoresPerPkg and maxThreadsPerPkg.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consistency check variables
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      nPackages++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check that the maxCoresPerPkg and maxThreadsPerPkg fields agree between
    // all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Now that we've determined the number of packages, the number of cores per
  // package, and the number of threads per core, construct the data structure
  // to be returned.
  int idx = 0;
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  kmp_hw_t types[3];
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on CPUID leaf B (0xB) or leaf 1F.
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_TILE = 3,
  INTEL_LEVEL_TYPE_MODULE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};

struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET; // Package level
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}
// This function takes the topology leaf, a levels array to store the levels
// detected, and a bitmap of the known levels.
// Returns the number of levels in the topology.
static unsigned
__kmp_x2apicid_get_levels(int leaf,
                          cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
                          kmp_uint64 known_levels) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;

  // New algorithm has known topology layers act as highest unknown topology
  // layers when unknown topology layers exist.
  // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z>
  // are unknown topology layers, Then SMT will take the characteristics of
  // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>).
  // This eliminates unknown portions of the topology while still keeping the
  // correct structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
      return 0;

    if (known_levels & (1ull << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);

  // Set the masks to & with apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(levels_index > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
  }
  return levels_index;
}
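
// Example: with an SMT sub-leaf of mask_width 1 and a CORE sub-leaf of
// mask_width 5, the loop above yields SMT mask 0x1, CORE mask 0x1e (0x1f with
// the SMT bits XOR'ed out), and a package mask of ~0x1f via the trailing
// INTEL_LEVEL_TYPE_INVALID entry.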
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  unsigned levels_index;
  kmp_uint64 known_levels;
  int topology_leaf, highest_leaf, apic_id;
  int num_leaves;
  static int leaves[] = {0, 0};

  kmp_i18n_id_t leaf_message_id;

  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);

  *msg_id = kmp_i18n_null;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Get the known levels for this architecture
  known_levels = 0ull;
  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
    if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
      known_levels |= (1ull << i);
    }
  }

  // Get the highest cpuid leaf supported
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // If a specific topology method was requested, only allow that specific
  // leaf; otherwise, try both leaves 31 and 11 in that order.
  num_leaves = 0;
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if the requested cpuid leaf is supported.
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  topology_leaf = -1;
  levels_index = 0;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
    if (levels_index == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || levels_index == 0) {
    *msg_id = leaf_message_id;
    return false;
  }
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);

  // The algorithm used starts by setting the affinity to each available
  // thread and retrieving info from the cpuid instruction, so if we are not
  // capable of binding, use the defaults calculated without binding.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    for (unsigned i = 0; i < levels_index; ++i) {
      if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = levels[i].nitems;
      } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

  // Allocate the data structure to be returned.
  int depth = levels_index;
  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
    types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
  __kmp_topology =
      kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);

  // Insert equivalent cache types if they exist
  kmp_cache_info_t cache_info;
  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
    const kmp_cache_info_t::info_t &info = cache_info[i];
    unsigned cache_mask = info.mask;
    unsigned cache_level = info.level;
    for (unsigned j = 0; j < levels_index; ++j) {
      unsigned hw_cache_mask = levels[j].cache_mask;
      kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
      if (hw_cache_mask == cache_mask && j < levels_index - 1) {
        kmp_hw_t type =
            __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
        __kmp_topology->set_equivalent_type(cache_type, type);
      }
    }
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity_type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  unsigned int proc;
  int hw_thread_index = 0;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
    unsigned my_levels_index;

    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(proc);

    // New algorithm
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    apic_id = buf.edx;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
    if (my_levels_index == 0 || my_levels_index != levels_index) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }
    hw_thread.clear();
    hw_thread.os_id = proc;
    // Put in topology information
    for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
      hw_thread.ids[idx] = apic_id & my_levels[j].mask;
      if (j > 0) {
        hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
      }
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    return false;
  }
  return true;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to number of cores.
  // Set the maximum number of L2's to either number of cores / 2 for
  // Intel(R) Xeon Phi(TM) coprocessor, or the number of cores for Intel(R)
  // Xeon(R) processors.
  // Set the maximum number of NUMA nodes and L3's to number of packages.
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
  // Set the number of threads per unit
  // Number of hardware threads per L1/L2/L3/NUMA/LOOP
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}

// Return the index into the hierarchy for this tid and layer type (L1, L2,
// etc.) e.g., this thread's L1 or this thread's L2, etc.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads =
      __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}

// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads/t2) / (nthreads/t1) = t1 / t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
#endif // KMP_USE_HIER_SCHED
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map.
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan of the file, and count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    // FIXME - this will match "node_<n> <garbage>"
    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
      continue;
    }
  }

  // Check for empty file / no valid processor records, or too many. The
  // number of records can't exceed the number of valid bits in the affinity
  // mask.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can scan the file
  // again, this time performing a full parse of the data.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }

  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  unsigned i;
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }

  unsigned num_avail = 0;
  *line = 0;
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end
    // of the loop appear in an outer scoping level. This avoids warnings
    // about jumping past an initialization to a target in the same block.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF. If there is valid data in
        // threadInfo[num_avail], then fake a blank line to ensure that the
        // last record gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't emit an
        // error if we were going to ignore the line, anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

      char s1[] = "processor";
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Handle the old AArch64 /proc/cpuinfo layout differently,
          // it contains all of the 'processor' entries listed in a
          // single 'Processor' section, therefore the normal looking
          // for duplicates in that section will always fail.
          num_avail++;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
        continue;
#else
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        KMP_ASSERT(nodeIdIndex + level <= maxIndex);
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line. There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go
      // on to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }

      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields. The osId field must be there, and we
      // currently require that the physical id field is specified, also.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }
      if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }

      // Skip this proc if it is not included in the machine model.
      if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;

#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned
  // among the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or
  // the total # packages) are at this point - we want to determine that now.
  // We only have an upper bound on the first two figures.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;

restart_radix_check:
  threadIdCt = 0;

  // Initialize the counter arrays with data from threadInfo[0].
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the records in the table, and update the
  // counters.
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the id for the
    // previous record.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some records, but
        // not others. Start the thread id counter off at the next higher
        // thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Run through all indices which are less significant, and reset the
        // counts to 1. At all levels up to and including index, we need to
        // increment the totals and record the last id.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];

        if (assign_thread_ids && (index > threadIdIndex)) {

#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

          // Restart the thread counter, as we are on a new core.
          threadIdCt = 0;

          // Auto-assign the thread id field if it wasn't specified.
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }

          // Apparently the thread id field was specified for some records
          // but not others. Start the thread id counter off at the next
          // higher thread id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also, check that we haven't already restarted the loop (to be safe -
      // shouldn't need to).
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }

      // If the thread ids were not specified and we see entries that are
      // duplicates, start the loop over and assign the thread ids manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads.
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, &
  // nPackages. Make sure all these vars are set correctly, and return now if
  // affinity is not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);

  // Count the number of levels which have more nodes at that level than at
  // the parent's level (with there being an implicit root node of the top
  // level). This is equivalent to saying that there is at least one node at
  // this level which has a sibling. These levels are in the map, and the
  // package level is always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;

  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the data structure that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);

  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    hw_thread.os_id = os;

    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
2719 static kmp_affin_mask_t *__kmp_create_masks(
unsigned *maxIndex,
2720 unsigned *numUnique) {
2724 int numAddrs = __kmp_topology->get_num_hw_threads();
2725 int depth = __kmp_topology->get_depth();
2726 KMP_ASSERT(numAddrs);
2730 for (i = numAddrs - 1;; --i) {
2731 int osId = __kmp_topology->at(i).os_id;
2732 if (osId > maxOsId) {
2738 kmp_affin_mask_t *osId2Mask;
2739 KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
2740 KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2741 if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2742 KMP_INFORM(ThreadsMigrate,
"KMP_AFFINITY", __kmp_affinity_gran_levels);
2744 if (__kmp_affinity_gran_levels >= (
int)depth) {
2745 if (__kmp_affinity_verbose ||
2746 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
2747 KMP_WARNING(AffThreadsMayMigrate);
  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core will have identical topology ids, except for
  // the last (thread) level, and will appear consecutively.
  int unique = 0;
  int j = 0; // j is the first thread on a core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);
  KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
  for (i = 1; i < numAddrs; i++) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), then set the bit for this os thread in the
    // affinity mask for this group, and go on to the next thread.
    if (__kmp_topology->is_close(leader, i, __kmp_affinity_gran_levels)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry in
    // the osId2Mask table. Mark the first address as a leader.
    for (; j < i; j++) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // For every thread in the last group, copy the mask to the thread's
  // entry in the osId2Mask table.
  for (; j < i; j++) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  *maxIndex = maxOsId;
  *numUnique = unique;
  return osId2Mask;
}
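// Granularity sketch: with granularity=core on a 2-way SMT machine, each
// osId2Mask entry covers both hardware threads of a core and *numUnique is
// the core count; with granularity=fine every mask holds a single OS proc,
// so *numUnique equals the number of hardware threads.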
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }
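// ADD_MASK appends (_mask) to the global newMasks vector, doubling the
// backing array whenever nextNewMask reaches numNewMasks (allocate 2x, copy
// the old masks, free the old array), so appends are amortized O(1).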
#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      if (__kmp_affinity_verbose ||                                            \
          (__kmp_affinity_warnings &&                                          \
           (__kmp_affinity_type != affinity_none))) {                          \
        KMP_WARNING(AffIgnoreInvalidProcID, _osId);                            \
      }                                                                        \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }
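// Usage sketch: ADD_MASK_OSID(5, osId2Mask, maxOsId) appends the mask
// recorded for OS proc 5, or warns (AffIgnoreInvalidProcID) and adds
// nothing when proc 5 is out of range or absent from the table.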
// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
                                            unsigned int *out_numMasks,
                                            const char *proclist,
                                            kmp_affin_mask_t *osId2Mask,
                                            int maxOsId) {
  int i;
  const char *scan = proclist;
  const char *next = proclist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0; // only count new processors in setSize
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none))) {
          KMP_WARNING(AffIgnoreInvalidProcID, num);
        }
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }
      // Scan the rest of the set: ", num" repeats until '}'.
      for (;;) {
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }
        if (*next == ',')
          next++; // skip ','
        SKIP_WS(next);
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask.
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          if (__kmp_affinity_verbose ||
              (__kmp_affinity_warnings &&
               (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffIgnoreInvalidProcID, num);
          }
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0)
        ADD_MASK(sumMask);
      continue;
    }
    // Read the first integer.
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on.
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);
      if (*next == ',') {
        next++; // skip ','
      }
      scan = next;
      continue;
    }

    // This is a range. Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter.
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      // A stride is specified. Skip over the ':' and read the 3rd int.
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++;
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks.
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list.
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma.
    SKIP_WS(next);
    if (*next == ',') {
      next++;
    }
    scan = next;
  }
  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
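// For reference, this parser accepts the KMP_AFFINITY explicit proclist
// syntax, e.g. (an illustrative value, not taken from this file):
//   KMP_AFFINITY="explicit,proclist=[3,0,{1,2},27-30:2]"
// where "{1,2}" ORs both procs into one mask, "27-30:2" is a strided
// inclusive range, and invalid proc ids are warned about and skipped.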
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affin_mask_t *osId2Mask,
                                        int maxOsId, kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none))) {
          KMP_WARNING(AffIgnoreInvalidProcID, start);
        }
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          if (__kmp_affinity_verbose ||
              (__kmp_affinity_warnings &&
               (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffIgnoreInvalidProcID, start);
          }
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter.
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // valid follow sets are ',' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          if (__kmp_affinity_verbose ||
              (__kmp_affinity_warnings &&
               (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffIgnoreInvalidProcID, start);
          }
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;

  // valid follow sets are '{' '!' and num
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      if (__kmp_affinity_verbose ||
          (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
        KMP_WARNING(AffIgnoreInvalidProcID, num);
      }
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
                                      unsigned int *out_numMasks,
                                      const char *placelist,
                                      kmp_affin_mask_t *osId2Mask,
                                      int maxOsId) {
  int i, j, count, stride, sign;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial
  //   place to form the current place
  // previousMask contains the previous place
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter.
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter.
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride.
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from that.
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if ((__kmp_affinity_verbose ||
               (__kmp_affinity_warnings &&
                (__kmp_affinity_type != affinity_none))) &&
              i < count - 1) {
            KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // valid follow sets are ',' and EOL
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}

#undef ADD_MASK
#undef ADD_MASK_OSID
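// Placelist sketch: an OMP_PLACES-style "{0,1}:4:2" starts from place
// {0,1} and produces 4 places by shifting every bit by the stride 2:
// {0,1},{2,3},{4,5},{6,7} (provided those OS procs all exist).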
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

// Find to which cluster/core the given processing unit belongs.
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

// Maximal number of processing units bound to one core at the given level.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}

static int *procarr = NULL;
static int __kmp_aff_depth = 0;
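// Example (a sketch): on a uniform 2-socket x 4-core x 2-thread topology,
// bottom_level is 2, __kmp_affinity_find_core_level() returns 1 (the core
// layer), __kmp_affinity_compute_ncores() returns 8 cores, and
// __kmp_affinity_max_proc_per_core() returns 2.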
// Create a one-element mask array (set of places) which only contains the
// initial process's affinity mask.
static void __kmp_create_affinity_none_places() {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(__kmp_affinity_type == affinity_none);
  __kmp_affinity_num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
}
static void __kmp_aux_affinity_initialize(void) {
  if (__kmp_affinity_masks != NULL) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    if (__kmp_affinity_respect_mask) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none))) {
          KMP_WARNING(ErrorInitializeAffinity);
        }
        __kmp_affinity_type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
      }
    } else {
      if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
      }
      __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
      __kmp_avail_proc = __kmp_xproc;
#if KMP_OS_WINDOWS
      // Set the process affinity mask since threads' affinity
      // masks must be a subset of the process mask in Windows* OS.
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }
  kmp_i18n_id_t msg_id = kmp_i18n_null;

  // For backward compatibility, setting KMP_CPUINFO_FILE =>
  // KMP_TOPOLOGY_METHOD=cpuinfo
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  bool success = false;

  // In the default code path, errors are not fatal - we just try using
  // another discovery method.
  if (__kmp_affinity_top_method == affinity_top_method_all) {
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && __kmp_affinity_verbose) {
          KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
        }
      } else if (__kmp_affinity_verbose) {
        KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }
  // If the user has specified that a particular topology discovery method is
  // to be used, then we abort if that method fails.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // Should not fail.
    KMP_ASSERT(success);
  }
  // Early exit if the topology could not be created.
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE() &&
        (__kmp_affinity_verbose ||
         (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
      KMP_WARNING(ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (__kmp_affinity_verbose) {
        __kmp_topology->print("KMP_AFFINITY");
      }
    }
    __kmp_affinity_type = affinity_none;
    __kmp_create_affinity_none_places();
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    KMP_AFFINITY_DISABLE();
    return;
  }

  // Canonicalize the topology, apply any KMP_HW_SUBSET filter, and seed the
  // hierarchical barrier machinery with the final hardware thread count.
  __kmp_topology->canonicalize();
  if (__kmp_affinity_verbose)
    __kmp_topology->print("KMP_AFFINITY");
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered && __kmp_affinity_verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
  // If KMP_AFFINITY=none, then only create the single "none" place.
  if (__kmp_affinity_type == affinity_none) {
    __kmp_create_affinity_none_places();
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    return;
  }
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id.
  unsigned maxIndex;
  unsigned numUnique;
  kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique);
  if (__kmp_affinity_gran_levels == 0) {
    KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
  }
  switch (__kmp_affinity_type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
    if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(
          &__kmp_affinity_masks, &__kmp_affinity_num_masks,
          __kmp_affinity_proclist, osId2Mask, maxIndex);
    } else {
      __kmp_affinity_process_placelist(
          &__kmp_affinity_masks, &__kmp_affinity_num_masks,
          __kmp_affinity_proclist, osId2Mask, maxIndex);
    }
    if (__kmp_affinity_num_masks == 0) {
      if (__kmp_affinity_verbose ||
          (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
        KMP_WARNING(AffNoValidProcID);
      }
      __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places();
      return;
    }
    break;
  // The other affinity types rely on sorting the hardware threads according
  // to some permutation of the machine topology tree.
  case affinity_logical:
    __kmp_affinity_compact = 0;
    if (__kmp_affinity_offset) {
      __kmp_affinity_offset =
          __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      __kmp_affinity_compact = 1;
      if (__kmp_affinity_compact >= depth) {
        __kmp_affinity_compact = 0;
      }
    } else {
      __kmp_affinity_compact = 0;
    }
    if (__kmp_affinity_offset) {
      __kmp_affinity_offset =
          __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = 0;
    } else {
      __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = depth - 1;
    }
    goto sortTopology;
  case affinity_balanced:
    if (depth <= 1) {
      if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
        KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
      }
      __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places();
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage.
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
          KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
        }
        __kmp_affinity_type = affinity_none;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = depth - 1;
    }
  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (__kmp_affinity_dups) {
      __kmp_affinity_num_masks = __kmp_avail_proc;
    } else {
      __kmp_affinity_num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
      __kmp_affinity_num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);

    // Sort the topology table according to the current setting of
    // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
    __kmp_topology->sort_compact();
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!__kmp_affinity_dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        if (++j >= __kmp_affinity_num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
    }
    // Sort the topology back using ids.
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }

  KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
}
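// Note on the mapping above: sort_compact() orders hardware threads by the
// permuted topology ids, so affinity_compact places consecutive OpenMP
// threads on neighboring hardware threads, while affinity_scatter (whose
// compact level was reversed earlier) cycles the outermost level first.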
void __kmp_affinity_initialize(void) {
  // Much of the code above assumes that if a machine is not affinity capable,
  // then __kmp_affinity_type == affinity_none. That state is now explicitly
  // represented as affinity_disabled, so temporarily switch to affinity_none
  // around __kmp_aux_affinity_initialize() and restore it afterwards.
  int disabled = (__kmp_affinity_type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(disabled);
  }
  if (disabled) {
    __kmp_affinity_type = affinity_none;
  }
  __kmp_aux_affinity_initialize();
  if (disabled) {
    __kmp_affinity_type = affinity_disabled;
  }
}
void __kmp_affinity_uninitialize(void) {
  if (__kmp_affinity_masks != NULL) {
    KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
    __kmp_affinity_masks = NULL;
  }
  if (__kmp_affin_fullMask != NULL) {
    KMP_CPU_FREE(__kmp_affin_fullMask);
    __kmp_affin_fullMask = NULL;
  }
  __kmp_affinity_num_masks = 0;
  __kmp_affinity_type = affinity_default;
  __kmp_affinity_num_places = 0;
  if (__kmp_affinity_proclist != NULL) {
    __kmp_free(__kmp_affinity_proclist);
    __kmp_affinity_proclist = NULL;
  }
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one
  // that has all of the OS proc ids set, or if __kmp_affinity_respect_mask
  // is set, the mask of the initialization thread.
  kmp_affin_mask_t *mask;
  int i;

  if (KMP_AFFINITY_NON_PROC_BIND) {
    if ((__kmp_affinity_type == affinity_none) ||
        (__kmp_affinity_type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
      KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
      i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
      mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
    }
  } else {
    if ((!isa_root) || KMP_HIDDEN_HELPER_THREAD(gtid) ||
        (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
      KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
      i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
      mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
    }
  }

  th->th.th_current_place = i;
  if (isa_root || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a non-OMP_PROC_BIND affinity method, set all threads'
    // place-partition-var to the entire place list.
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100,
             ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
              gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);

  if (__kmp_affinity_verbose && !KMP_HIDDEN_HELPER_THREAD(gtid)
      && (__kmp_affinity_type == affinity_none ||
          (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }

  // Report the mask of hidden helper threads separately.
  if (__kmp_affinity_verbose && KMP_HIDDEN_HELPER_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY (hidden helper thread)",
               (kmp_int32)getpid(), __kmp_gettid(), gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the user
  // didn't request affinity and this call fails, affinity will be disabled
  // silently.
  if (__kmp_affinity_type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
void __kmp_affinity_set_place(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity_verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity_num_masks - 1;

  // Turn off 4.0 affinity for the current thread at this parallel level.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
  kmp_info_t *th;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS
  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n",
            gtid, buf);
      });
  return retval;
#else
  (void)retval;
  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;
#endif /* !KMP_OS_WINDOWS */
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
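// With multiple Windows processor groups this reports the full group span,
// e.g. 2 groups x 64 bits per DWORD_PTR = 128 slots, even if fewer procs
// actually exist; otherwise the limit is simply __kmp_xproc.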
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d "
                           "in affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;

  // Do not perform balanced affinity for the hidden helper threads.
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity_gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine.
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores.
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core.
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to it - "big cores".
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores.
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance gain consider the special case nthreads ==
    // __kmp_avail_proc.
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check if this core from procarr[] is in the mask.
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first
                // available osID for this core.
                if (fine_gran) {
                  break;
                }
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core.
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors.
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to nth_per_core.
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Max number of processors.
      int nproc = nth_per_core * ncores;
      // An array to keep number of threads per each context.
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip the core with 0 processors.
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }

    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  }
}
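// Balanced sketch: on a uniform machine, 10 threads over 4 cores gives
// chunk = 2 and big_cores = 2, i.e. per-core thread counts of 3,3,2,2; the
// non-uniform path instead walks procarr[]/newarr[] to hand out hardware
// thread slots core by core in the same spirit.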
#if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry for Windows because
// there is the GetProcessAffinityMask() api.
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial()
// The function returns 0 on success,
//   -1 if we cannot bind the thread,
//   >0 (errno) if an error happened during binding.
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-omp threads.
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif