#include "kmp_affinity.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_dispatch_hier.h"

#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable value
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

#if KMP_AFFINITY_SUPPORTED
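// Helper class to see if place lists further restrict the fullMask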
class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

public:
  kmp_full_mask_modifier_t() {
    KMP_CPU_ALLOC(mask);
    KMP_CPU_ZERO(mask);
  }
  ~kmp_full_mask_modifier_t() { KMP_CPU_FREE(mask); }
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  // Returns true if the full mask was restricted, false otherwise.
  bool restrict_to_mask() {
    // See if the new mask further restricts or changes the full mask
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
      return false;
    return __kmp_topology->restrict_to_mask(mask);
  }
};
static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
    if (for_binding)
      return "OMP_PROC_BIND";
    return "OMP_PLACES";
  }
  return affinity.env_var;
}
#endif // KMP_AFFINITY_SUPPORTED
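// Initialize the machine hierarchy on first use (resizing it if the number
// of threads has grown) and copy its depth and per-level data into the
// thread's barrier state.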
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none".
  // Need to init the hierarchy on first use of a hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case the number of threads exceeds the original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
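// Return the human-readable (message catalog) name of the given topology
// layer type, in singular or plural form.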
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return KMP_I18N_STR(Unknown);
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
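// Return the lowercase keyword form of the given topology layer type, as
// used by environment variables such as KMP_HW_SUBSET.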
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return ((plural) ? "unknowns" : "unknown");
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
  case KMP_HW_MAX_NUM_CORE_TYPES:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}
#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {   \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
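// qsort comparator: order hardware threads by their ids at each topology
// level, breaking ties with the OS processor id.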
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}

#if KMP_AFFINITY_SUPPORTED
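// qsort comparator used when compacting threads onto places: the innermost
// "compact" levels are compared first, then the remaining outer levels.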
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs.is_core_type_valid())
    printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
  if (attrs.is_core_eff_valid())
    printf(" (eff=%d)", attrs.get_core_eff());
  printf("\n");
}
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the new ids to the
  // ids of the existing layers
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find the target layer.
  // If the new layer is equal to an existing layer, put the new layer above.
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found a layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below; move to the next layer and check
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Shift everything down to make room, then
  // put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}

#if KMP_GROUP_AFFINITY
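// Insert the Windows Processor Group structure into the topology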
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);

  // Sort the topology after adding proc groups
  __kmp_topology->sort_ids();
}
#endif
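// Remove layers that add no information to the topology (layers with a 1:1
// radix relative to an adjacent layer); the preference table below decides
// which of two equivalent layers is kept.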
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associated with each layer type
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (socket, core, thread)
    // to be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select which layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        keep_type = type1;
        remove_layer = remove_layer_ids = top_index2;
      } else {
        remove_type = type1;
        keep_type = type2;
        remove_layer = remove_layer_ids = top_index1;
      }
      // If all the ids for the second (deeper) layer are the same (e.g., all
      // zero), then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove the radix-1 type by setting the equivalence, removing the id
      // from the hw threads, and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
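// Establish which topology layer serves as the last-level cache (LLC)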
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
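// Gather the count of each topology layer and the ratio of children per
// parent at each layer; also collect core types/efficiencies on hybrid CPUs.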
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types and efficiencies for
        // hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
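// Count the number of cores matching attr. When find_all is false, count
// within each instance of the above_level layer and return the maximum.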
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
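// The topology is uniform when the product of all layer ratios equals the
// total number of hardware threads.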
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
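// Set the sub_ids (indices relative to the enclosing parent object at each
// level) for every hardware thread.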
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // Assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
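// Allocate the topology object together with its hardware-thread array and
// the types/ratio/count arrays in one contiguous allocation.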
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr = (int *)(bytes + sizeof(kmp_topology_t) +
                     sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
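// Assuming the ids are sorted, returns false if two hardware threads share
// identical ids at every level (i.e., the ids are not unique).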
bool kmp_topology_t::check_ids() const {
  // Assume the ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread, e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at the very end of the quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}

#if KMP_AFFINITY_SUPPORTED
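// Canonicalize the affinity granularity setting: fall back to a granularity
// that exists in the detected topology and compute affinity.gran_levels.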
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  // If the user requested hybrid CPU attributes for granularity (either
  // OMP_PLACES or KMP_AFFINITY), but none exist, then reset the granularity
  // and have the code below select a valid one.
  if (!__kmp_is_hybrid_cpu()) {
    if (affinity.core_attr_gran.valid) {
      // OMP_PLACES with cores:<attribute> but non-hybrid arch, use cores
      // instead
      KMP_AFF_WARNING(
          affinity, AffIgnoringNonHybrid, env_var,
          __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    } else if (affinity.flags.core_types_gran ||
               affinity.flags.core_effs_gran) {
      // OMP_PLACES=core_types|core_effs but non-hybrid, use cores instead
      if (affinity.flags.omp_places) {
        KMP_AFF_WARNING(
            affinity, AffIgnoringNonHybrid, env_var,
            __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      } else {
        // KMP_AFFINITY=granularity=core_type|core_eff,...
        KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                        "Intel(R) Hybrid Technology core attribute",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE));
      }
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    }
  }
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn the user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of granularity
    // specified by the user is too coarse, then the runtime must restrict
    // the granularity to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif // KMP_AFFINITY_SUPPORTED
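// Canonicalize the freshly detected topology: insert Windows processor
// groups, remove radix-1 layers, gather counts/ratios, and check invariants.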
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add the L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post-canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}
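// Canonicalize an explicitly specified, uniform three-level
// (socket/core/thread) topology.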
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = npackages * ncores_per_pkg;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  __kmp_ncores = ncores;
  _discover_uniformity();
}

#if KMP_AFFINITY_SUPPORTED
// Represent the core attribute (type or efficiency) as a catalog string
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
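// Restrict the topology (and __kmp_affin_fullMask) to the hardware threads
// present in mask; returns true if any threads were removed.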
bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  // Apply the filter
  bool affected;
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  if (affected) {
    _gather_enumeration_information();
    _discover_uniformity();
    _set_globals();
    _set_last_level_cache();
#if KMP_OS_WINDOWS
    // Copy the filtered full mask if the topology has a single proc group
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  return affected;
}
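// Apply the KMP_HW_SUBSET environment variable to the topology.
// Returns true if KMP_HW_SUBSET filtered any processors from the topology.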
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  __kmp_hw_subset->canonicalize(__kmp_topology);

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  bool is_absolute = __kmp_hw_subset->is_absolute();
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if the current layer is in the detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if the current layer has already been specified, either
    // directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // If using a single core attribute on a non-hybrid arch, ignore just
      // the attribute; if using multiple core attributes on a non-hybrid
      // arch, ignore all of KMP_HW_SUBSET.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0,
                                num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if ((using_core_types || using_core_effs) && !is_absolute) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL &&
                 num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of a specific core attribute + generic core
          // e.g., 4c & 3c:intel_atom or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf,
                                             item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  // For keeping track of sub_ids for an absolute KMP_HW_SUBSET
  // or core attributes (core type or efficiency)
  int prev_sub_ids[KMP_HW_LAST];
  int abs_sub_ids[KMP_HW_LAST];
  int core_eff_sub_ids[KMP_HW_MAX_NUM_CORE_EFFS];
  int core_type_sub_ids[KMP_HW_MAX_NUM_CORE_TYPES];
  for (size_t i = 0; i < KMP_HW_LAST; ++i) {
    abs_sub_ids[i] = -1;
    prev_sub_ids[i] = -1;
  }
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_EFFS; ++i)
    core_eff_sub_ids[i] = -1;
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    core_type_sub_ids[i] = -1;

  // Determine which hardware threads should be filtered.

  // Helper to determine if a topology layer is targeted by the subset
  auto is_targeted = [&](int level) {
    if (is_absolute)
      return true;
    for (int i = 0; i < hw_subset_depth; ++i)
      if (topology_levels[i] == level)
        return true;
    return false;
  };

  // Helper to index into the core type sub ids array
  auto get_core_type_index = [](const kmp_hw_thread_t &t) {
    switch (t.attrs.get_core_type()) {
    case KMP_HW_CORE_TYPE_UNKNOWN:
    case KMP_HW_MAX_NUM_CORE_TYPES:
      return 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    case KMP_HW_CORE_TYPE_ATOM:
      return 1;
    case KMP_HW_CORE_TYPE_CORE:
      return 2;
#endif
    }
    KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
    KMP_BUILTIN_UNREACHABLE;
  };

  // Helper to index into the core efficiencies sub ids array
  auto get_core_eff_index = [](const kmp_hw_thread_t &t) {
    return t.attrs.get_core_eff();
  };

  int num_filtered = 0;
  kmp_affin_mask_t *filtered_mask;
  KMP_CPU_ALLOC(filtered_mask);
  KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];

    // Update type_sub_id
    if (is_absolute || using_core_effs || using_core_types) {
      for (int level = 0; level < get_depth(); ++level) {
        if (hw_thread.sub_ids[level] != prev_sub_ids[level]) {
          bool found_targeted = false;
          for (int j = level; j < get_depth(); ++j) {
            bool targeted = is_targeted(j);
            if (!found_targeted && targeted) {
              found_targeted = true;
              abs_sub_ids[j]++;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)]++;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)]++;
            } else if (targeted) {
              abs_sub_ids[j] = 0;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)] = 0;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)] = 0;
            }
          }
          break;
        }
      }
      for (int level = 0; level < get_depth(); ++level)
        prev_sub_ids[level] = hw_thread.sub_ids[level];
    }

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds to
        // this hardware thread's core attribute. Use its num,offset plus the
        // running sub_id for that core attribute to decide whether this
        // hardware thread should be filtered.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids[get_core_type_index(hw_thread)];
        else
          sub_id = core_eff_sub_ids[get_core_eff_index(hw_thread)];
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int sub_id;
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (is_absolute)
          sub_id = abs_sub_ids[level];
        else
          sub_id = hw_thread.sub_ids[level];
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    if (should_be_filtered) {
      KMP_CPU_CLR(hw_thread.os_id, filtered_mask);
      num_filtered++;
    }
  }

  // One last check that we shouldn't allow filtering the entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    return false;
  }

  // Apply the filter
  restrict_to_mask(filtered_mask);
  return true;
}
bool kmp_topology_t::is_close(int hwt1, int hwt2,
                              const kmp_affinity_t &stgs) const {
  int hw_level = stgs.gran_levels;
  if (hw_level >= depth)
    return true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  if (stgs.flags.core_types_gran)
    return t1.attrs.get_core_type() == t2.attrs.get_core_type();
  if (stgs.flags.core_effs_gran)
    return t1.attrs.get_core_eff() == t2.attrs.get_core_eff();
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return true;
}
bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
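// Choose the implementation (hwloc or the native OS interface) used for all
// affinity operations.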
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // the user requests the Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }
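// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15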
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for an empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range: [start, previous] is the inclusive range of
    // contiguous bits in the mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the
    // rest of the ranges do
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with a new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
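// Print the affinity mask to the string buffer object in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15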
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  __kmp_str_buf_clear(buf);

  // Check for an empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range: [start, previous] is the inclusive range of
    // contiguous bits in the mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the
    // rest of the ranges do
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with a new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
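// Return a (possibly empty) affinity mask representing the offline CPUs.
// Caller must free the mask.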
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
// Set the offline cpus
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline cpus
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just a single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // A range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into the offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}
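// Build a mask of all available processors; returns the number of
// available procs.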
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else
#endif /* KMP_GROUP_AFFINITY */
  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }
  return avail_proc;
}
// The full mask of processors the process may run on, and the original mask
// as it was before any topology-based restriction was applied
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
kmp_affin_mask_t *__kmp_affin_origMask = NULL;

#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}
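// Translate a hwloc object (cache or otherwise) into the corresponding
// KMP_HW_* topology layer type.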
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
#endif
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
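// Count the number of objects of the given type below obj in the hwloc tree.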
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type,
                                                       first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first))
    ++retval;
  return retval;
}
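// This gets the sub_id for a lower object under a higher object in the
// topology tree.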
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // Store sub_id + 1 so that 0 is differentiated from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
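// Build the topology map using the hwloc library.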
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    // hwloc only guarantees the existence of PU objects, so check PACKAGE
    // and CORE before using them
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    if (__kmp_nThreadsPerCore == 0)
      __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Handle multiple types of cores if they exist on the system
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);

  typedef struct kmp_hwloc_cpukinds_info_t {
    int efficiency;
    kmp_hw_core_type_t core_type;
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;

  if (nr_cpu_kinds > 0) {
    unsigned nr_infos;
    struct hwloc_info_s *infos;
    cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
                                  0) == 0) {
        for (unsigned i = 0; i < nr_infos; ++i) {
          if (__kmp_str_match("CoreType", 8, infos[i].name)) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
              break;
            } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
              break;
            }
#endif
          }
        }
      }
    }
  }
#endif

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  obj = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  while (obj && obj != root) {
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
    obj = obj->parent;
  }
  KMP_ASSERT(depth > 0);

  // Get the order of the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      // If there are multiple core types, set that attribute for the thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA nodes are handled differently since they are not part of the
      // parent/child tree anymore. They are separate children of obj
      // (memory_first_child points to the first memory child)
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
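// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread ids onto physical processors.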
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity.type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as __kmp_nThreadsPerCore,
  // nCoresPerPkg, and nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, and
  // nPackages. Make sure all these vars are set correctly.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}

#if KMP_GROUP_AFFINITY
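// If multiple Windows* OS processor groups exist, create a 3-level topology
// map with the groups at the outermost level.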
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then fill in the global counts only
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = nPackages / __kmp_ncores;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
  }
  return true;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
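// Per-OS-thread data gathered from the legacy APIC-id CPUID leaves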
class apicThreadInfo {
public:
  unsigned osId, apicId;
  unsigned maxCoresPerPkg;
  unsigned maxThreadsPerPkg;
  unsigned pkgId, coreId, threadId; // inferred from the values above
};
static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
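// Cache sharing information (level and sharing mask) gathered from CPUID
// leaf 4.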
class kmp_cache_info_t {
public:
  struct info_t {
    unsigned level, mask;
  };
  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

private:
  static const int MAX_CACHE_LEVEL = 3;

  size_t depth;
  info_t table[MAX_CACHE_LEVEL];

  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
};
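// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use an
// algorithm which cycles through the available os threads, setting the
// current thread's affinity mask to each one in turn, and then reads the
// APIC id for each thread context using the cpuid instruction.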
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // The algorithm below requires binding to each proc in turn, so if we are
  // not affinity capable, fall back to inferring the topology from the data
  // returned by cpuid on the current thread and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4); 1 must be added to the
    // encoded value. First check that cpuid(4) is supported: issue cpuid(0)
    // and check that eax is 4 or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no reliable way to tell if HT is enabled without issuing the
    // cpuid instruction from every thread, so assume 1 thread per core here.
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  unsigned i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4); 1 must be added to the encoded
    // value. First check that cpuid(4) is supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained
    // locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // This could only happen if the cpuid instruction on this chip is
      // really screwed up. The destructor restores the affinity mask.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // Count the packages, cores per package, and threads per core, and check
  // the consistency of the data in the table.
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;
  unsigned nPkgs = 1;

  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consistency checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nPkgs++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }

  nPackages = nPkgs;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Now that we've determined the number of packages, the number of cores
  // per package, and the number of threads per core, construct the data
  // structure to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  int idx = 0;
  kmp_hw_t types[3];
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
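// Hybrid cpu detection using CPUID.1A
// The thread should be pinned to a processor already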
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = 0;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
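// Newer Intel architectures support a more fine-grained specification of
// x2APIC ids via CPUID leaf B or leaf 1F; the level types below come from
// that interface.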
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_MODULE = 3,
  INTEL_LEVEL_TYPE_TILE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};

struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET;
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}
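// This function takes the topology leaf, an array to store the levels
// detected, and a bitmap of the known levels.
// Returns the number of levels in the topology.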
static unsigned
__kmp_x2apicid_get_levels(int leaf,
                          cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
                          kmp_uint64 known_levels) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;

  // Known topology layers act as the highest unknown topology layers when
  // unknown topology layers exist: this eliminates unknown portions of the
  // topology while still keeping the correct structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
      return 0;

    if (known_levels & (1ull << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);

  // Ensure the INTEL_LEVEL_TYPE_INVALID (Socket) layer isn't first
  if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID)
    return 0;

  // Set the masks to & with the apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(i > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
  }
  return levels_index;
}
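// Build the topology map using x2APIC ids from CPUID leaf 1F or leaf B.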
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  unsigned levels_index;
  kmp_uint64 known_levels;
  int topology_leaf, highest_leaf, apic_id;
  int num_leaves;
  static int leaves[] = {0, 0};

  kmp_i18n_id_t leaf_message_id;

  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);

  *msg_id = kmp_i18n_null;
  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Figure out the known topology levels
  known_levels = 0ull;
  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
    if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
      known_levels |= (1ull << i);
    }
  }

  // Get the highest cpuid leaf supported
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // If a specific topology method was requested, only allow that specific
  // leaf; otherwise try leaf 31 first, then fall back to leaf 11.
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if the chosen cpuid leaf is supported and usable.
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  levels_index = 0;
  topology_leaf = -1;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
    if (levels_index == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || levels_index == 0) {
    *msg_id = leaf_message_id;
    return false;
  }
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);

  // The algorithm below binds the current thread to each available context in
  // turn and reads the cpuid info. If we are not capable of setting affinity,
  // infer what we can from cpuid on the current thread and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    for (unsigned i = 0; i < levels_index; ++i) {
      if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = levels[i].nitems;
      } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

  // Allocate the data structure to be returned.
  int depth = levels_index;
  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
    types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
  __kmp_topology =
      kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);

  // Insert equivalent cache types if they exist
  kmp_cache_info_t cache_info;
  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
    const kmp_cache_info_t::info_t &info = cache_info[i];
    unsigned cache_mask = info.mask;
    unsigned cache_level = info.level;
    for (unsigned j = 0; j < levels_index; ++j) {
      unsigned hw_cache_mask = levels[j].cache_mask;
      kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
      if (hw_cache_mask == cache_mask && j < levels_index - 1) {
        kmp_hw_t type =
            __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
        __kmp_topology->set_equivalent_type(cache_type, type);
      }
    }
  }

  // Save the affinity mask for the current thread; it is restored when
  // previous_affinity goes out of scope.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  unsigned int proc;
  int hw_thread_index = 0;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
    unsigned my_levels_index;

    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(proc);

    // Extract the levels for this processor.
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    apic_id = buf.edx;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
    if (my_levels_index == 0 || my_levels_index != levels_index) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }
    hw_thread.os_id = proc;
    // Put in topology information
    for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
      hw_thread.ids[idx] = apic_id & my_levels[j].mask;
      if (j > 0) {
        hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
      }
    }
    // Hybrid information
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      kmp_hw_core_type_t type;
      unsigned native_model_id;
      int efficiency;
      __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
      hw_thread.attrs.set_core_type(type);
      hw_thread.attrs.set_core_eff(efficiency);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    return false;
  }
  return true;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
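// The cpuinfo parser below stores one row per "processor" record, with the
// columns indexed by the *IdIndex constants: OS id, thread id, core id,
// package id, then one column per node_<n> level that appears in the file.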
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to number of cores, the maximum number of
  // L2's to number of cores (or cores/2 on Intel(R) Xeon Phi(TM)), and the
  // maximum number of NUMA nodes and L3's to number of packages.
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                       \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
  // Set the number of threads per unit for each layer.
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 &&                                                         \
    (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||    \
     KMP_OS_WINDOWS) &&                                                       \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}
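// Example of the mapping performed by __kmp_dispatch_get_index (illustrative
// numbers): with __kmp_hier_threads_per[L1 + 1] = 2 threads per core and
// __kmp_hier_max_units[L1 + 1] = 8 cores, tid 5 at the L1 layer resolves to
// (5 / 2) % 8 = core 2.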
// Return the index into the hierarchy for this tid and layer type (L1, L2,
// etc.), i.e., this thread's L1, this thread's L2, etc.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}
// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads/t2) / (nthreads/t1) = t1 / t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
#endif // KMP_USE_HIER_SCHED
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map. On AIX, the map is obtained through system SRAD (Scheduler
// Resource Allocation Domain).
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;

#if KMP_OS_AIX
  unsigned num_records = __kmp_xproc;
#else
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();

  if (__kmp_affinity.flags.verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan the file once to count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    // FIXME - this will match "node_<n> <garbage>"
    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
      continue;
    }
  }

  // Check for empty file / no valid processor records, or too many. The number
  // of records can't exceed the number of valid bits in the affinity mask.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can scan the file
  // again, this time performing a full parse of the data.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }
#endif // KMP_OS_AIX
  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  unsigned i;
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }
#if KMP_OS_AIX
  int smt_threads;
  lpar_info_format1_t cpuinfo;
  unsigned num_avail = __kmp_xproc;

  if (__kmp_affinity.flags.verbose)
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");

  // Get the number of SMT threads per core.
  int retval =
      lpar_get_info(LPAR_INFO_FORMAT1, &cpuinfo, sizeof(lpar_info_format1_t));
  if (!retval)
    smt_threads = cpuinfo.smt_threads;
  else {
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  // Get the system resource set and an empty SRAD resource set.
  rsethandle_t sys_rset = rs_alloc(RS_SYSTEM);
  if (sys_rset == NULL) {
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  rsethandle_t srad = rs_alloc(RS_EMPTY);
  if (srad == NULL) {
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  // Get the SRAD system detail level.
  int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0);
  if (sradsdl < 0) {
    rs_free(srad);
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }
  // Get the number of RADs at that SRAD system detail level.
  int num_rads = rs_numrads(sys_rset, sradsdl, 0);
  if (num_rads < 0) {
    rs_free(srad);
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  // Get the maximum number of procs that may be contained in a resource set.
  int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0);
  if (max_procs < 0) {
    rs_free(srad);
    rs_free(sys_rset);
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_UnknownTopology;
    return false;
  }

  int cur_rad = 0;
  int num_set = 0;
  for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS;
       ++srad_idx) {
    // Check if the SRAD is available in the RSET.
    if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0)
      continue;

    for (int cpu = 0; cpu < max_procs; cpu++) {
      // Set the info for the cpu if it is in the SRAD.
      if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) {
        threadInfo[cpu][osIdIndex] = cpu;
        threadInfo[cpu][pkgIdIndex] = cur_rad;
        threadInfo[cpu][coreIdIndex] = cpu / smt_threads;
        ++num_set;
        if (num_set >= num_avail) {
          // Done if all available CPUs have been set.
          break;
        }
      }
    }
    ++cur_rad;
  }
  rs_free(srad);
  rs_free(sys_rset);
#else // !KMP_OS_AIX
  // Perform a second pass of the file, filling in the fields.
  unsigned num_avail = 0;
  *line = 0;
#if KMP_ARCH_S390X
  bool reading_s390x_sys_info = true;
#endif
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end
    // of the loop appear in an outer scoping level. This avoids warnings about
    // jumping past an initialization to a target in the same block.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF. If there is valid data in
        // threadInfo[num_avail], then fake a blank line to ensure that the
        // last address gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't emit an
        // error if we were going to ignore the line anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;
#if KMP_ARCH_LOONGARCH64
      // The parsing logic of /proc/cpuinfo in this function highly depends on
      // the blank lines between each processor info block. But on LoongArch a
      // blank line exists before the first processor info block (i.e. after
      // the "system type" line). Skip it so the original logic works.
      if (*buf == '\n' && *line == 2)
        continue;
#endif
#if KMP_ARCH_S390X
      // s390x /proc/cpuinfo starts with a variable number of lines containing
      // the overall system information. Skip them.
      if (reading_s390x_sys_info) {
        if (*buf == '\n')
          reading_s390x_sys_info = false;
        continue;
      }
#endif

#if KMP_ARCH_S390X
      char s1[] = "cpu number";
#else
      char s1[] = "processor";
#endif
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

#if KMP_ARCH_S390X
        // Disambiguate physical_package_id with the book and drawer ids.
        unsigned book_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/book_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &book_id);
        threadInfo[num_avail][pkgIdIndex] |= (book_id << 8);

        unsigned drawer_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &drawer_id);
        threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16);
#endif

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
#endif /* KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64) */
        continue;
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }
3309 if ((
int)num_avail == __kmp_xproc) {
3310 CLEANUP_THREAD_INFO;
3311 *msg_id = kmp_i18n_str_TooManyEntries;
3317 if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
3318 CLEANUP_THREAD_INFO;
3319 *msg_id = kmp_i18n_str_MissingProcField;
3322 if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
3323 CLEANUP_THREAD_INFO;
3324 *msg_id = kmp_i18n_str_MissingPhysicalIDField;
3329 if (KMP_AFFINITY_CAPABLE() &&
3330 !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
3331 __kmp_affin_fullMask)) {
3332 INIT_PROC_INFO(threadInfo[num_avail]);
3339 KMP_ASSERT(num_avail <= num_records);
3340 INIT_PROC_INFO(threadInfo[num_avail]);
3345 CLEANUP_THREAD_INFO;
3346 *msg_id = kmp_i18n_str_MissingValCpuinfo;
3350 CLEANUP_THREAD_INFO;
3351 *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  // If it is configured to omit the package level when there is only a single
  // package, the logic at the end of this routine won't work if there is only
  // a single thread.
  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned
  // among the chips on a system, and although coreId's and threadId's are
  // usually assigned densely from 0, we don't want to make any such
  // assumptions. Determine the radix of each field now; we only have an upper
  // bound on the first two figures.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;

restart_radix_check:
  threadIdCt = 0;
  // Initialize the counter arrays with data from threadInfo[0].
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the records, figuring out which fields change.
  // Run through the loop twice: the first time, assign threadIds if they are
  // missing.
  for (i = 1; i < num_avail; i++) {
    // If a thread is specified twice, or the record is incomplete, report an
    // error.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some records and
        // not others. Start the thread id counter off at the next higher
        // thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Run of records with the same id at this level has ended. Reset
        // counts at lower levels.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];

        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

          // Restart the thread counter, as we are on a new core.
          threadIdCt = 0;

          // Auto-assign the thread id field if it wasn't specified.
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }
          // Apparently the thread id field was specified for some records and
          // not others. Start the thread id counter off at the next higher
          // thread id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also, check that we haven't already restarted the loop (to be safe -
      // shouldn't need to).
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }

      // If the thread ids were not specified and we see entries that are
      // duplicates, start the loop over and assign the thread ids manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }
#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads.
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity.type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);

  // Count the number of levels which have more nodes at that level than at
  // the parent's level (with there being an implicit root node of the top
  // level). This is equivalent to saying that there is at least one node at
  // this level which has a sibling. These levels are in the map, and the
  // package level is always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;
  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the data structure that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);

  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.os_id = os;

    idx = 0;
    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
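// __kmp_create_os_id_masks (below) walks the sorted hardware threads and
// groups those that are "close" (within the granularity setting) into a
// shared affinity mask, recording the mask under every member's OS id and
// marking one member per group as the leader.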
template <typename FindNextFunctionType>
static void __kmp_create_os_id_masks(unsigned *numUnique,
                                     kmp_affinity_t &affinity,
                                     FindNextFunctionType find_next) {
  int i, j;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  KMP_ASSERT(numAddrs);
  KMP_ASSERT(depth);

  i = find_next(-1);
  // If no HW thread satisfies the find_next conditions, then return and let
  // the caller fall back to the plain incrementing find_next.
  if (i >= numAddrs)
    return;

  int unique = 0;
  // Determine the number of distinct OS Id's present across all hw threads.
  int maxOsId = 0;
  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  affinity.num_os_id_masks = maxOsId + 1;
  KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks);
  KMP_ASSERT(affinity.gran_levels >= 0);
  if (affinity.flags.verbose && (affinity.gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels);
  }
  if (affinity.gran_levels >= (int)depth) {
    KMP_AFF_WARNING(affinity, AffThreadsMayMigrate);
  }

  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core will have identical topology ids, not
  // considering the last level, which must be the thread id. All threads on a
  // core will appear consecutively.
  int leader;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);

  i = j = leader = find_next(-1);
  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  kmp_full_mask_modifier_t full_mask;
  for (i = find_next(i); i < numAddrs; i = find_next(i)) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), then set the bit for this os thread in the
    // affinity mask for this group, and go on to the next thread.
    if (__kmp_topology->is_close(leader, i, affinity)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry in
    // the OS Id mask table. Mark the first address as a leader.
    for (; j < i; j = find_next(j)) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    full_mask.include(sum);
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // For every thread in the last group, copy the mask to the thread's entry
  // in the OS Id mask table.
  for (; j < i; j = find_next(j)) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  full_mask.include(sum);
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  // See if the OS Id mask table further restricts or changes the full mask.
  if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
    __kmp_topology->print(env_var);
  }

  *numUnique = unique;
}
// Stuff for the affinity proclist parsers. It's easier to declare these vars
// as file-static than to try and pass them through the calling sequence of
// the recursive-descent OMP_PLACES parser.
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId);                \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }
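// KMP_AFFINITY proclist grammar handled below, e.g. "0,3,5-10:2,{12,13}":
// plain OS ids, ranges with an optional stride, and braced sets that are
// unioned into a single mask. Invalid or unavailable OS ids draw a warning
// via ADD_MASK_OSID and are otherwise ignored.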
// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) {
  int i;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *proclist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = proclist;
  const char *next = proclist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0;
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }

      for (;;) {
        // Check for end of set.
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }

        // Skip optional comma.
        if (*next == ',') {
          next++;
        }
        SKIP_WS(next);

        // Read the next integer in the set.
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask.
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0) {
        ADD_MASK(sumMask);
      }

      SKIP_WS(next);
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // Read the first integer.
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on.
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);

      // Skip optional comma.
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // This is a range. Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter.
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      // A stride is specified. Skip over the ':' and read the 3rd int.
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++;
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks.
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list.
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma.
    SKIP_WS(next);
    if (*next == ',') {
      next++;
    }
    scan = next;
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
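// OMP_PLACES subplace syntax handled below: <lower-bound>[:<count>[:<stride>]]
// inside braces, e.g. "{0:4:2}" selects OS procs 0, 2, 4, 6 into one place.
// Each of the three numeric fields is parsed in turn; a '}' or ',' may close
// the subplace after any of them.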
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affinity_t &affinity, int maxOsId,
                                        kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // valid follow sets are ',' ':' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter.
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // valid follow sets are ',' and '}'
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;

  // valid follow sets are '{' '!' and num
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num);
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) {
  int i, j, count, stride, sign;
  kmp_affin_mask_t **out_masks = &affinity.masks;
  unsigned *out_numMasks = &affinity.num_masks;
  const char *placelist = affinity.proclist;
  kmp_affin_mask_t *osId2Mask = affinity.os_id_masks;
  int maxOsId = affinity.num_os_id_masks - 1;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial place to form the
  // current place; previousMask contains the previous place.
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter.
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // valid follow sets are ',' ':' and EOL
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter.
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride.
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from that.
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if (i < count - 1) {
            KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // valid follow sets are ',' and EOL
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

// Returns the maximum number of procs that could be bound to the same core.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}
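// Note on the helpers above: __kmp_affinity_find_core numbers cores by
// scanning consecutive hardware threads and bumping the count whenever any
// sub_id between the root level and core_level changes, so it is only
// meaningful on the sorted topology table.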
static int *procarr = NULL;
static int __kmp_aff_depth = 0;
static int *__kmp_osid_to_hwthread_map = NULL;
// Gather topology information about the hardware threads covered by mask
// into ids and attrs.
static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask,
                                                  kmp_affinity_ids_t &ids,
                                                  kmp_affinity_attrs_t &attrs) {
  if (!KMP_AFFINITY_CAPABLE())
    return;

  // Initialize ids and attrs thread data.
  for (int i = 0; i < KMP_HW_LAST; ++i)
    ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID;
  attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  // Iterate through each os id within the mask and determine
  // the topology id and attribute information.
  int cpu;
  int depth = __kmp_topology->get_depth();
  KMP_CPU_SET_ITERATE(cpu, mask) {
    int osid_idx = __kmp_osid_to_hwthread_map[cpu];
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = __kmp_topology->get_type(level);
      int id = hw_thread.sub_ids[level];
      if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids.ids[type] == id) {
        ids.ids[type] = id;
      } else {
        // This mask spans across multiple topology units, set it as such.
        ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        for (; level < depth; ++level) {
          kmp_hw_t type = __kmp_topology->get_type(level);
          ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID;
        }
      }
    }
    if (!attrs.valid) {
      attrs.core_type = hw_thread.attrs.get_core_type();
      attrs.core_eff = hw_thread.attrs.get_core_eff();
      attrs.valid = 1;
    } else {
      // This mask spans across multiple attributes, set it as such.
      if (attrs.core_type != hw_thread.attrs.get_core_type())
        attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      if (attrs.core_eff != hw_thread.attrs.get_core_eff())
        attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF;
    }
  }
}
static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  const kmp_affin_mask_t *mask = th->th.th_affin_mask;
  kmp_affinity_ids_t &ids = th->th.th_topology_ids;
  kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs;
  __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
}
static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) {
  if (!KMP_AFFINITY_CAPABLE())
    return;
  if (affinity.type != affinity_none) {
    KMP_ASSERT(affinity.num_os_id_masks);
    KMP_ASSERT(affinity.os_id_masks);
  }
  KMP_ASSERT(affinity.num_masks);
  KMP_ASSERT(affinity.masks);
  KMP_ASSERT(__kmp_affin_fullMask);

  int max_cpu = __kmp_affin_fullMask->get_max_cpu();
  int num_hw_threads = __kmp_topology->get_num_hw_threads();

  // Allocate thread topology information.
  if (!affinity.ids) {
    affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate(
        sizeof(kmp_affinity_ids_t) * affinity.num_masks);
  }
  if (!affinity.attrs) {
    affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate(
        sizeof(kmp_affinity_attrs_t) * affinity.num_masks);
  }
  if (!__kmp_osid_to_hwthread_map) {
    // Want the +1 because max_cpu should be a valid index into the map.
    __kmp_osid_to_hwthread_map =
        (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1));
  }

  // Create the OS proc to hardware thread map.
  for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) {
    int os_id = __kmp_topology->at(hw_thread).os_id;
    if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask))
      __kmp_osid_to_hwthread_map[os_id] = hw_thread;
  }

  for (unsigned i = 0; i < affinity.num_masks; ++i) {
    kmp_affinity_ids_t &ids = affinity.ids[i];
    kmp_affinity_attrs_t &attrs = affinity.attrs[i];
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i);
    __kmp_affinity_get_mask_topology_info(mask, ids, attrs);
  }
}
static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) {
  // Initialize other data structures which depend on the topology.
  if (__kmp_topology && __kmp_topology->get_num_hw_threads()) {
    machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
    __kmp_affinity_get_topology_info(affinity);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
    __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore();
#endif
  }
}
// Create a single affinity mask table containing the entire full mask, used
// when KMP_AFFINITY=none.
static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(affinity.type == affinity_none);
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
  affinity.num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
  __kmp_aux_affinity_initialize_other_data(affinity);
}
static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) {
  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  int verbose = affinity.flags.verbose;
  const char *env_var = affinity.env_var;

  // Already initialized.
  if (__kmp_affin_fullMask && __kmp_affin_origMask)
    return;

  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possibly expanding to the entire machine mask.
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (affinity.flags.respect) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
        affinity.type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, env_var, buf);
      }
    } else {
      if (verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, env_var, buf);
      }
      __kmp_avail_proc =
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups <= 1) {
        // Copy expanded full mask if topology has single processor group.
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // Set the process affinity mask since threads' affinity
      // masks must be a subset of the process mask in Windows* OS.
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }
}
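// Topology discovery below tries the available methods in a fixed order when
// KMP_TOPOLOGY_METHOD=all: hwloc (if enabled), x2apicid, apicid, cpuinfo,
// processor groups, and finally the flat map, which cannot fail. If the user
// forces one particular method, a failure of that method is fatal instead.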
static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
  bool success = false;
  const char *env_var = affinity.env_var;
  kmp_i18n_id_t msg_id = kmp_i18n_null;
  int verbose = affinity.flags.verbose;

  // For backward compatibility, setting KMP_CPUINFO_FILE =>
  // KMP_TOPOLOGY_METHOD=cpuinfo
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  if (__kmp_affinity_top_method == affinity_top_method_all) {
// In the default code path, errors are not fatal - we just try using
// another method.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && verbose) {
          KMP_INFORM(AffIgnoringHwloc, env_var);
        }
      } else if (verbose) {
        KMP_INFORM(AffIgnoringHwloc, env_var);
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX || KMP_OS_AIX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX || KMP_OS_AIX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }

// If the user has specified that a particular topology discovery method is
// to be used, then we abort if that method fails.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // should not fail
    KMP_ASSERT(success);
  }

  // Early exit if topology could not be created.
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_AFF_WARNING(affinity, ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (verbose) {
        __kmp_topology->print(env_var);
      }
    }
    return false;
  }

  // Canonicalize, print (if requested), apply KMP_HW_SUBSET.
  __kmp_topology->canonicalize();
  if (verbose)
    __kmp_topology->print(env_var);
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered && verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  return success;
}
static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) {
  bool is_regular_affinity = (&affinity == &__kmp_affinity);
  bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity);
  const char *env_var = __kmp_get_affinity_env_var(affinity);

  if (affinity.flags.initialized) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask))
    __kmp_aux_affinity_initialize_masks(affinity);

  if (is_regular_affinity && !__kmp_topology) {
    bool success = __kmp_aux_affinity_initialize_topology(affinity);
    if (success) {
      KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
    } else {
      affinity.type = affinity_none;
      KMP_AFFINITY_DISABLE();
    }
  }

  // If KMP_AFFINITY=none, then only create the single "none" place which is
  // the process's initial affinity mask or the number of hardware threads,
  // depending on respect/norespect.
  if (affinity.type == affinity_none) {
    __kmp_create_affinity_none_places(affinity);
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    affinity.flags.initialized = TRUE;
    return;
  }

  __kmp_topology->set_granularity(affinity);
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id.
  unsigned numUnique = 0;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  // If OMP_PLACES=cores:<attribute> was specified, then attempt
  // to make the OS Id mask table using those attributes.
  if (affinity.core_attr_gran.valid) {
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
      KMP_ASSERT(idx >= -1);
      for (int i = idx + 1; i < numAddrs; ++i)
        if (__kmp_topology->at(i).attrs.contains(affinity.core_attr_gran))
          return i;
      return numAddrs;
    });
    if (!affinity.os_id_masks) {
      const char *core_attribute;
      if (affinity.core_attr_gran.core_eff != kmp_hw_attr_t::UNKNOWN_CORE_EFF)
        core_attribute = "core_efficiency";
      else
        core_attribute = "core_type";
      KMP_AFF_WARNING(affinity, AffIgnoringNotAvailable, env_var,
                      core_attribute,
                      __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true))
    }
  }
  // If core attributes did not work, or none were specified, then make the
  // OS Id mask table using the typical incremental way.
  if (!affinity.os_id_masks) {
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
      KMP_ASSERT(idx >= -1);
      return idx + 1;
    });
  }
  if (affinity.gran_levels == 0) {
    KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
  }

  switch (affinity.type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(affinity.proclist != NULL);
    if (is_hidden_helper_affinity ||
        __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(affinity);
    } else {
      __kmp_affinity_process_placelist(affinity);
    }
    if (affinity.num_masks == 0) {
      KMP_AFF_WARNING(affinity, AffNoValidProcID);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    }
    break;

  // The other affinity types rely on sorting the hardware threads. If an
  // offset is specified, adjust the compact attribute appropriately.
  case affinity_logical:
    affinity.compact = 0;
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      affinity.compact = 1;
      if (affinity.compact >= depth) {
        affinity.compact = 0;
      }
    } else {
      affinity.compact = 0;
    }
    if (affinity.offset) {
      affinity.offset =
          __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (affinity.compact >= depth) {
      affinity.compact = 0;
    } else {
      affinity.compact = depth - 1 - affinity.compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1 || is_hidden_helper_affinity) {
      KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
      affinity.type = affinity_none;
      __kmp_create_affinity_none_places(affinity);
      affinity.flags.initialized = TRUE;
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage.
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var);
        affinity.type = affinity_none;
        __kmp_create_affinity_none_places(affinity);
        affinity.flags.initialized = TRUE;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (affinity.compact >= depth) {
      affinity.compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (affinity.flags.dups) {
      affinity.num_masks = __kmp_avail_proc;
    } else {
      affinity.num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < affinity.num_masks) &&
        !is_hidden_helper_affinity) {
      affinity.num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks);

    // Sort the topology table according to the current setting of
    // affinity.compact, then fill out affinity.masks.
    __kmp_topology->sort_compact(affinity);
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      kmp_full_mask_modifier_t full_mask;
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        full_mask.include(src);
        if (++j >= affinity.num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == affinity.num_masks);
      // See if the places list further restricts or changes the full mask.
      if (full_mask.restrict_to_mask() && affinity.flags.verbose) {
        __kmp_topology->print(env_var);
      }
    }
    // Sort the topology back using ids.
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }
  __kmp_aux_affinity_initialize_other_data(affinity);
  affinity.flags.initialized = TRUE;
}
void __kmp_affinity_initialize(kmp_affinity_t &affinity) {
  // Much of the code above was written assuming that if a machine was not
  // affinity capable, then affinity type == affinity_none. If the type is
  // affinity_disabled, temporarily slam it to affinity_none, call the real
  // initialization routine, then restore the disabled type.
  int disabled = (affinity.type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE())
    KMP_ASSERT(disabled);
  if (disabled)
    affinity.type = affinity_none;
  __kmp_aux_affinity_initialize(affinity);
  if (disabled)
    affinity.type = affinity_disabled;
}
void __kmp_affinity_uninitialize(void) {
  for (kmp_affinity_t *affinity : __kmp_affinities) {
    if (affinity->masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks);
    if (affinity->os_id_masks != NULL)
      KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks);
    if (affinity->proclist != NULL)
      __kmp_free(affinity->proclist);
    if (affinity->ids != NULL)
      __kmp_free(affinity->ids);
    if (affinity->attrs != NULL)
      __kmp_free(affinity->attrs);
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  }
  if (__kmp_affin_origMask != NULL) {
    if (KMP_AFFINITY_CAPABLE()) {
#if KMP_OS_AIX
      // Uninitialize by unbinding the thread.
      bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
      __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
#endif
    }
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_places = 0;
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
  if (__kmp_osid_to_hwthread_map) {
    __kmp_free(__kmp_osid_to_hwthread_map);
    __kmp_osid_to_hwthread_map = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity,
                                      int *place, kmp_affin_mask_t **mask) {
  int mask_idx;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);
  if (is_hidden_helper)
    // The first gtid is the regular primary thread, the second gtid is the
    // main thread of the hidden team which does not participate in task
    // execution.
    mask_idx = gtid - 2;
  else
    mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
  KMP_DEBUG_ASSERT(affinity->num_masks > 0);
  *place = (mask_idx + affinity->offset) % affinity->num_masks;
  *mask = KMP_CPU_INDEX(affinity->masks, *place);
}
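// Example of the place selection above (illustrative): with 4 places and
// offset 1, adjusted mask indices 0,1,2,3,4 map to places 1,2,3,0,1; the
// adjusted gtid simply round-robins through the places array.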
// This function initializes the per-thread data concerning affinity,
// including the mask and topology information.
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  // Set the thread topology information to default of unknown.
  for (int id = 0; id < KMP_HW_LAST; ++id)
    th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID;
  th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN;

  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e. one that
  // has all of the OS proc ids set, or if __kmp_affinity.flags.respect is
  // set, then the full mask is the same as the mask of the initialization
  // thread.
  kmp_affin_mask_t *mask;
  int i;
  const kmp_affinity_t *affinity;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;

  if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) {
    if ((affinity->type == affinity_none) ||
        (affinity->type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  } else {
    if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
    }
  }

  th->th.th_current_place = i;
  if (isa_root && !is_hidden_helper) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a Non-OMP_PROC_BIND affinity method,
    // set all threads' place-partition-var to the entire place list.
    th->th.th_first_place = 0;
    th->th.th_last_place = affinity->num_masks - 1;
  }
  // Copy topology information associated with the place.
  if (i >= 0) {
    th->th.th_topology_ids = __kmp_affinity.ids[i];
    th->th.th_topology_attrs = __kmp_affinity.attrs[i];
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to all places\n",
                   gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);
}
void __kmp_affinity_bind_init_mask(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }
  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  const kmp_affinity_t *affinity;
  const char *env_var;
  bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid);

  if (is_hidden_helper)
    affinity = &__kmp_hh_affinity;
  else
    affinity = &__kmp_affinity;
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);

  if (affinity->flags.verbose && (affinity->type == affinity_none ||
                                  (th->th.th_current_place != KMP_PLACE_ALL &&
                                   affinity->type != affinity_balanced)) &&
      !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
               gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the user
  // didn't request affinity and this call fails, just continue silently.
  if (affinity->type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
void __kmp_affinity_bind_place(int gtid) {
  // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND.
  if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity.flags.verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity.num_masks - 1;

  // Turn off 4.0 affinity due to kmp_set_affinity() call.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
            buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS && !KMP_OS_AIX

  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
            buf);
      });
  return retval;

#else
  (void)retval;

  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;

#endif /* !KMP_OS_WINDOWS && !KMP_OS_AIX */
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  KA_TRACE(1000, (""); {
    int gtid = __kmp_entry_gtid();
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              (kmp_affin_mask_t *)(*mask));
    __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                       "affinity mask for thread %d = %s\n",
                       proc, gtid, buf);
  });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
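// Caller-side view of the three mask-proc entry points above (sketch;
// assumes the omp.h extension API backed by these functions):
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   int rc = kmp_set_affinity_mask_proc(proc, &mask);
//   // rc ==  0: proc is valid and now set in the mask
//   // rc == -1: affinity not capable, or proc out of [0, max_proc) range
//   // rc == -2: proc valid but not in the initial (full) affinity mask
//   kmp_destroy_affinity_mask(&mask);
//
// kmp_unset_affinity_mask_proc follows the same conventions, while
// kmp_get_affinity_mask_proc returns 0/1 for a queryable proc.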
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
// Returns the index of the first hw thread on an efficiency (ATOM) core,
// or -1 if there is none
int __kmp_get_first_osid_with_ecore(void) {
  int low = 0;
  int high = __kmp_topology->get_num_hw_threads() - 1;
  int mid = 0;
  while (high - low > 1) {
    mid = (high + low) / 2;
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
        KMP_HW_CORE_TYPE_CORE) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  if (__kmp_topology->at(mid).attrs.get_core_type() == KMP_HW_CORE_TYPE_ATOM) {
    return mid;
  }
  return -1;
}
#endif
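// Note on __kmp_get_first_osid_with_ecore above: the binary search relies on
// the hw-thread array being sorted so that all performance (CORE) threads
// precede all efficiency (ATOM) threads. Hypothetical 6-thread hybrid layout:
//
//   index:     0     1     2     3     4     5
//   core type: CORE  CORE  CORE  CORE  ATOM  ATOM
//
// The loop narrows [low, high] onto the CORE/ATOM boundary and returns the
// index of the first ATOM hw thread (4 here); if the final probe still sees
// a CORE thread, there are no E-cores and -1 is returned.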
// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;
  const char *env_var = "KMP_AFFINITY";

  // Do not perform balanced affinity for the hidden helper threads
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity.gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1)
      fine_gran = false;
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1)
      fine_gran = false;
    break;
  default:
    fine_gran = false;
  }
  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to it - "big cores"
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
    // For performance gain consider the special case nthreads ==
    // __kmp_avail_proc
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      // Bind the tid-th populated core; procarr[] holds the os ids for each
      // core slot, with -1 marking unavailable contexts
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check if this core from procarr[] is in the mask
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (!in_mask)
          continue;
        if (tid != core) {
          core++;
          continue;
        }
        for (int j = 0; j < nth_per_core; j++) {
          int osID = procarr[i * nth_per_core + j];
          if (osID != -1) {
            KMP_CPU_SET(osID, mask);
            // For fine granularity it is enough to set the first available
            // osID for this core
            if (fine_gran)
              break;
          }
        }
        break;
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to nth_per_core
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      // Count the available procs on each core and histogram the cores by
      // that count
      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }
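      // Example of the bookkeeping above (hypothetical: ncores = 3,
      // nth_per_core = 2, one context of core 1 unavailable in procarr[]):
      //   nproc_at_core              = {2, 1, 2}
      //   ncores_with_x_procs        = {0, 1, 2} (1 core with 1 proc, 2 with 2)
      //   ncores_with_x_to_max_procs = {3, 3, 2} (suffix sums of the above)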
      // Max number of processors
      int nproc = nth_per_core * ncores;
      // An array to keep number of threads per each context
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip the core with 0 processors
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else if (flag != 0) {
                  newarr[i * nth_per_core + k]++;
                  cnt--;
                  nth--;
                  break;
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
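      // Roughly: the loop above hands out threads in passes. The first pass
      // (flag == 0) only performs 0 -> 1 placements, so every available
      // context receives a first thread before any doubling up; later passes
      // (flag == 1) stack additional threads. The ncores_with_x_to_max_procs[]
      // suffix counts cap how many placements each inner sweep may make. On
      // exit, newarr[i] holds the number of threads assigned to hw context i.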
      // Walk the cumulative assignment counts until this tid's slot is found
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }
    if (__kmp_affinity.flags.verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(),
                 tid, buf);
    }
    __kmp_affinity_get_thread_topology_info(th);
    __kmp_set_system_affinity(mask, TRUE);
  }
}
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
    KMP_OS_AIX
// Not needed on Windows, which has the GetProcessAffinityMask() API.
// Intended use: save the current affinity mask, call this function to widen
// the binding to the full mask, run non-OpenMP parallel code, then restore
// the saved mask. Returns 0 on success, -1 if the thread cannot be bound,
// or a positive errno if binding fails.
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial() {
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-omp threads
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
#if KMP_OS_AIX
  return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
#else
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
#endif
}
#endif

#endif // KMP_AFFINITY_SUPPORTED
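// Usage sketch for kmp_set_thread_affinity_mask_initial (illustrative;
// assumes Linux and the glibc pthread affinity calls, with
// run_non_openmp_work() standing in for the user's code):
//
//   cpu_set_t saved;
//   pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved); // save
//   if (kmp_set_thread_affinity_mask_initial() == 0)               // widen
//     run_non_openmp_work();
//   pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved); // restore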
int try_open(const char *filename, const char *mode)