#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() { mask = hwloc_bitmap_alloc(); }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
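    // Iteration convention: begin() yields the first set bit, next(i) the bit
    // after i, and end() is the sentinel. hwloc_bitmap_next() returns -1 once
    // no set bit remains, which is why end() is simply -1 here. A minimal
    // illustrative loop (process_cpu is a hypothetical callback):
    //   for (int i = m.begin(); i != m.end(); i = m.next(i))
    //     process_cpu(i);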
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      long retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      long retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_process_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set process affinity operation when not capable");
      int error = 0;
      const hwloc_topology_support *support =
          hwloc_topology_get_support(__kmp_hwloc_topology);
      // Only attempt the bind if hwloc reports process-level binding support.
      if (support->cpubind->set_proc_cpubind) {
        int retval = hwloc_set_cpubind(__kmp_hwloc_topology, mask,
                                       HWLOC_CPUBIND_PROCESS);
        if (retval >= 0)
          return 0;
        error = errno;
        if (abort_on_error)
          __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, the long type is always 32 bits.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0)
          return -1; // bits set in more than one processor group
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
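    // Each Windows processor group holds up to 64 logical processors, while
    // hwloc exposes the bitmap as 32-bit ulong chunks on that platform, so
    // group i corresponds to ulongs 2*i and 2*i+1; a group participates in
    // the mask iff either half is nonzero.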
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Affinity can be enabled only if the system can set and get this
    // thread's binding, hwloc can discover processing units, and no hwloc
    // call has failed so far.
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enables affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
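
// A minimal usage sketch of the abstract interface (illustrative only; the
// runtime normally goes through the KMP_CPU_* macros rather than calling a
// backend class directly):
//   KMPAffinity *api = new KMPHwlocAffinity();
//   api->determine_capable("KMP_AFFINITY");
//   KMPAffinity::Mask *m = api->allocate_mask();
//   m->set(0); // pin to logical CPU 0
//   m->set_system_affinity(/*abort_on_error=*/false);
//   api->deallocate_mask(m);
//   delete api;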
#if KMP_OS_LINUX || KMP_OS_FREEBSD
#if KMP_OS_LINUX
/* On some of the older OS's that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change. */
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif
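// KMPNativeAffinity talks to the OS directly: raw sched_{set,get}affinity
// syscalls on Linux (hence the hard-coded, ABI-stable syscall numbers above,
// cross-checked with #error), and pthread_np affinity calls on FreeBSD.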
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned long mask_t;
    typedef decltype(__kmp_affin_mask_size) mask_size_type;
    static const unsigned int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    static const mask_t ONE = 1;
    mask_size_type get_num_mask_types() const {
      return __kmp_affin_mask_size / sizeof(mask_t);
    }

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~(ONE << (i % BITS_PER_MASK_T));
    }
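    // Indexing sketch: with a 64-bit mask_t, logical CPU 70 lives in word
    // 70 / 64 = 1 at bit 70 % 64 = 6, so set(70) ORs (ONE << 6) into mask[1].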
    void zero() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = (mask_t)0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override {
      int e;
      __kmp_type_convert(get_num_mask_types() * BITS_PER_MASK_T, &e);
      return e;
    }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
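    // Both paths above share one storage layout: on Linux the buffer is
    // handed to the raw sched_{get,set}affinity syscalls, while on FreeBSD
    // the same bytes are reinterpreted as a cpuset_t for
    // pthread_{get,set}affinity_np. Either way the result folds into the
    // 0-on-success / errno-on-failure convention used throughout this file.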
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
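
// Like the hwloc backend, this class identifies itself through
// get_api_type() (NATIVE_OS here, HWLOC there), which is how the rest of
// the runtime can tell which implementation it ended up with.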
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_process_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups <= 1) {
        if (!SetProcessAffinityMask(GetCurrentProcess(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0))
          return -1;
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
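    // Note the single-group read protocol above: classic Win32 has no
    // GetThreadAffinityMask(), so the code "reads" the thread mask by setting
    // it to the full process mask, capturing the previous mask from
    // SetThreadAffinityMask()'s return value, and then restoring that
    // previous mask with a second call.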
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1; // bits set in more than one processor group
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
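
// The classes below describe the machine topology itself, independently of
// any particular affinity backend: kmp_hw_thread_t models one OS processor,
// kmp_topology_t the whole machine, and kmp_hw_subset_t a user-requested
// restriction of it.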
class kmp_hw_thread_t {
public:
  static const int UNKNOWN_ID = -1;
  static int compare_ids(const void *a, const void *b);
  static int compare_compact(const void *a, const void *b);
  int ids[KMP_HW_LAST];
  int sub_ids[KMP_HW_LAST];
  void clear() {
    for (int i = 0; i < (int)KMP_HW_LAST; ++i)
      ids[i] = UNKNOWN_ID;
  }
};
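
// One kmp_hw_thread_t records, for a single OS processor, its id at every
// detected topology level (ids is indexed by kmp_hw_t: socket, core, thread,
// etc.), plus sub_ids used when ordering threads compactly; unknown levels
// stay at UNKNOWN_ID.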
class kmp_topology_t {
  int depth;
  // Type of each topology level, outermost first (e.g. socket, core, thread).
  kmp_hw_t *types;
  // ratio[i]: number of level-i units per unit of the enclosing level;
  // count[i]: total number of level-i units on the machine.
  int *ratio;
  int *count;
  int num_hw_threads;
  kmp_hw_thread_t *hw_threads;
  // Map from any hardware type to the equivalent type actually present in
  // this topology (e.g. a tile may be equivalent to a core).
  kmp_hw_t equivalent[KMP_HW_LAST];
  struct flags_t {
    int uniform : 1;
  } flags;

  // Gather per-level counts and ratios from the hw_threads array.
  void _gather_enumeration_information();
  // Remove levels that contain only one unit per enclosing unit.
  void _remove_radix1_layers();
  // Determine whether the topology is uniform.
  void _discover_uniformity();
  void _set_last_level_cache();

public:
  // Force use of the allocate()/deallocate() functions below.
  kmp_topology_t() = delete;
  kmp_topology_t(const kmp_topology_t &t) = delete;
  kmp_topology_t(kmp_topology_t &&t) = delete;
  kmp_topology_t &operator=(const kmp_topology_t &t) = delete;
  kmp_topology_t &operator=(kmp_topology_t &&t) = delete;
  static kmp_topology_t *allocate(int nproc, int ndepth, const kmp_hw_t *types);
  static void deallocate(kmp_topology_t *);

  kmp_hw_thread_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  const kmp_hw_thread_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  int get_num_hw_threads() const { return num_hw_threads; }
  // Sort the hw_threads array by physical ids.
  void sort_ids() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_ids);
  }
  bool check_ids() const;
  void canonicalize(int pkgs, int cores_per_pkg, int thr_per_core, int cores);
  // Apply the KMP_HW_SUBSET restriction to this topology, if any.
  bool filter_hw_subset();
  // Are two hardware threads within a given level of each other?
  bool is_close(int hwt1, int hwt2, int level) const;
  bool is_uniform() const { return flags.uniform; }
  kmp_hw_t get_equivalent_type(kmp_hw_t type) const { return equivalent[type]; }
  // Set type1 to be equivalent to type2.
  void set_equivalent_type(kmp_hw_t type1, kmp_hw_t type2) {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type1);
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type2);
    kmp_hw_t real_type2 = equivalent[type2];
    if (real_type2 == KMP_HW_UNKNOWN)
      real_type2 = type2;
    equivalent[type1] = real_type2;
    // Any type previously recorded as equivalent to type1 must be redirected
    // as well, so sweep the whole table.
    KMP_FOREACH_HW_TYPE(type) {
      if (equivalent[type] == type1) {
        equivalent[type] = real_type2;
      }
    }
  }
  // Calculate the number of level1-type units per level2-type unit
  // (e.g. the number of threads per socket).
  int calculate_ratio(int level1, int level2) const {
    KMP_DEBUG_ASSERT(level1 >= 0 && level1 < depth);
    KMP_DEBUG_ASSERT(level2 >= 0 && level2 < depth);
    int r = 1;
    for (int level = level1; level > level2; --level)
      r *= ratio[level];
    return r;
  }
  int get_ratio(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return ratio[level];
  }
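  // Worked example: with levels {socket, core, thread} and per-level ratios
  // ratio[core] = 4 (cores per socket) and ratio[thread] = 2 (threads per
  // core), calculate_ratio(thread_level, socket_level) = 2 * 4 = 8 threads
  // per socket.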
  int get_depth() const { return depth; }
  kmp_hw_t get_type(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return types[level];
  }
  int get_level(kmp_hw_t type) const {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
    int eq_type = equivalent[type];
    if (eq_type == KMP_HW_UNKNOWN)
      return -1;
    for (int i = 0; i < depth; ++i)
      if (types[i] == eq_type)
        return i;
    return -1;
  }
  int get_count(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return count[level];
  }
#if KMP_AFFINITY_SUPPORTED
  // Sort the hw_threads array into the compact order used for affinity
  // assignment.
  void sort_compact() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_compact);
  }
#endif
  void print(const char *env_var = "KMP_AFFINITY") const;
};
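
// kmp_hw_subset_t stores the parsed value of the KMP_HW_SUBSET environment
// variable, which restricts the topology the runtime will use; e.g.
// KMP_HW_SUBSET=2s,4c,2t asks for 2 sockets, 4 cores per socket, and
// 2 threads per core (the example syntax is illustrative of the item list
// below).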
class kmp_hw_subset_t {
public:
  struct item_t {
    int num;
    kmp_hw_t type;
    int offset;
  };

private:
  int depth;
  int capacity;
  item_t *items;
  kmp_uint64 set;
  bool absolute;
  // The set must be able to hold one bit per hardware type.
  KMP_BUILD_ASSERT(sizeof(set) * 8 >= KMP_HW_LAST);

public:
  // Force use of the allocate()/deallocate() functions below.
  kmp_hw_subset_t() = delete;
  kmp_hw_subset_t(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t(kmp_hw_subset_t &&t) = delete;
  kmp_hw_subset_t &operator=(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t &operator=(kmp_hw_subset_t &&t) = delete;
  static kmp_hw_subset_t *allocate() {
    int initial_capacity = 5;
    kmp_hw_subset_t *retval =
        (kmp_hw_subset_t *)__kmp_allocate(sizeof(kmp_hw_subset_t));
    retval->depth = 0;
    retval->capacity = initial_capacity;
    retval->set = 0ull;
    retval->absolute = false;
    retval->items = (item_t *)__kmp_allocate(sizeof(item_t) * initial_capacity);
    return retval;
  }
  static void deallocate(kmp_hw_subset_t *subset) {
    __kmp_free(subset->items);
    __kmp_free(subset);
  }
  void set_absolute() { absolute = true; }
  bool is_absolute() const { return absolute; }
  void push_back(int num, kmp_hw_t type, int offset) {
    // Grow the items array when it fills up.
    if (depth == capacity - 1) {
      capacity *= 2;
      item_t *new_items = (item_t *)__kmp_allocate(sizeof(item_t) * capacity);
      for (int i = 0; i < depth; ++i)
        new_items[i] = items[i];
      __kmp_free(items);
      items = new_items;
    }
    items[depth].num = num;
    items[depth].type = type;
    items[depth].offset = offset;
    depth++;
    set |= (1ull << type);
  }
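  // Illustrative use: parsing "2s,4c" would produce two calls,
  //   subset->push_back(2, KMP_HW_SOCKET, 0);
  //   subset->push_back(4, KMP_HW_CORE, 0);
  // leaving depth == 2 with both type bits recorded in 'set'.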
  int get_depth() const { return depth; }
  const item_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  item_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  void remove(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    set &= ~(1ull << items[index].type);
    for (int j = index + 1; j < depth; ++j) {
      items[j - 1] = items[j];
    }
    depth--;
  }
  bool specified(kmp_hw_t type) const { return ((set & (1ull << type)) > 0); }
  void dump() const {
    printf("**********************\n");
    printf("*** kmp_hw_subset: ***\n");
    printf("* depth: %d\n", depth);
    printf("* items:\n");
    for (int i = 0; i < depth; ++i) {
      printf("num: %d, type: %s, offset: %d\n", items[i].num,
             __kmp_hw_get_keyword(items[i].type), items[i].offset);
    }
    printf("* set: 0x%llx\n", set);
    printf("* absolute: %d\n", absolute);
    printf("**********************\n");
  }
};
extern kmp_topology_t *__kmp_topology;
extern kmp_hw_subset_t *__kmp_hw_subset;
class hierarchy_info {
public:
  // Good default values for number of leaves and branching factor, given no
  // affinity information: behaves a bit like a hyper barrier.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  // Number of levels in the hierarchy (threads/core, cores/package, etc.).
  // When the machine is oversubscribed, extra levels are added, doubling the
  // thread capacity of the hierarchy each time.
  kmp_uint32 maxLevels;
  // Depth of the machine configuration hierarchy: the number of levels along
  // the longest path from root to any leaf.
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // one of the init_status values
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing

  // Level 0 corresponds to leaves. numPerLevel[i] is the number of children
  // the parent of a node at level i has; skipPerLevel[i] is the number of
  // leaves one level-i subtree covers.
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;

  void deriveLevels() {
    int hier_depth = __kmp_topology->get_depth();
    for (int i = hier_depth - 1, level = 0; i >= 0; --i, ++level) {
      numPerLevel[level] = __kmp_topology->get_ratio(i);
    }
  }

  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}
  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }
  void init(int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    // Reset the fields explicitly in case a static library is re-initialized.
    depth = 1;
    resizing = 0;
    maxLevels = 7;
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) {
      // Init numPerLevel[*] to 1 item per level.
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Derive levels from the topology if one was detected, otherwise guess.
    if (__kmp_topology && __kmp_topology->get_depth() > 0) {
      deriveLevels();
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1)
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription.
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
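
  // Worked example: on a 4-package x 4-core x 2-thread machine,
  // deriveLevels() yields numPerLevel = {2, 4, 4, 1, ...}, and the loops
  // above produce skipPerLevel = {1, 2, 8, 32, ...}; skipPerLevel[i] is how
  // many leaves one subtree at level i spans.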
  // Resize the hierarchy if nproc grows beyond its current capacity.
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // First see if the old maxLevels is enough to contain the new size.
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space; expand the hierarchy.
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);

      // Copy the old levels, then init the new ones to 1.
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }
      __kmp_free(old_numPerLevel);
    }

    // Fill in the oversubscription levels of the hierarchy.
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};
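
// Note on resize(): existing levels are copied over unchanged and only the
// oversubscription levels above them double in capacity, so growing nproc
// never invalidates positions already assigned in the lower levels.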