#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() {
      mask = hwloc_bitmap_alloc();
      this->zero();
    }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    // hwloc iteration uses -1 as the terminator, so end() is -1 rather than a
    // bit count.
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      long retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      long retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
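    // Illustrative sketch (not part of the runtime): outside this wrapper,
    // the same thread binding would be done with raw hwloc calls, e.g.:
    //
    //   hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
    //   hwloc_bitmap_set(cpuset, 2); // logical PU 2
    //   hwloc_set_cpubind(topology, cpuset, HWLOC_CPUBIND_THREAD);
    //   hwloc_bitmap_free(cpuset);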
#if KMP_OS_WINDOWS
    int set_process_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set process affinity operation when not capable");
      int error = 0;
      const hwloc_topology_support *support =
          hwloc_topology_get_support(__kmp_hwloc_topology);
      if (support->cpubind->set_proc_cpubind) {
        int retval;
        retval = hwloc_set_cpubind(__kmp_hwloc_topology, mask,
                                   HWLOC_CPUBIND_PROCESS);
        if (retval >= 0)
          return 0;
        error = errno;
        if (abort_on_error)
          __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
#endif
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On windows, the long type is always 32 bits.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0) {
          return -1; // bits set in more than one group
        }
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Is the system capable of setting/getting this thread's affinity?
    // Is topology discovery possible (the pu support flag)? And did all
    // hwloc_* calls so far succeed?
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enables affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
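// Illustrative usage sketch (hypothetical caller, not part of this header):
// the runtime programs against the abstract KMPAffinity interface, so the
// hwloc and native backends are interchangeable:
//
//   KMPAffinity *affinity = new KMPHwlocAffinity();
//   affinity->determine_capable("KMP_AFFINITY");
//   KMPAffinity::Mask *mask = affinity->allocate_mask();
//   mask->zero();
//   mask->set(0); // pin to logical CPU 0
//   mask->set_system_affinity(/*abort_on_error=*/true);
//   affinity->deallocate_mask(mask);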
#if KMP_OS_LINUX || KMP_OS_FREEBSD
/* On some of the older OSes that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change. */
#if KMP_OS_LINUX
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_RISCV64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned long mask_t;
    typedef decltype(__kmp_affin_mask_size) mask_size_type;
    static const unsigned int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    static const mask_t ONE = 1;
    // Number of mask_t words in the flat mask array.
    mask_size_type get_num_mask_types() const {
      return __kmp_affin_mask_size / sizeof(mask_t);
    }

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~(ONE << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = (mask_t)0;
    }
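    // Bit-layout sketch (illustrative): with a 64-bit mask_t, logical CPU i
    // lives in word i / BITS_PER_MASK_T at bit i % BITS_PER_MASK_T, so
    // set(70) computes:
    //
    //   mask[70 / 64] |= ONE << (70 % 64); // word 1, bit 6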
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override {
      int e;
      __kmp_type_convert(get_num_mask_types() * BITS_PER_MASK_T, &e);
      return e;
    }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
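// Illustrative sketch (not part of the runtime): the native backend above
// issues raw sched_{get,set}affinity syscalls; ordinary application code
// would use the glibc wrappers instead, e.g.:
//
//   cpu_set_t cs;
//   CPU_ZERO(&cs);
//   CPU_SET(3, &cs); // logical CPU 3
//   sched_setaffinity(0, sizeof(cs), &cs); // 0 == calling thread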
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_process_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups <= 1) {
        if (!SetProcessAffinityMask(GetCurrentProcess(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct and make the
        // system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0)) {
          return -1;
        }
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1; // bits set in more than one group
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
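// Processor-group note (illustrative): a Windows processor group holds at
// most 64 logical CPUs (BITS_PER_MASK_T for a 64-bit ULONG_PTR), so CPU 70
// maps to group 70 / 64 == 1, bit 70 % 64 == 6. get_proc_group() returns -1
// when bits are set in more than one group because SetThreadGroupAffinity()
// binds a thread to a single group at a time.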
class kmp_hw_thread_t {
public:
  static const int UNKNOWN_ID = -1;
  static int compare_ids(const void *a, const void *b);
  static int compare_compact(const void *a, const void *b);
  int ids[KMP_HW_LAST];
  int sub_ids[KMP_HW_LAST];
  bool leader;
  int os_id;
  void print() const;
  void clear() {
    for (int i = 0; i < (int)KMP_HW_LAST; ++i)
      ids[i] = UNKNOWN_ID;
    leader = false;
  }
};
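// Illustrative example: on a hypothetical 2-socket, 4-core, 2-SMT machine,
// the hardware thread on socket 1, core 1, second SMT context would carry
//
//   ids[KMP_HW_SOCKET] == 1, ids[KMP_HW_CORE] == 1, ids[KMP_HW_THREAD] == 1
//
// while levels not detected on the machine stay UNKNOWN_ID.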
class kmp_topology_t {
  struct flags_t {
    int uniform : 1;
    int reserved : 31;
  };

  // Number of levels in the topology.
  int depth;

  // The following arrays are all 'depth' long.
  // Ordered array of the level types.
  kmp_hw_t *types;
  // ratio[i]: maximum number of level-i entities per entity of level i-1,
  // e.g., [4 sockets | 6 cores/socket | 2 threads/core].
  int *ratio;
  // count[i]: absolute number of entities at level i.
  int *count;

  // The hardware threads in the topology; hw_threads is num_hw_threads long,
  // and each hw_thread's ids and sub_ids are 'depth' deep.
  int num_hw_threads;
  kmp_hw_thread_t *hw_threads;

  // Equivalence map: equivalent[type] is the type in types[] that 'type'
  // aliases, or KMP_HW_UNKNOWN if the type is not in the topology.
  kmp_hw_t equivalent[KMP_HW_LAST];

  flags_t flags;

  // Count each item and compute the ratios between adjacent levels.
  void _gather_enumeration_information();
  // Remove layers that add no information (radix-1 layers).
  void _remove_radix1_layers();
  // Determine whether the topology is uniform.
  void _discover_uniformity();
  // Set the last-level cache equivalence.
  void _set_last_level_cache();

public:
  // Force use of allocate()/deallocate().
  kmp_topology_t() = delete;
  kmp_topology_t(const kmp_topology_t &t) = delete;
  kmp_topology_t(kmp_topology_t &&t) = delete;
  kmp_topology_t &operator=(const kmp_topology_t &t) = delete;
  kmp_topology_t &operator=(kmp_topology_t &&t) = delete;

  static kmp_topology_t *allocate(int nproc, int ndepth,
                                  const kmp_hw_t *types);
  static void deallocate(kmp_topology_t *);
  kmp_hw_thread_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  const kmp_hw_thread_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < num_hw_threads);
    return hw_threads[index];
  }
  int get_num_hw_threads() const { return num_hw_threads; }
  void sort_ids() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_ids);
  }
  // Check if the topology ids are unique. Assumes the topology is sorted.
  bool check_ids() const;

  // Canonicalize the topology: gather enumeration information, remove
  // radix-1 layers, and discover uniformity.
  void canonicalize();
  void canonicalize(int pkgs, int cores_per_pkg, int thr_per_core, int cores);

  // Filter the topology according to the KMP_HW_SUBSET environment variable.
  bool filter_hw_subset();
  // Whether two hardware threads share a common ancestor at 'level'.
  bool is_close(int hwt1, int hwt2, int level) const;
  bool is_uniform() const { return flags.uniform; }

  // Equivalence queries, e.g., on some machines KMP_HW_TILE is the same
  // grouping as KMP_HW_L2; KMP_HW_UNKNOWN means the type is absent.
  kmp_hw_t get_equivalent_type(kmp_hw_t type) const { return equivalent[type]; }
  void set_equivalent_type(kmp_hw_t type1, kmp_hw_t type2) {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type1);
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type2);
    kmp_hw_t real_type2 = equivalent[type2];
    if (real_type2 == KMP_HW_UNKNOWN)
      real_type2 = type2;
    equivalent[type1] = real_type2;
    // Any type previously set as equivalent to type1 must be updated to
    // real_type2 as well.
    KMP_FOREACH_HW_TYPE(type) {
      if (equivalent[type] == type1) {
        equivalent[type] = real_type2;
      }
    }
  }
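  // Equivalence sketch (illustrative): on a machine where a tile is the same
  // grouping as an L2 cache, topology detection might record
  //
  //   set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
  //
  // after which get_equivalent_type(KMP_HW_TILE) == KMP_HW_L2 and both names
  // select the same level.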
  // Calculate the number of level1 entities per level2 entity, e.g., the
  // number of threads per core.
  int calculate_ratio(int level1, int level2) const {
    KMP_DEBUG_ASSERT(level1 >= 0 && level1 < depth);
    KMP_DEBUG_ASSERT(level2 >= 0 && level2 < depth);
    int r = 1;
    for (int level = level1; level > level2; --level)
      r *= ratio[level];
    return r;
  }
  int get_ratio(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return ratio[level];
  }
  int get_depth() const { return depth; }
  kmp_hw_t get_type(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return types[level];
  }
  int get_level(kmp_hw_t type) const {
    KMP_DEBUG_ASSERT_VALID_HW_TYPE(type);
    int eq_type = equivalent[type];
    if (eq_type == KMP_HW_UNKNOWN)
      return -1;
    for (int i = 0; i < depth; ++i)
      if (types[i] == eq_type)
        return i;
    return -1;
  }
  int get_count(int level) const {
    KMP_DEBUG_ASSERT(level >= 0 && level < depth);
    return count[level];
  }
#if KMP_AFFINITY_SUPPORTED
  void sort_compact() {
    qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
          kmp_hw_thread_t::compare_compact);
  }
#endif
  void print(const char *env_var = "KMP_AFFINITY") const;
  void dump() const;
};
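// Usage sketch (illustrative, hypothetical caller): with the accessors above,
// the SMT width of a canonicalized topology falls out of calculate_ratio():
//
//   int core_level = __kmp_topology->get_level(KMP_HW_CORE);
//   int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
//   int threads_per_core =
//       __kmp_topology->calculate_ratio(thread_level, core_level);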
class kmp_hw_subset_t {
public:
  struct item_t {
    int num;
    kmp_hw_t type;
    int offset;
  };

private:
  int depth;
  int capacity;
  item_t *items;
  kmp_uint64 set;
  bool absolute;
  // The set must be able to handle up to KMP_HW_LAST number of layers.
  KMP_BUILD_ASSERT(sizeof(set) * 8 >= KMP_HW_LAST);

public:
  // Force use of allocate()/deallocate().
  kmp_hw_subset_t() = delete;
  kmp_hw_subset_t(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t(kmp_hw_subset_t &&t) = delete;
  kmp_hw_subset_t &operator=(const kmp_hw_subset_t &t) = delete;
  kmp_hw_subset_t &operator=(kmp_hw_subset_t &&t) = delete;

  static kmp_hw_subset_t *allocate() {
    int initial_capacity = 5;
    kmp_hw_subset_t *retval =
        (kmp_hw_subset_t *)__kmp_allocate(sizeof(kmp_hw_subset_t));
    retval->depth = 0;
    retval->capacity = initial_capacity;
    retval->set = 0ull;
    retval->absolute = false;
    retval->items =
        (item_t *)__kmp_allocate(sizeof(item_t) * initial_capacity);
    return retval;
  }
  static void deallocate(kmp_hw_subset_t *subset) {
    __kmp_free(subset->items);
    __kmp_free(subset);
  }
  void set_absolute() { absolute = true; }
  bool is_absolute() const { return absolute; }
  void push_back(int num, kmp_hw_t type, int offset) {
    if (depth == capacity - 1) {
      // Grow the items array geometrically.
      capacity *= 2;
      item_t *new_items = (item_t *)__kmp_allocate(sizeof(item_t) * capacity);
      for (int i = 0; i < depth; ++i)
        new_items[i] = items[i];
      __kmp_free(items);
      items = new_items;
    }
    items[depth].num = num;
    items[depth].type = type;
    items[depth].offset = offset;
    depth++;
    set |= (1ull << type);
  }
  int get_depth() const { return depth; }
  const item_t &at(int index) const {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  item_t &at(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    return items[index];
  }
  void remove(int index) {
    KMP_DEBUG_ASSERT(index >= 0 && index < depth);
    set &= ~(1ull << items[index].type);
    for (int j = index + 1; j < depth; ++j) {
      items[j - 1] = items[j];
    }
    depth--;
  }
  bool specified(kmp_hw_t type) const { return ((set & (1ull << type)) > 0); }
  void dump() const {
    printf("**********************\n");
    printf("*** kmp_hw_subset: ***\n");
    printf("* depth: %d\n", depth);
    printf("* items:\n");
    for (int i = 0; i < depth; ++i) {
      printf("num: %d, type: %s, offset: %d\n", items[i].num,
             __kmp_hw_get_keyword(items[i].type), items[i].offset);
    }
    printf("* set: 0x%llx\n", set);
    printf("* absolute: %d\n", absolute);
    printf("**********************\n");
  }
};

extern kmp_topology_t *__kmp_topology;
extern kmp_hw_subset_t *__kmp_hw_subset;
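// Illustrative sketch: a KMP_HW_SUBSET value such as "2s,4c,2t" would end up
// stored as three items, roughly:
//
//   kmp_hw_subset_t *s = kmp_hw_subset_t::allocate();
//   s->push_back(2, KMP_HW_SOCKET, 0); // 2 sockets
//   s->push_back(4, KMP_HW_CORE, 0); // 4 cores per socket
//   s->push_back(2, KMP_HW_THREAD, 0); // 2 threads per core
//   // ... consumed by kmp_topology_t::filter_hw_subset() ...
//   kmp_hw_subset_t::deallocate(s);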
// A structure representing the machine hierarchy used by the hierarchical
// barrier algorithms.
class hierarchy_info {
public:
  // Good default values for number of leaves and branching factor, given the
  // rank of the leaf kernel thread.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;

  // Number of levels in the hierarchy. Typical levels are threads/core,
  // cores/socket, sockets/machine.
  kmp_uint32 maxLevels;

  // The depth of the machine configuration hierarchy, in terms of the number
  // of levels along the longest path from root to any leaf.
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // 0=initialized, 1=not initialized,
                                   // 2=initialization in progress
  volatile kmp_int8 resizing; // 0=not resizing, 1=resizing

  // Level 0 corresponds to leaves. numPerLevel[i] is the number of children
  // the parent of a node at level i has; skipPerLevel[i] is the number of
  // leaves spanned by a node at level i.
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;

  void deriveLevels() {
    int hier_depth = __kmp_topology->get_depth();
    for (int i = hier_depth - 1, level = 0; i >= 0; --i, ++level) {
      numPerLevel[level] = __kmp_topology->get_ratio(i);
    }
  }
  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }
  void init(int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    /* Added explicit initialization of the data fields here to prevent usage
       of dirty value observed when static library is re-initialized multiple
       times (e.g. when non-OpenMP thread repeatedly launches/joins thread
       that uses OpenMP). */
    depth = 1;
    resizing = 0;
    maxLevels = 7;
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    // Init everything to 1 item per level.
    for (kmp_uint32 i = 0; i < maxLevels; ++i) {
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Derive levels from the topology if available; otherwise guess a
    // reasonable leaf/branch split.
    if (__kmp_topology && __kmp_topology->get_depth() > 0) {
      deriveLevels();
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1)
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription.
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
  // Resize the hierarchy if nproc changes to something larger than before.
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // Calculate new maxLevels.
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    // First see if old maxLevels is enough to contain new size.
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      // Resize arrays.
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);

      // Copy old elements from old arrays.
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }

      // Init new elements in arrays to 1.
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }

      // Free old arrays.
      __kmp_free(old_numPerLevel);
    }

    // Fill in oversubscription levels of hierarchy.
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};
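// Shape sketch (illustrative): init(16) with no machine topology available
// yields numPerLevel = {4, 4, 1, ...} (maxLeaves leaves per node, 16 / 4
// nodes above them) and skipPerLevel = {1, 4, 16, 32, 64, ...}: each entry is
// the number of leaves spanned at that level, with oversubscription levels
// doubling from there.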
#endif // KMP_AFFINITY_H