#include "kmp_affinity.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <semaphore.h>
#endif
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/times.h>

#if KMP_OS_LINUX
#include <sys/sysinfo.h>
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

// Set thread affinity to the given OS proc.
void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
/* Determine if we can access affinity functionality on this version of
   Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
   __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
// Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size, we don't have to
  // search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported.
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  long size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %ld\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
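// Illustrative sketch (not part of the runtime): the probing strategy above,
// reduced to plain Linux syscalls. A successful sched_getaffinity returns the
// number of bytes the kernel actually copied, i.e. the mask size it works
// with; EINVAL means "buffer too small, try a bigger one". Assumes <errno.h>,
// <sys/syscall.h> and <unistd.h>; the helper name probe_affinity_mask_size is
// hypothetical.
static long probe_affinity_mask_size(void) {
  unsigned char buf[1024];
  for (long size = 1; size <= (long)sizeof(buf); size *= 2) {
    long rc = syscall(__NR_sched_getaffinity, 0, size, buf);
    if (rc > 0)
      return rc; // kernel accepted this size
    if (errno != EINVAL)
      return -1; // unexpected failure; treat affinity as unsupported
  }
  return -1; // no size in range worked
}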
#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
   use compare_and_store for these routines. */

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
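// Illustrative sketch (not part of the runtime): the fetch-or loop above,
// expressed with the GCC/Clang __atomic builtins. Unlike the
// KMP_COMPARE_AND_STORE_REL* loops, __atomic_compare_exchange_n refreshes
// old_value with the freshly observed *p on failure, so the loop body does
// not need to re-read the location by hand. The helper name is hypothetical.
static inline unsigned char atomic_fetch_or_u8(volatile unsigned char *p,
                                               unsigned char d) {
  unsigned char old_value = __atomic_load_n(p, __ATOMIC_RELAXED);
  while (!__atomic_compare_exchange_n(p, &old_value, old_value | d,
                                      /*weak=*/true, __ATOMIC_RELEASE,
                                      __ATOMIC_RELAXED)) {
    // old_value now holds the current *p; retry with it.
  }
  return old_value;
}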
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume the call failed and use incremental
   stack refinement instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {
    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from an initial conservative
     estimate. */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
    (void)padding;
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex. */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore the monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with real-time
  // scheduling policy to work. Disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread.
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set;
    // check once more under the mutex.
    if (!TCR_4(__kmp_global.g.g_done)) {
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads; the value of t_abort is
       the signal we caught */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer the thread-specific stats-pointer to
  // __kmp_launch_worker, which sets its thread-local pointer from it.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is already set, so reuse it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now. The multiple of 2 is there because on
     some machines, requesting an unusual stacksize causes the thread to have
     an offset before the dummy alloca() takes place to create the offset.
     We alloca() twice the offset so the upcoming alloca() does not eliminate
     any premade offset, and also gives the user the stack space they
     requested for all threads. */
  stack_size += gtid * __kmp_stkoffset * 2;

#if defined(__ANDROID__) && __ANDROID_API__ < 19
  // Round the stack size to a multiple of the page size. Older versions of
  // Android (until KitKat) would fail pthread_attr_setstacksize with EINVAL
  // if the stack size was not a multiple of the page size.
  stack_size = (stack_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#endif

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need a monitor thread in case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes\n",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning,
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its priority.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid a performance problem when the monitor sleeps during a
     blocktime-size interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler
static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers are
    // installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* !parallel_init means we are called from __kmp_serial_initialize().
       Install signal handlers unconditionally, so library users can reset
       externally set signal handlers by calling OMP API functions. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals
#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // Reset the affinity in the child to the initial thread affinity in the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork, and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  __kmp_affinity_masks = NULL;
  __kmp_affinity_num_masks = 0;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {
    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));
      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* Reset statically initialized locks. */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
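// Illustrative sketch (not part of the runtime): the pthread_atfork contract
// the runtime relies on. The prepare handler runs in the parent before
// fork(); the parent and child handlers run after, in the respective
// processes, so a lock acquired in prepare is consistently released on both
// sides. Assumes <pthread.h>; all names are hypothetical.
static pthread_mutex_t fork_guard = PTHREAD_MUTEX_INITIALIZER;

static void on_prepare(void) { pthread_mutex_lock(&fork_guard); }
static void on_parent(void) { pthread_mutex_unlock(&fork_guard); }
static void on_child(void) { pthread_mutex_unlock(&fork_guard); }

static void register_fork_guard(void) {
  // Like __kmp_register_atfork, this should run exactly once per process.
  pthread_atfork(on_prepare, on_parent, on_child);
}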
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if another thread is initializing.
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do the initialization.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* This means we have initialized the suspension pthread objects for this
       thread in this instance of the process. */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
// Return TRUE if lock obtained, FALSE otherwise.
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop, as the documentation states that this may
       "with low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
    } // while

    // Mark the thread as active again (if it was previously marked inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
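// Illustrative sketch (not part of the runtime): the core of the
// suspend/resume protocol above — a "sleeping" flag checked under a mutex,
// with the condition-variable wait wrapped in a loop to absorb spurious
// wakeups. Assumes <pthread.h>; all names are hypothetical.
struct sleep_slot {
  pthread_mutex_t mx;
  pthread_cond_t cv;
  int sleeping;
};

static void slot_suspend(struct sleep_slot *s) {
  pthread_mutex_lock(&s->mx);
  s->sleeping = 1;
  while (s->sleeping) // loop: pthread_cond_wait may return spuriously
    pthread_cond_wait(&s->cv, &s->mx);
  pthread_mutex_unlock(&s->mx);
}

static void slot_resume(struct sleep_slot *s) {
  pthread_mutex_lock(&s->mx);
  if (s->sleeping) { // only signal a thread that actually went to sleep
    s->sleeping = 0;
    pthread_cond_signal(&s->cv);
  }
  pthread_mutex_unlock(&s->mx);
}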
/* This routine signals the thread specified by target_gtid to wake up
   after setting the sleep bit indicated by the flag argument to FALSE.
   The target thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) {
    // get_ptr_type simply shows what the flag was cast to.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else { // if multiple threads are sleeping, the flag should be internally
    // referring to a specific thread here
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): %u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR

void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}
int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // Maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // Number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // Number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // Number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // Number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // Number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // Number of voluntary context switches
  info->nvcsw = r_usage.ru_nvcsw;
  // Number of involuntary context switches
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}
void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD

  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));

#elif KMP_OS_DARWIN

  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
} // __kmp_get_xproc
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

#if !KMP_32_BIT_ARCH
  struct rlimit rlim;
  // Read the stack size of the calling thread and save it as the default for
  // worker threads; this should be done before reading environment variables.
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value, adjust if needed
  }
#endif /* KMP_32_BIT_ARCH */

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // ~50-100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
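// Illustrative sketch (not part of the runtime): the calibration above in
// outline. Spin for a fixed number of timestamp-counter ticks, measure the
// same window with a wall clock, and divide to get ticks per millisecond.
// Uses the compiler builtin __builtin_ia32_rdtsc as a stand-in for
// __kmp_hardware_timestamp; the function name is hypothetical.
#if defined(__x86_64__) || defined(__i386__)
static kmp_uint64 calibrate_ticks_per_msec(void) {
  const kmp_uint64 delay_ticks = 100000;
  kmp_uint64 ns_start = __kmp_now_nsec();
  kmp_uint64 tsc_goal = __builtin_ia32_rdtsc() + delay_ticks;
  kmp_uint64 tsc_now;
  while ((tsc_now = __builtin_ia32_rdtsc()) < tsc_goal)
    ; // busy-wait for ~delay_ticks TSC ticks
  kmp_uint64 ns_elapsed = __kmp_now_nsec() - ns_start;
  if (ns_elapsed == 0)
    return 0; // clock too coarse to calibrate; caller keeps the default
  // Ticks spent (including overshoot past the goal), scaled to one
  // millisecond of wall time.
  return (kmp_uint64)1e6 * (delay_ticks + (tsc_now - tsc_goal)) / ns_elapsed;
}
#endif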
/* Determine whether the given address is mapped into the current address
   space. */
int __kmp_is_address_mapped(void *addr) {
  int found = 0;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    int rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but the beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  int rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // We pass the size over-allocated to allow for entries added meanwhile.
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/writable addresses within current map entry
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);

#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using the vm interface. */

  int buffer;
  vm_size_t count;
  kern_return_t rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  int rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  int rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_DRAGONFLY

  // FIXME(DragonFly): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;
} // __kmp_is_address_mapped

#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during
// the given time interval, which depends on __kmp_load_balance_interval
// (default 60 sec; other values may be 300 sec or 900 sec). It returns -1 in
// case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested, depending on
  // the system load.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
// The function returns the number of running (not sleeping) threads, or -1 in
// case of error. An error could be reported if the Linux* OS kernel is too
// old (without "/proc" support). Counting running threads stops once max
// running threads have been encountered.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads
  static double glb_call_time = 0; // Thread balance algorithm call time

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and its name starts with a digit. Assume it
    // is a process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure the init process is the very first in "/proc", so we can
      // replace strcmp(proc_entry->d_name, "1") == 0 with the simpler
      // total_processes == 1. (total_processes == 1 => d_name == "1",
      // expressed as !a || b.)
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" directory entry
        // and opening the process' "task/" directory, so in general we should
        // not complain, just skip the process. But on systems with no "task/"
        // support we would rescan "/proc/" uselessly forever. The "init"
        // process (pid 1) should always exist, so if we cannot open
        // "/proc/1/task/", "task/" is not supported by the kernel: report a
        // permanent error.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and its name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
            ++total_threads;

            // Construct complete stat file path. A series of
            // __kmp_str_buf_cat works a bit faster than __kmp_str_buf_print.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: the low-level API (open/read/close) is used; the
            // high-level API (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) can
              // terminate just before reading this file.
            } else {
              /* Content of a "stat" file looks like:
                 24285 (program) S ...
                 The running thread is identified by the state field,
                 which follows the command name in parentheses. */
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Searching for a closing parenthesis works a bit faster
                // than sscanf(buffer, "%*d (%*s) %c ", &state).
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance and running_threads is 0. Assert in debug
  // builds only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
} // __kmp_get_load_balance
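// Illustrative sketch (not part of the runtime): classifying one thread from
// its /proc/<pid>/task/<tid>/stat line, the same test the loop above applies.
// The state letter follows the command name, which is wrapped in parentheses
// and may itself contain spaces, hence the search for the closing ") ".
// Assumes <fcntl.h>, <string.h> and <unistd.h>; the helper name is
// hypothetical.
static int thread_is_running(const char *stat_path) {
  char buffer[65];
  int fd = open(stat_path, O_RDONLY);
  if (fd == -1)
    return 0; // the thread may have exited; treat as not running
  ssize_t len = read(fd, buffer, sizeof(buffer) - 1);
  close(fd);
  if (len <= 0)
    return 0;
  buffer[len] = 0;
  char *close_paren = strstr(buffer, ") ");
  // 'R' marks a runnable thread; 'S', 'D', 'Z', ... are not counted.
  return close_paren != NULL && *(close_paren + 2) == 'R';
}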
#endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE

#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64)

// We really only need the case with 1 argument, because CLANG always builds
// a struct of pointers to the shared variables referenced in the outlined
// function.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
#if KMP_OS_LINUX
// Functions for hidden helper tasks

// Condition variable for initializing the hidden helper team
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable for deinitializing the hidden helper team
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable for the wrapper function of the main thread
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore for worker threads. We don't use a condition variable here in
// case multiple signals are sent at the same time: only one thread might be
// woken up.
sem_t hidden_helper_task_sem;

void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}

void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize condition variables and mutexes
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish initialization
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}

void __kmp_hidden_helper_threads_initz_wait() {
  // The initial thread waits here for completion of the initialization. The
  // condition variable will be released by the main thread of the hidden
  // helper threads.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_initz_release() {
  // Signal the initial thread that initialization is complete.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of the hidden helper team waits here until it is
  // released.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_main_thread_release() {
  // Let the main thread of the hidden helper team continue.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  // The initial thread waits here for completion of the deinitialization. The
  // condition variable will be released by the main thread of the hidden
  // helper threads.
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
#else // KMP_OS_LINUX
void __kmp_hidden_helper_worker_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_do_initialize_hidden_helper_threads() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_initz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_initz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_worker_thread_signal() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}
#endif // KMP_OS_LINUX