#include "kmp_affinity.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/sysctl.h>
#include <sys/loadavg.h>
#endif
struct kmp_sys_timer {
  struct timespec start;
};

#ifndef TIMEVAL_TO_TIMESPEC
// Convert timeval to timespec.
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
  do {                                                                         \
    (ts)->tv_sec = (tv)->tv_sec;                                               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
  } while (0)
#endif

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;
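// Defaults for the tick rate of the hardware timestamp counter; on x86 these
// are recalibrated at startup by __kmp_initialize_system_tick() below.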
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||   \
      KMP_OS_AIX) &&                                                           \
     KMP_AFFINITY_SUPPORTED)
void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
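// Probe the OS for the affinity mask size it expects, enabling affinity with
// that size on success or disabling it when no working interface is found.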
#if KMP_OS_AIX
void __kmp_affinity_determine_capable(const char *env_var) {
  // All versions of AIX support bindprocessor().
  size_t mask_size = __kmp_xproc / CHAR_BIT;
  // Round up to byte boundary.
  if (__kmp_xproc % CHAR_BIT)
    ++mask_size;
  // Round up to the mask size boundary.
  if (mask_size % sizeof(__kmp_affin_mask_size))
    mask_size += sizeof(__kmp_affin_mask_size) -
                 mask_size % sizeof(__kmp_affin_mask_size);
  KMP_AFFINITY_ENABLE(mask_size);
  KA_TRACE(10,
           ("__kmp_affinity_determine_capable: "
            "AIX OS affinity interface bindprocessor functional (mask size = "
            "%" KMP_SIZE_T_SPEC ").\n",
            __kmp_affin_mask_size));
}
#else // !KMP_OS_AIX
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.
#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD || KMP_OS_DRAGONFLY
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#elif KMP_OS_NETBSD
#define KMP_CPU_SET_SIZE_LIMIT (256)
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported.
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
  // Call the getaffinity system call repeatedly with increasing set sizes
  // until a valid input buffer size is found.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (errno == ENOSYS) {
      // We shouldn't get here.
      KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                    "inconsistent OS call behavior: errno == ENOSYS for mask "
                    "size %d\n",
                    size));
      if (verbose ||
          (warnings && (type != affinity_none) &&
           (type != affinity_default) && (type != affinity_disabled))) {
        int error = errno;
        kmp_msg_t err_code = KMP_ERR(error);
        __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                  err_code, __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
          __kmp_str_free(&err_code.str);
        }
      }
      KMP_AFFINITY_DISABLE();
      KMP_INTERNAL_FREE(buf);
      return;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
  KMP_INTERNAL_FREE(buf);
#endif

  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
#endif // KMP_OS_AIX
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
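/* Emulated fetch-and-op atomics: each routine below reads the old value and
   retries a compare-and-store until it succeeds, returning the value observed
   before the update. These are only compiled when assembly intrinsics are
   unavailable (note the !KMP_ASM_INTRINS guard above). */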
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}
#if KMP_ARCH_X86 || KMP_ARCH_WASM
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_WASM */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
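/* Determine the stack base and size of the calling thread. This appears to
   return TRUE when the bounds could be queried from the OS and FALSE when the
   caller must rely on incremental stack-growth refinement instead. */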
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing. */
  if (!KMP_UBER_GTID(gtid)) {
#if KMP_OS_SOLARIS
    stack_t s;
    if ((status = thr_stksegment(&s)) < 0) {
      KMP_CHECK_SYSFAIL("thr_stksegment", status);
    }
    addr = s.ss_sp;
    size = s.ss_size;
    KA_TRACE(60, ("__kmp_set_stack_info: T#%d thr_stksegment returned size:"
                  " %lu, low addr: %p\n",
                  gtid, size, addr));
#else
    pthread_attr_t attr;
    /* Fetch the real thread attributes. */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
#endif
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size. */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from an initial conservative
     estimate. */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
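/* Thread entry routine passed to pthread_create() by __kmp_create_worker():
   performs per-thread setup (gtid, stats, affinity, cancellation state, FP
   control registers, signal mask, stack offset) and then runs
   __kmp_launch_thread(). */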
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);

#if KMP_STATS_ENABLED
  // Set thread-local index to point to thread-specific stats.
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  // Register the monitor thread.
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore the monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix that allows applications with real-time scheduling
  // policies to work; without it the monitor thread may starve other threads.
  int sched = sched_getscheduler(0);
  if (sched == SCHED_FIFO || sched == SCHED_RR) {
    // Running under a real-time policy: try to raise the monitor's priority.
    struct sched_param param;
    int max_priority = sched_get_priority_max(sched);
    int rc;
    KMP_WARNING(RealTimeSchedNotSupported);
    sched_getparam(0, &param);
    if (param.sched_priority < max_priority) {
      param.sched_priority += 1;
      rc = sched_setscheduler(0, sched, &param);
      if (rc != 0) {
        int error = errno;
        kmp_msg_t err_code = KMP_ERR(error);
        __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                  err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
          __kmp_str_free(&err_code.str);
        }
      }
    } else {
      // The priority cannot be raised any further; warn that the monitor may
      // starve.
      __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                __kmp_msg_null);
    }
  }
  // Free the thread that is waiting for the monitor to start.
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // The monitor should not fall asleep if g_done has been set.
    if (!TCR_4(__kmp_global.g.g_done)) { // Check once more under the mutex.
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != ETIMEDOUT && status != EINTR) {
        KMP_SYSFAIL("pthread_cond_timedwait", status);
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* Now we need to terminate the worker threads. */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* Terminate the OpenMP worker threads. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // Set up worker thread stats before the parallel region starts.
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // Root threads reuse the stats set up earlier for the initial thread.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now. */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
}
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of infinite blocktime.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // rather than KMP_GTID_MONITOR
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning,
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);
  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait for the monitor thread to reset g_time.dt.t_value (set to -1 above),
  // so the caller knows the monitor has actually started.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));
}
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
}
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both are 0, the monitor never started; if both are KMP_GTID_DNE, it has
  // already been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  /* First, check to see whether the monitor thread exists to wake it up. This
     avoids performance problems when the monitor sleeps during a
     blocktime-sized interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));
}
#else
// Empty symbol to export (see exports_so.txt) when monitor thread feature is
// disabled.
extern "C" void __kmp_reap_monitor(kmp_info_t *th) { (void)th; }
#endif // KMP_USE_MONITOR
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
}

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler: let's shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      TCW_4(__kmp_global.g.g_abort, signo);
      TCW_4(__kmp_global.g.g_done, TRUE);
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
}

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep the user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save the initial/system signal handlers to see if user handlers are
    // installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
}

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
  }
}
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If ! parallel_init, we do not install handlers, just save the original
       handlers. Let us handle signals in any case. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
}

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
}

#endif // KMP_HANDLE_SIGNALS
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
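/* After fork(), only the forking thread exists in the child process, so the
   runtime state inherited from the parent (thread pools, locks, threadprivate
   caches, affinity data) must be reset before OpenMP can be used again. */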
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  /* Reset the library so execution in the child starts "all over again" with
     clean data structures in initial states. Don't worry about freeing memory
     allocated by the parent; just abandon it to be safe. */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY ||     \
    KMP_OS_AIX
  // Reset the affinity in the child to the initial thread affinity.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (over-subscription is
  // expected after the fork).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {
    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));
      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* Reset statically initialized locks. */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  /* This is necessary to make sure no stale data is left around. */
  __kmp_need_register_serial = FALSE;
  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized by another thread.
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do the initialization.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* This means the suspension pthread objects for this thread have been
       initialized in this instance of the process. */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop, as the documentation states that pthread_cond_wait
       may "with low probability" return when the condition variable has not
       been signaled or broadcast. */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // On interrupt or timeout, if the thread is no longer sleeping, make
        // sure sleep_loc gets reset; this shouldn't be needed if we woke up
        // via resume.
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously deactivated).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }

  // Clear the sleep loc and flag type.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // Coming from __kmp_null_resume_wrapper, or the thread is now sleeping on
    // a different location; wake up at the new location.
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag) { // Thread doesn't appear to be sleeping on anything.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // Flag type does not match this function template; the thread may be
    // sleeping on a different flag. Retry through the null wrapper.
    KF_TRACE(
        5,
        ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
         "spin(%p) type=%d ptr_type=%d\n",
         gtid, target_gtid, flag, flag->get(), flag->get_type(),
         th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    __kmp_null_resume_wrapper(th);
    return;
  } else {
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }
  KMP_DEBUG_ASSERT(flag);
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
template void
__kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;

  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);

  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}
int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  // gtid was stored biased by +1 in __kmp_gtid_set_specific(); 0 means unset.
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // Maximum resident set size utilized (in kilobytes).
  info->maxrss = r_usage.ru_maxrss;
  // Number of page faults serviced without any I/O.
  info->minflt = r_usage.ru_minflt;
  // Number of page faults serviced that required I/O.
  info->majflt = r_usage.ru_majflt;
  // Number of times a process was "swapped" out of memory.
  info->nswap = r_usage.ru_nswap;
  // Number of times the file system had to perform input.
  info->inblock = r_usage.ru_inblock;
  // Number of times the file system had to perform output.
  info->oublock = r_usage.ru_oublock;
  // Number of voluntary context switches.
  info->nvcsw = r_usage.ru_nvcsw;
  // Number of involuntary context switches.
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
#elif KMP_OS_DARWIN
  size_t len = sizeof(r);
  sysctlbyname("hw.logicalcpu", &r, &len, NULL, 0);
#else
#error "Unknown or unsupported OS."
#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  // Read the stack size of the calling thread and save it as the default for
  // worker threads; this should be done before reading environment variables.
  struct rlimit rlim;
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }

  if (sysconf(_SC_THREADS)) {
    /* Query the maximum number of threads. */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
#ifdef __ve__
    if (__kmp_sys_max_nth == -1) {
      // VE's pthread supports only up to 64 threads per VE process, so use
      // that as the maximum.
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#else
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL. */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX. */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#endif

    /* Query the minimum stack size. */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid. */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period. */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user. */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user. */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
/* Return the current time stamp in nsec. */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
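/* Calibrate the timestamp counter: spin until __kmp_hardware_timestamp()
   advances by `delay` ticks, measure the elapsed wall-clock nanoseconds with
   __kmp_now_nsec(), and derive __kmp_ticks_per_msec/__kmp_ticks_per_usec from
   the ratio. */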
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 1000000;
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
    if (tpus > 0.0) {
      __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
      __kmp_ticks_per_usec = (kmp_uint64)tpus;
    }
  }
}
#endif
/* Determine whether the given address is mapped into the current address
   space with both read and write access. */
int __kmp_is_address_mapped(void *addr) {
  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD
  /* On GNU/Linux and Hurd, read the /proc/<pid>/maps pseudo-file to get all
     the address ranges mapped into the address space. */
  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {
    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // Grow the buffer slightly in case the map grows between the two sysctl
  // calls.
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(KMP_INTERNAL_MALLOC(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    KMP_INTERNAL_FREE(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/writable addresses within current map entry?
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  KMP_INTERNAL_FREE(buf);
#elif KMP_OS_DRAGONFLY
  char err[_POSIX2_LINE_MAX];
  kinfo_proc *proc;
  vmspace sp;
  vm_map *cur;
  vm_map_entry entry, *c;
  struct proc p;
  kvm_t *fd;
  uintptr_t uaddr;
  int num;

  fd = kvm_openfiles(nullptr, nullptr, nullptr, O_RDONLY, err);
  if (!fd) {
    return 0;
  }

  proc = kvm_getprocs(fd, KERN_PROC_PID, getpid(), &num);

  if (kvm_read(fd, static_cast<uintptr_t>(proc->kp_paddr), &p, sizeof(p)) !=
          sizeof(p) ||
      kvm_read(fd, reinterpret_cast<uintptr_t>(p.p_vmspace), &sp, sizeof(sp)) !=
          sizeof(sp)) {
    kvm_close(fd);
    return 0;
  }

  cur = &sp.vm_map;
  uaddr = reinterpret_cast<uintptr_t>(addr);
  for (c = kvm_vm_map_entry_first(fd, cur, &entry); c;
       c = kvm_vm_map_entry_next(fd, c, &entry)) {
    if ((uaddr >= entry.ba.start) && (uaddr <= entry.ba.end)) {
      if ((entry.protection & VM_PROT_READ) != 0 &&
          (entry.protection & VM_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
  }

  kvm_close(fd);
#elif KMP_OS_SOLARIS
  int err;
  pid_t pid = getpid();
  struct ps_prochandle *fd = Pgrab(pid, PGRAB_RDONLY, &err);
  if (!fd) {
    return 0;
  }

  char *name = __kmp_str_format("/proc/%d/map", pid);
  size_t sz = (1 << 20);
  int file = open(name, O_RDONLY);
  if (file == -1) {
    KMP_INTERNAL_FREE(name);
    return 0;
  }

  char *buf = reinterpret_cast<char *>(KMP_INTERNAL_MALLOC(sz));

  ssize_t rd;
  while (sz > 0 && (rd = pread(file, buf, sz, 0)) == (ssize_t)sz) {
    // The buffer was too small to hold the whole map; double it and retry.
    sz <<= 1;
    char *newbuf = reinterpret_cast<char *>(KMP_INTERNAL_REALLOC(buf, sz));
    buf = newbuf;
  }

  prmap_t *map = reinterpret_cast<prmap_t *>(buf);
  uintptr_t uaddr = reinterpret_cast<uintptr_t>(addr);

  for (prmap_t *cur = map; rd > 0; cur++, rd -= sizeof(*map)) {
    if ((uaddr >= cur->pr_vaddr) && (uaddr < cur->pr_vaddr + cur->pr_size)) {
      if ((cur->pr_mflags & MA_READ) != 0 && (cur->pr_mflags & MA_WRITE) != 0) {
        found = 1;
        break;
      }
    }
  }

  KMP_INTERNAL_FREE(map);
  close(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_DARWIN
  /* On Darwin, the /proc pseudo filesystem is not available. Probe the address
     by trying to read one byte from it via the Mach VM interface. */
  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }
#elif KMP_OS_NETBSD
  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD
  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    kiv.kve_start += 1;
    // Stop when the map has been walked to its end.
    if (kiv.kve_end == end)
      break;
    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
#elif KMP_OS_WASI
  found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
#else
#error "Unknown or unsupported OS"
#endif

  return found;
}
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_SOLARIS

// The function returns the rounded value of the system load average over a
// time interval selected by __kmp_load_balance_interval (the 180 and 600
// second thresholds below pick the 1-, 5-, or 15-minute average). It returns
// -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of averages to use.
  // getloadavg() may fill fewer than three samples, so check res as well.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
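// Returns the number of running (not sleeping) threads in the system, or -1 on
// error. Linux* OS has no getloadavg()-style query with these semantics, so
// the count is taken directly by scanning /proc/<pid>/task/<tid>/stat for
// every process and checking the state field for 'R'. Because the scan is
// relatively expensive, the result is cached and refreshed at most once per
// __kmp_load_balance_interval seconds.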
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads.
  static double glb_call_time = 0; // Thread balance algorithm call time.

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return an
    // error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
// Proc entry is a directory and name starts with a digit. Assume it is a
// process' directory.
#if KMP_OS_AIX
    if (isdigit(proc_entry->d_name[0])) {
#else
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
#endif
      ++total_processes;
      // Make sure init process is the very first in "/proc", so we can replace
      // strcmp(proc_entry->d_name, "1") == 0 with simpler
      // total_processes == 1.
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the entry and opening this
        // directory; not an error, just move on. However, if it is the "init"
        // process that cannot be opened, the kernel likely does not support
        // the "task" subdirectory: report a permanent error.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
// It is a directory and name starts with a digit.
#if KMP_OS_AIX
          if (isdigit(task_entry->d_name[0])) {
#else
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
#endif
            // Construct complete stat file path.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: Low-level API (open/read/close) is used. High-level API
            // (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // A task (thread) can terminate just before reading this file,
              // so this is not treated as an error.
            } else {
              /* Content of "stat" file looks like:
                     24285 (program name) S ...
                 The program name may contain spaces and parentheses, so look
                 for the last ") " and read the state character after it. */
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There should be at least one thread running in the system: this one.
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
}

#endif // KMP_OS_LINUX
#endif // USE_LOAD_BALANCE
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
      KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF ||   \
      KMP_ARCH_AARCH64_32)
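// No assembly implementation of __kmp_invoke_microtask() exists for the
// architectures excluded above, so a portable C fallback is used instead: one
// function-pointer typedef per supported argument count (up to 15), dispatched
// by a switch on argc below.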
typedef void (*microtask_t0)(int *, int *);
typedef void (*microtask_t1)(int *, int *, void *);
typedef void (*microtask_t2)(int *, int *, void *, void *);
typedef void (*microtask_t3)(int *, int *, void *, void *, void *);
typedef void (*microtask_t4)(int *, int *, void *, void *, void *, void *);
typedef void (*microtask_t5)(int *, int *, void *, void *, void *, void *,
                             void *);
typedef void (*microtask_t6)(int *, int *, void *, void *, void *, void *,
                             void *, void *);
typedef void (*microtask_t7)(int *, int *, void *, void *, void *, void *,
                             void *, void *, void *);
typedef void (*microtask_t8)(int *, int *, void *, void *, void *, void *,
                             void *, void *, void *, void *);
typedef void (*microtask_t9)(int *, int *, void *, void *, void *, void *,
                             void *, void *, void *, void *, void *);
typedef void (*microtask_t10)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *);
typedef void (*microtask_t11)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *);
typedef void (*microtask_t12)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *);
typedef void (*microtask_t13)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *);
typedef void (*microtask_t14)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *);
typedef void (*microtask_t15)(int *, int *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *, void *,
                              void *, void *, void *, void *, void *);
// Generic C implementation: dispatch on the argument count and cast pkfn to
// the matching exact signature before the call.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*(microtask_t0)pkfn)(&gtid, &tid);
    break;
  case 1:
    (*(microtask_t1)pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*(microtask_t2)pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*(microtask_t3)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*(microtask_t4)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3]);
    break;
  case 5:
    (*(microtask_t5)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4]);
    break;
  case 6:
    (*(microtask_t6)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5]);
    break;
  case 7:
    (*(microtask_t7)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6]);
    break;
  case 8:
    (*(microtask_t8)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                          p_argv[7]);
    break;
  case 9:
    (*(microtask_t9)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                          p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                          p_argv[7], p_argv[8]);
    break;
  case 10:
    (*(microtask_t10)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*(microtask_t11)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*(microtask_t12)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11]);
    break;
  case 13:
    (*(microtask_t13)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12]);
    break;
  case 14:
    (*(microtask_t14)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*(microtask_t15)pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2],
                           p_argv[3], p_argv[4], p_argv[5], p_argv[6],
                           p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                           p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif // generic __kmp_invoke_microtask fallback
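// Illustrative sketch, not part of the runtime: the generic invoker above
// works because the caller and the outlined function agree on argc, so the
// opaque pointer can be cast back to the exact signature before the call.
// The names invoke_outlined, my_microtask, generic_fn, and fn2 are
// hypothetical. Disabled with #if 0.
#if 0
#include <cstdio>

typedef void (*generic_fn)(void);
typedef void (*fn2)(int *, int *, void *, void *);

static void my_microtask(int *gtid, int *tid, void *a, void *b) {
  printf("gtid=%d tid=%d sum=%d\n", *gtid, *tid, *(int *)a + *(int *)b);
}

static void invoke_outlined(generic_fn pkfn, int gtid, int tid, int argc,
                            void **argv) {
  switch (argc) {
  case 2:
    // Cast back to the exact type before calling; invoking through a
    // mismatched function-pointer type would be undefined behavior.
    (*(fn2)pkfn)(&gtid, &tid, argv[0], argv[1]);
    break;
  default:
    fprintf(stderr, "unsupported argc: %d\n", argc);
  }
}

// Usage:
//   int x = 1, y = 2;
//   void *args[] = {&x, &y};
//   invoke_outlined((generic_fn)&my_microtask, 0, 0, 2, args);
#endif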
// Hidden helper team support. The OS guard below is reconstructed; on
// unsupported OSes the stub versions at the end of this section compile
// instead.
#if KMP_OS_LINUX

// Condition variable and lock used to signal that initialization of the
// hidden helper team has finished.
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable and lock used to signal that deinitialization of the
// hidden helper team has finished.
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable and lock for the wrapper function of the hidden helper
// main thread.
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore the hidden helper workers block on. A semaphore is used instead
// of a condition variable so that N signals reliably wake N workers.
sem_t hidden_helper_task_sem;
// Blocks the calling hidden helper worker until a task is available.
void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}
void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize the condition variables.
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  // Initialize the mutexes that protect them.
  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore the workers wait on: shared between threads
  // only (pshared = 0), initial count 0.
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish the initialization.
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}
// Called by the initial thread to wait until the hidden helper team has
// finished initialization.
void __kmp_hidden_helper_threads_initz_wait() {
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
// Signals that initialization of the hidden helper team is finished. Both
// the signal and the flag update happen under the lock, so a waiter checking
// the flag in __kmp_hidden_helper_threads_initz_wait() cannot miss the
// release.
void __kmp_hidden_helper_initz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
// The hidden helper team's main thread blocks here until it is released by
// __kmp_hidden_helper_main_thread_release().
void __kmp_hidden_helper_main_thread_wait() {
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
// Releases the hidden helper team's main thread blocked in
// __kmp_hidden_helper_main_thread_wait().
void __kmp_hidden_helper_main_thread_release() {
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
// Releases one hidden helper worker blocked in
// __kmp_hidden_helper_worker_thread_wait().
void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}
// Blocks until deinitialization of the hidden helper team has finished.
void __kmp_hidden_helper_threads_deinitz_wait() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
// Signals that deinitialization of the hidden helper team is finished.
void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
#else // Hidden helper teams are not supported on this OS: abort if used.

void __kmp_hidden_helper_worker_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_do_initialize_hidden_helper_threads() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_initz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_initz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_worker_thread_signal() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

#endif // KMP_OS_LINUX
bool __kmp_detect_shm() {
  DIR *dir = opendir("/dev/shm");
  if (dir) { // /dev/shm exists and is accessible.
    closedir(dir);
    return true;
  }
  // ENOENT means /dev/shm does not exist; any other errno also counts as
  // "not usable" here.
  return false;
}

bool __kmp_detect_tmp() {
  DIR *dir = opendir("/tmp");
  if (dir) { // /tmp exists and is accessible.
    closedir(dir);
    return true;
  }
  return false; // /tmp is missing or not accessible.
}
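// Illustrative sketch, not part of the runtime: a caller can combine the two
// probes above to pick a backing directory for a shared-memory file,
// preferring /dev/shm over /tmp. choose_shm_dir is a hypothetical helper;
// disabled with #if 0.
#if 0
static const char *choose_shm_dir() {
  if (__kmp_detect_shm())
    return "/dev/shm";
  if (__kmp_detect_tmp())
    return "/tmp";
  return nullptr; // Neither directory is available.
}
#endif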