#include "kmp_wrapper_getpid.h"

#include <sys/times.h>
#include <sys/resource.h>
#include <sys/syscall.h>

#if KMP_OS_LINUX
# include <sys/sysinfo.h>
# if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
   // FUTEX_WAKE is part of the kernel ABI; defined here to avoid including <futex.h>.
#  ifndef FUTEX_WAKE
#   define FUTEX_WAKE    1
#  endif
# endif
#elif KMP_OS_DARWIN
# include <sys/sysctl.h>
# include <mach/mach.h>
#endif

#if KMP_COMPILER_GCC && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
# include <ffi.h>
#endif
struct kmp_sys_timer {
    struct timespec     start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
#if KMP_HANDLE_SIGNALS
    typedef void (* sig_func_t )( int );
    STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
    static sigset_t __kmp_sigset;
#endif
static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t  __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t  __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    sprintf( buffer, "(cond (lock (%ld, %d)), (descr (%p)))",
             cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
             cond->c_cond.__c_waiting );
}
# if KMP_ARCH_X86 || KMP_ARCH_ARM
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  241
#  elif __NR_sched_setaffinity != 241
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  242
#  elif __NR_sched_getaffinity != 242
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# elif KMP_ARCH_X86_64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  203
#  elif __NR_sched_setaffinity != 203
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  204
#  elif __NR_sched_getaffinity != 204
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# else
#  error Unknown or unsupported architecture
# endif
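/*
   Why the hard-coded numbers above: the runtime invokes sched_{get,set}affinity
   through syscall(), so it needs the raw system-call numbers even when the C
   library does not expose __NR_sched_*affinity.  When <sys/syscall.h> does
   define them, the #elif checks turn any disagreement with the values above
   into a compile-time error instead of a silently wrong system call.
*/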
int
__kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal set affinity operation when not capable");

    int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
    if (retval >= 0) return 0;
    int error = errno;
    if (abort_on_error) {
        __kmp_msg( kmp_ms_fatal, KMP_MSG( FatalSysError ), KMP_ERR( error ), __kmp_msg_null );
    }
    return error;
}
int
__kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal get affinity operation when not capable");

    int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
    if (retval >= 0) return 0;
    int error = errno;
    if (abort_on_error) {
        __kmp_msg( kmp_ms_fatal, KMP_MSG( FatalSysError ), KMP_ERR( error ), __kmp_msg_null );
    }
    return error;
}
void
__kmp_affinity_bind_thread( int which )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = (kmp_affin_mask_t *)alloca(__kmp_affin_mask_size);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
}
/*
 * Determine if we can access affinity functionality on this version of
 * Linux* OS by checking the __NR_sched_{get,set}affinity system calls, and
 * set __kmp_affin_mask_size to the appropriate value (0 means not capable).
 */
void
__kmp_affinity_determine_capable(const char *env_var)
{
    //
    // Check and see if the OS supports thread affinity.
    //
# define KMP_CPU_SET_SIZE_LIMIT          (1024*1024)

    int gCode;
    int sCode;
    int size;
    kmp_affin_mask_t *buf;
    buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );

    gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
      "initial getaffinity call returned %d errno = %d\n",
      gCode, errno));

    if (gCode < 0 && errno == ENOSYS) {
        //
        // System call not supported.
        //
        if (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)
          && (__kmp_affinity_type != affinity_default)
          && (__kmp_affinity_type != affinity_disabled))) {
            int error = errno;
            __kmp_msg( kmp_ms_warning, KMP_MSG( GetAffSysCallNotSupported, env_var ),
              KMP_ERR( error ), __kmp_msg_null );
        }
        __kmp_affin_mask_size = 0;  // should already be 0
        KMP_INTERNAL_FREE(buf);
        return;
    }
    if (gCode > 0) {
        //
        // The optimal situation: the OS returned the size of the buffer it
        // expects.  Verify that setaffinity accepts the same size by passing
        // a NULL mask: EFAULT means the size was accepted.
        //
        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "setaffinity for mask size %d returned %d errno = %d\n",
          gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg( kmp_ms_warning, KMP_MSG( SetAffSysCallNotSupported, env_var ),
                      KMP_ERR( error ), __kmp_msg_null );
                }
                __kmp_affin_mask_size = 0;  // should already be 0
                KMP_INTERNAL_FREE(buf);
                return;
            }
            if (errno == EFAULT) {
                __kmp_affin_mask_size = gCode;
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }

    //
    // Call the getaffinity system call repeatedly with increasing set sizes
    // until we succeed, or reach an upper bound on the search.
    //
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
      "searching for proper set size\n"));
    for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
        gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "getaffinity for mask size %d returned %d errno = %d\n", size,
          gCode, errno));

        if (gCode < 0) {
            if ( errno == ENOSYS ) {
                // We shouldn't get here.
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                  "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                  size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg( kmp_ms_warning, KMP_MSG( GetAffSysCallNotSupported, env_var ),
                      KMP_ERR( error ), __kmp_msg_null );
                }
                __kmp_affin_mask_size = 0;  // should already be 0
                KMP_INTERNAL_FREE(buf);
                return;
            }
            continue;
        }

        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "setaffinity for mask size %d returned %d errno = %d\n",
          gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) {
                // We shouldn't get here.
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                  "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                  size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg( kmp_ms_warning, KMP_MSG( SetAffSysCallNotSupported, env_var ),
                      KMP_ERR( error ), __kmp_msg_null );
                }
                __kmp_affin_mask_size = 0;  // should already be 0
                KMP_INTERNAL_FREE(buf);
                return;
            }
            if (errno == EFAULT) {
                __kmp_affin_mask_size = gCode;
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }
    KMP_INTERNAL_FREE(buf);

    //
    // Affinity is not supported.
    //
    __kmp_affin_mask_size = 0;
    KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
      "cannot determine mask size - affinity not supported\n"));
    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
      && (__kmp_affinity_type != affinity_none)
      && (__kmp_affinity_type != affinity_default)
      && (__kmp_affinity_type != affinity_disabled))) {
        KMP_WARNING( AffCantGetMaskSize, env_var );
    }
}
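/*
   A note on the probing logic above (a description of this code, not of any
   documented kernel contract): sched_getaffinity is expected to either fail
   outright (no affinity support) or tell us a usable mask size.  The
   deliberate sched_setaffinity call with a NULL mask pointer is the trick:
   if the kernel accepts the size, it faults on the NULL buffer and returns
   EFAULT, which we take as "this mask size is valid"; ENOSYS at that point
   would mean inconsistent get/set support, so affinity is disabled.  The
   doubling loop is only a fallback search for kernels that reject small
   sizes without suggesting a correct one.
*/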
void
__kmp_change_thread_affinity_mask( int gtid, kmp_affin_mask_t *new_mask,
                                   kmp_affin_mask_t *old_mask )
{
    KMP_DEBUG_ASSERT( gtid == __kmp_get_gtid() );
    if ( KMP_AFFINITY_CAPABLE() ) {
        int status;
        kmp_info_t *th = __kmp_threads[ gtid ];

        KMP_DEBUG_ASSERT( new_mask != NULL );

        if ( old_mask != NULL ) {
            status = __kmp_get_system_affinity( old_mask, TRUE );
            if ( status != 0 ) {
                __kmp_msg( kmp_ms_fatal, KMP_MSG( ChangeThreadAffMaskError ),
                  KMP_ERR( status ), __kmp_msg_null );
            }
        }

        __kmp_set_system_affinity( new_mask, TRUE );

        if (__kmp_affinity_verbose) {
            char old_buf[KMP_AFFIN_MASK_PRINT_LEN];
            char new_buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(old_buf, KMP_AFFIN_MASK_PRINT_LEN, old_mask);
            __kmp_affinity_print_mask(new_buf, KMP_AFFIN_MASK_PRINT_LEN, new_mask);
            KMP_INFORM( ChangeAffMask, "KMP_AFFINITY (Bind)", gtid, old_buf, new_buf );
        }

        /* Make sure old value is correct in thread data structures */
        KMP_DEBUG_ASSERT( old_mask != NULL && (memcmp(old_mask,
          th->th.th_affin_mask, __kmp_affin_mask_size) == 0) );
        KMP_CPU_COPY( th->th.th_affin_mask, new_mask );
    }
}

#endif // KMP_OS_LINUX
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)

int
__kmp_futex_determine_capable()
{
    int loc = 0;
    int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
    int retval = ( rc == 0 ) || ( errno != ENOSYS );

    KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno ) );
    KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
        retval ? "" : " not" ) );

    return retval;
}

#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
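/*
   The futex probe above relies on FUTEX_WAKE being harmless: waking a dummy
   stack word that nobody waits on simply returns 0.  Only ENOSYS proves the
   futex syscall is absent, so any other outcome (success or a different
   errno) is counted as "supported".
*/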
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
/*
 * use compare_and_store for these routines
 */

kmp_int32
__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value | d;

    while ( ! __kmp_compare_and_store32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value & d;

    while ( ! __kmp_compare_and_store32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value & d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value + d;
    while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value + d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value | d;
    while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value & d;
    while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value & d;
    }
    return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
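/*
   All of the routines above follow the same lock-free read-modify-write
   pattern: read the current value, compute the desired value, and retry the
   compare-and-store until no other thread has changed *p in between.  The
   value returned is the one observed before the successful update, which is
   what the "test then op" naming promises.  KMP_CPU_PAUSE() in the retry
   path is the usual spin-loop hint to the processor.
*/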
static void
__kmp_terminate_thread( int gtid )
{
    int status;
    kmp_info_t *th = __kmp_threads[ gtid ];

    if ( !th ) return;

    #ifdef KMP_CANCEL_THREADS
        KA_TRACE( 10, ( "__kmp_terminate_thread: kill (%d)\n", gtid ) );
        status = pthread_cancel( th->th.th_info.ds.ds_thread );
        if ( status != 0 && status != ESRCH ) {
            __kmp_msg( kmp_ms_fatal, KMP_MSG( CantTerminateWorkerThread ),
              KMP_ERR( status ), __kmp_msg_null );
        }
    #endif
    __kmp_yield( TRUE );
}
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    int            stack_data;
    pthread_attr_t attr;
    int            status;
    size_t         size = 0;
    void *         addr = 0;

    /* Always do incremental stack refinement for ubermaster threads since the initial
       thread stack range can be reduced by sibling thread creation so pthread_attr_getstack
       may cause thread gtid aliasing */
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {   /* was stack parameter determination successful? */
        /* Store the correct base and size */
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    }

    /* Use incremental refinement starting from the address of a local variable */
    TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
    TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
    TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
    return FALSE;
}
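/*
   Note: pthread_getattr_np() is a glibc extension, which is why the exact
   stack query is only attempted for non-uber threads whose attributes the
   library itself set up.  The fallback path seeds ds_stackbase with the
   address of a local variable and sets ds_stackgrow so later stack checks
   can refine the estimate incrementally.
*/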
static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
    void *padding = 0;
    int gtid;

    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif

#if USE_ITT_BUILD
    __kmp_itt_thread_name( gtid );
#endif /* USE_ITT_BUILD */

#if KMP_OS_LINUX
    __kmp_affinity_set_init_mask( gtid, FALSE );
#elif KMP_OS_DARWIN
    // affinity not supported
#else
    #error "Unknown or unsupported OS"
#endif

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    //
    // Set the FP control regs to be a copy of
    // the parallel initialization thread's.
    //
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        padding = alloca( gtid * __kmp_stkoffset );
    }

    KMP_MB();
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    return exit_val;
}
static void*
__kmp_launch_monitor( void *thr )
{
    int         status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec  interval;
    int yield_count;
    int yield_cycles = 0;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_launch_monitor: #1 launched\n" ) );

    /* register us as the monitor thread */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

#if USE_ITT_BUILD
    __kmp_itt_thread_ignore();    // Instruct Intel(R) Threading Tools to ignore monitor thread.
#endif /* USE_ITT_BUILD */

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_REAL_TIME_FIX
    // If the program runs under a real-time scheduling policy, try to raise
    // the monitor's priority so the workers cannot starve it.
    {
        int sched = sched_getscheduler( 0 );
        if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
            struct sched_param param;
            int max_priority = sched_get_priority_max( sched );
            int rc;
            KMP_WARNING( RealTimeSchedNotSupported );
            sched_getparam( 0, & param );
            if ( param.sched_priority < max_priority ) {
                param.sched_priority += 1;
                rc = sched_setscheduler( 0, sched, & param );
                if ( rc != 0 ) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( CantChangeMonitorPriority ),
                        KMP_ERR( error ),
                        KMP_MSG( MonitorWillStarve ),
                        __kmp_msg_null
                    );
                }
            } else {
                // We cannot abort here, because the number of CPUs may be
                // enough for all the threads, including the monitor.
                __kmp_msg(
                    kmp_ms_warning,
                    KMP_MSG( RunningAtMaxPriority ),
                    KMP_MSG( MonitorWillStarve ),
                    KMP_HNT( RunningAtMaxPriority ),
                    __kmp_msg_null
                );
            }
        }
    }
#endif // KMP_REAL_TIME_FIX

    KMP_MB();       /* Flush all pending memory write invalidates. */

    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec  = 1;
        interval.tv_nsec = 0;
    } else {
        interval.tv_sec  = 0;
        interval.tv_nsec = (NSEC_PER_SEC / __kmp_monitor_wakeups);
    }

    KA_TRACE( 10, ( "__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;  /* Start out with yielding shut off */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;  /* Yielding is on permanently */
    }

    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        /* This thread monitors the state of the system */

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        if (now.tv_nsec >= NSEC_PER_SEC) {
            now.tv_sec  += 1;
            now.tv_nsec -= NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        status = pthread_cond_timedwait( & __kmp_wait_cv.c_cond, & __kmp_wait_mx.m_mutex,
                                         & now );
        if ( status != 0 ) {
            if ( status != ETIMEDOUT && status != EINTR ) {
                KMP_SYSFAIL( "pthread_cond_timedwait", status );
            }
        }
        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;  /* Turn it off now */
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;  /* Turn it on now */
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        TCW_4( __kmp_global.g.g_time.dt.t_value,
          TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB();       /* Flush all pending memory write invalidates. */
    }

    KA_TRACE( 10, ( "__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    KA_TRACE( 10, ( "__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* now we need to terminate the worker threads  */
        /* the value of t_abort is the signal we caught */
        int gtid;

        KA_TRACE( 10, ( "__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        /* terminate the OpenMP worker threads */
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ( "__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );
    }

    KA_TRACE( 10, ( "__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}
void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t      handle;
    pthread_attr_t thread_attr;
    int            status;

    th->th.th_info.ds.ds_gtid = gtid;

    if ( KMP_UBER_GTID(gtid) ) {
        KA_TRACE( 10, ( "__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    }

    KA_TRACE( 10, ( "__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
    status = pthread_attr_init( &thread_attr );
    if ( status != 0 ) {
        __kmp_msg( kmp_ms_fatal, KMP_MSG( CantInitThreadAttrs ),
          KMP_ERR( status ), __kmp_msg_null );
    }
    status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
    if ( status != 0 ) {
        __kmp_msg( kmp_ms_fatal, KMP_MSG( CantSetWorkerState ),
          KMP_ERR( status ), __kmp_msg_null );
    }

    /* Set stack size for this thread now. */
    stack_size += gtid * __kmp_stkoffset;

    KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
    status = pthread_attr_setstacksize( & thread_attr, stack_size );
#  ifdef KMP_BACKUP_STKSIZE
    if ( status != 0 ) {
        if ( ! __kmp_env_stksize ) {
            stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
            __kmp_stksize = KMP_BACKUP_STKSIZE;
            KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                            "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                            "bytes\n",
                            gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                      );
            status = pthread_attr_setstacksize( &thread_attr, stack_size );
        }
    }
#  endif /* KMP_BACKUP_STKSIZE */
    if ( status != 0 ) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( CantSetWorkerStackSize, stack_size ),
            KMP_ERR( status ),
            KMP_HNT( ChangeWorkerStackSize ),
            __kmp_msg_null
        );
    }
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

    status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
    if ( status != 0 || ! handle ) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
        if ( status == EINVAL ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( IncreaseWorkerStackSize ),
                __kmp_msg_null
            );
        }
        if ( status == ENOMEM ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseWorkerStackSize ),
                __kmp_msg_null
            );
        }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForWorkerThread ),
                KMP_ERR( status ),
                KMP_HNT( Decrease_NUM_THREADS ),
                __kmp_msg_null
            );
        }
        KMP_SYSFAIL( "pthread_create", status );
    }

    th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
    status = pthread_attr_destroy( & thread_attr );
    if ( status != 0 ) {
        __kmp_msg( kmp_ms_warning, KMP_MSG( CantDestroyThreadAttrs ),
          KMP_ERR( status ), __kmp_msg_null );
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_create_worker: done creating thread (%d)\n", gtid ) );

} // __kmp_create_worker
void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t       handle;
    pthread_attr_t  thread_attr;
    size_t          size;
    int             status;
    int             caller_gtid = __kmp_get_gtid();
    int             auto_adj_size = FALSE;

    KA_TRACE( 10, ( "__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
    #if KMP_REAL_TIME_FIX
        TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later.
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        if ( __kmp_monitor_stksize == 0 ) {
            __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
            auto_adj_size = TRUE;
        }
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg( kmp_ms_fatal, KMP_MSG( CantInitThreadAttrs ),
              KMP_ERR( status ), __kmp_msg_null );
        }
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg( kmp_ms_fatal, KMP_MSG( CantSetMonitorState ),
              KMP_ERR( status ), __kmp_msg_null );
        }

        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            status = pthread_attr_getstacksize( & thread_attr, & size );
            KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
        #else
            size = __kmp_sys_min_stksize;
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    #endif /* KMP_THREAD_ATTR */

    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes, "
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:

    /* Set stack size for this thread now. */

    #ifdef _POSIX_THREAD_ATTR_STACKSIZE
        KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
                        __kmp_monitor_stksize ) );
        status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
        if ( status != 0 ) {
            if ( auto_adj_size ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( ChangeMonitorStackSize ),
                __kmp_msg_null
            );
        }
    #endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                    __kmp_monitor_stksize *= 2;
                    goto retry;
                }
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForMonitorThread ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseNumberOfThreadsInUse ),
                __kmp_msg_null
            );
        }
        KMP_SYSFAIL( "pthread_create", status );
    }

    th->th.th_info.ds.ds_thread = handle;

    #if KMP_REAL_TIME_FIX
        // Wait until the monitor thread has really started.
        KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
        __kmp_wait_yield_4(
            (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
        );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        status = pthread_attr_destroy( & thread_attr );
        if ( status != 0 ) {
            __kmp_msg( kmp_ms_warning, KMP_MSG( CantDestroyThreadAttrs ),
              KMP_ERR( status ), __kmp_msg_null );
        }
    #endif

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

} // __kmp_create_monitor
void
__kmp_exit_thread( int exit_status )
{
    pthread_exit( (void *) exit_status );
} // __kmp_exit_thread
void
__kmp_reap_monitor( kmp_info_t *th )
{
    int    status;
    void  *exit_val;

    KA_TRACE( 10, ( "__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
                    th->th.th_info.ds.ds_thread ) );

    // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
    // If both are KMP_GTID_DNE, the monitor has already been shut down.
    KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
    if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
        return;
    }

    KMP_MB();       /* Flush all pending memory write invalidates. */

    /* First, check to see whether the monitor thread exists.  This could prevent a hang,
       but if the monitor dies between the pthread_kill and the pthread_join calls, it
       will still hang. */
    status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
    if (status == ESRCH) {
        KA_TRACE( 10, ( "__kmp_reap_monitor: monitor does not exist, returning\n") );
    } else {
        status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
        if (exit_val != th) {
            __kmp_msg( kmp_ms_fatal, KMP_MSG( ReapMonitorError ),
              KMP_ERR( status ), __kmp_msg_null );
        }
    }

    th->th.th_info.ds.ds_tid  = KMP_GTID_DNE;
    th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

    KA_TRACE( 10, ( "__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
                    th->th.th_info.ds.ds_thread ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}
void
__kmp_reap_worker( kmp_info_t *th )
{
    int    status;
    void  *exit_val;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );

    /* First, check to see whether the worker thread exists.  This could prevent a hang,
       but if the worker dies between the pthread_kill and the pthread_join calls, it
       will still hang. */
    status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
    if (status == ESRCH) {
        KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d does not exist, returning\n",
                        th->th.th_info.ds.ds_gtid ) );
    }
    else {
        KA_TRACE( 10, ( "__kmp_reap_worker: try to join with worker T#%d\n",
                        th->th.th_info.ds.ds_gtid ) );
        status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
#ifdef KMP_DEBUG
        if ( status != 0 ) {
            __kmp_msg( kmp_ms_fatal, KMP_MSG( ReapWorkerError ),
              KMP_ERR( status ), __kmp_msg_null );
        }
        if ( exit_val != th ) {
            KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
                            "exit_val = %p\n",
                            th->th.th_info.ds.ds_gtid, exit_val ) );
        }
#endif /* KMP_DEBUG */
    }

    KA_TRACE( 10, ( "__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void
__kmp_null_handler( int signo )
{
    //  Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler, let's shut down all of the threads */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
                case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif
                break;
        } // switch
    } // if
} // __kmp_team_handler

static
void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
    int rc = sigaction( signum, act, oldact );
    KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
}

static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB();       // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags   = 0;
        sigfillset( & new_action.sa_mask );
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            sigaddset( & __kmp_sigset, sig );
        } else {
            // Restore/keep user's handler if one was previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }
    } else {
        // Save initial/system signal handlers to see if user handlers are installed later.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }
    KMP_MB();       // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void
__kmp_remove_one_handler( int sig )
{
    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
    if ( sigismember( & __kmp_sigset, sig ) ) {
        struct sigaction old;
        KMP_MB();       // Flush all pending memory write invalidates.
        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
            // Restore the user's signal handler.
            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
            __kmp_sigaction( sig, & old, NULL );
        }
        sigdelset( & __kmp_sigset, sig );
        KMP_MB();       // Flush all pending memory write invalidates.
    }
} // __kmp_remove_one_handler

void
__kmp_install_signals( int parallel_init )
{
    KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
    if ( __kmp_handle_signals || ! parallel_init ) {
        // If ! parallel_init, we do not install handlers, just save the original handlers.
        sigemptyset( & __kmp_sigset );
        __kmp_install_one_handler( SIGHUP,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGINT,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGILL,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGFPE,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGBUS,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
        #ifdef SIGSYS
            __kmp_install_one_handler( SIGSYS,  __kmp_team_handler, parallel_init );
        #endif // SIGSYS
        __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
        #ifdef SIGPIPE
            __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
        #endif // SIGPIPE
    }
} // __kmp_install_signals

void
__kmp_remove_signals( void )
{
    int sig;
    KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
    for ( sig = 1; sig < NSIG; ++ sig ) {
        __kmp_remove_one_handler( sig );
    }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS
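/*
   Signal-chaining invariant: __kmp_sigset records exactly the signals whose
   handlers the library owns.  Installation first snapshots the original
   handlers into __kmp_sighldrs (the parallel_init == 0 pass); on the real
   pass a handler is kept only if nobody replaced the snapshot in the
   meantime, otherwise the user's handler is put back.  Removal performs the
   symmetric check, so a handler installed by the user *after* the library
   is preserved.
*/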
void
__kmp_enable( int new_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status, old_state;
        status = pthread_setcancelstate( new_state, & old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
        KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}

void
__kmp_disable( int * old_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status;
        status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}
static void
__kmp_atfork_prepare (void)
{
    /*  nothing to do  */
}

static void
__kmp_atfork_parent (void)
{
    /*  nothing to do  */
}

/*
    Reset the library so execution in the child starts "all over again" with
    clean data structures in initial states.  Don't worry about freeing memory
    allocated by parent, just abandon it to be safe.
*/
static void
__kmp_atfork_child (void)
{
    /* TODO make sure this is done right for nested/sibling */
    ++__kmp_fork_count;

    __kmp_init_runtime = FALSE;
    __kmp_init_monitor = 0;
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    TCW_4(__kmp_init_user_locks, FALSE);
    __kmp_user_lock_table.used = 0;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;

    TCW_4(__kmp_nth, 0);

    /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
       so threadprivate doesn't use stale data */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                    __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {
        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                            &(*__kmp_threadpriv_cache_list -> addr) ) );
            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;

    /* reset statically initialized locks */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );
}
void
__kmp_register_atfork(void) {
    if ( __kmp_need_register_atfork ) {
        int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
        KMP_CHECK_SYSFAIL( "pthread_atfork", status );
        __kmp_need_register_atfork = FALSE;
    }
}
void
__kmp_suspend_initialize( void )
{
    int status;
    status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_condattr_init( &__kmp_suspend_cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
}

static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects for this
           thread in this instance of the process */
        int status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    }
}

void
__kmp_suspend_uninitialize_thread( kmp_info_t *th )
{
    if(th->th.th_suspend_init_count > __kmp_fork_count) {
        /* this means we have initialized the suspension pthread objects for this
           thread in this instance of the process */
        int status;

        status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_cond_destroy", status );
        }
        status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_mutex_destroy", status );
        }
        --th->th.th_suspend_init_count;
        KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
    }
}
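/*
   The comparison of th_suspend_init_count against __kmp_fork_count makes the
   condition variable and mutex per-process state: __kmp_atfork_child bumps
   __kmp_fork_count in the child, so every thread lazily re-initializes its
   suspend objects instead of reusing handles inherited from the parent
   process.
*/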
/* This routine puts the calling thread to sleep after setting the
 * sleep bit for the indicated spin variable to true.
 */
void
__kmp_suspend( int th_gtid, volatile kmp_uint *spinner, kmp_uint checker )
{
    kmp_info_t *th = __kmp_threads[th_gtid];
    int status;
    kmp_uint old_spin;

    KF_TRACE( 30, ( "__kmp_suspend: T#%d enter for spin = %p\n", th_gtid, spinner ) );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    KF_TRACE( 10, ( "__kmp_suspend: T#%d setting sleep bit for spin(%p)\n",
                    th_gtid, spinner ) );

    old_spin = KMP_TEST_THEN_OR32( (volatile kmp_int32 *) spinner,
                                   KMP_BARRIER_SLEEP_STATE );

    KF_TRACE( 5, ( "__kmp_suspend: T#%d set sleep bit for spin(%p)==%d\n",
                   th_gtid, spinner, *spinner ) );

    if ( old_spin == checker ) {
        KMP_TEST_THEN_AND32( (volatile kmp_int32 *) spinner, ~(KMP_BARRIER_SLEEP_STATE) );

        KF_TRACE( 5, ( "__kmp_suspend: T#%d false alarm, reset sleep bit for spin(%p)\n",
                       th_gtid, spinner) );
    } else {

        /* Encapsulate in a loop as the documentation states that this may
         * "with low probability" return when the condition variable has
         * not been signaled or broadcast
         */
        int deactivated = FALSE;
        TCW_PTR(th->th.th_sleep_loc, spinner);
        while ( TCR_4( *spinner ) & KMP_BARRIER_SLEEP_STATE ) {
#ifdef DEBUG_SUSPEND
            char buffer[128];
            __kmp_suspend_count++;
            __kmp_print_cond( buffer, &th->th.th_suspend_cv );
            __kmp_printf( "__kmp_suspend: suspending T#%d: %s\n", th_gtid, buffer );
#endif

            //
            // Mark the thread as no longer active
            // (only in the first iteration of the loop).
            //
            if ( ! deactivated ) {
                th->th.th_active = FALSE;
                if ( th->th.th_active_in_pool ) {
                    th->th.th_active_in_pool = FALSE;
                    KMP_TEST_THEN_DEC32(
                      (kmp_int32 *) &__kmp_thread_pool_active_nth );
                    KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
                }
                deactivated = TRUE;
            }

#if USE_SUSPEND_TIMEOUT
            struct timespec  now;
            struct timeval   tval;
            int msecs;

            status = gettimeofday( &tval, NULL );
            KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
            TIMEVAL_TO_TIMESPEC( &tval, &now );

            msecs = (4*__kmp_dflt_blocktime) + 200;
            now.tv_sec  += msecs / 1000;
            now.tv_nsec += (msecs % 1000)*1000;

            KF_TRACE( 15, ( "__kmp_suspend: T#%d about to perform pthread_cond_timedwait\n",
                            th_gtid ) );
            status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
#else
            KF_TRACE( 15, ( "__kmp_suspend: T#%d about to perform pthread_cond_wait\n",
                            th_gtid ) );

            status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
#endif

            if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
                KMP_SYSFAIL( "pthread_cond_wait", status );
            }
#ifdef KMP_DEBUG
            if (status == ETIMEDOUT) {
                if ( (*spinner) & KMP_BARRIER_SLEEP_STATE ) {
                    KF_TRACE( 100, ( "__kmp_suspend: T#%d timeout wakeup\n", th_gtid ) );
                } else {
                    KF_TRACE( 2, ( "__kmp_suspend: T#%d timeout wakeup, sleep bit not set!\n",
                                   th_gtid ) );
                }
            } else if ( (*spinner) & KMP_BARRIER_SLEEP_STATE ) {
                KF_TRACE( 100, ( "__kmp_suspend: T#%d spurious wakeup\n", th_gtid ) );
            }
#endif
        } // while

        //
        // Mark the thread as active again
        // (if it was previously deactivated).
        //
        if ( deactivated ) {
            th->th.th_active = TRUE;
            if ( TCR_4(th->th.th_in_pool) ) {
                KMP_TEST_THEN_INC32(
                  (kmp_int32 *) &__kmp_thread_pool_active_nth );
                th->th.th_active_in_pool = TRUE;
            }
        }
    }

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv);
        __kmp_printf( "__kmp_suspend: T#%d has awakened: %s\n", th_gtid, buffer );
    }
#endif

    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

    KF_TRACE( 30, ( "__kmp_suspend: T#%d exit\n", th_gtid ) );
}
/* This routine signals the thread specified by target_gtid to wake up
 * after setting the sleep bit indicated by the spin argument to FALSE.
 * The target thread must already have called __kmp_suspend().
 */
void
__kmp_resume( int target_gtid, volatile kmp_uint *spin )
{
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;
    kmp_uint old_spin;

    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;

    KF_TRACE( 30, ( "__kmp_resume: T#%d wants to wakeup T#%d enter\n",
                    gtid, target_gtid ) );

    KMP_DEBUG_ASSERT( gtid != target_gtid );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
    if ( spin == NULL ) {
        spin = (volatile kmp_uint *)TCR_PTR(th->th.th_sleep_loc);
        if ( spin == NULL ) {
            KF_TRACE( 5, ( "__kmp_resume: T#%d exiting, thread T#%d already awake - spin(%p)\n",
                           gtid, target_gtid, spin ) );

            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
    }

    old_spin = KMP_TEST_THEN_AND32( (kmp_int32 volatile *) spin,
                                    ~( KMP_BARRIER_SLEEP_STATE ) );
    if ( ( old_spin & KMP_BARRIER_SLEEP_STATE ) == 0 ) {
        KF_TRACE( 5, ( "__kmp_resume: T#%d exiting, thread T#%d already awake - spin(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, spin, old_spin, *spin ) );

        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    TCW_PTR(th->th.th_sleep_loc, NULL);

    KF_TRACE( 5, ( "__kmp_resume: T#%d about to wakeup T#%d, reset sleep bit for spin(%p): "
                   "%u => %u\n",
                   gtid, target_gtid, spin, old_spin, *spin ) );

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif

    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}
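/*
   __kmp_resume mirrors the protocol in __kmp_suspend from the waker's side:
   with the target's suspend mutex held, it clears the sleep bit with an
   atomic AND and only signals the condition variable if the bit was actually
   set; both "already awake" exits unlock and return without signaling.
*/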
void
__kmp_yield( int cond )
{
    if (cond && __kmp_yielding_on) {
        sched_yield();
    }
}
void
__kmp_gtid_set_specific( int gtid )
{
    int status;
    KMP_ASSERT( __kmp_init_runtime );
    status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(gtid+1) );
    KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
}

int
__kmp_gtid_get_specific()
{
    int gtid;
    if ( !__kmp_init_runtime ) {
        KA_TRACE( 50, ( "__kmp_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
        return KMP_GTID_SHUTDOWN;
    }
    gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
    if ( gtid == 0 ) {
        gtid = KMP_GTID_DNE;
    }
    else {
        gtid--;   // stored value is gtid+1 so 0 means "not set"
    }
    KA_TRACE( 50, ( "__kmp_gtid_get_specific: key:%d gtid:%d\n",
                    __kmp_gtid_threadprivate_key, gtid ));
    return gtid;
}
double
__kmp_read_cpu_time( void )
{
    struct tms buffer;

    times( & buffer );

    return (buffer.tms_utime + buffer.tms_cutime) / (double) CLOCKS_PER_SEC;
}

int
__kmp_read_system_info( struct kmp_sys_info *info )
{
    int status;
    struct rusage r_usage;

    memset( info, 0, sizeof( *info ) );

    status = getrusage( RUSAGE_SELF, &r_usage);
    KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );

    info->maxrss  = r_usage.ru_maxrss;  // maximum resident set size
    info->minflt  = r_usage.ru_minflt;  // page faults serviced without I/O
    info->majflt  = r_usage.ru_majflt;  // page faults serviced with I/O
    info->nswap   = r_usage.ru_nswap;   // times the process was swapped out
    info->inblock = r_usage.ru_inblock; // block input operations
    info->oublock = r_usage.ru_oublock; // block output operations
    info->nvcsw   = r_usage.ru_nvcsw;   // voluntary context switches
    info->nivcsw  = r_usage.ru_nivcsw;  // involuntary context switches

    return (status != 0);
}
void
__kmp_read_system_time( double *delta )
{
    double          t_ns;
    struct timeval  tval;
    struct timespec stop;
    int status;

    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &stop );
    t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
    *delta = (t_ns * 1e-9);
}

void
__kmp_clear_system_time( void )
{
    struct timeval tval;
    int status;
    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
}
void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    p->type = (void *) 1;

    p->next = th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    if ( p->next == 0 ) {
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}
static int
__kmp_get_xproc( void ) {

    int r = 0;

#if KMP_OS_LINUX

    r = sysconf( _SC_NPROCESSORS_ONLN );

#elif KMP_OS_DARWIN

    // Find the number of available CPUs.
    kern_return_t          rc;
    host_basic_info_data_t info;
    mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
    rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
    if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
        // Cannot use KA_TRACE() here because this code works before trace
        // support is initialized.
        r = info.avail_cpus;
    } else {
        KMP_WARNING( CantGetNumAvailCPU );
        KMP_INFORM( AssumedNumCPU );
    }

#else

    #error "Unknown or unsupported OS."

#endif

    return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */

} // __kmp_get_xproc
int
__kmp_read_from_file( char const *path, char const *format, ... )
{
    int result;
    va_list args;

    va_start(args, format);
    FILE *f = fopen(path, "rb");
    if ( f == NULL ) {
        va_end(args);
        return 0;
    }
    result = vfscanf(f, format, args);
    va_end(args);
    fclose(f);

    return result;
}
void
__kmp_runtime_initialize( void )
{
    int status;
    pthread_mutexattr_t mutex_attr;
    pthread_condattr_t  cond_attr;

    if ( __kmp_init_runtime ) {
        return;
    }

#if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
    if ( ! __kmp_cpuinfo.initialized ) {
        __kmp_query_cpuid( &__kmp_cpuinfo );
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    __kmp_xproc = __kmp_get_xproc();

    if ( sysconf( _SC_THREADS ) ) {

        /* Query the maximum number of threads */
        __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
        if ( __kmp_sys_max_nth == -1 ) {
            /* Unlimited threads for NPTL */
            __kmp_sys_max_nth = INT_MAX;
        }
        else if ( __kmp_sys_max_nth <= 1 ) {
            /* Can't tell, just use PTHREAD_THREADS_MAX */
            __kmp_sys_max_nth = KMP_MAX_NTH;
        }

        /* Query the minimum stack size */
        __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
        if ( __kmp_sys_min_stksize <= 1 ) {
            __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
        }
    }

    /* Set up minimum number of threads to switch to TLS gtid */
    __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

#ifdef BUILD_TV
    {
        int rc = pthread_key_create( & __kmp_tv_key, 0 );
        KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
    }
#endif

    status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
    KMP_CHECK_SYSFAIL( "pthread_key_create", status );
    status = pthread_mutexattr_init( & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
    status = pthread_condattr_init( & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
    status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
#if USE_ITT_BUILD
    __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

    __kmp_init_runtime = TRUE;
}
void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    }

#if USE_ITT_BUILD
    __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
#ifdef BUILD_TV
    status = pthread_key_delete( __kmp_tv_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
#endif

    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
#if KMP_OS_LINUX
    __kmp_affinity_uninitialize();
#elif KMP_OS_DARWIN
    // affinity not supported
#else
    #error "Unknown or unsupported OS"
#endif

    __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period */
void
__kmp_thread_sleep( int millis )
{
    sleep( ( millis + 500 ) / 1000 );
}

/* Calculate the elapsed wall clock time for the user */
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    *t = (double) ts.tv_nsec * (1.0 / (double) NSEC_PER_SEC) +
        (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    *t = (double) tv.tv_usec * (1.0 / (double) USEC_PER_SEC) +
        (double) tv.tv_sec;
# endif
}

/* Calculate the elapsed wall clock tick for the user */
void
__kmp_elapsed_tick( double *t )
{
    *t = 1 / (double) CLOCKS_PER_SEC;
}
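/*
   The advertised tick of 1/CLOCKS_PER_SEC is consistent with the
   gettimeofday()-based __kmp_elapsed above: POSIX fixes CLOCKS_PER_SEC at
   1000000, so the reported resolution is one microsecond, the granularity
   of struct timeval.
*/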
int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;

#if KMP_OS_LINUX

    /* On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the
       address ranges mapped into the address space. */

    char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
    FILE * file = fopen( name, "r" );
    KMP_ASSERT( file != NULL );

    for ( ; ; ) {
        void * beginning = NULL;
        void * ending    = NULL;
        char   perms[ 5 ];

        int rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
        if ( rc == EOF ) {
            break;
        }
        KMP_ASSERT( rc == 3 && strlen( perms ) == 4 ); // Make sure all fields are read.

        // Ending address is not included in the region, but beginning is.
        if ( ( addr >= beginning ) && ( addr < ending ) ) {
            perms[ 2 ] = 0;    // 3rd and 4th characters do not matter.
            if ( strcmp( perms, "rw" ) == 0 ) {
                // Memory we are looking for should be readable and writable.
                found = 1;
            }
            break;
        }
    }

    fclose( file );
    KMP_INTERNAL_FREE( name );

#elif KMP_OS_DARWIN

    /* On OS X*, /proc is not available; probe the address with vm_read_overwrite. */
    int buffer;
    vm_size_t count;
    int rc = vm_read_overwrite(
        mach_task_self(),           // Task to read memory of.
        (vm_address_t)( addr ),     // Address to read from.
        1,                          // Number of bytes to be read.
        (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
        & count                     // Address of var to save number of read bytes in.
    );
    found = ( rc == 0 );

#else

    #error "Unknown or unsupported OS"

#endif

    return found;

} // __kmp_is_address_mapped
#ifdef USE_LOAD_BALANCE

# if KMP_OS_DARWIN

// Returns the rounded system load average over the interval selected by
// __kmp_load_balance_interval (the 1, 5 or 15 minute average), or -1 on error.
int
__kmp_get_load_balance( int max )
{
    double averages[3];
    int ret_avg = 0;

    int res = getloadavg( averages, 3 );

    // getloadavg() may return fewer samples than the 3 requested.
    if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
        ret_avg = averages[0]; // 1 min
    } else if ( ( __kmp_load_balance_interval >= 180
                  && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
        ret_avg = averages[1]; // 5 min
    } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
        ret_avg = averages[2]; // 15 min
    } else { // error occurred
        return -1;
    }

    return ret_avg;
}

# else // Linux* OS
// The function counts the number of running (not sleeping) threads in the
// system by scanning the /proc file system.  On failure it returns -1 and
// latches permanent_error so the expensive scan is not retried.
int
__kmp_get_load_balance( int max )
{
    static int permanent_error = 0;

    static int    glb_running_threads = 0;  /* Saved count of running threads */
    static double glb_call_time = 0;        /* Thread balance algorithm call time */

    int running_threads = 0;              // Number of running threads in the system.

    DIR  *          proc_dir   = NULL;    // Handle of "/proc/" directory.
    struct dirent * proc_entry = NULL;    // One entry of "/proc/" directory.

    kmp_str_buf_t   task_path;            // "/proc/<pid>/task/" path.
    DIR  *          task_dir   = NULL;    // Handle of "/proc/<pid>/task/" directory.
    struct dirent * task_entry = NULL;    // One entry of "/proc/<pid>/task/" directory.
    int             task_path_fixed_len;

    kmp_str_buf_t   stat_path;            // "/proc/<pid>/task/<tid>/stat" path.
    int             stat_file = -1;
    int             stat_path_fixed_len;

    int total_processes = 0;              // Total number of processes in system.
    int total_threads   = 0;              // Total number of threads in system.

    double call_time = 0.0;

    __kmp_str_buf_init( & task_path );
    __kmp_str_buf_init( & stat_path );

    __kmp_elapsed( & call_time );

    if ( glb_call_time &&
            ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
        running_threads = glb_running_threads;
        goto finish;
    }

    glb_call_time = call_time;

    // Do not spend time scanning "/proc/" if we have a permanent error.
    if ( permanent_error ) {
        running_threads = -1;
        goto finish;
    }

    if ( max <= 0 ) {
        max = INT_MAX;
    }

    // Open "/proc/" directory.
    proc_dir = opendir( "/proc" );
    if ( proc_dir == NULL ) {
        // Cannot open "/proc/". Probably the kernel does not support it.
        // Return an error now and in subsequent calls.
        running_threads = -1;
        permanent_error = 1;
        goto finish;
    }

    // Initialize fixed part of task_path. This part will not change.
    __kmp_str_buf_cat( & task_path, "/proc/", 6 );
    task_path_fixed_len = task_path.used;    // Remember number of used characters.

    proc_entry = readdir( proc_dir );
    while ( proc_entry != NULL ) {
        // An entry that is a directory with a name starting with a digit is
        // assumed to be a process directory.
        if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {

            ++ total_processes;
            // Make sure init process is the very first in "/proc".
            KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );

            // Construct task_path.
            task_path.used = task_path_fixed_len;    // Reset task_path to "/proc/".
            __kmp_str_buf_cat( & task_path, proc_entry->d_name, strlen( proc_entry->d_name ) );
            __kmp_str_buf_cat( & task_path, "/task", 5 );

            task_dir = opendir( task_path.str );
            if ( task_dir == NULL ) {
                // A process can finish between reading its "/proc/" entry and
                // opening its "task/" directory, so in general do not complain.
                // But the "init" process (pid 1) should always exist, so if we
                // cannot open "/proc/1/task/", "task/" is not supported by the
                // kernel; report an error now and in the future.
                if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
                    running_threads = -1;
                    permanent_error = 1;
                    goto finish;
                }
            } else {
                // Construct fixed part of stat file path.
                __kmp_str_buf_clear( & stat_path );
                __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
                __kmp_str_buf_cat( & stat_path, "/", 1 );
                stat_path_fixed_len = stat_path.used;

                task_entry = readdir( task_dir );
                while ( task_entry != NULL ) {
                    // It is a directory and its name starts with a digit.
                    if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {

                        ++ total_threads;

                        // Construct complete stat file path. A series of
                        // __kmp_str_buf_cat calls is a bit faster than
                        // __kmp_str_buf_print.
                        stat_path.used = stat_path_fixed_len;    // Reset to fixed part.
                        __kmp_str_buf_cat( & stat_path, task_entry->d_name, strlen( task_entry->d_name ) );
                        __kmp_str_buf_cat( & stat_path, "/stat", 5 );

                        // Note: the low-level open/read/close API is used; the
                        // high-level fopen/fclose is noticeably slower here.
                        stat_file = open( stat_path.str, O_RDONLY );
                        if ( stat_file == -1 ) {
                            // Cannot report an error: the task (thread) may have
                            // terminated just before reading this file.
                        } else {
                            /* Content of "stat" file looks like:
                                   24285 (program name) S ...
                               The state field is easiest to find by searching for
                               the parenthesis closing the (possibly space-containing)
                               program name. */
                            char buffer[ 256 ];
                            int len;
                            len = read( stat_file, buffer, sizeof( buffer ) - 1 );
                            if ( len >= 0 ) {
                                buffer[ len ] = 0;
                                char * close_parent = strstr( buffer, ") " );
                                if ( close_parent != NULL ) {
                                    char state = * ( close_parent + 2 );
                                    if ( state == 'R' ) {
                                        ++ running_threads;
                                        if ( running_threads >= max ) {
                                            goto finish;
                                        }
                                    }
                                }
                            }
                            close( stat_file );
                            stat_file = -1;
                        }
                    }
                    task_entry = readdir( task_dir );
                }
                closedir( task_dir );
                task_dir = NULL;
            }
        }
        proc_entry = readdir( proc_dir );
    }

    // There _might_ be a timing hole where the thread executing this
    // code gets skipped in the load balance, and running_threads is 0.
    // Assert in the debug builds only!!!
    KMP_DEBUG_ASSERT( running_threads > 0 );
    if ( running_threads <= 0 ) {
        running_threads = 1;
    }

    finish: // Clean up and exit.
        if ( proc_dir != NULL ) {
            closedir( proc_dir );
        }
        __kmp_str_buf_free( & task_path );
        if ( task_dir != NULL ) {
            closedir( task_dir );
        }
        __kmp_str_buf_free( & stat_path );
        if ( stat_file != -1 ) {
            close( stat_file );
        }

    glb_running_threads = running_threads;

    return running_threads;

} // __kmp_get_load_balance

# endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE
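/*
   Load-balance notes for the Linux* OS version above: a thread counts as
   "running" when the state field of /proc/<pid>/task/<tid>/stat is 'R'.
   Results are cached in glb_running_threads and reused for
   __kmp_load_balance_interval seconds, and permanent_error latches the
   first unrecoverable /proc failure so the directory walk is never
   attempted again.
*/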
#if KMP_COMPILER_GCC && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

int __kmp_invoke_microtask( microtask_t pkfn, int gtid, int tid, int argc,
                            void *p_argv[] )
{
    int argc_full = argc + 2;
    int i;
    ffi_cif cif;
    ffi_type *types[argc_full];
    void *args[argc_full];
    void *idp[2];

    /* We're only passing pointers to the target. */
    for (i = 0; i < argc_full; i++)
        types[i] = &ffi_type_pointer;

    /* First two arguments are gtid and tid */
    idp[0] = &gtid;
    idp[1] = &tid;
    args[0] = &idp[0];
    args[1] = &idp[1];

    for (i = 0; i < argc; i++)
        args[2 + i] = &p_argv[i];

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, argc_full,
                     &ffi_type_void, types) != FFI_OK)
        abort();

    ffi_call(&cif, (void (*)(void))pkfn, NULL, args);

    return 1;
}

#endif // KMP_COMPILER_GCC && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
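/*
   On architectures without hand-written __kmp_invoke_microtask assembly,
   libffi builds the call frame at run time: every argument is passed as a
   pointer, slots 0 and 1 carry &gtid and &tid, and the remaining slots point
   into p_argv.  ffi_prep_cif/ffi_call are the standard libffi entry points
   for describing and performing such a call.
*/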