#include <stddef.h>     // offsetof()

#include "kmp_os.h"     // kmp_int32, kmp_uint32, KMP_ALIGN_CACHE, ...
#include "kmp_debug.h"

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
// Rounds sizeof(type) up to a multiple of sz (used to pad locks out to a full cache line).
#define KMP_PAD(type, sz)     (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))

// gtid value meaning "does not exist"; used when a lock is taken before a
// global thread id is available (e.g. bootstrap locks during initialization).
#define KMP_GTID_DNE (-2)
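//
// Worked example of KMP_PAD (illustrative only; T40, T64 and T65 are
// hypothetical types of 40, 64 and 65 bytes and 64 is the assumed cache line):
//
//    KMP_PAD(T40, 64) == 40 + (64 - (39 % 64) - 1) == 40 + 24 ==  64
//    KMP_PAD(T64, 64) == 64 + (64 - (63 % 64) - 1) == 64 +  0 ==  64
//    KMP_PAD(T65, 64) == 65 + (64 - ( 0 % 64) - 1) == 65 + 63 == 128
//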
#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
# define OMP_LOCK_T_SIZE        sizeof(int)
# define OMP_NEST_LOCK_T_SIZE   sizeof(void *)
#else
# define OMP_LOCK_T_SIZE        sizeof(void *)
# define OMP_NEST_LOCK_T_SIZE   sizeof(void *)
#endif
// Size of the area the compiler reserves for a critical section name.
#define OMP_CRITICAL_SIZE       sizeof(void *)
#define INTEL_CRITICAL_SIZE     32

// Flag bits stored in a lock (e.g. "this lock backs a critical section").
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

// Index into the user lock table.
typedef kmp_uint32 kmp_lock_index_t;
// Entry used to link a free lock into the lock pool.
struct kmp_lock_pool {
    union kmp_user_lock *next;
    kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;
extern void __kmp_validate_locks( void );
// Test-and-set locks.
struct kmp_base_tas_lock {
    volatile kmp_int32 poll;         // 0 => unlocked; locked: (gtid+1) of owning thread
    kmp_int32          depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;
union kmp_tas_lock {
    kmp_base_tas_lock_t lk;
    kmp_lock_pool_t pool;   // make certain struct is large enough
    double lk_align;        // use worst case alignment; no cache line padding
};
typedef union kmp_tas_lock kmp_tas_lock_t;
//
// Static initializer for test and set lock variables. Usage:
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
//
#define KMP_TAS_LOCK_INITIALIZER( lock ) { { 0, 0 } }
extern void __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );

extern void __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck );
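//
// Usage sketch (illustrative only, not part of the runtime API; the lock and
// counter names below are hypothetical):
//
//    static kmp_tas_lock_t tas_lock = KMP_TAS_LOCK_INITIALIZER( tas_lock );
//    static int shared_counter = 0;
//
//    void increment( kmp_int32 gtid ) {
//        __kmp_acquire_tas_lock( &tas_lock, gtid );  // spins until acquired
//        ++shared_counter;                           // protected update
//        __kmp_release_tas_lock( &tas_lock, gtid );
//    }
//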
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)

// futex locks.  futex locks are only available on Linux* OS.
struct kmp_base_futex_lock {
    volatile kmp_int32 poll;         // 0 => unlocked; otherwise encodes the owner
                                     // (and a waiter bit) of the lock
    kmp_int32          depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;
union kmp_futex_lock {
    kmp_base_futex_lock_t lk;
    kmp_lock_pool_t pool;   // make certain struct is large enough
    double lk_align;        // use worst case alignment; no cache line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;
//
// Static initializer for futex lock variables. Usage:
//    kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
//
#define KMP_FUTEX_LOCK_INITIALIZER( lock ) { { 0, 0 } }
extern void __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );

extern void __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );
#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
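//
// Nested-lock usage sketch (illustrative only; Linux*-only since it relies on
// the futex lock above, and the function and lock names are hypothetical):
//
//    static kmp_futex_lock_t nest_lock;   // set up elsewhere with
//                                         // __kmp_init_nested_futex_lock()
//
//    void recurse( kmp_int32 gtid, int depth ) {
//        __kmp_acquire_nested_futex_lock( &nest_lock, gtid );  // re-entrant for the owner
//        if ( depth > 0 )
//            recurse( gtid, depth - 1 );
//        __kmp_release_nested_futex_lock( &nest_lock, gtid );  // one release per acquire
//    }
//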
// Ticket locks.
struct kmp_base_ticket_lock {
    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_ticket_lock * initialized;  // points to the lock union if in initialized state
    ident_t const *     location;     // source code location of omp_init_lock()
    volatile kmp_uint32 next_ticket;  // ticket number to give to next thread which acquires
    volatile kmp_uint32 now_serving;  // ticket number for thread which holds the lock
    volatile kmp_int32  owner_id;     // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32           depth_locked; // depth locked, for nested locks only
    kmp_lock_flags_t    flags;        // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;
union KMP_ALIGN_CACHE kmp_ticket_lock {
    kmp_base_ticket_lock_t lk;       // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double                 lk_align; // use worst case alignment
    char                   lk_pad[ KMP_PAD( kmp_base_ticket_lock_t, CACHE_LINE ) ];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;
//
// Static initializer for simple ticket lock variables. Usage:
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument: it is needed to initialize the `initialized' field.
//
#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { (kmp_ticket_lock_t *) & (lock), NULL, 0, 0, 0, -1 } }
extern void __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );

extern void __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
extern void __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck );
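//
// Usage sketch (illustrative only; the variable name xlock follows the
// initializer comment above and is not part of the API):
//
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
//
//    void critical_region( kmp_int32 gtid ) {
//        __kmp_acquire_ticket_lock( &xlock, gtid );  // FIFO: served in ticket order
//        /* ... protected work ... */
//        __kmp_release_ticket_lock( &xlock, gtid );
//    }
//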
#if KMP_USE_ADAPTIVE_LOCKS

// Adaptive (speculative) locks.
struct kmp_adaptive_lock;
typedef struct kmp_adaptive_lock kmp_adaptive_lock_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS
struct kmp_adaptive_lock_statistics {
    // So we can get stats from locks that haven't been destroyed.
    kmp_adaptive_lock_t * next;
    kmp_adaptive_lock_t * prev;

    // Speculation counters.
    kmp_uint32 successfulSpeculations;
    kmp_uint32 hardFailedSpeculations;
    kmp_uint32 softFailedSpeculations;
    kmp_uint32 nonSpeculativeAcquires;
    kmp_uint32 nonSpeculativeAcquireAttempts;
    kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;
extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();
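//
// Minimal usage sketch (an assumption about ordering, not a description of
// the runtime's actual call sites):
//
//    __kmp_init_speculative_stats();    // once, before adaptive locks are used
//    /* ... adaptive locks run and accumulate statistics ... */
//    __kmp_print_speculative_stats();   // dump the aggregated counters
//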
#endif // KMP_DEBUG_ADAPTIVE_LOCKS
struct kmp_adaptive_lock
{
    // Values used for adaptivity.  These are accessed from multiple threads
    // but not atomically; missing an occasional update does not matter much.
    kmp_uint32 volatile badness;
    kmp_uint32 volatile acquire_attempts;
    // Parameters of the lock.
    kmp_uint32          max_badness;
    kmp_uint32          max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
    kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS
// Queuing locks.
struct kmp_base_queuing_lock {
    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_queuing_lock *initialized; // points to the lock union if in initialized state
    ident_t const *     location;     // source code location of omp_init_lock()
    KMP_ALIGN( 8 )                    // tail_id must be 8-byte aligned!
    volatile kmp_int32  tail_id;      // (gtid+1) of thread at end of wait queue, 0 if empty
                                      // no padding allowed here: head/tail used in an 8-byte CAS
    volatile kmp_int32  head_id;      // (gtid+1) of thread at head of wait queue, 0 if empty
    volatile kmp_uint32 next_ticket;  // ticket number to give to next thread which acquires
    volatile kmp_uint32 now_serving;  // ticket number for thread which holds the lock
    volatile kmp_int32  owner_id;     // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32           depth_locked; // depth locked, for nested locks only
    kmp_lock_flags_t    flags;        // lock specifics, e.g. critical section lock
#if KMP_USE_ADAPTIVE_LOCKS
    KMP_ALIGN(CACHE_LINE)
    kmp_adaptive_lock_t adaptive;     // information for the speculative adaptive lock
#endif
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;
KMP_BUILD_ASSERT( offsetof( kmp_base_queuing_lock_t, tail_id ) % 8 == 0 );
union KMP_ALIGN_CACHE kmp_queuing_lock {
    kmp_base_queuing_lock_t lk;       // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double                  lk_align; // use worst case alignment
    char                    lk_pad[ KMP_PAD( kmp_base_queuing_lock_t, CACHE_LINE ) ];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;
extern void __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );

extern void __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck );
// DRDPA ticket locks (ticket locks with a Dynamically Reconfigurable
// Distributed Polling Area).
struct kmp_base_drdpa_lock {
    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_drdpa_lock * initialized;    // points to the lock union if in initialized state
    ident_t const *                 location;       // source code location of omp_init_lock()
    volatile struct kmp_lock_poll {
        kmp_uint64 poll;
    } * volatile                    polls;          // polling area; one slot per ticket, reconfigurable
    volatile kmp_uint64             mask;           // is 2**num_polls-1 for mod op
    kmp_uint64                      cleanup_ticket; // thread with cleanup ticket
    volatile struct kmp_lock_poll * old_polls;      // old polling area awaiting deallocation
    kmp_uint32                      num_polls;      // must be power of 2
    KMP_ALIGN_CACHE                                 // next_ticket lives on its own cache line
    volatile kmp_uint64             next_ticket;
    kmp_uint64                      now_serving;    // doesn't have to be volatile
    volatile kmp_uint32             owner_id;       // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32                       depth_locked;   // depth locked, for nested locks only
    kmp_lock_flags_t                flags;          // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;
union KMP_ALIGN_CACHE kmp_drdpa_lock {
    kmp_base_drdpa_lock_t lk;       // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double                lk_align; // use worst case alignment
    char                  lk_pad[ KMP_PAD( kmp_base_drdpa_lock_t, CACHE_LINE ) ];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
extern void __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );

extern void __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
// Lock purposes.

// Bootstrap locks: a few locks used at library initialization/finalization.
// They may be used before gtids exist, hence the KMP_GTID_DNE argument below.
typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER( lock ) KMP_TICKET_LOCK_INITIALIZER( (lock) )
static inline void
__kmp_acquire_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
}

static inline int
__kmp_test_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    return __kmp_test_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_release_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_release_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_init_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}
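//
// Usage sketch (illustrative only; __kmp_example_init_lock is a hypothetical
// bootstrap lock, not one defined by the runtime):
//
//    static kmp_bootstrap_lock_t __kmp_example_init_lock =
//        KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_example_init_lock );
//
//    __kmp_acquire_bootstrap_lock( &__kmp_example_init_lock );
//    /* ... one-time initialization, possibly before gtids exist ... */
//    __kmp_release_bootstrap_lock( &__kmp_example_init_lock );
//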
// Internal RTL locks are also implemented as ticket locks, but take the real
// gtid of the acquiring thread.
typedef kmp_ticket_lock_t kmp_lock_t;
static inline void
__kmp_acquire_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_acquire_ticket_lock( lck, gtid );
}

static inline int
__kmp_test_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    return __kmp_test_ticket_lock( lck, gtid );
}

static inline void
__kmp_release_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_release_ticket_lock( lck, gtid );
}

static inline void
__kmp_init_lock( kmp_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_lock( kmp_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}
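//
// Usage sketch (illustrative only; the lock variable and the source of gtid
// are hypothetical):
//
//    kmp_lock_t lock;
//    __kmp_init_lock( &lock );
//    if ( ! __kmp_test_lock( &lock, gtid ) ) {  // non-blocking attempt first
//        __kmp_acquire_lock( &lock, gtid );     // fall back to blocking acquire
//    }
//    /* ... protected work ... */
//    __kmp_release_lock( &lock, gtid );
//    __kmp_destroy_lock( &lock );
//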
// User locks.
enum kmp_lock_kind {
    lk_default = 0,
    lk_tas,
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    lk_futex,
#endif
    lk_ticket,
    lk_queuing,
    lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
    lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;
// Storage is allocated according to the actual lock kind; objects of this
// union type are accessed through kmp_user_lock_p.
union kmp_user_lock {
    kmp_tas_lock_t     tas;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)
    kmp_futex_lock_t   futex;
#endif
    kmp_ticket_lock_t  ticket;
    kmp_queuing_lock_t queuing;
    kmp_drdpa_lock_t   drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
    kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
    kmp_lock_pool_t    pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;
extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;
// Function-pointer "vtable" for the selected user lock kind; the pointers are
// filled in by __kmp_set_user_lock_vptrs() below.
extern kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck );

static inline kmp_int32
__kmp_get_user_lock_owner( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_get_user_lock_owner_ != NULL );
    return ( *__kmp_get_user_lock_owner_ )( lck );
}
extern void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)

#define __kmp_acquire_user_lock_with_checks(lck,gtid)                                           \
    if (__kmp_user_lock_kind == lk_tas) {                                                       \
        if ( __kmp_env_consistency_check ) {                                                    \
            char const * const func = "omp_set_lock";                                           \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )                               \
                && lck->tas.lk.depth_locked != -1 ) {                                           \
                KMP_FATAL( LockNestableUsedAsSimple, func );                                    \
            }                                                                                   \
            if ( ( gtid >= 0 ) && ( lck->tas.lk.poll - 1 == gtid ) ) {                          \
                KMP_FATAL( LockIsAlreadyOwned, func );                                          \
            }                                                                                   \
        }                                                                                       \
        if ( ( lck->tas.lk.poll != 0 ) ||                                                       \
          ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {             \
            kmp_uint32 spins;                                                                   \
            KMP_FSYNC_PREPARE( lck );                                                           \
            KMP_INIT_YIELD( spins );                                                            \
            if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) {     \
                KMP_YIELD( TRUE );                                                              \
            } else {                                                                            \
                KMP_YIELD_SPIN( spins );                                                        \
            }                                                                                   \
            while ( ( lck->tas.lk.poll != 0 ) ||                                                \
              ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {         \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                    KMP_YIELD( TRUE );                                                          \
                } else {                                                                        \
                    KMP_YIELD_SPIN( spins );                                                    \
                }                                                                               \
            }                                                                                   \
        }                                                                                       \
        KMP_FSYNC_ACQUIRED( lck );                                                              \
    } else {                                                                                    \
        KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );                       \
        ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );                                 \
    }
#else
static inline void
__kmp_acquire_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );
    ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
}
#endif
extern int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)

#include "kmp_i18n.h"                       // KMP_FATAL definition
extern int __kmp_env_consistency_check;     // copy of the flag declared in kmp.h
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked != -1 ) {
                KMP_FATAL( LockNestableUsedAsSimple, func );
            }
        }
        return ( ( lck->tas.lk.poll == 0 ) &&
          KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
}
#endif
extern void ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline void
__kmp_release_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_user_lock_with_checks_ != NULL );
    ( *__kmp_release_user_lock_with_checks_ ) ( lck, gtid );
}
extern void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_user_lock_with_checks_ != NULL );
    ( *__kmp_init_user_lock_with_checks_ )( lck );
}
// A non-checking destroy is needed when the RTL does cleanup, since it cannot
// always tell whether a lock is nested or not.
extern void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_ != NULL );
    ( *__kmp_destroy_user_lock_ )( lck );
}
extern void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_user_lock_with_checks_ )( lck );
}
extern void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid)                                    \
    if (__kmp_user_lock_kind == lk_tas) {                                                       \
        if ( __kmp_env_consistency_check ) {                                                    \
            char const * const func = "omp_set_nest_lock";                                      \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )                          \
                && lck->tas.lk.depth_locked == -1 ) {                                           \
                KMP_FATAL( LockSimpleUsedAsNestable, func );                                    \
            }                                                                                   \
        }                                                                                       \
        if ( lck->tas.lk.poll - 1 == gtid ) {                                                   \
            lck->tas.lk.depth_locked += 1;                                                      \
        } else {                                                                                \
            if ( ( lck->tas.lk.poll != 0 ) ||                                                   \
              ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {         \
                kmp_uint32 spins;                                                               \
                KMP_FSYNC_PREPARE( lck );                                                       \
                KMP_INIT_YIELD( spins );                                                        \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                    KMP_YIELD( TRUE );                                                          \
                } else {                                                                        \
                    KMP_YIELD_SPIN( spins );                                                    \
                }                                                                               \
                while ( ( lck->tas.lk.poll != 0 ) ||                                            \
                  ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {     \
                    if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                        KMP_YIELD( TRUE );                                                      \
                    } else {                                                                    \
                        KMP_YIELD_SPIN( spins );                                                \
                    }                                                                           \
                }                                                                               \
            }                                                                                   \
            lck->tas.lk.depth_locked = 1;                                                       \
        }                                                                                       \
        KMP_FSYNC_ACQUIRED( lck );                                                              \
    } else {                                                                                    \
        KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );                \
        ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );                          \
    }
#else
static inline void
__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif
extern int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        int retval;
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_nest_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked == -1 ) {
                KMP_FATAL( LockSimpleUsedAsNestable, func );
            }
        }
        KMP_DEBUG_ASSERT( gtid >= 0 );
        if ( lck->tas.lk.poll - 1 == gtid ) {   // caller already owns the lock
            return ++lck->tas.lk.depth_locked;  // same owner, depth increased
        }
        retval = ( ( lck->tas.lk.poll == 0 ) &&
          KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
        if ( retval ) {
            KMP_MB();
            lck->tas.lk.depth_locked = 1;
        }
        return retval;
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif
extern void ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline void
__kmp_release_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
}
extern void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_init_nested_user_lock_with_checks_ )( lck );
}
extern void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_nested_user_lock_with_checks_ )( lck );
}
// User lock functions which do not necessarily exist for all lock kinds;
// the inline wrappers below check the function pointer for NULL first.
extern int ( *__kmp_is_user_lock_initialized_ )( kmp_user_lock_p lck );
extern const ident_t * ( *__kmp_get_user_lock_location_ )( kmp_user_lock_p lck );

static inline const ident_t *
__kmp_get_user_lock_location( kmp_user_lock_p lck )
{
    if ( __kmp_get_user_lock_location_ != NULL ) {
        return ( *__kmp_get_user_lock_location_ )( lck );
    }
    else {
        return NULL;
    }
}
extern void ( *__kmp_set_user_lock_location_ )( kmp_user_lock_p lck, const ident_t *loc );

static inline void
__kmp_set_user_lock_location( kmp_user_lock_p lck, const ident_t *loc )
{
    if ( __kmp_set_user_lock_location_ != NULL ) {
        ( *__kmp_set_user_lock_location_ )( lck, loc );
    }
}
extern kmp_lock_flags_t ( *__kmp_get_user_lock_flags_ )( kmp_user_lock_p lck );

extern void ( *__kmp_set_user_lock_flags_ )( kmp_user_lock_p lck, kmp_lock_flags_t flags );

static inline void
__kmp_set_user_lock_flags( kmp_user_lock_p lck, kmp_lock_flags_t flags )
{
    if ( __kmp_set_user_lock_flags_ != NULL ) {
        ( *__kmp_set_user_lock_flags_ )( lck, flags );
    }
}
// Sets up the lock "vtable": points the function pointers above at the
// implementation routines for the given lock kind.
extern void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind );
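//
// Dispatch sketch (illustrative only; the real initialization happens inside
// the runtime, lck is assumed to be an already-initialized kmp_user_lock_p,
// and gtid is assumed to come from the caller):
//
//    __kmp_user_lock_kind = lk_ticket;         // e.g. selected via KMP_LOCK_KIND
//    __kmp_set_user_lock_vptrs( lk_ticket );   // fills in the function pointers
//    ...
//    __kmp_acquire_user_lock_with_checks( lck, gtid );  // now dispatches to the
//    __kmp_release_user_lock_with_checks( lck, gtid );  // ticket lock routines
//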
// The user lock table maps user lock handles to lock objects when the lock
// does not fit inside the user's lock variable (see __kmp_user_lock_allocate
// below).
struct kmp_lock_table {
    kmp_lock_index_t used;      // number of entries in use
    kmp_lock_index_t allocated; // number of entries allocated
    kmp_user_lock_p * table;    // array of lock pointers
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;
// When locks are allocated in blocks, this is the chain of those blocks.
struct kmp_block_of_locks {
    struct kmp_block_of_locks * next_block;
    void *                      locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;
extern kmp_user_lock_p __kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid, kmp_lock_flags_t flags );
extern void __kmp_user_lock_free( void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck );
extern kmp_user_lock_p __kmp_lookup_user_lock( void **user_lock, char const *func );
extern void __kmp_cleanup_user_locks();
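//
// Usage sketch (illustrative only; roughly what the omp_init_lock /
// omp_set_lock entry points do, with `ulock' standing in for the address of
// the user's omp_lock_t variable):
//
//    void **ulock = /* address of the user's omp_lock_t */;
//    kmp_user_lock_p lck;
//
//    KMP_CHECK_USER_LOCK_INIT();                              // see macro below
//    lck = __kmp_user_lock_allocate( ulock, gtid, 0 );        // init path
//    __kmp_init_user_lock_with_checks( lck );
//    ...
//    lck = __kmp_lookup_user_lock( ulock, "omp_set_lock" );   // set path
//    __kmp_acquire_user_lock_with_checks( lck, gtid );
//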
// Check (once) that the user lock machinery has been initialized, under the
// protection of the bootstrap __kmp_initz_lock.
#define KMP_CHECK_USER_LOCK_INIT()                                      \
        {                                                               \
            if ( ! TCR_4( __kmp_init_user_locks ) ) {                   \
                __kmp_acquire_bootstrap_lock( &__kmp_initz_lock );      \
                if ( ! TCR_4( __kmp_init_user_locks ) ) {               \
                    TCW_4( __kmp_init_user_locks, TRUE );               \
                }                                                       \
                __kmp_release_bootstrap_lock( &__kmp_initz_lock );      \
            }                                                           \
        }
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus