gc_locks.h
# if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#   ifdef __GNUC__
      inline static void GC_clear(volatile unsigned int *addr)
      {
        /* Try to discourage gcc from moving anything past this. */
        __asm__ __volatile__(" " : : : "memory");
        *(addr) = 0;
      }
#   else
      /* The function call in the following should prevent the */
      /* compiler from moving assignments to below the UNLOCK. */
#     define GC_clear(addr) GC_noop1((word)(addr)); \
                            *((volatile unsigned int *)(addr)) = 0;
#   endif
#   define GC_CLEAR_DEFINED
# endif /* !GC_CLEAR_DEFINED */

# if !defined(GC_TEST_AND_SET_DEFINED)
#   define USE_PTHREAD_LOCKS
# endif

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS)
#   define NO_THREAD (pthread_t)(-1)
#   include <pthread.h>
#   if defined(PARALLEL_MARK)
      /* We need compare-and-swap to update mark bits, where it's     */
      /* performance critical.  If USE_MARK_BYTES is defined, it is   */
      /* no longer needed for this purpose.  However we use it in     */
      /* either case to implement atomic fetch-and-add, though that's */
      /* less performance critical, and could perhaps be done with    */
      /* a lock.                                                      */
#     if defined(GENERIC_COMPARE_AND_SWAP)
        /* Probably not useful, except for debugging.            */
        /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
        /* minimize its use.                                     */
        extern pthread_mutex_t GC_compare_and_swap_lock;

        /* Note that if GC_word updates are not atomic, a concurrent */
        /* reader should acquire GC_compare_and_swap_lock.  On       */
        /* currently supported platforms, such updates are atomic.   */
        extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                               GC_word old, GC_word new_val);
#     endif /* GENERIC_COMPARE_AND_SWAP */
#     if defined(I386)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Returns TRUE if the comparison succeeded. */
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           char result;
           __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                                : "=m" (*(addr)), "=r" (result)
                                : "r" (new_val), "0" (*(addr)), "a" (old)
                                : "memory");
           return (GC_bool) result;
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
       inline static void GC_memory_write_barrier()
       {
         /* We believe the processor ensures at least processor */
         /* consistent ordering.  Thus a compiler barrier       */
         /* should suffice.                                     */
         __asm__ __volatile__("" : : : "memory");
       }
#     endif /* I386 */

#     if defined(IA64)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           unsigned long oldval;
           __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
                                : "=r" (oldval), "=m" (*addr)
                                : "r" (new_val), "1" (*addr), "r" (old)
                                : "memory");
           return (oldval == old);
         }
#      endif /* !GENERIC_COMPARE_AND_SWAP */
#      if 0
         /* Shouldn't be needed; we use volatile stores instead. */
         inline static void GC_memory_write_barrier()
         {
           __asm__ __volatile__("mf" : : : "memory");
         }
#      endif /* 0 */
#     endif /* IA64 */

#     if defined(S390)
#      if !defined(GENERIC_COMPARE_AND_SWAP)
         /* Note: the original listing declared addr as "volatile C_word *"; */
         /* that is a typo for GC_word, corrected here.                      */
         inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                       GC_word old,
                                                       GC_word new_val)
         {
           int retval;
           __asm__ __volatile__ (
#            ifndef __s390x__
               "     cs  %1,%2,0(%3)\n"
#            else
               "     csg %1,%2,0(%3)\n"
#            endif
             "     ipm %0\n"
             "     srl %0,28\n"
             : "=&d" (retval), "+d" (old)
             : "d" (new_val), "a" (addr)
             : "cc", "memory");
           return retval == 0;
         }
#      endif
#     endif
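#     if 0
        /* Illustration only -- not part of the original header.  A minimal   */
        /* sketch of how the GENERIC_COMPARE_AND_SWAP fallback declared above */
        /* might be implemented in a source file, assuming the mutex          */
        /* serializes every update of *addr:                                  */
        pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

        GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                        GC_word old, GC_word new_val)
        {
          GC_bool result = FALSE;

          pthread_mutex_lock(&GC_compare_and_swap_lock);
          if (*addr == old) {
            *addr = new_val;        /* swap only if the comparison held */
            result = TRUE;
          }
          pthread_mutex_unlock(&GC_compare_and_swap_lock);
          return result;
        }
#     endif /* 0 */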
#     if !defined(GENERIC_COMPARE_AND_SWAP)
        /* Returns the original value of *addr. */
        inline static GC_word GC_atomic_add(volatile GC_word *addr,
                                            GC_word how_much)
        {
          GC_word old;
          do {
            old = *addr;
          } while (!GC_compare_and_exchange(addr, old, old+how_much));
          return old;
        }
#     else /* GENERIC_COMPARE_AND_SWAP */
        /* So long as a GC_word can be atomically updated, it should */
        /* be OK to read *addr without a lock.                       */
        extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
#     endif /* GENERIC_COMPARE_AND_SWAP */
#   endif /* PARALLEL_MARK */

#   if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
      /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to   */
      /* be held for long periods, if it is held at all.  Thus spinning */
      /* and sleeping for fixed periods are likely to result in         */
      /* significant wasted time.  We thus rely mostly on queued locks. */
#     define USE_SPIN_LOCK
      extern volatile unsigned int GC_allocate_lock;
      extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#     ifdef GC_ASSERTIONS
#       define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
                  SET_LOCK_HOLDER(); }
#       define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  GC_clear(&GC_allocate_lock); }
#     else
#       define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#       define UNLOCK() \
                GC_clear(&GC_allocate_lock)
#     endif /* !GC_ASSERTIONS */
#     if 0
        /* Another alternative for OSF1 might be: */
#       include <sys/mman.h>
        extern msemaphore GC_allocate_semaphore;
#       define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
                            != 0) GC_lock(); else GC_allocate_lock = 1; }
        /* The following is INCORRECT, since the memory model is too weak. */
        /* Is this true?  Presumably msem_unlock has the right semantics?  */
        /* - HB                                                            */
#       define UNLOCK() { GC_allocate_lock = 0; \
                          msem_unlock(&GC_allocate_semaphore, 0); }
#     endif /* 0 */
#   else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
#     ifndef USE_PTHREAD_LOCKS
#       define USE_PTHREAD_LOCKS
#     endif
#   endif /* THREAD_LOCAL_ALLOC */

#   ifdef USE_PTHREAD_LOCKS
#     include <pthread.h>
      extern pthread_mutex_t GC_allocate_ml;
#     ifdef GC_ASSERTIONS
#       define LOCK() \
                { GC_lock(); \
                  SET_LOCK_HOLDER(); }
#       define UNLOCK() \
                { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
                  pthread_mutex_unlock(&GC_allocate_ml); }
#     else /* !GC_ASSERTIONS */
#       define LOCK() \
           { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
#       define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#     endif /* !GC_ASSERTIONS */
#   endif /* USE_PTHREAD_LOCKS */

#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() GC_collecting = 1;
#   define EXIT_GC() GC_collecting = 0;
    extern void GC_lock(void);
    extern pthread_t GC_lock_holder;
#   ifdef GC_ASSERTIONS
      extern pthread_t GC_mark_lock_holder;
#   endif
# endif /* GC_PTHREADS with linux_threads.c implementation */
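# if 0
    /* Illustration only -- not part of the original header.  GC_lock()  */
    /* is the contended slow path behind the LOCK() macros above.  A     */
    /* plausible spin-then-yield sketch for the USE_SPIN_LOCK case; the  */
    /* SPIN_MAX bound and the use of sched_yield() are illustrative      */
    /* choices, not taken from this file.                                */
#   include <sched.h>
#   define SPIN_MAX 1024
    void GC_lock(void)
    {
      unsigned spins = 0;

      for (;;) {
        if (!GC_test_and_set(&GC_allocate_lock)) return;
                /* Old value was 0: we now hold the lock.            */
        if (++spins < SPIN_MAX) continue;
                /* Busy-wait briefly; cheap if the holder is quick.  */
        sched_yield();
                /* Then give up the CPU so the holder can progress.  */
        spins = 0;
      }
    }
# endif /* 0 */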
# if defined(GC_IRIX_THREADS)
#   include <pthread.h>
    /* This probably should never be included, but I can't test */
    /* on Irix anymore.                                         */
#   include <mutex.h>

    extern unsigned long GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)     */
        /* POSIX scheduling rules are subject to convoys in high contention */
        /* applications.  This is basically a spin lock.                    */
    extern pthread_t GC_lock_holder;
    extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#   define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#   define NO_THREAD (pthread_t)(-1)
#   define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#   define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#   define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#   define UNLOCK() GC_clear(&GC_allocate_lock);
    extern VOLATILE GC_bool GC_collecting;
#   define ENTER_GC() \
                { \
                    GC_collecting = 1; \
                }
#   define EXIT_GC() GC_collecting = 0;
# endif /* GC_IRIX_THREADS */

# ifdef GC_WIN32_THREADS
#   include <windows.h>
    GC_API CRITICAL_SECTION GC_allocate_ml;
#   define LOCK() EnterCriticalSection(&GC_allocate_ml);
#   define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
# endif

# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
        /* Used on platforms where locks can be reacquired, */
        /* so it doesn't matter if we lie.                  */
# endif

# else /* !THREADS */
#   define LOCK()
#   define UNLOCK()
# endif /* !THREADS */

# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
        /* Used on platforms where locks can be reacquired, */
        /* so it doesn't matter if we lie.                  */
# endif

# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif

# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif

#endif /* GC_LOCKS_H */
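Usage sketch (illustration, not part of the header): a hypothetical caller
showing how the macros above are meant to bracket a critical section.
DCL_LOCK_STATE, LOCK, UNLOCK, ENTER_GC, and EXIT_GC come from this header;
the function itself is invented for the example.

    void GC_example_collect(void)
    {
        DCL_LOCK_STATE;     /* platform lock state; often expands to nothing */

        LOCK();             /* take the allocation lock; under contention    */
                            /* this spins or blocks inside GC_lock()         */
        ENTER_GC();         /* advertise that collection work is under way   */
        /* ... inspect and reclaim heap state while holding the lock ... */
        EXIT_GC();
        UNLOCK();           /* GC_clear() or pthread_mutex_unlock(),         */
                            /* depending on the configuration chosen above   */
    }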