📄 gc_locks.h
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_LOCKS_H
#define GC_LOCKS_H

/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 *
 * In the PARALLEL_MARK case, we also need to define a number of
 * other inline functions here:
 *   GC_bool GC_compare_and_exchange( volatile GC_word *addr,
 *                                    GC_word old, GC_word new )
 *   GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
 *   void GC_memory_barrier( )
 */

# ifdef THREADS
   void GC_noop1 GC_PROTO((word));
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's */
#    include "th/PCR_Th.h"
#    include "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#    define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
         PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK() {\
        if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern GC_word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef GC_SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif
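/* An illustrative sketch (not part of the collector) of the FASTLOCK()  */
/* protocol described in the comment at the top of this file: try the    */
/* cheap lock first, and fall back to the full allocation lock if that   */
/* fails.  do_quick_work() is a hypothetical placeholder; the block is   */
/* kept under "# if 0" so it is never compiled.                          */
# if 0
    static void example_fast_path(void)
    {
      DCL_LOCK_STATE;

      FASTLOCK();
      if (FASTLOCK_SUCCEEDED()) {
        do_quick_work();  /* a few instructions only, per the comment above */
        FASTUNLOCK();
      } else {
        FASTUNLOCK();     /* called whether or not FASTLOCK_SUCCEEDED()     */
        LOCK();           /* fall back to the full allocation lock          */
        do_quick_work();
        UNLOCK();
      }
    }
# endif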
/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
/* acquisition and release.  We need this for correct operation of the */
/* incremental GC.                                                     */
#  ifdef __GNUC__
#   if defined(I386)
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       int oldval;
       /* Note: the "xchg" instruction does not need a "lock" prefix */
       __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)) : "memory");
       return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif
#   if defined(IA64)
#    include <ia64intrin.h>
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       return __sync_lock_test_and_set(addr, 1);
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       *addr = 0;
     }
#    define GC_CLEAR_DEFINED
#   endif
#   ifdef SPARC
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       int oldval;

       __asm__ __volatile__("ldstub %1,%0"
                : "=r"(oldval), "=m"(*addr)
                : "m"(*addr) : "memory");
       return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif
#   ifdef M68K
     /* Contributed by Tony Mantler.  I'm not sure how well it was */
     /* tested.                                                    */
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       char oldval; /* this must be no longer than 8 bits */

       /* The return value is semi-phony.    */
       /* 'tas' sets bit 7 while the return  */
       /* value pretends bit 0 was set       */
       __asm__ __volatile__(
                "tas %1@; sne %0; negb %0"
                : "=d" (oldval)
                : "a" (addr) : "memory");
       return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif
#   if defined(POWERPC)
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       int oldval;
       int temp = 1; /* locked value */

       __asm__ __volatile__(
                "1:\tlwarx %0,0,%3\n"   /* load and reserve               */
                "\tcmpwi %0, 0\n"       /* if load is                     */
                "\tbne 2f\n"            /*   non-zero, return already set */
                "\tstwcx. %2,0,%1\n"    /* else store conditional         */
                "\tbne- 1b\n"           /* retry if lost reservation      */
                "\tsync\n"              /* import barrier                 */
                "2:\t\n"                /* oldval is zero if we set       */
                : "=&r"(oldval), "=p"(addr)
                : "r"(temp), "1"(addr)
                : "cr0","memory");
       return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       __asm__ __volatile__("eieio" : : : "memory");
       *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
#   endif
#   if defined(ALPHA)
     inline static int GC_test_and_set(volatile unsigned int * addr)
     {
       unsigned long oldvalue;
       unsigned long temp;

       __asm__ __volatile__(
                "1:     ldl_l %0,%1\n"
                "       and %0,%3,%2\n"
                "       bne %2,2f\n"
                "       xor %0,%3,%0\n"
                "       stl_c %0,%1\n"
#       ifdef __ELF__
                "       beq %0,3f\n"
#       else
                "       beq %0,1b\n"
#       endif
                "       mb\n"
                "2:\n"
#       ifdef __ELF__
                ".section .text2,\"ax\"\n"
                "3:     br 1b\n"
                ".previous"
#       endif
                :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                :"Ir" (1), "m" (*addr)
                :"memory");
       return oldvalue;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       __asm__ __volatile__("mb" : : : "memory");
       *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
#   endif /* ALPHA */
#   ifdef ARM32
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       int oldval;
       /* SWP on ARM is very similar to XCHG on x86.  Doesn't lock the
        * bus because there are no SMP ARM machines.  If/when there are,
        * this code will likely need to be updated. */
       /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
       __asm__ __volatile__("swp %0, %1, [%2]"
                : "=r"(oldval)
                : "0"(1), "r"(addr)
                : "memory");
       return oldval;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif /* ARM32 */
#   ifdef CRIS
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       /* Ripped from linuxthreads/sysdeps/cris/pt-machine.h. */
       /* Included with Hans-Peter Nilsson's permission.      */
       register unsigned long int ret;

       /* Note the use of a dummy output of *addr to expose the write.
        * The memory barrier is to stop *other* writes being moved past
        * this code.  */
       __asm__ __volatile__("clearf\n"
                "0:\n\t"
                "movu.b [%2],%0\n\t"
                "ax\n\t"
                "move.b %3,[%2]\n\t"
                "bwf 0b\n\t"
                "clearf"
                : "=&r" (ret), "=m" (*addr)
                : "r" (addr), "r" ((int) 1), "m" (*addr)
                : "memory");
       return ret;
     }
#    define GC_TEST_AND_SET_DEFINED
#   endif /* CRIS */
#   ifdef S390
     inline static int GC_test_and_set(volatile unsigned int *addr) {
       int ret;
       __asm__ __volatile__ (
                " l     %0,0(%2)\n"
                "0: cs  %0,%1,0(%2)\n"
                " jl    0b"
                : "=&d" (ret)
                : "d" (1), "a" (addr)
                : "cc", "memory");
       return ret;
     }
#   endif
#  endif /* __GNUC__ */
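/* An illustrative sketch (not part of the collector): the contract that */
/* each GC_test_and_set above implements, restated with C11 atomics for  */
/* comparison.  This assumes a C11 <stdatomic.h>; the file itself        */
/* predates C11 and does not use it.  Never compiled.                    */
# if 0
#   include <stdatomic.h>
    inline static int example_test_and_set(atomic_uint *addr)
    {
      /* Atomically store 1 and return the previous value: zero means the */
      /* caller acquired the lock, nonzero means it was already held.     */
      return (int)atomic_exchange(addr, 1u);
    }
# endif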
#  if (defined(ALPHA) && !defined(__GNUC__))
#    ifndef OSF1
        --> We currently assume that if gcc is not used, we are
        --> running under Tru64.
#    endif
#    include <machine/builtins.h>
#    include <c_asm.h>
#    define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(MSWIN32)
#    define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
#    define GC_TEST_AND_SET_DEFINED
#  endif
#  ifdef MIPS
#    ifdef LINUX
#      include <sys/tas.h>
#      define GC_test_and_set(addr) _test_and_set((int *) addr,1)
#      define GC_TEST_AND_SET_DEFINED
#    elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#      ifdef __GNUC__
#        define GC_test_and_set(addr) _test_and_set((void *)addr,1)
#      else
#        define GC_test_and_set(addr) test_and_set((void *)addr,1)
#      endif
#    else
#      define GC_test_and_set(addr) __test_and_set32((void *)addr,1)
#      define GC_clear(addr) __lock_release(addr);
#      define GC_CLEAR_DEFINED
#    endif
#    define GC_TEST_AND_SET_DEFINED
#  endif /* MIPS */
#  if defined(_AIX)
#    include <sys/atomic_op.h>
#    if (defined(_POWER) || defined(_POWERPC))
#      if defined(__GNUC__)
         inline static void GC_memsync() {
           __asm__ __volatile__ ("sync" : : : "memory");
         }
#      else
#        ifndef inline
#          define inline __inline
#        endif
#        pragma mc_func GC_memsync { \
           "7c0004ac" /* sync (same opcode used for dcs)*/ \
         }
#      endif
#    else
#      error dont know how to memsync
#    endif
     inline static int GC_test_and_set(volatile unsigned int * addr) {
       int oldvalue = 0;
       if (compare_and_swap((void *)addr, &oldvalue, 1)) {
         GC_memsync();
         return 0;
       } else return 1;
     }
#    define GC_TEST_AND_SET_DEFINED
     inline static void GC_clear(volatile unsigned int *addr) {
       GC_memsync();
       *(addr) = 0;
     }
#    define GC_CLEAR_DEFINED
#  endif
#  if 0 /* defined(HP_PA) */
     /* The official recommendation seems to be to not use ldcw from    */
     /* user mode.  Since multithreaded incremental collection doesn't  */
     /* work anyway on HP_PA, this shouldn't be a major loss.           */

     /* "set" means 0 and "clear" means 1 here. */
#    define GC_test_and_set(addr) !GC_test_and_clear(addr);
#    define GC_TEST_AND_SET_DEFINED
#    define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
        /* The above needs a memory barrier! */
#    define GC_CLEAR_DEFINED
#  endif
#  if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
#    ifdef __GNUC__
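/* An illustrative sketch (not part of the collector): one plausible way */
/* to realize the PARALLEL_MARK primitives whose signatures are given in */
/* the header comment above, using gcc's legacy __sync builtins.  That   */
/* GC_atomic_add returns the new value is an assumption here; consult    */
/* the collector's real definitions.  Never compiled.                    */
# if 0
    inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                                  GC_word old, GC_word new_val)
    {
      return (GC_bool)__sync_bool_compare_and_swap(addr, old, new_val);
    }

    inline static GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
    {
      return __sync_add_and_fetch(addr, how_much);  /* assumed: new value */
    }

    inline static void GC_memory_barrier(void)
    {
      __sync_synchronize();  /* full memory barrier */
    }
# endif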