pthread_support.c
/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * support code.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 */
/* DG/UX ix86 support <takis@xfree86.org> */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on
 * Eric Benson's patch.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */

/*#define DEBUG_THREADS 1*/
/*#define GC_ASSERTIONS*/

# include "private/pthread_support.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_WIN32_THREADS)

# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_COMPILER_TLS)
#   ifdef __GNUC__
#     define USE_PTHREAD_SPECIFIC
      /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
#   else
#     define USE_COMPILER_TLS
#   endif
# endif

# if defined USE_HPUX_TLS
    --> Macro replaced by USE_COMPILER_TLS
# endif

# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
      defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) \
     && !defined(USE_PTHREAD_SPECIFIC)
#   define USE_PTHREAD_SPECIFIC
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
#   define _POSIX4A_DRAFT10_SOURCE 1
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
#   define _USING_POSIX4A_DRAFT10 1
# endif

# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_COMPILER_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_COMPILER_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
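/* Illustrative note (a sketch, not in the original source): the block  */
/* above selects among three TSD mechanisms: pthread-specific data,     */
/* compiler-supported TLS, and the collector's own private/specific.h.  */
/* Under USE_COMPILER_TLS the "key" is the __thread variable itself, so */
/* + tsd = GC_getspecific(GC_thread_key);                               */
/* expands to a plain load of GC_thread_key, while under                */
/* USE_PTHREAD_SPECIFIC the same line becomes a genuine call to         */
/* pthread_getspecific(), typically a function call per access.         */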
# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
# include <signal.h>

#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an uint in DG/UX */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */

#ifndef __GNUC__
#   define __inline__
#endif

#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   if !defined(GC_DGUX386_THREADS)
#     define REAL_FUNC(f) f
#   else /* GC_DGUX386_THREADS */
#     define REAL_FUNC(f) __d10_##f
#   endif /* GC_DGUX386_THREADS */
#   undef pthread_create
#   if !defined(GC_DARWIN_THREADS)
#     undef pthread_sigmask
#   endif
#   undef pthread_join
#   undef pthread_detach
#   if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
       && !defined(_PTHREAD_USE_PTDNAM_)
      /* Restore the original mangled names on Tru64 UNIX. */
#     define pthread_create __pthread_create
#     define pthread_join __pthread_join
#     define pthread_detach __pthread_detach
#   endif
#endif

void GC_thr_init();

static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL */

#ifdef USE_COMPILER_TLS
  __thread
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size      */
/* resolution, even if not all entries are used.                          */
/* We hold the allocator lock.                                            */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q >= HBLKSIZE) {
          if (gfl[nwords] == 0) {
            gfl[nwords] = q;
          } else {
            /* Concatenate: */
            for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
            GC_ASSERT(0 == q);
            *qptr = gfl[nwords];
            gfl[nwords] = fl[i];
          }
        }
        /* Clear fl[i], since the thread structure may hang around. */
        /* Do it in a way that is likely to trap if we access it.   */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
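/* Worked example of the index translation above (illustrative;          */
/* assumes GRANULARITY == 8 and 4-byte words): fl[2] chains 2-granule    */
/* (16-byte) objects, so nwords == 2 * (8/4) == 4 and the chain is       */
/* prepended to gfl[4], the global list of 4-word (16-byte) objects.     */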
/* We statically allocate a single "size 0" object.  It is linked to */
/* itself, and is thus repeatedly reused for all size 0 allocation   */
/* requests.  (Size 0 gcj allocation requests are incorrect, and     */
/* we arrange for those to fault asap.)                              */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);

/* Each thread structure must be initialized. */
/* This call must be made from the new thread.*/
/* Caller holds allocation lock.              */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists. */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}

#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock. */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself or from */
    /* the fork handler for a child process.                    */
#   ifndef HANDLE_FORK
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}

extern GC_PTR GC_generic_malloc_many();
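/* Note on the free-list header encoding used below (an illustrative    */
/* summary of the tests in GC_local_malloc): an entry >= HBLKSIZE is a  */
/* real pointer to the first free object; an entry in                   */
/* [1, DIRECT_GRANULES] is a granule count, bumped by index + 1 each    */
/* time the request is passed through to GC_malloc, so that rarely      */
/* used size classes never acquire a private list; once the count       */
/* exceeds DIRECT_GRANULES the list is refilled with                    */
/* GC_generic_malloc_many(), and 0 after a refill signals that the      */
/* allocation failed.                                                   */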
GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        GC_key_t k = GC_thread_key;
#       endif
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is   */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                  */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}

GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded; */
            /* the free list is always visible to the collector as */
            /* such.                                               */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}

#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.     */
            /* Thus it is impossible for a mark procedure to see the  */
            /* allocation of the next object, but to see this object  */
            /* still containing a free list pointer.  Otherwise the   */
            /* marker might find a random "mark descriptor".          */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            if (!GC_incremental) *my_fl = my_entry + index + 1;
                /* In the incremental case, we always have to take this */
                /* path.  Thus we leave the counter alone.              */
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */

# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)
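/* Usage sketch (illustrative only): clients built with                 */
/* THREAD_LOCAL_ALLOC reach the allocators defined above as, e.g.,      */
/* + void *p = GC_local_malloc(24);                                     */
/* Small requests are then served from the calling thread's free        */
/* lists without taking the allocator lock; only refills through        */
/* GC_generic_malloc_many() synchronize with the collector.             */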