📄 pthread_support.c
    return result;
}

GC_bool GC_in_thread_creation = FALSE;  /* Protected by allocation lock. */

GC_thread GC_register_my_thread_inner(struct GC_stack_base *sb,
                                      pthread_t my_pthread)
{
    GC_thread me;

    GC_in_thread_creation = TRUE;  /* OK to collect from unknown thread. */
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
#   ifdef GC_DARWIN_THREADS
      me -> stop_info.mach_thread = mach_thread_self();
#   else
      me -> stop_info.stack_ptr = sb -> mem_base;
#   endif
    me -> stack_end = sb -> mem_base;
#   ifdef IA64
      me -> backing_store_end = sb -> reg_base;
#   endif /* IA64 */
    return me;
}

int GC_register_my_thread(struct GC_stack_base *sb)
{
    pthread_t my_pthread = pthread_self();
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(my_pthread);
    if (0 == me) {
        me = GC_register_my_thread_inner(sb, my_pthread);
        me -> flags |= DETACHED;
          /* Treat as detached, since we do not need to worry about    */
          /* pointer results.                                          */
        UNLOCK();
        return GC_SUCCESS;
    } else {
        UNLOCK();
        return GC_DUPLICATE;
    }
}

void * GC_inner_start_routine(struct GC_stack_base *sb, void * arg)
{
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf("Starting thread 0x%x\n", (unsigned)my_pthread);
        GC_printf("pid = %ld\n", (long) getpid());
        GC_printf("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_register_my_thread_inner(sb, my_pthread);
    me -> flags = si -> flags;
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf("start_routine = %p\n", (void *)start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.   */
                                        /* OK to deallocate.    */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC)
        LOCK();
        GC_init_thread_local(&(me->tlfs));
        UNLOCK();
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
        GC_printf("Finishing thread 0x%x\n", (unsigned)pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
        /* Cleanup acquires lock, ensuring that we can't exit while    */
        /* a collection that thinks we're alive is trying to stop us.  */
    return(result);
}

void * GC_start_routine(void * arg)
{
#   ifdef INCLUDE_LINUX_THREAD_DESCR
      struct GC_stack_base sb;

#     ifdef REDIRECT_MALLOC
        /* GC_get_stack_base may call pthread_getattr_np, which can    */
        /* unfortunately call realloc, which may allocate from an      */
        /* unregistered thread.  This is unpleasant, since it might    */
        /* force heap growth.                                          */
        GC_disable();
#     endif
      if (GC_get_stack_base(&sb) != GC_SUCCESS)
        ABORT("Failed to get thread stack base.");
#     ifdef REDIRECT_MALLOC
        GC_enable();
#     endif
      return GC_inner_start_routine(&sb, arg);
#   else
      return GC_call_with_stack_base(GC_inner_start_routine, arg);
#   endif
}
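/* Illustrative sketch (not part of the original file): how client     */
/* code might register a thread that was NOT created through the       */
/* wrapped pthread_create, using the public gc.h API and               */
/* GC_register_my_thread above.  The names registered_body,            */
/* my_thread_start, and do_work are hypothetical.                      */
#if 0
#include "gc.h"

static void * registered_body(struct GC_stack_base *sb, void *arg)
{
    void *result;

    /* Returns GC_SUCCESS on first registration, GC_DUPLICATE if this  */
    /* thread is already known to the collector.                       */
    (void)GC_register_my_thread(sb);
    result = do_work(arg);        /* hypothetical thread payload       */
    GC_unregister_my_thread();    /* must precede thread exit          */
    return result;
}

void * my_thread_start(void *arg)
{
    /* Let the collector determine the stack base, just as             */
    /* GC_start_routine above does.                                    */
    return GC_call_with_stack_base(registered_body, arg);
}
#endif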
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the      */
        /* thread library, which isn't visible to the collector.       */

    /* We resist the temptation to muck with the stack size here,      */
    /* even if the default is unreasonably small.  That's the client's */
    /* responsibility.                                                 */

    INIT_REAL_SYMS();
    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size = 0;
        if (NULL != attr) {
            pthread_attr_getstacksize(attr, &stack_size);
        }
        if (0 == stack_size) {
            pthread_attr_t my_attr;
            pthread_attr_init(&my_attr);
            pthread_attr_getstacksize(&my_attr, &stack_size);
        }
        /* On Solaris 10, with default attr initialization,    */
        /* stack_size remains 0.  Fudge it.                    */
        if (0 == stack_size) {
#           ifndef SOLARIS
              WARN("Failed to get stack size for assertion checking\n", 0);
#           endif
            stack_size = 1000000;
        }
#       ifdef PARALLEL_MARK
          GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
          /* FreeBSD-5.3/Alpha: default pthread stack is 64K,  */
          /* HBLKSIZE=8192, sizeof(word)=8                     */
          GC_ASSERT(stack_size >= 65536);
#       endif
        /* Our threads may need to do some work for the GC.    */
        /* Ridiculously small threads won't work, and they     */
        /* probably wouldn't work anyway.                      */
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf("About to start new thread from thread 0x%x\n",
                  (unsigned)pthread_self());
#   endif
    GC_need_to_lock = TRUE;
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
        GC_printf("Started thread 0x%x\n", (unsigned)(*new_thread));
#   endif
    /* Wait until child has been added to the thread table.            */
    /* This also ensures that we hold onto si until the child is done  */
    /* with it.  Thus it doesn't matter whether it is otherwise        */
    /* visible to the collector.                                       */
    if (0 == result) {
        while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}

/* Spend a few cycles in a way that can't introduce contention with    */
/* other threads.                                                      */
void GC_pause(void)
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}

#define SPIN_MAX 128    /* Maximum number of calls to GC_pause before  */
                        /* giving up.                                  */

volatile GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and      */
                        /* holding the allocation lock for an          */
                        /* extended period.                            */

#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either   */
/* because we don't have a GC_test_and_set implementation, or because  */
/* we don't want to risk sleeping, we can still try spinning on        */
/* pthread_mutex_trylock for a while.  This appears to be very         */
/* beneficial in many cases.                                           */
/* I suspect that under high contention this is nearly always better   */
/* than the spin lock.  But it's a bit slower on a uniprocessor.       */
/* Hence we still default to the spin lock.                            */
/* This is also used to acquire the mark lock for the parallel         */
/* marker.                                                             */

/* Here we use a strict exponential backoff scheme.  I don't know      */
/* whether that's better or worse than the above.  We eventually       */
/* yield by calling pthread_mutex_lock(); it never makes sense to      */
/* explicitly sleep.                                                   */
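/* Note: with SPIN_MAX == 128, GC_generic_lock below retries           */
/* pthread_mutex_trylock after 1, 2, 4, ..., 128 calls to GC_pause,    */
/* i.e. at most 8 spinning retries (255 GC_pause calls in total)       */
/* beyond the initial attempt, before it finally blocks in             */
/* pthread_mutex_lock.                                                 */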
#define LOCK_STATS
#ifdef LOCK_STATS
  unsigned long GC_spin_count = 0;
  unsigned long GC_block_count = 0;
  unsigned long GC_unlocked_count = 0;
#endif

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
#       ifdef LOCK_STATS
            ++GC_unlocked_count;
#       endif
        return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    ++GC_spin_count;
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        ++GC_block_count;
#   endif
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */

#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation      */
/* as STL alloc.h.  This isn't really the right way to do this,        */
/* but until the POSIX scheduling mess gets straightened out ...       */

volatile AO_TS_t GC_allocate_lock = 0;

void GC_lock(void)
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor  */
#   define high_spin_max SPIN_MAX  /* spin cycles for multiprocessor   */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2) {
            GC_pause();
            continue;
        }
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* Under Linux very short sleeps tend to wait until the    */
            /* current time quantum expires.  On old Linux kernels     */
            /* nanosleep(<= 2 ms) just spins.  (Under 2.4, this        */
            /* happens only for real-time processes.)  We want to      */
            /* minimize both behaviors here.                           */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15 ms, even under    */
                /* extreme contention.                                 */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}

#else  /* !USE_SPINLOCK */

void GC_lock(void)
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else  /* NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPINLOCK */
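/* Note on the spin-lock GC_lock above: after an unsuccessful spin     */
/* phase it enters the yield loop, where iterations 0..11              */
/* (SLEEP_THRESHOLD) call sched_yield() and later iterations           */
/* nanosleep for 1 << i ns, with i capped at 24, i.e. at most          */
/* 1 << 24 ns (roughly 16.8 ms) per retry.                             */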
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  unsigned long GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions     */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner field    */
  /* even when it fails to acquire the mutex.  This causes             */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                   */
  /* According to the man page, we should use                          */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually  */
  /* defined.                                                          */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock(void)
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
#   endif
}

void GC_release_mark_lock(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}

/* Collector must wait for free-list builders for 2 reasons:           */
/* 1) Mark bits may still be getting examined without lock.            */
/* 2) Partial free lists referenced only by locals may not be scanned  */
/*    correctly, e.g. if they contain "pointer-free" objects, since    */
/*    the free-list link may be ignored.                               */
void GC_wait_builder(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
#   endif
}

void GC_wait_for_reclaim(void)
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */

#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker(void)
{
    GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
#   endif
}

void GC_notify_all_marker(void)
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */
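/* Illustrative sketch (not part of the original file): the            */
/* builder-side protocol that GC_wait_for_reclaim above pairs with.    */
/* A free-list builder advertises itself via GC_fl_builder_count       */
/* under the mark lock, and broadcasts builder_cv through              */
/* GC_notify_all_builder when it finishes; the function name           */
/* example_fl_builder is hypothetical.                                 */
#if 0
void example_fl_builder(void)
{
    GC_acquire_mark_lock();
    ++GC_fl_builder_count;        /* mark bits / free lists now in use */
    GC_release_mark_lock();

    /* ... build thread-local free lists without holding the lock ...  */

    GC_acquire_mark_lock();
    if (--GC_fl_builder_count == 0)
        GC_notify_all_builder();  /* wake a waiting collector          */
    GC_release_mark_lock();
}
#endif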