
📄 pthread_support.c

📁 KAFFE: source code for a Java virtual machine on Linux
💻 C
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    /* We resist the temptation to muck with the stack size here,       */
    /* even if the default is unreasonably small.  That's the client's  */
    /* responsibility.                                                  */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size;
        if (NULL == attr) {
           pthread_attr_t my_attr;
           pthread_attr_init(&my_attr);
           pthread_attr_getstacksize(&my_attr, &stack_size);
        } else {
           pthread_attr_getstacksize(attr, &stack_size);
        }
#       ifdef PARALLEL_MARK
          GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
          /* FreeBSD-5.3/Alpha: default pthread stack is 64K,  */
          /* HBLKSIZE=8192, sizeof(word)=8                     */
          GC_ASSERT(stack_size >= 65536);
#       endif
        /* Our threads may need to do some work for the GC.    */
        /* Ridiculously small threads won't work, and they     */
        /* probably wouldn't work anyway.                      */
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.            */
    /* This also ensures that we hold onto si until the child is done  */
    /* with it.  Thus it doesn't matter whether it is otherwise        */
    /* visible to the collector.                                       */
    if (0 == result) {
        while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}

#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */

/* Spend a few cycles in a way that can't introduce contention with    */
/* other threads.                                                      */
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}

#define SPIN_MAX 128    /* Maximum number of calls to GC_pause before  */
                        /* we give up.                                 */

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and      */
                        /* holding the allocation lock for an          */
                        /* extended period.                            */

#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)

/* If we don't want to use the below spinlock implementation, either   */
/* because we don't have a GC_test_and_set implementation, or because  */
/* we don't want to risk sleeping, we can still try spinning on        */
/* pthread_mutex_trylock for a while.  This appears to be very         */
/* beneficial in many cases.                                           */

/* I suspect that under high contention this is nearly always better   */
/* than the spin lock.  But it's a bit slower on a uniprocessor.       */
/* Hence we still default to the spin lock.                            */

/* This is also used to acquire the mark lock for the parallel         */
/* marker.                                                             */

/* Here we use a strict exponential backoff scheme.  I don't know      */
/* whether that's better or worse than the above.  We eventually       */
/* yield by calling pthread_mutex_lock(); it never makes sense to      */
/* explicitly sleep.                                                   */

#define LOCK_STATS
#ifdef LOCK_STATS
  unsigned long GC_spin_count = 0;
  unsigned long GC_block_count = 0;
  unsigned long GC_unlocked_count = 0;
#endif

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) {
#       ifdef LOCK_STATS
            ++GC_unlocked_count;
#       endif
        return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
#               ifdef LOCK_STATS
                    ++GC_spin_count;
#               endif
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
        ++GC_block_count;
#   endif
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */

#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
                /* Under Linux very short sleeps tend to wait until     */
                /* the current time quantum expires.  On old Linux      */
                /* kernels nanosleep(<= 2ms) just spins.                */
                /* (Under 2.4, this happens only for real-time          */
                /* processes.)  We want to minimize both behaviors      */
                /* here.                                                */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                        /* Don't wait for more than about 15msecs, even */
                        /* under extreme contention.                    */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}

#else  /* !USE_SPINLOCK */

void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else  /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPINLOCK */

#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions      */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner           */
  /* field even when it fails to acquire the mutex.  This causes        */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                    */
  /* According to the man page, we should use                           */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually   */
  /* defined.                                                           */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}

/* Collector must wait for free-list builders for 2 reasons:            */
/* 1) Mark bits may still be getting examined without lock.             */
/* 2) Partial free lists referenced only by locals may not be scanned   */
/*    correctly, e.g. if they contain "pointer-free" objects, since the */
/*    free-list link may be ignored.                                    */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */

#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */
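
The creation path above hinges on one invariant: the parent must not free si until the child has consumed it, which the child signals through the registered semaphore. Below is a minimal standalone sketch of that handshake, assuming plain POSIX threads and semaphores. start_info here is a stripped-down copy, and trampoline and create_registered are illustrative names, not GC internals (the real GC_start_routine also registers the thread with the collector before posting).

#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>

struct start_info {
    sem_t registered;               /* posted by the child once si is consumed */
    void *(*start_routine)(void *);
    void *arg;
};

static void *trampoline(void *p)
{
    struct start_info *si = p;
    void *(*fn)(void *) = si->start_routine;
    void *arg = si->arg;

    /* A real wrapper would add this thread to the GC's thread table here. */
    sem_post(&si->registered);      /* after this, the parent may free si */
    return fn(arg);
}

static int create_registered(pthread_t *t, void *(*fn)(void *), void *arg)
{
    struct start_info *si = malloc(sizeof *si);
    int result;

    if (si == NULL) return ENOMEM;
    sem_init(&si->registered, 0, 0);
    si->start_routine = fn;
    si->arg = arg;
    result = pthread_create(t, NULL, trampoline, si);
    if (result == 0) {
        /* Block until the child posts; retry if interrupted by a signal. */
        while (sem_wait(&si->registered) != 0) {
            if (errno != EINTR) abort();
        }
    }
    sem_destroy(&si->registered);
    free(si);
    return result;
}

Because the parent blocks on the semaphore until the child is done with si, the block never needs to be visible to the collector, which is exactly the point of the comment in the wrapper above.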

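GC_generic_lock's trylock-with-exponential-backoff is likewise easy to restate in isolation. A minimal sketch, assuming an ordinary pthread mutex; backoff_lock and pause_briefly are illustrative names, and the volatile-counter pause stands in for GC_pause():

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#define SPIN_MAX 128    /* mirrors the bound used above */

static void pause_briefly(void)
{
    volatile int dummy = 0;   /* volatile keeps the loop from being optimized away */
    int i;
    for (i = 0; i < 10; ++i) ++dummy;
}

static void backoff_lock(pthread_mutex_t *lock)
{
    unsigned pause_length, i;

    if (pthread_mutex_trylock(lock) == 0) return;       /* uncontended fast path */
    for (pause_length = 1; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) pause_briefly();
        switch (pthread_mutex_trylock(lock)) {
            case 0:     return;                         /* acquired while spinning */
            case EBUSY: break;                          /* double the pause, retry */
            default:    abort();
        }
    }
    pthread_mutex_lock(lock);                           /* give up and block */
}

The doubling caps total spinning at roughly 2*SPIN_MAX pauses before the thread falls back to a blocking lock, which is why the comments above call this a strict exponential backoff.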
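
Finally, the yield path in the spin-lock version of GC_lock escalates from sched_yield() to short capped nanosleeps, for the Linux time-quantum reasons given in its comments. A standalone restatement of that ladder, with contention_wait as an illustrative name; 2^24 ns is about 16 ms, the "about 15msecs" cap mentioned above:

#include <sched.h>
#include <time.h>

#define SLEEP_THRESHOLD 12   /* pure yields for the first rounds, as above */

/* One back-off step in the ladder: yield first, then sleep 2^i ns, capped. */
static void contention_wait(int i)
{
    if (i < SLEEP_THRESHOLD) {
        sched_yield();
    } else {
        struct timespec ts;

        if (i > 24) i = 24;          /* never sleep much longer than ~16 ms */
        ts.tv_sec = 0;
        ts.tv_nsec = 1L << i;
        nanosleep(&ts, 0);
    }
}

A caller would invoke contention_wait(i) on the i-th failed attempt to take the lock, exactly as the retry loop in GC_lock does.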