
📄 pthread_support.c

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return TRUE;
#       ifdef IA64
          if (marker_bsp[i] > lo && marker_bsp[i] < hi) return TRUE;
#       endif
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return TRUE;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return TRUE;
#         endif
        }
      }
    }
    return FALSE;
}
#endif /* USE_PROC_FOR_LIBRARIES */

#ifdef IA64
/* Find the largest stack_base smaller than bound.  May be used to     */
/* find the boundary between a register stack and the adjacent,        */
/* immediately preceding memory stack.                                 */
ptr_t GC_greatest_stack_base_below(ptr_t bound)
{
    int i;
    GC_thread p;
    ptr_t result = 0;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > result && marker_sp[i] < bound)
          result = marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_end > result && p -> stack_end < bound) {
          result = p -> stack_end;
        }
      }
    }
    return result;
}
#endif /* IA64 */

#ifdef GC_LINUX_THREADS
/* Return the number of processors, or a value <= 0 if it can't be     */
/* determined.                                                          */
int GC_get_nprocs(void)
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that      */
    /* appears to be buggy in many cases.                               */
    /* We look for lines "cpu<n>" in /proc/stat.                        */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call    */
        /* the real one.                                                */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..." entry in  */
        /* /proc/stat.  We identify those as uniprocessors.             */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */

/* We hold the GC lock.  Wait until an in-progress GC has finished.    */
/* Repeatedly RELEASES GC LOCK in order to wait.                        */
/* If wait_for_all is true, then we exit with the GC lock held and no  */
/* collection in progress; otherwise we just wait for the current GC   */
/* to finish.                                                           */
extern GC_bool GC_collection_in_progress(void);

void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    GC_ASSERT(I_HOLD_LOCK());
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark     */
        /* stack, since it's about to be unmapped.                       */
        while (GC_incremental && GC_collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            ENTER_GC();
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
}
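/* A minimal, stand-alone sketch (not part of the original file) of the */
/* /proc/stat technique GC_get_nprocs() uses above: scan for lines      */
/* beginning with "cpu<n>" and report max(n) + 1.  Kept under "#if 0"   */
/* so the listing's behavior is unchanged; extract it into its own      */
/* translation unit to try it.                                          */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int cpus_from_proc_stat(void)
{
    char buf[4096];
    ssize_t len, i;
    int ncpu = 1;       /* a single "cpu ..." line means uniprocessor   */
    int fd = open("/proc/stat", O_RDONLY);

    if (fd < 0) return -1;
    len = read(fd, buf, sizeof(buf) - 1);
    close(fd);
    if (len <= 0) return -1;
    buf[len] = '\0';
    for (i = 0; i + 4 < len; ++i) {
        /* Match "\ncpu<digit>"; the aggregate "cpu " summary line is   */
        /* skipped because it is not followed by a digit.               */
        if (buf[i] == '\n' && buf[i+1] == 'c' && buf[i+2] == 'p'
            && buf[i+3] == 'u' && buf[i+4] >= '0' && buf[i+4] <= '9') {
            int cpu_no = atoi(buf + i + 4);
            if (cpu_no >= ncpu) ncpu = cpu_no + 1;
        }
    }
    return ncpu;
}

int main(void)
{
    printf("ncpus = %d\n", cpus_from_proc_stat());
    return 0;
}
#endif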
#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that    */
/* this is attainable, since the Single UNIX Specification seems to     */
/* imply that one should only call async-signal-safe functions, and we  */
/* probably can't quite guarantee that.  But we give it our best shot.  */
/* (That same spec also implies that it's not safe to call the system   */
/* malloc between fork() and exec().  Thus we're doing no worse than    */
/* it.)                                                                  */

/* Called before a fork().              */
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks    */
    /* the child will see a consistent state in which monitor           */
    /* invariants hold.  Unfortunately, we can't acquire libc locks     */
    /* we might need, and there seems to be no guarantee that libc      */
    /* must install a suitable fork handler.                            */
    /* Wait for an ongoing GC to finish, since we can't finish it in    */
    /* the (one remaining thread in) the child.                         */
      LOCK();
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_acquire_mark_lock();
#     endif
}

/* Called in the parent after a fork(). */
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}

/* Called in the child after a fork().  */
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left.      */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably  */
      /* just going to exec, and we would have to restart mark threads. */
        GC_markers = 1;
        GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    UNLOCK();
}
#endif /* HANDLE_FORK */

#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or a value <= 0 if it can't be      */
/* determined.                                                           */
int GC_get_nprocs(void)
{
    /* <takis@XFree86.Org> */
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0)
       /* set -1 for error */
       numCpus = -1;
    else
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;

#   ifdef DEBUG_THREADS
      GC_printf("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
}
#endif /* GC_DGUX386_THREADS */

#if defined(GC_NETBSD_THREADS)
static int get_ncpu(void)
{
    int mib[] = {CTL_HW, HW_NCPU};
    int res;
    size_t len = sizeof(res);

    sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
    return res;
}
#endif  /* GC_NETBSD_THREADS */

# if defined(GC_LINUX_THREADS) && defined(INCLUDE_LINUX_THREAD_DESCR)
    __thread int dummy_thread_local;
# endif
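/* A minimal sketch (not part of the original file) of the fork-handler */
/* protocol used above, reduced to a single mutex: "prepare" takes the  */
/* lock before fork() so no other thread holds it mid-operation, and    */
/* both parent and child release it afterwards.  The handler names are  */
/* hypothetical; GC's real handlers also quiesce the collector first.   */
#if 0
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare(void) { pthread_mutex_lock(&state_lock); }
static void parent(void)  { pthread_mutex_unlock(&state_lock); }
static void child(void)   { pthread_mutex_unlock(&state_lock); }

static void install_fork_handlers(void)
{
    /* Registered once; the handlers then run around every fork().      */
    pthread_atfork(prepare, parent, child);
}
#endif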
/* We hold the allocation lock.  */
void GC_thr_init(void)
{
#   ifndef GC_DARWIN_THREADS
        int dummy;
#   endif
    GC_thread t;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

#   ifdef HANDLE_FORK
      /* Prepare for a possible fork.   */
        pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                       GC_fork_child_proc);
#   endif /* HANDLE_FORK */
#   if defined(INCLUDE_LINUX_THREAD_DESCR)
      /* Explicitly register the region including the address of a      */
      /* thread-local variable.  This should include thread locals for  */
      /* the main thread, except for those allocated in response to     */
      /* dlopen calls.                                                  */
        {
          ptr_t thread_local_addr = (ptr_t)(&dummy_thread_local);
          ptr_t main_thread_start, main_thread_end;

          if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
                                    &main_thread_end)) {
            ABORT("Failed to find mapping for main thread thread locals");
          }
          GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
        }
#   endif
    /* Add the initial thread, so we can stop it.       */
      t = GC_new_thread(pthread_self());
#     ifdef GC_DARWIN_THREADS
         t -> stop_info.mach_thread = mach_thread_self();
#     else
         t -> stop_info.stack_ptr = (ptr_t)(&dummy);
#     endif
      t -> flags = DETACHED | MAIN_THREAD;

    GC_stop_init();

    /* Set GC_nprocs.  */
      {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
      }
      if (GC_nprocs <= 0) {
#       if defined(GC_HPUX_THREADS)
          GC_nprocs = pthread_num_processors_np();
#       endif
#       if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
           || defined(GC_SOLARIS_THREADS)
          GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
          if (GC_nprocs <= 0) GC_nprocs = 1;
#       endif
#       if defined(GC_IRIX_THREADS)
          GC_nprocs = sysconf(_SC_NPROC_ONLN);
          if (GC_nprocs <= 0) GC_nprocs = 1;
#       endif
#       if defined(GC_NETBSD_THREADS)
          GC_nprocs = get_ncpu();
#       endif
#       if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
          int ncpus = 1;
          size_t len = sizeof(ncpus);
          sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
          GC_nprocs = ncpus;
#       endif
#       if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
          GC_nprocs = GC_get_nprocs();
#       endif
#       if defined(GC_GNU_THREADS)
          if (GC_nprocs <= 0) GC_nprocs = 1;
#       endif
      }
      if (GC_nprocs <= 0) {
        WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
        GC_nprocs = 2;
#       ifdef PARALLEL_MARK
          GC_markers = 1;
#       endif
      } else {
#       ifdef PARALLEL_MARK
          {
            char * markers_string = GETENV("GC_MARKERS");
            if (markers_string != NULL) {
              GC_markers = atoi(markers_string);
            } else {
              GC_markers = GC_nprocs;
            }
          }
#       endif
      }
#   ifdef PARALLEL_MARK
      if (GC_print_stats) {
          GC_log_printf("Number of processors = %ld, "
                        "number of marker threads = %ld\n",
                        GC_nprocs, GC_markers);
      }
      if (GC_markers == 1) {
        GC_parallel = FALSE;
        if (GC_print_stats) {
            GC_log_printf(
                "Single marker thread, turning off parallel marking\n");
        }
      } else {
        GC_parallel = TRUE;
        /* Disable true incremental collection, but generational is OK. */
        GC_time_limit = GC_TIME_UNLIMITED;
      }
      /* If we are using a parallel marker, actually start the helper   */
      /* threads.                                                       */
        if (GC_parallel) start_mark_threads();
#   endif
}
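/* A stand-alone sketch (not part of the original file) of the          */
/* sysctl(CTL_HW, HW_NCPU) query used by the NetBSD get_ncpu() and the  */
/* Darwin/FreeBSD branch of GC_thr_init() above; it compiles on the     */
/* BSDs and Mac OS X.  Kept under "#if 0" so the listing is unchanged.  */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
    int mib[2] = {CTL_HW, HW_NCPU};
    int ncpu = 1;
    size_t len = sizeof(ncpu);

    /* On failure, sysctl() returns -1 and ncpu keeps its default.      */
    if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)
        perror("sysctl");
    printf("ncpus = %d\n", ncpu);
    return 0;
}
#endif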
/* Perform all initializations, including those that    */
/* may require allocation.                              */
/* Called without the allocation lock.                  */
/* Must be called before a second thread is created.    */
/* Did we say it's called without the allocation lock?  */
void GC_init_parallel(void)
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set the flag first.  */
    if (!GC_is_initialized) GC_init();
    /* Initialize thread-local free lists if used.      */
#   if defined(THREAD_LOCAL_ALLOC)
      LOCK();
      GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs));
      UNLOCK();
#   endif
}

#if !defined(GC_DARWIN_THREADS)
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    INIT_REAL_SYMS();
    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
#endif /* !GC_DARWIN_THREADS */

/* Wrapper for functions that are likely to block for an appreciable   */
/* length of time.                                                      */
struct blocking_data {
    void (*fn)(void *);
    void *arg;
};

static void GC_do_blocking_inner(ptr_t data, void * context)
{
    struct blocking_data * d = (struct blocking_data *) data;
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stop_info.stack_ptr = GC_save_regs_in_stack();
#   elif !defined(GC_DARWIN_THREADS)
        me -> stop_info.stack_ptr = GC_approx_sp();
#   endif
#   ifdef IA64
        me -> backing_store_ptr = GC_save_regs_in_stack();
#   endif
    me -> thread_blocked = TRUE;
    /* Save context here if we want to support precise stack marking.  */
    UNLOCK();
    (d -> fn)(d -> arg);
    LOCK();   /* This will block if the world is stopped.       */
    me -> thread_blocked = FALSE;
    UNLOCK();
}

void GC_do_blocking(void (*fn)(void *), void *arg)
{
    struct blocking_data my_data;

    my_data.fn = fn;
    my_data.arg = arg;
    GC_with_callee_saves_pushed(GC_do_blocking_inner, (ptr_t)(&my_data));
}

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but       */
                                /* parent hasn't yet noticed.           */
};

int GC_unregister_my_thread(void)
{
    GC_thread me;

    LOCK();
    /* Wait for any GC that may be marking from our stack to complete   */
    /* before we remove this thread.                                    */
    GC_wait_for_gc_completion(FALSE);
    me = GC_lookup_thread(pthread_self());
#   if defined(THREAD_LOCAL_ALLOC)
      GC_destroy_thread_local(&(me->tlfs));
#   endif
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC)
      GC_remove_specific(GC_thread_key);
#   endif
    UNLOCK();
    return GC_SUCCESS;
}

/* Called at thread exit.  Never called for the main thread.  That's    */
/* OK, since it results in at most a tiny one-time leak, and            */
/* linuxthreads doesn't reclaim the main thread's resources or id       */
/* anyway.                                                              */
void GC_thread_exit_proc(void *arg)
{
    GC_unregister_my_thread();
}
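/* A minimal sketch (not part of the original file) of how client code  */
/* might use GC_do_blocking() as declared above: wrap a call that may   */
/* block for a long time so the collector can treat this thread as      */
/* stopped in the meantime.  The wrapper and its argument struct are    */
/* hypothetical illustrations, not part of the GC API.                  */
#if 0
#include <unistd.h>

struct read_args {
    int fd;
    void *buf;
    size_t count;
    ssize_t result;
};

/* Runs while this thread is marked blocked; it should not touch the    */
/* GC heap.                                                             */
static void do_read(void *p)
{
    struct read_args *a = (struct read_args *)p;
    a->result = read(a->fd, a->buf, a->count);
}

static ssize_t gc_friendly_read(int fd, void *buf, size_t count)
{
    struct read_args a;

    a.fd = fd; a.buf = buf; a.count = count; a.result = -1;
    GC_do_blocking(do_read, &a);
    return a.result;
}
#endif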
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    INIT_REAL_SYMS();
    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id   */
    /* can't have been recycled by pthreads.                            */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined(GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what   */
    /* appears to be) a spurious EINTR, which caused the test and real  */
    /* code to gratuitously fail.  Having looked at the system pthread  */
    /* library source code, I see how this return code may be           */
    /* generated: in one code path, pthread_join() just returns the     */
    /* errno setting of the thread being joined.  This matches neither  */
    /* the POSIX specification nor the local man pages, so I have taken */
    /* the liberty of catching this one spurious return value, properly */
    /* conditionalized on GC_FREEBSD_THREADS.                           */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled.   */
        GC_delete_gc_thread(thread_gc_id);
        UNLOCK();
    }
    return result;
}

int WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    INIT_REAL_SYMS();
    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled.     */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}
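/* A sketch (not part of the original file) of the general ld --wrap    */
/* technique behind the WRAP_FUNC/REAL_FUNC indirection used above:     */
/* when a program is linked with "-Wl,--wrap=pthread_join", every call  */
/* to pthread_join resolves to __wrap_pthread_join, while               */
/* __real_pthread_join names the original.  The exact macro definitions */
/* in this file depend on the build configuration (e.g. GC_USE_LD_WRAP  */
/* versus direct GC_ prefixing); this only illustrates the underlying   */
/* mechanism, not those macros themselves.                              */
#if 0
#include <pthread.h>
#include <stdio.h>

int __real_pthread_join(pthread_t thread, void **retval);

/* Our interposed version: do bookkeeping, then call the real one.      */
int __wrap_pthread_join(pthread_t thread, void **retval)
{
    fprintf(stderr, "joining a thread\n");
    return __real_pthread_join(thread, retval);
}
#endif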
