
pthread_support.c
From the source of KAFFE, a Java virtual machine for Linux
Language: C
Page 1 of 4
# endif /* !THREAD_LOCAL_ALLOC */

#if 0
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.

We no longer do this, since this code is now portable enough that it might
actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#endif /* 0 */

long GC_nprocs = 1;	/* Number of processors.  We may not have	*/
			/* access to all of them, but this is as good	*/
			/* a guess as any ...				*/

#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate	*/
    /* promptly.  This is important if it were called from the signal	*/
    /* handler or from the GC lock acquisition code.  Under Linux, it's	*/
    /* not safe to call it from a signal handler, since it uses mutexes	*/
    /* and condition variables.  Since it is called only here, the	*/
    /* argument is unnecessary.						*/
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
	/* resynchronize if we get far off, e.g. because GC_mark_no	*/
	/* wrapped.							*/
	my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}

extern long GC_markers;		/* Number of mark threads we would	*/
				/* like to have.  Includes the		*/
				/* initiating thread.			*/

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
	WARN("Limiting number of mark threads\n", 0);
	GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
	ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it.	*/
      /* Otherwise marker threads or GC may run out of		*/
      /* space.							*/
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
	size_t old_size;
	int code;

	if (pthread_attr_getstacksize(&attr, &old_size) != 0)
	  ABORT("pthread_attr_getstacksize failed\n");
	if (old_size < MIN_STACK_SIZE) {
	  if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
	    ABORT("pthread_attr_setstacksize failed\n");
	}
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
#   ifdef CONDPRINT
      if (GC_print_stats) {
	GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
			      GC_mark_thread, (void *)(word)i)) {
	WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}

#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */

GC_bool GC_thr_initialized = FALSE;

volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)(&GC_thread_key),
	  (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key));
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free	*/
/* list links wouldn't otherwise be found.  We also set them in the	*/
/* normal free lists, since that involves touching less memory than if	*/
/* we scanned them normally.						*/
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
	for (j = 1; j < NFREELISTS; ++j) {
	  q = p -> ptrfree_freelists[j];
	  if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
	  q = p -> normal_freelists[j];
	  if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#	  ifdef GC_GCJ_SUPPORT
	    q = p -> gcj_freelists[j];
	    if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#	  endif /* GC_GCJ_SUPPORT */
	}
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */

static struct GC_Thread_Rep first_thread;

/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
	result = &first_thread;
	first_thread_used = TRUE;
    } else {
	result = (struct GC_Thread_Rep *)
		 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* Return a GC_thread corresponding to a given pthread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits		*/
/* updates.							*/
/* If there is more than one thread with the given id we	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}

#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the	*/
/* one for the current thread.  We need to do this in the	*/
/* child process after a fork(), since only the current	*/
/* thread survives in the child.				*/
void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
	next = p -> next;
	if (p -> id == self) {
	  me = p;
	  p -> next = 0;
	} else {
#	  ifdef THREAD_LOCAL_ALLOC
	    if (!(p -> flags & FINISHED)) {
	      GC_destroy_thread_local(p);
	    }
#	  endif /* THREAD_LOCAL_ALLOC */
	  if (p != &first_thread) GC_INTERNAL_FREE(p);
	}
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */

#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
	if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
	if (0 != p -> stack_end) {
#	  ifdef STACK_GROWS_UP
	    if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#	  else /* STACK_GROWS_DOWN */
	    if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#	  endif
	}
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */

#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined.	*/
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that	*/
    /* appears to be buggy in many cases.				*/
    /* We look for lines "cpu<n>" in /proc/stat.			*/
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
	/* If read is wrapped, this may need to be redefined to call	*/
	/* the real one.						*/
    char stat_buf[STAT_BUF_SIZE];
    int f;
    word result = 1;
	/* Some old kernels only have a single "cpu nnnn ..."	*/
	/* entry in /proc/stat.  We identify those as		*/
	/* uniprocessors.					*/
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
	WARN("Couldn't read /proc/stat\n", 0);
	return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
	    && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
	    int cpu_no = atoi(stat_buf + i + 4);
	    if (cpu_no >= result) result = cpu_no + 1;
	}
    }
    close(f);
    return result;
}
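
/* For reference, per-processor entries in /proc/stat look roughly	*/
/* like this (field values illustrative):				*/
/*									*/
/*	cpu  10132153 290696 3084719 46828483 ...			*/
/*	cpu0 1393280 32966 572056 13343292 ...				*/
/*	cpu1 1342849 29925 548422 13365165 ...				*/
/*									*/
/* The aggregate "cpu " line is the first line of the file, so it is	*/
/* never preceded by '\n' and the scan above skips it; each "cpu<n>"	*/
/* line yields its index via atoi, and the highest index seen plus	*/
/* one becomes the result.						*/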
#endif /* GC_LINUX_THREADS */

/* We hold the GC lock.  Wait until an in-progress GC has finished.	*/
/* Repeatedly RELEASES GC LOCK in order to wait.			*/
/* If wait_for_all is true, then we exit with the GC lock held and no	*/
/* collection in progress; otherwise we just wait for the current GC	*/
/* to finish.								*/
extern GC_bool GC_collection_in_progress();
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    if (GC_incremental && GC_collection_in_progress()) {
	int old_gc_no = GC_gc_no;

	/* Make sure that no part of our stack is still on the mark stack, */
	/* since it's about to be unmapped.				    */
	while (GC_incremental && GC_collection_in_progress()
	       && (wait_for_all || old_gc_no == GC_gc_no)) {
	    ENTER_GC();
	    GC_in_thread_creation = TRUE;
	    GC_collect_a_little_inner(1);
	    GC_in_thread_creation = FALSE;
	    EXIT_GC();
	    UNLOCK();
	    sched_yield();
	    LOCK();
	}
    }
}

#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that is */
/* attainable, since the single UNIX spec seems to imply that one	 */
/* should only call async-signal-safe functions, and we probably can't	 */
/* quite guarantee that.  But we give it our best shot.  (That same	 */
/* spec also implies that it's not safe to call the system malloc	 */
/* between fork() and exec().  Thus we're doing no worse than it.)	 */

/* Called before a fork()		*/
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks	*/
    /* the child will see a consistent state in which monitor		*/
    /* invariants hold.  Unfortunately, we can't acquire libc locks	*/
    /* we might need, and there seems to be no guarantee that libc	*/
    /* must install a suitable fork handler.				*/
    /* Wait for an ongoing GC to finish, since we can't finish it in	*/
    /* the (one remaining thread in) the child.				*/
      LOCK();
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_acquire_mark_lock();
#     endif
}

/* Called in parent after a fork()	*/
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}

/* Called in child after a fork()	*/
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
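
(The listing breaks off here, inside GC_fork_child_proc; it continues on page 2.)

For orientation, the thread bookkeeping above is a chained hash table keyed by pthread_t: GC_new_thread prepends an entry to its bucket, GC_lookup_thread returns the most recently added entry for an id, and GC_delete_thread / GC_delete_gc_thread unlink a single entry. The standalone sketch below shows the same open-hashing pattern in miniature; the struct, table size, and function names are simplified stand-ins chosen for illustration and are not part of pthread_support.c. It also assumes pthread_t is an integral type, as it is with LinuxThreads/glibc.

/* Illustrative sketch only: a simplified chained hash table keyed by	*/
/* pthread_t, mirroring the GC_threads scheme above.  No locking is	*/
/* shown; the real code requires the caller to hold the allocation	*/
/* lock.								*/
#include <pthread.h>
#include <stdlib.h>

#define TABLE_SZ 128			/* stand-in for THREAD_TABLE_SZ */

struct thread_rep {
    struct thread_rep *next;		/* chains entries in the same bucket */
    pthread_t id;
};

static struct thread_rep *table[TABLE_SZ];

static int hash(pthread_t id)
{
    /* Assumes pthread_t is integral (true for LinuxThreads/glibc). */
    return (int)((unsigned long)id % TABLE_SZ);
}

/* Prepend a new entry, like GC_new_thread. */
static struct thread_rep *add_thread(pthread_t id)
{
    int hv = hash(id);
    struct thread_rep *t = calloc(1, sizeof *t);

    if (t == 0) return 0;
    t->id = id;
    t->next = table[hv];
    table[hv] = t;
    return t;
}

/* Return the most recently added entry for id, like GC_lookup_thread. */
static struct thread_rep *lookup_thread(pthread_t id)
{
    struct thread_rep *p = table[hash(id)];

    while (p != 0 && !pthread_equal(p->id, id)) p = p->next;
    return p;
}

/* Unlink and free the entry for id, like GC_delete_thread. */
static void delete_thread(pthread_t id)
{
    int hv = hash(id);
    struct thread_rep *p = table[hv], *prev = 0;

    while (p != 0 && !pthread_equal(p->id, id)) {
        prev = p;
        p = p->next;
    }
    if (p == 0) return;			/* not found; GC_delete_thread would trap instead */
    if (prev == 0) table[hv] = p->next;
    else prev->next = p->next;
    free(p);
}

int main(void)
{
    pthread_t me = pthread_self();

    add_thread(me);
    struct thread_rep *found = lookup_thread(me);	/* finds the entry just added */
    delete_thread(me);
    return found != 0 ? 0 : 1;
}

Because entries are prepended, a stale record for a thread that has been joined but not yet reported can share a bucket with a newer record whose pthread_t was reused; that is why the comments above specify that GC_lookup_thread returns the most recent entry and why GC_delete_gc_thread exists to remove one specific record.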
