
📄 solaris_threads.c

📁 Source code of KAFFE, a Java virtual machine for Linux
💻 C
📖 Page 1 of 2
/* 
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for Solaris threads.  Provides functionality we wish Sun
 * had provided.  Relies on some information we probably shouldn't rely on.
 */
/* Boehm, September 14, 1994 4:44 pm PDT */

# include "private/gc_priv.h"

# if defined(GC_SOLARIS_THREADS) || defined(GC_SOLARIS_PTHREADS)
# include "private/solaris_threads.h"
# include <thread.h>
# include <synch.h>
# include <signal.h>
# include <fcntl.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/syscall.h>
# include <sys/procfs.h>
# include <sys/lwp.h>
# include <sys/reg.h>
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
# include <errno.h>

#ifdef HANDLE_FORK
  --> Not yet supported.  Try porting the code from linux_threads.c.
#endif

/*
 * This is the default size of the LWP arrays. If there are more LWPs
 * than this when a stop-the-world GC happens, set_max_lwps will be
 * called to cope.
 * This must be higher than the number of LWPs at startup time.
 * The threads library creates a thread early on, so the min. is 3
 */
# define DEFAULT_MAX_LWPS	4

#undef thr_join
#undef thr_create
#undef thr_suspend
#undef thr_continue

cond_t GC_prom_join_cv;		/* Broadcast when any thread terminates	*/
cond_t GC_create_cv;		/* Signalled when a new undetached	*/
				/* thread starts.			*/

#ifdef MMAP_STACKS
static int GC_zfd;
#endif /* MMAP_STACKS */

/* We use the allocation lock to protect thread-related data structures. */

/* We stop the world using /proc primitives.  This makes some	*/
/* minimal assumptions about the threads implementation.	*/
/* We don't play by the rules, since the rules make this	*/
/* impossible (as of Solaris 2.3).  Also note that as of	*/
/* Solaris 2.3 the various thread and lwp suspension		*/
/* primitives failed to stop threads by the time the request	*/
/* is completed.						*/

static sigset_t old_mask;

/* Sleep for n milliseconds, n < 1000	*/
void GC_msec_sleep(int n)
{
    struct timespec ts;

    ts.tv_sec = 0;
    ts.tv_nsec = 1000000*n;
    if (syscall(SYS_nanosleep, &ts, 0) < 0) {
	ABORT("nanosleep failed");
    }
}

/* Turn off preemption;  gross but effective.  		*/
/* Caller has allocation lock.				*/
/* Actually this is not needed under Solaris 2.3 and	*/
/* 2.4, but hopefully that'll change.			*/
void preempt_off()
{
    sigset_t set;

    (void)sigfillset(&set);
    sigdelset(&set, SIGABRT);
    syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
}

void preempt_on()
{
    syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
}

int GC_main_proc_fd = -1;

struct lwp_cache_entry {
    lwpid_t lc_id;
    int lc_descr;	/* /proc file descriptor.	*/
} GC_lwp_cache_default[DEFAULT_MAX_LWPS];

static int max_lwps = DEFAULT_MAX_LWPS;
static struct lwp_cache_entry *GC_lwp_cache = GC_lwp_cache_default;

static prgregset_t GC_lwp_registers_default[DEFAULT_MAX_LWPS];
static prgregset_t *GC_lwp_registers = GC_lwp_registers_default;

/* Return a file descriptor for the /proc entry corresponding	*/
/* to the given lwp.  The file descriptor may be stale if the	*/
/* lwp exited and a new one was forked.				*/
static int open_lwp(lwpid_t id)
{
    int result;
    static int next_victim = 0;
    register int i;

    for (i = 0; i < max_lwps; i++) {
    	if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
    }
    result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
    /*
     * If PIOCOPENLWP fails, try closing fds in the cache until it succeeds.
     */
    if (result < 0 && errno == EMFILE) {
	for (i = 0; i < max_lwps; i++) {
	    if (GC_lwp_cache[i].lc_id != 0) {
		(void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
		result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id);
		if (result >= 0 || (result < 0 && errno != EMFILE))
		    break;
	    }
	}
    }
    if (result < 0) {
	if (errno == EMFILE) {
	    ABORT("Too many open files");
	}
        return(-1) /* exited? */;
    }
    if (GC_lwp_cache[next_victim].lc_id != 0)
        (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
    GC_lwp_cache[next_victim].lc_id = id;
    GC_lwp_cache[next_victim].lc_descr = result;
    if (++next_victim >= max_lwps)
	next_victim = 0;
    return(result);
}

static void uncache_lwp(lwpid_t id)
{
    register int i;

    for (i = 0; i < max_lwps; i++) {
    	if (GC_lwp_cache[i].lc_id == id) {
    	    (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
    	    GC_lwp_cache[i].lc_id = 0;
    	    break;
    	}
    }
}

	/* Sequence of current lwp ids	*/
static lwpid_t GC_current_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *GC_current_ids = GC_current_ids_default;

	/* Temporary used below (can be big if large number of LWPs) */
static lwpid_t last_ids_default[DEFAULT_MAX_LWPS + 1];
static lwpid_t *last_ids = last_ids_default;

#define ROUNDUP(n)    WORDS_TO_BYTES(ROUNDED_UP_WORDS(n))

static void set_max_lwps(GC_word n)
{
    char *mem;
    char *oldmem;
    int required_bytes = ROUNDUP(n * sizeof(struct lwp_cache_entry))
	+ ROUNDUP(n * sizeof(prgregset_t))
	+ ROUNDUP((n + 1) * sizeof(lwpid_t))
	+ ROUNDUP((n + 1) * sizeof(lwpid_t));

    GC_expand_hp_inner(divHBLKSZ((word)required_bytes));
    oldmem = mem = GC_scratch_alloc(required_bytes);
    if (0 == mem) ABORT("No space for lwp data structures");

    /*
     * We can either flush the old lwp cache or copy it over. Do the latter.
     */
    memcpy(mem, GC_lwp_cache, max_lwps * sizeof(struct lwp_cache_entry));
    GC_lwp_cache = (struct lwp_cache_entry*)mem;
    mem += ROUNDUP(n * sizeof(struct lwp_cache_entry));

    BZERO(GC_lwp_registers, max_lwps * sizeof(GC_lwp_registers[0]));
    GC_lwp_registers = (prgregset_t *)mem;
    mem += ROUNDUP(n * sizeof(prgregset_t));

    GC_current_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1) * sizeof(lwpid_t));

    last_ids = (lwpid_t *)mem;
    mem += ROUNDUP((n + 1)* sizeof(lwpid_t));

    if (mem > oldmem + required_bytes)
	ABORT("set_max_lwps buffer overflow");

    max_lwps = n;
}

/* Stop all lwps in process.  Assumes preemption is off.	*/
/* Caller has allocation lock (and any other locks he may	*/
/* need).							*/
static void stop_all_lwps()
{
    int lwp_fd;
    char buf[30];
    prstatus_t status;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();

    if (GC_main_proc_fd == -1) {
    	sprintf(buf, "/proc/%d", getpid());
    	GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
        if (GC_main_proc_fd < 0) {
	    if (errno == EMFILE)
		ABORT("/proc open failed: too many open files");
	    GC_printf1("/proc open failed: errno %d", errno);
	    abort();
        }
    }

    BZERO(GC_lwp_registers, sizeof (prgregset_t) * max_lwps);
    for (i = 0; i < max_lwps; i++)
	last_ids[i] = 0;
    for (;;) {
        if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
    	    ABORT("Main PIOCSTATUS failed");
    	if (status.pr_nlwp < 1)
    	    ABORT("Invalid number of lwps returned by PIOCSTATUS");
    	if (status.pr_nlwp >= max_lwps) {
    	    set_max_lwps(status.pr_nlwp*2 + 10);
	    /*
	     * The data in the old GC_current_ids and
	     * GC_lwp_registers has been trashed. Cleaning out last_ids
	     * will make sure every LWP gets re-examined.
	     */
            for (i = 0; i < max_lwps; i++)
		last_ids[i] = 0;
	    continue;
    	}
        if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0)
            ABORT("PIOCLWPIDS failed");
        changed = FALSE;
        for (i = 0; GC_current_ids[i] != 0 && i < max_lwps; i++) {
            if (GC_current_ids[i] != last_ids[i]) {
                changed = TRUE;
                if (GC_current_ids[i] != me) {
		    /* PIOCSTOP doesn't work without a writable		*/
		    /* descriptor.  And that makes the process		*/
		    /* undebuggable.					*/
                    if (_lwp_suspend(GC_current_ids[i]) < 0) {
                        /* Could happen if the lwp exited */
                        uncache_lwp(GC_current_ids[i]);
                        GC_current_ids[i] = me; /* ignore */
                    }
                }
            }
        }
        /*
         * In the unlikely event something does a fork between the
	 * PIOCSTATUS and the PIOCLWPIDS.
         */
        if (i >= max_lwps)
	    continue;
        /* All lwps in GC_current_ids != me have been suspended.  Note	*/
        /* that _lwp_suspend is idempotent.				*/
        for (i = 0; GC_current_ids[i] != 0; i++) {
            if (GC_current_ids[i] != last_ids[i]) {
                if (GC_current_ids[i] != me) {
                    lwp_fd = open_lwp(GC_current_ids[i]);
		    if (lwp_fd == -1)
		    {
			GC_current_ids[i] = me;
			continue;
		    }
		    /* LWP should be stopped.  Empirically it sometimes	*/
		    /* isn't, and more frequently the PR_STOPPED flag	*/
		    /* is not set.  Wait for PR_STOPPED.		*/
                    if (syscall(SYS_ioctl, lwp_fd,
                                PIOCSTATUS, &status) < 0) {
			/* Possible if the descriptor was stale, or */
			/* we encountered the 2.3 _lwp_suspend bug. */
			uncache_lwp(GC_current_ids[i]);
                        GC_current_ids[i] = me; /* handle next time. */
                    } else {
                        while (!(status.pr_flags & PR_STOPPED)) {
                            GC_msec_sleep(1);
			    if (syscall(SYS_ioctl, lwp_fd,
				    	PIOCSTATUS, &status) < 0) {
                            	ABORT("Repeated PIOCSTATUS failed");
			    }
			    if (status.pr_flags & PR_STOPPED) break;

			    GC_msec_sleep(20);
			    if (syscall(SYS_ioctl, lwp_fd,
				    	PIOCSTATUS, &status) < 0) {
                            	ABORT("Repeated PIOCSTATUS failed");
			    }
                        }
                        if (status.pr_who != GC_current_ids[i]) {
			    /* can happen if thread was on death row */
			    uncache_lwp(GC_current_ids[i]);
			    GC_current_ids[i] = me; /* handle next time. */
			    continue;
                        }
                        /* Save registers where collector can */
			/* find them.			  */
			BCOPY(status.pr_reg, GC_lwp_registers[i],
			      sizeof (prgregset_t));
                    }
                }
            }
        }
        if (!changed) break;
        for (i = 0; i < max_lwps; i++) last_ids[i] = GC_current_ids[i];
    }
}

/* Restart all lwps in process.  Assumes preemption is off.	*/
static void restart_all_lwps()
{
    int lwp_fd;
    register int i;
    GC_bool changed;
    lwpid_t me = _lwp_self();
#   define PARANOID

    for (i = 0; GC_current_ids[i] != 0; i++) {
#	ifdef PARANOID
	  if (GC_current_ids[i] != me) {
	    int lwp_fd = open_lwp(GC_current_ids[i]);
	    prstatus_t status;

	    if (lwp_fd < 0) ABORT("open_lwp failed");
	    if (syscall(SYS_ioctl, lwp_fd,
			PIOCSTATUS, &status) < 0) {
                ABORT("PIOCSTATUS failed in restart_all_lwps");
	    }
	    if (memcmp(status.pr_reg, GC_lwp_registers[i],
		       sizeof (prgregset_t)) != 0) {
		    int j;

		    for(j = 0; j < NPRGREG; j++)
		    {
			    GC_printf3("%i: %x -> %x\n", j,
				       GC_lwp_registers[i][j],
				       status.pr_reg[j]);
		    }
		ABORT("Register contents changed");
	    }
	    if (!(status.pr_flags & PR_STOPPED)) {
	    	ABORT("lwp no longer stopped");
	    }
#ifdef SPARC
	    {
	      gwindows_t windows;

	      if (syscall(SYS_ioctl, lwp_fd,
			PIOCGWIN, &windows) < 0) {
                ABORT("PIOCGWIN failed in restart_all_lwps");
	      }
	      if (windows.wbcnt > 0) ABORT("unsaved register windows");
	    }
#endif
	  }
#	endif /* PARANOID */
	if (GC_current_ids[i] == me) continue;
        if (_lwp_continue(GC_current_ids[i]) < 0) {
            ABORT("Failed to restart lwp");
        }
    }
    if (i >= max_lwps) ABORT("Too many lwps");
}

GC_bool GC_multithreaded = 0;

void GC_stop_world()
{
    preempt_off();
    if (GC_multithreaded)
        stop_all_lwps();
}

void GC_start_world()
{
    if (GC_multithreaded)
        restart_all_lwps();
    preempt_on();
}

void GC_thr_init(void);

GC_bool GC_thr_initialized = FALSE;

size_t GC_min_stack_sz;

/*
 * stack_head is stored at the top of free stacks
 */
struct stack_head {
	struct stack_head	*next;
	ptr_t			base;
	thread_t		owner;
};

# define N_FREE_LISTS 25
struct stack_head *GC_stack_free_lists[N_FREE_LISTS] = { 0 };
		/* GC_stack_free_lists[i] is free list for stacks of 	*/
		/* size GC_min_stack_sz*2**i.				*/
		/* Free lists are linked through stack_head stored	*/
		/* at top of stack.					*/

/* Return a stack of size at least *stack_size.  *stack_size is	*/
/* replaced by the actual stack size.				*/
/* Caller holds allocation lock.				*/
ptr_t GC_stack_alloc(size_t * stack_size)
{
    register size_t requested_sz = *stack_size;
    register size_t search_sz = GC_min_stack_sz;
    register int index = 0;	/* = log2(search_sz/GC_min_stack_sz) */
    register ptr_t base;
    register struct stack_head *result;

    while (search_sz < requested_sz) {
        search_sz *= 2;
        index++;
    }
    if ((result = GC_stack_free_lists[index]) == 0
        && (result = GC_stack_free_lists[index+1]) != 0) {
        /* Try next size up. */
        search_sz *= 2; index++;
    }
    if (result != 0) {
        base = GC_stack_free_lists[index]->base;
        GC_stack_free_lists[index] = GC_stack_free_lists[index]->next;
    } else {
#ifdef MMAP_STACKS
        base = (ptr_t)mmap(0, search_sz + GC_page_size,
			   PROT_READ|PROT_WRITE, MAP_PRIVATE |MAP_NORESERVE,
			   GC_zfd, 0);
	if (base == (ptr_t)-1)
	{
		*stack_size = 0;
		return NULL;
	}

	mprotect(base, GC_page_size, PROT_NONE);
	/* Should this use divHBLKSZ(search_sz + GC_page_size) ? -- cf */
	GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
	base += GC_page_size;

#else
        base = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_size);
	if (base == NULL)
	{
		*stack_size = 0;
