
📄 solaris_threads.c

📁 KAFFE: source code for building a Java virtual machine on Linux
💻 C
📖 Page 1 of 2
		return NULL;
	}
        base = (ptr_t)(((word)base + GC_page_size) & ~(GC_page_size - 1));
        /* Protect hottest page to detect overflow. */
#	ifdef SOLARIS23_MPROTECT_BUG_FIXED
            mprotect(base, GC_page_size, PROT_NONE);
#	endif
        GC_is_fresh((struct hblk *)base, divHBLKSZ(search_sz));
        base += GC_page_size;
#endif
    }
    *stack_size = search_sz;
    return(base);
}

/* Caller holds allocation lock.					*/
void GC_stack_free(ptr_t stack, size_t size)
{
    register int index = 0;
    register size_t search_sz = GC_min_stack_sz;
    register struct stack_head *head;

#ifdef MMAP_STACKS
    /* Zero pointers */
    mmap(stack, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED,
	 GC_zfd, 0);
#endif
    while (search_sz < size) {
        search_sz *= 2;
        index++;
    }
    if (search_sz != size) ABORT("Bad stack size");

    head = (struct stack_head *)(stack + search_sz - sizeof(struct stack_head));
    head->next = GC_stack_free_lists[index];
    head->base = stack;
    GC_stack_free_lists[index] = head;
}

void GC_my_stack_limits();

/* Notify virtual dirty bit implementation that known empty parts of	*/
/* stacks do not contain useful data.					*/
/* Caller holds allocation lock.					*/
void GC_old_stacks_are_fresh()
{
/* No point in doing this for MMAP stacks - and pointers are zero'd out */
/* by the mmap in GC_stack_free.					 */
#ifndef MMAP_STACKS
    register int i;
    register struct stack_head *s;
    register ptr_t p;
    register size_t sz;
    register struct hblk * h;
    int dummy;

    for (i = 0, sz = GC_min_stack_sz; i < N_FREE_LISTS;
         i++, sz *= 2) {
         for (s = GC_stack_free_lists[i]; s != 0; s = s->next) {
             p = s->base;
             h = (struct hblk *)(((word)p + HBLKSIZE-1) & ~(HBLKSIZE-1));
             if ((ptr_t)h == p) {
                 GC_is_fresh((struct hblk *)p, divHBLKSZ(sz));
             } else {
                 GC_is_fresh((struct hblk *)p, divHBLKSZ(sz) - 1);
                 BZERO(p, (ptr_t)h - p);
             }
         }
    }
#endif /* MMAP_STACKS */
    GC_my_stack_limits();
}

/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  We never actually create detached threads.  We allocate all	*/
/* new thread stacks ourselves.  These allow us to maintain this	*/
/* data structure.							*/
# define THREAD_TABLE_SZ 128	/* Must be power of 2	*/
volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}

/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
    	result = &first_thread;
    	first_thread_used = TRUE;
    	/* Don't acquire allocation lock, since we may already hold it. */
    } else {
        result = (struct GC_Thread_Rep *)
        	 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    /* result -> finished = 0; */
    (void) cond_init(&(result->join_cv), USYNC_THREAD, 0);
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p -> id != id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}

/* Return the GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock.				*/
GC_thread GC_lookup_thread(thread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && p -> id != id) p = p -> next;
    return(p);
}

/* Solaris 2/Intel uses an initial stack size limit slightly bigger than the
   SPARC default of 8 MB.  Account for this to warn only if the user has
   raised the limit beyond the default.

   This is identical to DFLSSIZ defined in <sys/vm_machparam.h>.  This file
   is installed in /usr/platform/`uname -m`/include, which is not in the
   default include directory list, so copy the definition here.  */
#ifdef I386
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024 + ((USRSTACK) & 0x3FFFFF))
#else
# define MAX_ORIG_STACK_SIZE (8 * 1024 * 1024)
#endif

word GC_get_orig_stack_size() {
    struct rlimit rl;
    static int warned = 0;
    int result;

    if (getrlimit(RLIMIT_STACK, &rl) != 0) ABORT("getrlimit failed");
    result = (word)rl.rlim_cur & ~(HBLKSIZE-1);
    if (result > MAX_ORIG_STACK_SIZE) {
	if (!warned) {
	    WARN("Large stack limit(%ld): only scanning 8 MB\n", result);
	    warned = 1;
	}
	result = MAX_ORIG_STACK_SIZE;
    }
    return result;
}

/* Notify dirty bit implementation of unused parts of my stack.	*/
/* Caller holds allocation lock.					*/
void GC_my_stack_limits()
{
    int dummy;
    register ptr_t hottest = (ptr_t)((word)(&dummy) & ~(HBLKSIZE-1));
    register GC_thread me = GC_lookup_thread(thr_self());
    register size_t stack_size = me -> stack_size;
    register ptr_t stack;

    if (stack_size == 0) {
        /* original thread */
        /* Empirically, what should be the stack page with lowest	*/
        /* address is actually inaccessible.				*/
        stack_size = GC_get_orig_stack_size() - GC_page_size;
        stack = GC_stackbottom - stack_size + GC_page_size;
    } else {
        stack = me -> stack;
    }
    if (stack > hottest || stack + stack_size < hottest) {
    	ABORT("sp out of bounds");
    }
    GC_is_fresh((struct hblk *)stack, divHBLKSZ(hottest - stack));
}

/* We hold allocation lock.  Should do exactly the right thing if the	*/
/* world is stopped.  Should not fail if it isn't.			*/
void GC_push_all_stacks()
{
    register int i;
    register GC_thread p;
    register ptr_t sp = GC_approx_sp();
    register ptr_t bottom, top;
    struct rlimit rl;

#   define PUSH(bottom,top) \
      if (GC_dirty_maintained) { \
	GC_push_selected((bottom), (top), GC_page_was_ever_dirty, \
		      GC_push_all_stack); \
      } else { \
        GC_push_all_stack((bottom), (top)); \
      }
    GC_push_all_stack((ptr_t)GC_lwp_registers,
		      (ptr_t)GC_lwp_registers
		      + max_lwps * sizeof(GC_lwp_registers[0]));
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_size != 0) {
            bottom = p -> stack;
            top = p -> stack + p -> stack_size;
        } else {
            /* The original stack. */
            bottom = GC_stackbottom - GC_get_orig_stack_size() + GC_page_size;
            top = GC_stackbottom;
        }
        if ((word)sp > (word)bottom && (word)sp < (word)top) bottom = sp;
        PUSH(bottom, top);
      }
    }
}

int GC_is_thread_stack(ptr_t addr)
{
    register int i;
    register GC_thread p;
    register ptr_t bottom, top;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> stack_size != 0) {
            if (p -> stack <= addr &&
		addr < p -> stack + p -> stack_size)
		    return 1;
	}
      }
    }
    return 0;
}

/* The only thread that ever really performs a thr_join.	*/
void * GC_thr_daemon(void * dummy)
{
    void *status;
    thread_t departed;
    register GC_thread t;
    register int i;
    register int result;

    for(;;) {
      start:
        result = thr_join((thread_t)0, &departed, &status);
    	LOCK();
    	if (result != 0) {
    	    /* No more threads; wait for create.	*/
    	    for (i = 0; i < THREAD_TABLE_SZ; i++) {
    	        for (t = GC_threads[i]; t != 0; t = t -> next) {
                    if (!(t -> flags & (DETACHED | FINISHED))) {
                      UNLOCK();
                      goto start; /* Thread started just before we	*/
                      		  /* acquired the lock.			*/
                    }
                }
            }
            cond_wait(&GC_create_cv, &GC_allocate_ml);
            UNLOCK();
    	} else {
    	    t = GC_lookup_thread(departed);
	    GC_multithreaded--;
    	    if (!(t -> flags & CLIENT_OWNS_STACK)) {
    	    	GC_stack_free(t -> stack, t -> stack_size);
    	    }
    	    if (t -> flags & DETACHED) {
    	    	GC_delete_thread(departed);
    	    } else {
    	        t -> status = status;
    	    	t -> flags |= FINISHED;
    	    	cond_signal(&(t -> join_cv));
    	    	cond_broadcast(&GC_prom_join_cv);
    	    }
    	    UNLOCK();
    	}
    }
}

/* We hold the allocation lock, or caller ensures that 2 instances	*/
/* cannot be invoked concurrently.					*/
void GC_thr_init(void)
{
    GC_thread t;
    thread_t tid;
    int ret;

    if (GC_thr_initialized)
	    return;
    GC_thr_initialized = TRUE;
    GC_min_stack_sz = ((thr_min_stack() + 32*1024 + HBLKSIZE-1)
    		       & ~(HBLKSIZE - 1));
#ifdef MMAP_STACKS
    GC_zfd = open("/dev/zero", O_RDONLY);
    if (GC_zfd == -1)
	    ABORT("Can't open /dev/zero");
#endif /* MMAP_STACKS */
    cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
    cond_init(&GC_create_cv, USYNC_THREAD, 0);
    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(thr_self());
    t -> stack_size = 0;
    t -> flags = DETACHED | CLIENT_OWNS_STACK;
    ret = thr_create(0 /* stack */, 0 /* stack_size */, GC_thr_daemon,
    		     0 /* arg */, THR_DETACHED | THR_DAEMON,
    		     &tid /* thread_id */);
    if (ret != 0) {
	GC_err_printf1("Thr_create returned %ld\n", ret);
    	ABORT("Cant fork daemon");
    }
    thr_setprio(tid, 126);
}

/* We acquire the allocation lock to prevent races with	*/
/* stopping/starting world.					*/
/* This is no more correct than the underlying Solaris 2.X	*/
/* implementation.  Under 2.3 THIS IS BROKEN.			*/
int GC_thr_suspend(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_suspend(target_thread);
    if (result == 0) {
    	t = GC_lookup_thread(target_thread);
    	if (t == 0) ABORT("thread unknown to GC");
        t -> flags |= SUSPNDED;
    }
    UNLOCK();
    return(result);
}

int GC_thr_continue(thread_t target_thread)
{
    GC_thread t;
    int result;

    LOCK();
    result = thr_continue(target_thread);
    if (result == 0) {
    	t = GC_lookup_thread(target_thread);
    	if (t == 0) ABORT("thread unknown to GC");
        t -> flags &= ~SUSPNDED;
    }
    UNLOCK();
    return(result);
}

int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
{
    register GC_thread t;
    int result = 0;

    LOCK();
    if (wait_for == 0) {
        register int i;
        register GC_bool thread_exists;

    	for (;;) {
    	  thread_exists = FALSE;
    	  for (i = 0; i < THREAD_TABLE_SZ; i++) {
    	    for (t = GC_threads[i]; t != 0; t = t -> next) {
              if (!(t -> flags & DETACHED)) {
                if (t -> flags & FINISHED) {
                  goto found;
                }
                thread_exists = TRUE;
              }
            }
          }
          if (!thread_exists) {
              result = ESRCH;
    	      goto out;
          }
          cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
        }
    } else {
        t = GC_lookup_thread(wait_for);
    	if (t == 0 || t -> flags & DETACHED) {
    	    result = ESRCH;
    	    goto out;
    	}
    	if (wait_for == thr_self()) {
    	    result = EDEADLK;
    	    goto out;
    	}
    	while (!(t -> flags & FINISHED)) {
            cond_wait(&(t -> join_cv), &GC_allocate_ml);
    	}
    }
  found:
    if (status) *status = t -> status;
    if (departed) *departed = t -> id;
    cond_destroy(&(t -> join_cv));
    GC_delete_thread(t -> id);
  out:
    UNLOCK();
    return(result);
}

int
GC_thr_create(void *stack_base, size_t stack_size,
              void *(*start_routine)(void *), void *arg, long flags,
              thread_t *new_thread)
{
    int result;
    GC_thread t;
    thread_t my_new_thread;
    word my_flags = 0;
    void * stack = stack_base;

    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    GC_multithreaded++;
    if (stack == 0) {
     	if (stack_size == 0) stack_size = 1024*1024;
     	stack = (void *)GC_stack_alloc(&stack_size);
     	if (stack == 0) {
	    GC_multithreaded--;
     	    UNLOCK();
     	    return(ENOMEM);
     	}
    } else {
    	my_flags |= CLIENT_OWNS_STACK;
    }
    if (flags & THR_DETACHED) my_flags |= DETACHED;
    if (flags & THR_SUSPENDED) my_flags |= SUSPNDED;
    result = thr_create(stack, stack_size, start_routine,
   		        arg, flags & ~THR_DETACHED, &my_new_thread);
    if (result == 0) {
        t = GC_new_thread(my_new_thread);
        t -> flags = my_flags;
        if (!(my_flags & DETACHED)) cond_init(&(t -> join_cv), USYNC_THREAD, 0);
        t -> stack = stack;
        t -> stack_size = stack_size;
        if (new_thread != 0) *new_thread = my_new_thread;
        cond_signal(&GC_create_cv);
    } else {
	GC_multithreaded--;
        if (!(my_flags & CLIENT_OWNS_STACK)) {
      	    GC_stack_free(stack, stack_size);
	}
    }

    UNLOCK();
    return(result);
}

# else /* !GC_SOLARIS_THREADS */

#ifndef LINT
  int GC_no_sunOS_threads;
#endif

#endif
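As a quick orientation for readers of the listing above, the sketch below shows how client code might drive these wrappers. It is a minimal, hypothetical example and is not part of solaris_threads.c: the worker function, the demo function, and the zero stack/flags arguments are illustrative only, and real clients would typically reach GC_thr_create and GC_thr_join through the gc.h macros that redirect thr_create/thr_join to the GC_ versions when GC_SOLARIS_THREADS is defined.

/* Minimal usage sketch (hypothetical, not part of solaris_threads.c). */
#include <stdio.h>
#include <thread.h>        /* Solaris threads: thread_t, THR_* flags */

/* Wrappers defined in the listing above; normally declared via gc.h. */
extern int GC_thr_create(void *stack_base, size_t stack_size,
                         void *(*start_routine)(void *), void *arg,
                         long flags, thread_t *new_thread);
extern int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);

static void *worker(void *arg)
{
    /* Do some work; the returned pointer becomes the join status. */
    return arg;
}

int demo(void)
{
    thread_t tid, departed;
    void *status;

    /* stack_base == 0 and stack_size == 0: the collector allocates a	*/
    /* 1 MB stack via GC_stack_alloc and registers it in GC_threads.	*/
    if (GC_thr_create(0, 0, worker, (void *)0x2a, 0L, &tid) != 0)
        return -1;

    /* Block until that thread finishes; GC_thr_daemon marks it	*/
    /* FINISHED and signals its join condition variable.		*/
    if (GC_thr_join(tid, &departed, &status) != 0)
        return -1;

    printf("thread %lu exited with status %p\n",
           (unsigned long)departed, status);
    return 0;
}

Passing wait_for == 0 to GC_thr_join instead waits for any undetached finished thread via GC_prom_join_cv, mirroring the promiscuous-join semantics of Solaris thr_join.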
