⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 proc.c

📁 在x86平台上运行不可信任代码的sandbox。
💻 C
📖 第 1 页 / 共 2 页
字号:
		/*
		 * NOTE(review): this is the tail of a sleep()-style wait
		 * primitive whose beginning lies before this excerpt.
		 * If the process was told to exit while closing its fd
		 * group, finish that close, then raise Eintr to the
		 * caller's error label.
		 */
		if(up->procctl == Proc_exitme && up->closingfgrp)
			forceclosefgrp();
		error(Eintr);
	}
	splx(s);
}

/*
 * Sleep condition used by tsleep(): true once the timer has fired
 * (twakeup cleared up->trend) or the caller's own condition holds.
 */
static int
tfn(void *arg)
{
	return up->trend == nil || up->tfn(arg);
}

/*
 * Timer callback armed by tsleep(): clear the sleeping process's
 * trend pointer and wake it.  ureg is unused.
 */
void
twakeup(Ureg *ureg, Timer *t)
{
	Proc *p;
	Rendez *trend;

	p = t->ta;
	trend = p->trend;
	p->trend = 0;
	if(trend)
		wakeup(trend);
}

/*
 * Sleep on r until fn(arg) is true or ms milliseconds pass,
 * whichever comes first.  Uses the per-process timer (up->timer);
 * a still-armed timer on entry is reported and cancelled.
 */
void
tsleep(Rendez *r, int (*fn)(void*), void *arg, ulong ms)
{
	if (up->timer.tt){
		/* shouldn't happen: a previous timer is still queued */
		print("tsleep: timer active: mode %d, tf 0x%lux\n", up->timer.tmode, up->timer.tf);
		timerdel(&up->timer);
	}
	up->timer.tns = MS2NS(ms);	/* presumably ms -> ns; name-based, confirm */
	up->timer.tf = twakeup;
	up->timer.tmode = Trelative;
	up->timer.ta = up;
	up->trend = r;
	up->tfn = fn;
	timeradd(&up->timer);

	if(waserror()){
		/* interrupted: make sure the timer can't fire later */
		timerdel(&up->timer);
		nexterror();
	}
	sleep(r, tfn, arg);
	if (up->timer.tt)
		timerdel(&up->timer);
	up->timer.twhen = 0;
	poperror();
}

/*
 *  Expects that only one process can call wakeup for any given Rendez.
 *  We hold both locks to ensure that r->p and p->r remain consistent.
 *  Richard Miller has a better solution that doesn't require both to
 *  be held simultaneously, but I'm a paranoid - presotto.
 *
 *  Returns the process woken, or nil if nobody was waiting on r.
 */
Proc*
wakeup(Rendez *r)
{
	Proc *p;
	int s;

	s = splhi();

	lock(&r->lk);
	p = r->p;

	if(p != nil){
		lock(&p->rlock);
		/* sanity: the sleeper and the Rendez must point at each other */
		if(p->state != Wakeme || p->r != r){
			iprint("%p %p %d\n", p->r, r, p->state);
			panic("wakeup: state");
		}
		r->p = nil;
		p->r = nil;
		ready(p);
		unlock(&p->rlock);
	}
	unlock(&r->lk);

	splx(s);

	return p;
}

/*
 *  if waking a sleeping process, this routine must hold both
 *  p->rlock and r->lock.  However, it can't know them in
 *  the same order as wakeup causing a possible lock ordering
 *  deadlock.  We break the deadlock by giving up the p->rlock
 *  lock if we can't get the r->lock and retrying.
 *
 *  Post note n (of class flag) to process p; returns 1 if the note
 *  was queued, 0 if the note buffer was full.  If dolock, takes
 *  p->debug around the note-queue update.  Also kicks p out of a
 *  sleep() or rendezvous so it notices the note promptly.
 */
int
postnote(Proc *p, int dolock, char *n, int flag)
{
	int s, ret;
	Rendez *r;
	Proc *d, **l;

	if(dolock)
		qlock(&p->debug);

	/* kernel notes overwrite pending user notes when not notifiable */
	if(flag != NUser && (p->notify == 0 || p->notified))
		p->nnote = 0;

	ret = 0;
	if(p->nnote < NNOTE) {
		strcpy(p->note[p->nnote].msg, n);
		p->note[p->nnote++].flag = flag;
		ret = 1;
	}
	p->notepending = 1;
	if(dolock)
		qunlock(&p->debug);

	/* this loop is to avoid lock ordering problems. */
	for(;;){
		s = splhi();
		lock(&p->rlock);
		r = p->r;

		/* waiting for a wakeup? */
		if(r == nil)
			break;	/* no */

		/* try for the second lock */
		if(canlock(&r->lk)){
			if(p->state != Wakeme || r->p != p)
				panic("postnote: state %d %d %d", r->p != p, p->r != r, p->state);
			p->r = nil;
			r->p = nil;
			ready(p);
			unlock(&r->lk);
			break;
		}

		/* give other process time to get out of critical section and try again */
		unlock(&p->rlock);
		splx(s);
		sched();
	}
	unlock(&p->rlock);
	splx(s);

	if(p->state != Rendezvous)
		return ret;

	/* Try and pull out of a rendezvous */
	lock(&p->rgrp->ref.lk);
	if(p->state == Rendezvous) {
		/* hand the sleeper a failure value and unhook it from the tag chain */
		p->rendval = ~0;
		l = &REND(p->rgrp, p->rendtag);
		for(d = *l; d; d = d->rendhash) {
			if(d == p) {
				*l = p->rendhash;
				break;
			}
			l = &d->rendhash;
		}
		ready(p);
	}
	unlock(&p->rgrp->ref.lk);
	return ret;
}

/*
 * weird thing: keep at most NBROKEN around
 */
#define	NBROKEN 4
struct
{
	QLock	lk;
	int	n;
	Proc	*p[NBROKEN];
}broken;

/*
 * Park the current process (p == up) in the Broken state so a
 * debugger can examine it.  When the table is full the oldest
 * broken process is released (readied) to make room.
 * Does not return to the caller's time slice until re-readied.
 */
void
addbroken(Proc *p)
{
	qlock(&broken.lk);
	if(broken.n == NBROKEN) {
		ready(broken.p[0]);
		memmove(&broken.p[0], &broken.p[1], sizeof(Proc*)*(NBROKEN-1));
		--broken.n;
	}
	broken.p[broken.n++] = p;
	qunlock(&broken.lk);

	p->state = Broken;
	p->psstate = 0;
	sched();
}

/*
 * Remove p from the broken table and make it runnable again.
 */
void
unbreak(Proc *p)
{
	int b;

	qlock(&broken.lk);
	for(b=0; b < broken.n; b++)
		if(broken.p[b] == p) {
			broken.n--;
			/* close the gap left by the removed entry */
			memmove(&broken.p[b], &broken.p[b+1],
					sizeof(Proc*)*(NBROKEN-(b+1)));
			ready(p);
			break;
		}
	qunlock(&broken.lk);
}

/*
 * Release every broken process; returns how many were freed.
 */
int
freebroken(void)
{
	int i, n;

	qlock(&broken.lk);
	n = broken.n;
	for(i=0; i<n; i++) {
		ready(broken.p[i]);
		broken.p[i] = 0;
	}
	broken.n = 0;
	qunlock(&broken.lk);
	return n;
}

/*
 * Terminate the current process.  Releases its groups, dot, and
 * segments; posts a wait record (with exitstr) to the parent unless
 * this is a kernel process; frees any children's unclaimed wait
 * records; wakes a waiting debugger.  If !freemem the process is
 * parked via addbroken() for post-mortem inspection before teardown.
 * Never returns: ends in sched() with state Moribund.
 */
void
pexit(char *exitstr, int freemem)
{
	Proc *p;
	Segment **s, **es;
	long utime, stime;
	Waitq *wq, *f, *next;
	Fgrp *fgrp;
	Egrp *egrp;
	Rgrp *rgrp;
	Pgrp *pgrp;
	Chan *dot;
	void (*pt)(Proc*, int, vlong);

	up->alarm = 0;
	if (up->timer.tt)
		timerdel(&up->timer);
	pt = proctrace;
	if(pt)
		pt(up, SDead, 0);

	/* nil out all the resources under lock (free later) */
	qlock(&up->debug);
	fgrp = up->fgrp;
	up->fgrp = nil;
	egrp = up->egrp;
	up->egrp = nil;
	rgrp = up->rgrp;
	up->rgrp = nil;
	pgrp = up->pgrp;
	up->pgrp = nil;
	dot = up->dot;
	up->dot = nil;
	qunlock(&up->debug);

	if(fgrp)
		closefgrp(fgrp);
	if(egrp)
		closeegrp(egrp);
	if(rgrp)
		closergrp(rgrp);
	if(dot)
		cclose(dot);
	if(pgrp)
		closepgrp(pgrp);

	/*
	 * if not a kernel process and have a parent,
	 * do some housekeeping.
	 */
	if(up->kp == 0) {
		p = up->parent;
		if(p == 0) {
			if(exitstr == 0)
				exitstr = "unknown";
			panic("boot process died: %s", exitstr);
		}

		/* swallow any error from smalloc's waits */
		while(waserror())
			;

		wq = smalloc(sizeof(Waitq));
		poperror();

		wq->w.pid = up->pid;
		utime = up->time[TUser] + up->time[TCUser];
		stime = up->time[TSys] + up->time[TCSys];
		wq->w.time[TUser] = tk2ms(utime);
		wq->w.time[TSys] = tk2ms(stime);
		wq->w.time[TReal] = tk2ms(msec() - up->time[TReal]);
		if(exitstr && exitstr[0])
			snprint(wq->w.msg, sizeof(wq->w.msg), "%s %lud: %s", up->text, up->pid, exitstr);
		else
			wq->w.msg[0] = '\0';

		lock(&p->exl);
		/*
		 * Check that parent is still alive.
		 */
		if(p->pid == up->parentpid && p->state != Broken) {
			p->nchild--;
			p->time[TCUser] += utime;
			p->time[TCSys] += stime;
			/*
			 * If there would be more than 128 wait records
			 * pending for my parent, then don't leave a wait
			 * record behind.  This helps prevent badly written
			 * daemon processes from accumulating lots of wait
			 * records.
			 */
			if(p->nwait < 128) {
				wq->next = p->waitq;
				p->waitq = wq;
				p->nwait++;
				wq = nil;	/* ownership passed to parent */
				wakeup(&p->waitr);
			}
		}
		unlock(&p->exl);
		if(wq)
			free(wq);
	}

	if(!freemem)
		addbroken(up);

	qlock(&up->seglock);
	es = &up->seg[NSEG];
	for(s = up->seg; s < es; s++) {
		if(*s) {
			putseg(*s);
			*s = 0;
		}
	}
	qunlock(&up->seglock);

	lock(&up->exl);		/* Prevent my children from leaving waits */
	pidunhash(up);
	up->pid = 0;
	wakeup(&up->waitr);
	unlock(&up->exl);

	/* discard wait records my dead children left for me */
	for(f = up->waitq; f; f = next) {
		next = f->next;
		free(f);
	}

	/* release debuggers */
	qlock(&up->debug);
	if(up->pdbg) {
		wakeup(&up->pdbg->sleep);
		up->pdbg = 0;
	}
	qunlock(&up->debug);

	/* Sched must not loop for these locks */
	lock(&procalloc.lk);
	lock(&palloc.lk);

	up->state = Moribund;
	sched();
	panic("pexit");
}

/*
 * Sleep condition for pwait(): true when process x has a wait record.
 */
int
haswaitq(void *x)
{
	Proc *p;

	p = (Proc *)x;
	return p->waitq != 0;
}

/*
 * Wait for a child to exit; copies its wait message into *w (if w
 * is non-nil) and returns the child's pid.  Errors with Einuse if
 * another wait is already in progress, Enochild if there is nothing
 * to wait for.
 */
ulong
pwait(Waitmsg *w)
{
	ulong cpid;
	Waitq *wq;

	if(!canqlock(&up->qwaitr))
		error(Einuse);

	if(waserror()) {
		qunlock(&up->qwaitr);
		nexterror();
	}

	lock(&up->exl);
	if(up->nchild == 0 && up->waitq == 0) {
		unlock(&up->exl);
		error(Enochild);
	}
	unlock(&up->exl);

	sleep(&up->waitr, haswaitq, up);

	lock(&up->exl);
	wq = up->waitq;
	up->waitq = wq->next;
	up->nwait--;
	unlock(&up->exl);

	qunlock(&up->qwaitr);
	poperror();

	if(w)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}

/*
 * Return the i'th slot of the process table.  No bounds check;
 * callers are responsible for i being in range.
 */
Proc*
proctab(int i)
{
	return &procalloc.arena[i];
}

/*
 * Print a one-line console summary of process p (pid, name, pcs,
 * state, times, bss top, lock info, priority).
 */
void
dumpaproc(Proc *p)
{
	ulong bss;
	char *s;

	if(p == 0)
		return;

	bss = 0;
	if(p->seg[BSEG])
		bss = p->seg[BSEG]->top;

	s = p->psstate;
	if(s == 0)
		s = statename[p->state];
	print("%3lud:%10s pc %8lux dbgpc %8lux  %8s (%s) ut %ld st %ld bss %lux qpc %lux nl %lud nd %lud lpc %lux pri %lud\n",
		p->pid, p->text, p->pc, dbgpc(p),  s, statename[p->state],
		p->time[0], p->time[1], bss, p->qpc, p->nlocks.ref, p->delaysched, p->lastlock ? p->lastlock->pc : 0, p->priority);
}

/*
 * Dump every non-Dead process in the table to the console.
 */
void
procdump(void)
{
	int i;
	Proc *p;

	if(up)
		print("up %lud\n", up->pid);
	else
		print("no current process\n");

	for(i=0; i<conf.nproc; i++) {
		p = &procalloc.arena[i];
		if(p->state == Dead)
			continue;

		dumpaproc(p);
	}
}

/*
 *  wait till all processes have flushed their mmu
 *  state about segment s
 */
void
procflushseg(Segment *s)
{
	int i, ns, nm, nwait;
	Proc *p;

	/*
	 *  tell all processes with this
	 *  segment to flush their mmu's
	 */
	nwait = 0;
	for(i=0; i<conf.nproc; i++) {
		p = &procalloc.arena[i];
		if(p->state == Dead)
			continue;
		for(ns = 0; ns < NSEG; ns++)
			if(p->seg[ns] == s){
				p->newtlb = 1;
				for(nm = 0; nm < conf.nmach; nm++){
					if(MACHP(nm)->proc == p){
						MACHP(nm)->flushmmu = 1;
						nwait++;
					}
				}
				break;
			}
	}

	if(nwait == 0)
		return;

	/*
	 *  wait for all processors to take a clock interrupt
	 *  and flush their mmu's
	 */
	for(nm = 0; nm < conf.nmach; nm++)
		if(MACHP(nm) != m)
			while(MACHP(nm)->flushmmu)
				sched();
}

/*
 * Dump the run queues, highest priority first, then the ready count.
 */
void
scheddump(void)
{
	Proc *p;
	Schedq *rq;

	for(rq = &runq[Nrq-1]; rq >= runq; rq--){
		if(rq->head == 0)
			continue;
		print("rq%ld:", rq-runq);
		for(p = rq->head; p; p = p->rnext)
			print(" %lud(%lud)", p->pid, msec() - p->readytime);
		print("\n");
		delay(150);
	}
	print("nrdy %d\n", nrdy);
}

/*
 * Create a kernel process named name running func(arg).  Inherits
 * slash/dot/notes from the creator (if any); owned by eve; all
 * kprocs share a single private process group.  The new process is
 * readied before return.
 */
void
kproc(char *name, void (*func)(void *), void *arg)
{
	Proc *p;
	static Pgrp *kpgrp;	/* shared by all kernel processes */

	p = newproc();
	p->psstate = 0;
	p->procmode = 0640;
	p->kp = 1;
	p->noswap = 1;

	if(up){
		p->fpsave = up->fpsave;
		p->scallnr = up->scallnr;
		p->s = up->s;
		p->slash = up->slash;
		p->dot = up->dot;
		if(p->dot)
			incref(&p->dot->ref);

		memmove(p->note, up->note, sizeof(p->note));
		p->nnote = up->nnote;
		p->lastnote = up->lastnote;
		p->notify = up->notify;
	}
	p->notified = 0;
	p->ureg = 0;
	p->dbgreg = 0;
	p->nerrlab = 0;

	procpriority(p, PriKproc, 0);

	kprocchild(p, func, arg);

	kstrdup(&p->user, eve);
	kstrdup(&p->text, name);
	if(kpgrp == 0)
		kpgrp = newpgrp();
	p->pgrp = kpgrp;
	incref(&kpgrp->ref);

	memset(p->time, 0, sizeof(p->time));
	p->time[TReal] = msec();
	ready(p);
}

/*
 *  called splhi() by notify().  See comment in notify for the
 *  reasoning.
 *
 *  Act on p->procctl for the current process: exit, or stop and
 *  wait for a debugger.
 */
void
procctl(Proc *p)
{
	char *state;
	ulong s;

	switch(p->procctl) {
	case Proc_exitbig:
		spllo();
		pexit("Killed: Insufficient physical memory", 1);

	case Proc_exitme:
		spllo();		/* pexit has locks in it */
		pexit("Killed", 1);

	case Proc_traceme:
		if(p->nnote == 0)
			return;
		/* No break */

	case Proc_stopme:
		p->procctl = 0;
		state = p->psstate;
		p->psstate = "Stopped";
		/* free a waiting debugger */
		s = spllo();
		qlock(&p->debug);
		if(p->pdbg) {
			wakeup(&p->pdbg->sleep);
			p->pdbg = 0;
		}
		qunlock(&p->debug);
		splhi();
		p->state = Stopped;
		sched();
		p->psstate = state;
		splx(s);
		return;
	}
}

#include "errstr.h"

/*
 * Raise a kernel error: record err as the process error string and
 * jump to the most recent waserror() label via nexterror().
 * Does not return.
 */
void
error(char *err)
{
	spllo();

	assert(up->nerrlab < NERR);
	kstrcpy(up->errstr, err, ERRMAX);
	/* NOTE(review): records the raise site in the top label slot —
	 * presumably for debugging; confirm against setlabel's contract */
	setlabel(&up->errlab[NERR-1]);
	nexterror();
}

/*
 * Pop and jump to the enclosing waserror() label.  Does not return.
 */
void
nexterror(void)
{
	gotolabel(&up->errlab[--up->nerrlab]);
}

/*
 * Report exhaustion of a named resource on the console and raise it
 * as an error.
 */
void
exhausted(char *resource)
{
	char buf[ERRMAX];

	sprint(buf, "no free %s", resource);
	iprint("%s\n", buf);
	error(buf);
}

/*
 * Out-of-memory killer: pick the largest killable user process
 * (writable by others, or not owned by eve), mark it and any process
 * sharing its BSEG for Proc_exitbig, and free what segments can be
 * locked immediately.  why is included in the console message.
 */
void
killbig(char *why)
{
	int i;
	Segment *s;
	ulong l, max;
	Proc *p, *ep, *kp;

	max = 0;
	kp = 0;
	ep = procalloc.arena+conf.nproc;
	for(p = procalloc.arena; p < ep; p++) {
		if(p->state == Dead || p->kp)
			continue;
		l = 0;
		/* total the sizes of all segments but seg[0] */
		for(i=1; i<NSEG; i++) {
			s = p->seg[i];
			if(s != 0)
				l += s->top - s->base;
		}
		if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
			kp = p;
			max = l;
		}
	}

	/* NOTE(review): kp is dereferenced without a nil check — if no
	 * process qualifies this faults; presumably unreachable when
	 * memory is actually exhausted, but worth confirming */
	print("%lud: %s killed: %s\n", kp->pid, kp->text, why);
	for(p = procalloc.arena; p < ep; p++) {
		if(p->state == Dead || p->kp)
			continue;
		if(p != kp && p->seg[BSEG] && p->seg[BSEG] == kp->seg[BSEG])
			p->procctl = Proc_exitbig;
	}
	kp->procctl = Proc_exitbig;
	for(i = 0; i < NSEG; i++) {
		s = kp->seg[i];
		if(s != 0 && canqlock(&s->lk)) {
			mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
			qunlock(&s->lk);
		}
	}
}

/*
 *  change ownership to 'new' of all processes owned by 'old'.  Used when
 *  eve changes.
 */
void
renameuser(char *old, char *new)
{
	Proc *p, *ep;

	ep = procalloc.arena+conf.nproc;
	for(p = procalloc.arena; p < ep; p++)
		if(p->user!=nil && strcmp(old, p->user)==0)
			kstrdup(&p->user, new);
}

/*
 *  time accounting called by clock() splhi'd
 */
void
accounttime(void)
{
	Proc *p;
	ulong n, per;
	static ulong nrun;

	p = m->proc;
	if(p) {
		nrun++;
		p->time[p->insyscall]++;
	}

	/* calculate decaying duty cycles */
	n = perfticks();
	per = n - m->perf.last;
	m->perf.last = n;
	per = (m->perf.period*(HZ-1) + per)/HZ;
	if(per != 0)
		m->perf.period = per;

	m->perf.avg_inidle = (m->perf.avg_inidle*(HZ-1)+m->perf.inidle)/HZ;
	m->perf.inidle = 0;

	m->perf.avg_inintr = (m->perf.avg_inintr*(HZ-1)+m->perf.inintr)/HZ;
	m->perf.inintr = 0;

	/* only one processor gets to compute system load averages */
	if(m->machno != 0)
		return;

	/*
	 * calculate decaying load average.
	 * if we decay by (n-1)/n then it takes
	 * n clock ticks to go from load L to .36 L once
	 * things quiet down.  it takes about 5 n clock
	 * ticks to go to zero.  so using HZ means this is
	 * approximately the load over the last second,
	 * with a tail lasting about 5 seconds.
	 */
	n = nrun;
	nrun = 0;
	n = (nrdy+n)*1000;
	m->load = (m->load*(HZ-1)+n)/HZ;
}

/*
 * Insert p at the head of its pid hash chain under procalloc.lk.
 */
static void
pidhash(Proc *p)
{
	int h;

	h = p->pid % nelem(procalloc.ht);
	lock(&procalloc.lk);
	p->pidhash = procalloc.ht[h];
	procalloc.ht[h] = p;
	unlock(&procalloc.lk);
}

/*
 * Remove p from its pid hash chain under procalloc.lk.
 */
static void
pidunhash(Proc *p)
{
	int h;
	Proc **l;

	h = p->pid % nelem(procalloc.ht);
	lock(&procalloc.lk);
	for(l = &procalloc.ht[h]; *l != nil; l = &(*l)->pidhash)
		if(*l == p){
			*l = p->pidhash;
			break;
		}
	unlock(&procalloc.lk);
}

/*
 * Map a pid to its index in the process table via the pid hash;
 * returns -1 if no live entry has that pid.
 */
int
procindex(ulong pid)
{
	Proc *p;
	int h;
	int s;

	s = -1;
	h = pid % nelem(procalloc.ht);
	lock(&procalloc.lk);
	for(p = procalloc.ht[h]; p != nil; p = p->pidhash)
		if(p->pid == pid){
			s = p - procalloc.arena;
			break;
		}
	unlock(&procalloc.lk);
	return s;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -