
📄 pt_machdep.c

📁 Source code of Digital's Unix operating system (VAX 4.2)
💻 C
		ptsize = xp->x_size * sizeof (struct pte);
		while (ptsize > nfrag) {
			if (*dp == 0)
				panic("vinitpt: text pt swap addr 0");
			swap(p, *dp++, (caddr_t)tptopte(p, poff),
			    nfrag, B_READ, B_PAGET, swapdev, 0);
			ptsize -= nfrag;
			poff += nfrag/sizeof(struct pte);
		}
		if (*dp == 0)
			panic("vinitpt: text pt swap addr 0");
		swap(p, *dp, (caddr_t)tptopte(p, poff),
		    ptsize, B_READ, B_PAGET, swapdev, 0);
	}
done:
	/*
	 * In the case where we are overlaying ourself with new page
	 * table entries, old user-space translations should be flushed.
	 */
	/*
	 * I don't think this really has to be done. mad
	 */
	newptes(p, tptov(p, 0), xp->x_size);
	if (unlock_flag) {
		(void) splimp();
		smp_lock(&lk_text, LK_RETRY);
	}
	return (1);
}

/*
 * Release data and stack page tables of process p.
 */
vrelpt(p)
	register struct proc *p;
{
	register int a;

	XPRINTF(XPR_VM, "enter vrelpt", 0, 0, 0, 0);
	if (p->p_datapt) {
		a = btokmx(p->p_databr);
		(void) vmemfree(&Usrptmap[a], p->p_datapt);
		rmfree(kernelmap, (long)p->p_datapt, (long)a);
	}
	if (p->p_stakpt) {
		a = btokmx(p->p_stakbr);
		(void) vmemfree(&Usrptmap[a], p->p_stakpt);
		rmfree(kernelmap, (long)p->p_stakpt, (long)a);
	}
	p->p_datapt = 0;
	p->p_stakpt = 0;
	p->p_databr = (struct pte *)0;
	p->p_stakbr = (struct pte *)0;
}

#define	Xu(a)  {register int t; \
		t = up->u_pcb.a; up->u_pcb.a = uq->u_pcb.a; uq->u_pcb.a = t;}
#define	Xup(a) {register struct pte *tp; \
		tp = up->u_pcb.a; up->u_pcb.a = uq->u_pcb.a; uq->u_pcb.a = tp;}
#define	Xp(a)  {register int t; t = p->a; p->a = q->a; q->a = t;}
#define	Xpp(a) {register struct pte *tp; tp = p->a; p->a = q->a; q->a = tp;}

/*
 * Pass the page tables of process p to process q.
 * Used during vfork().  P and q are not symmetric;
 * p is the giver and q the receiver; after calling vpasspt
 * p will be ``cleaned out''.  Thus before vfork() we call vpasspt
 * with the child as q and give it our resources; after vfork() we
 * call vpasspt with the child as p to steal our resources back.
 * We are cognizant of whether we are p or q because we have to
 * be careful to keep our u. area and restore the other u. area from
 * umap after we temporarily put our u. area in both p and q's page tables.
 */
vpasspt(p, q, up, uq, umap)
	register struct proc *p, *q;
	register struct user *up, *uq;
	struct pte *umap;
{
	int s;

	XPRINTF(XPR_VM, "enter vpasspt", 0, 0, 0, 0);
	/*
	 * This mips version is symmetric since the u-area is not mapped in
	 * the page tables that are being swapped.  Note that the umap
	 * argument is not used for the mips version.
	 * TODO: should the processes exchange tlbpids?  They are exchanging
	 * mappings except for the u-block, which probably has the G bit set.
	 */
	s = splhigh();	/* conservative, and slightly paranoid */
	smp_lock(&lk_rq, LK_RETRY);
	/*
	 * p_addr is not going anywhere because it does not point into
	 * the text or data or stack page maps.  We do not swap tlbpids;
	 * rather we let parent and child fault in separate mappings.
	 */
	Xpp(p_textbr); Xpp(p_databr);
	Xp(p_textpt); Xp(p_datapt);
	q->p_stakbr = p->p_stakbr;
	q->p_stakpt = p->p_stakpt;
	if (up != &u) {		/* from child to parent */
		p->p_stakbr = 0;
		p->p_stakpt = 0;
	}
	smp_unlock(&lk_rq);
	splx(s);
}

/*
 * Get u area for process p.  If an old u area is given,
 * then copy the new area from the old, else
 * swap in as specified in the proc structure.
 *
 * Since argument map/newu is potentially shared
 * when an old u. is provided we have to be careful not
 * to block after beginning to use them in this case.
 * (This is not true when called from swapin() with no old u.)
 */
vgetu(p, palloc, map, newu, oldu)
	register struct proc *p;
	int (*palloc)();
	register struct pte *map;
	register struct user *newu;
	struct user *oldu;
{
	register int i;
	int ret, memall();

	XPRINTF(XPR_VM, "enter vgetu", 0, 0, 0, 0);
	if ((*palloc)(p->p_addr, clrnd(UPAGES), p, CSYS, NULL, V_NOOP) == 0)
		return (0);
	/*
	 * New u. pages are to be accessible in map/newu as well
	 * as in process p's virtual memory.
	 */
	for (i = 0; i < UPAGES; i++) {
		*(int *)(p->p_addr + i) |= PG_KW | PG_V | PG_G | PG_M;
		*(int *)(map+i) = *(int *)(p->p_addr+i)
			& PG_PFNUM | PG_V | PG_M | PG_KW;
	}
	setredzone(p->p_addr, (caddr_t)0);
/*	vmaccess(map, (caddr_t)newu, UPAGES, DO_CACHE); */
	newptes(u.u_procp, btop(newu), UPAGES);
	/*
	 * New u.'s come from forking or inswap.
	 */
	if (oldu) {
		/*
		 * Avoid copying the entire ublock by just doing what is
		 * known to be active.  TODO: this hack knows that if an old
		 * u block is provided, it is indeed the active u and
		 * therefore measuring the current stack depth is the right
		 * thing to do.  Also, if stack depth measurement is to be
		 * done, pattern initialization should take place here.
		 */
		if (oldu != &u)
			panic("vgetu bad upage");
		bcopy((caddr_t)oldu, (caddr_t)newu, sizeof(struct user));
		i = stackdepth();
		bcopy((caddr_t)((int)oldu + UPAGES*NBPG - i),
		    (caddr_t)((int)newu + UPAGES*NBPG - i),
		    i);
		newu->u_procp = p;
	} else {
		swap(p, *(p->p_smap->dm_ptdaddr), (caddr_t)0, ctob(UPAGES),
		    B_READ, B_UAREA, swapdev, 0);
		if (newu->u_tsize != p->p_tsize || newu->u_dsize != p->p_dsize ||
		    newu->u_ssize != p->p_ssize || newu->u_procp != p)
			panic("vgetu");
	}
	/*
	 * Initialize the pcb copies of the p0 and p1 region bases and
	 * software page table size from the information in the proc structure.
	 */
	return (1);
}

/*
 * Release u. area, swapping it out if desired.
 *
 * To fix the "stale u-area" problem, this routine will indicate to
 * [v]memall that the u-area must be put in a temp "u" list until
 * after the context switch.  This will only happen if the u-area in
 * question is currently in context.
 */
vrelu(p, swapu)
	register struct proc *p;
{
	register int i;
	struct pte uu[UPAGES];

	XPRINTF(XPR_VM, "enter vrelu", 0, 0, 0, 0);
	if (swapu)
		swap(p, *(p->p_smap->dm_ptdaddr), (caddr_t)0, ctob(UPAGES),
		    B_WRITE, B_UAREA, swapdev, 0);
	for (i = 0; i < UPAGES; i++)
		uu[i] = p->p_addr[i];
	/*
	 * If freeing the user structure and kernel stack
	 * for the current process, have to run a bit longer
	 * using the pages which have already been freed...
	 * block memory allocation from the network by raising ipl.
	 */
	if (u.u_procp == p) {
		(void) splimp();	/* XXX */
		(void) vmemfree(uu, -(clrnd(UPAGES)));
	} else
		(void) vmemfree(uu, clrnd(UPAGES));
}

/*
 * Expand a page table, assigning new kernel virtual
 * space and copying the page table entries over both
 * in the system map and as necessary in the user page table space.
 */
ptexpand(change, region)
	register int change;
	int region;
{
	register struct pte *p1, *p2;
	register int i;
	register int kold;
	int knew;
	int szpt;
	int s;

	XPRINTF(XPR_VM, "enter ptexpand", 0, 0, 0, 0);
	if (change <= 0 || change % CLSIZE)
		panic("ptexpand");
	/*
	 * Change is the number of new page table pages needed.
	 * Kold is the old index in the kernelmap of the page tables.
	 * Allocate a new kernel map segment of size szpt+change for
	 * the page tables.
	 */
top:
#ifdef unneeded
	if (ptforceswap)
		goto bad;
#endif unneeded
	if (region == 0) {
		szpt = u.u_procp->p_datapt;
		kold = btokmx(u.u_procp->p_databr);
	} else {
		szpt = u.u_procp->p_stakpt;
		kold = btokmx(u.u_procp->p_stakbr);
	}
	if ((knew = rmalloc(kernelmap, (long)(szpt+change))) == 0)
		goto bad;
	if (region == 0) {
		if (memall(&Usrptmap[knew+szpt], change, u.u_procp, CSYS,
		    NULL, V_NOOP) == 0) {
			rmfree(kernelmap, (long)(szpt+change), (long)knew);
			goto bad;
		}
	} else {
		if (memall(&Usrptmap[knew], change, u.u_procp, CSYS,
		    NULL, V_NOOP) == 0) {
			rmfree(kernelmap, (long)(szpt+change), (long)knew);
			goto bad;
		}
	}
	/*
	 * Copy over stack and data page tables.
	 */
	if (region == 0)
		kmcopy(knew, kold, szpt);
	else
		kmcopy(knew+change, kold, szpt);
	/*
	 * Validate and clear the newly allocated page table pages in
	 * the new region of the kernelmap.
	 */
	if (region == 0)
		i = knew + szpt;
	else
		i = knew;
	p1 = &Usrptmap[i];
	p2 = p1 + change;
	while (p1 < p2) {
		mapin(btop(kmxtob(i)), p1->pg_pfnum, (int)(PG_V|PG_KW));
#ifdef USE_IDLE
		/* don't bother if it's already done */
		if (cmap[pgtocm(p1->pg_pfnum)].c_zero) {
			extern int v_zero_pt_hits;

			v_zero_pt_hits++;
			cmap[pgtocm(p1->pg_pfnum)].c_zero = 0;
		} else {
			extern int v_zero_pt_misses;

			v_zero_pt_misses++;
			clearseg(p1->pg_pfnum);
		}
#else
		clearseg(p1->pg_pfnum);
#endif USE_IDLE
		p1++;
		i++;
	}
	/*
	 * Now switch to the new page tables.
	 */
	s = splhigh();	/* conservative */
	if (region == 0) {
		u.u_procp->p_databr = kmxtob(knew);
		u.u_procp->p_datapt += change;
	} else {
		u.u_procp->p_stakpt += change;
		u.u_procp->p_stakbr = kmxtob(knew);
	}
	splx(s);
	/*
	 * Finally, free old kernelmap.
	 */
	if (szpt)
		rmfree(kernelmap, (long)szpt, (long)kold);
	return;
bad:
	/*
	 * Swap out the process so that the unavailable
	 * resource will be allocated upon swapin.
	 */
	SET_P_VM(u.u_procp, SSWAP);
	do {
		swapself++;
		sleep((caddr_t)&swapself, PSWP);
	} while (u.u_procp->p_vm & SSWAP);
}

blkcpy(src, des, count)
	caddr_t src;
	caddr_t des;
	int count;
{
	bcopy(src, des, count);
}
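Note on the Xp()/Xpp() macros used by vpasspt() above: they simply exchange one integer-valued or pointer-valued field between the giver and receiver proc structures, which is how the page-table resources change hands across vfork(). The following standalone sketch illustrates that exchange pattern in isolation; the `toyproc`/`toypte` types and field names are hypothetical stand-ins invented for the example, not the kernel's real structures.

/*
 * Minimal, self-contained sketch of the Xp()/Xpp()-style field-swap
 * pattern.  All names here (toypte, toyproc, t_ptcnt, t_ptbase, passpt)
 * are hypothetical; only the macro shape mirrors the kernel code above.
 */
#include <stdio.h>

struct toypte { int pg_pfnum; };

struct toyproc {
	int t_ptcnt;			/* page-table page count */
	struct toypte *t_ptbase;	/* page-table base pointer */
};

/* swap an int-valued field between p and q */
#define Xp(a)  { int t;             t = p->a;  p->a = q->a; q->a = t;  }
/* swap a pointer-valued field between p and q */
#define Xpp(a) { struct toypte *tp; tp = p->a; p->a = q->a; q->a = tp; }

static void
passpt(struct toyproc *p, struct toyproc *q)
{
	/* p is the giver, q the receiver, as in vpasspt() */
	Xp(t_ptcnt);
	Xpp(t_ptbase);
}

int
main(void)
{
	struct toypte pt[4];
	struct toyproc parent = { 4, pt };
	struct toyproc child  = { 0, NULL };

	passpt(&parent, &child);	/* before vfork: child receives resources */
	printf("child now holds %d pt pages\n", child.t_ptcnt);
	passpt(&child, &parent);	/* after vfork: parent takes them back */
	printf("parent holds %d pt pages again\n", parent.t_ptcnt);
	return 0;
}

In the real kernel the swap happens under splhigh() and the lk_rq lock, and the stack page table is deliberately not swapped symmetrically; the sketch omits that locking since it only demonstrates the macro mechanics.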
