⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 vm_pt.c

📁 <B>Digital的Unix操作系统VAX 4.2源码</B>
💻 C
📖 第 1 页 / 共 3 页
字号:
	 */
#ifdef mips
        /*
         * I don't think this really has to be done. mad
         */
        newptes(p, tptov(p, 0), xp->x_size);
#endif mips
#ifdef vax
	/* if we are overlaying our own address space, flush stale	*/
	/* translations now; otherwise mark the proc so the flush	*/
	/* happens when it is next placed in context.			*/
	if (p == u.u_procp)
		newptes(tptopte(p, 0), tptov(p, 0), xp->x_size);
	else
		p->p_flag |= SPTECHG;
#endif vax
	return(1);
}

#ifdef vax
/* VINITSMPT - Initialize shared memory portion of page table	*/
/*	for the given shared memory segment.			*/
/* As a short cut, to get this done in a hurry, I have made	*/
/*	shared memory segment page tables wired-down. Therefore,*/
/*	most of the following code, which is analogous to	*/
/*	VINITPT, is commented out. (To be resurrected in some	*/
/*	future version, I hope.)				*/
/* LeRoy Fundingsland    1/18/85    DEC		SHMEM		*/
vinitsmpt(p, sp)
    register struct proc *p;		/* process attaching the segment */
    register struct smem *sp;		/* shared memory segment descriptor */
{
#ifdef mips
	XPRINTF(XPR_VM,"enter vinitsmpt",0,0,0,0);
#endif mips
	register struct pte *pte, *pte1;
	register int i, smindex;
	register int smsize;		/* SMS size in clicks	*/
#ifdef notdef
	struct pte proto;
#endif notdef

	if(sp == NULL)
		return;
	if(p->p_sm == (struct p_sm *) NULL) {
		panic("vinitsmpt: p_sm");
	}
	/* locate the slot in the process' shared-memory table that	*/
	/* is attached to this segment; it must exist or we panic.	*/
	for(smindex=0; smindex < sminfo.smseg; smindex++)
		if(p->p_sm[smindex].sm_p == sp)
			break;
	if(smindex >= sminfo.smseg)
		panic("vinitsmpt");
	pte = p->p_p0br + p->p_sm[smindex].sm_spte;
	smsize = clrnd(btoc(sp->sm_size));
	/* set up process' ptes */
	/* copy the segment's wired-down ptes into the process page	*/
	/* table, or-ing in the per-process protection flags.		*/
	pte1 = sp->sm_ptaddr;
	for(i=0; i < smsize; i++)
/*		if (pte1->pg_fod)*/
			*(int *)pte++ = *(int *)pte1++ |
				(int)p->p_sm[smindex].sm_pflag;
/*		else
			*(int *)pte++ = *(int *)pte1++ |  PG_ALLOC |
				(int)p->p_sm[smindex].sm_pflag;*/
	goto done;
#ifdef notdef
	/*
	 * NOTE(review): this dead (#ifdef notdef) code was copied from
	 * VINITPT and still references `xp', which is not a parameter of
	 * this function -- it would not compile if resurrected as-is.
	 *
	 * Initialize text page tables, zfod if we are loading
	 * the text now; unless the process is demand loaded,
	 * this will suffice as the text will henceforth either be
	 * read from a file or demand paged in.
	 */
	*(int *)&proto = PG_URKR;
	if (xp->x_flag & XLOAD) {
		proto.pg_fod = 1;
		((struct fpte *)&proto)->pg_fileno = PG_FZERO;
	}
	for (i = 0; i < xp->x_size; i++)
		*pte++ = proto;
	if ((xp->x_flag & XPAGI) == 0)
		goto done;
	/*
	 * Text is demand loaded.  If process is not loaded (i.e. being
	 * swapped in) then retrieve page tables from swap area.  Otherwise
	 * this is the first time and we must initialize the page tables
	 * from the blocks in the file system.
	 */
	if (xp->x_flag & XLOAD)
		vinifod((struct fpte *)tptopte(p, 0), PG_FTEXT, xp->x_gptr,
		    (daddr_t)1, xp->x_size);
	else
		swap(p, xp->x_ptdaddr, (caddr_t)tptopte(p, 0),
		    xp->x_size * sizeof (struct pte), B_READ,
		    B_PAGET, swapdev, 0);
#endif notdef
done:
	/*
	 * In the case where we are overlaying ourself with new page
	 * table entries, old user-space translations should be flushed.
	 */
	if (p == u.u_procp)
		newptes(tptopte(p,0), tptov(p,p->p_sm[smindex].sm_spte),
							smsize);
	else
		p->p_flag |= SPTECHG;
}

/*
 * Update the page tables of all processes linked
 * to a particular text segment, by distributing
 * dpte to the text page at virtual frame v.
 *
 * Note that invalidation in the translation buffer for
 * the current process is the responsibility of the caller.
 */
distpte(xp, tp, dpte)
	struct text *xp;		/* text segment whose sharers to update */
	register size_t tp;		/* text page frame within the segment */
	register struct pte *dpte;	/* cluster of ptes to distribute */
{
	register struct proc *p;
	register struct pte *pte;
	register int i;

	/* walk every process attached to the text segment and copy	*/
	/* the cluster of ptes into its page table (skipping the one	*/
	/* whose ptes dpte already points at).				*/
	for (p = xp->x_caddr; p; p = p->p_xlink) {
		pte = tptopte(p, tp);
		p->p_flag |= SPTECHG;
		if (pte != dpte)
			for (i = 0; i < CLSIZE; i++)
				pte[i] = dpte[i];
	}
}

/* DISTSMPTE - Update the page tables of all processes linked	*/
/*	to a particular shared memory segment, by distributing	*/
/*	dpte to the shared memory page at virtual frame smp.	*/
/*								*/
/*	Note that invalidation in the translation buffer for	*/
/*	the current process is the responsibility of the caller.*/
/* 			 SHMEM					*/
distsmpte(sp, smp, dpte, cm)
    register struct smem *sp;
    size_t smp;
    register struct pte *dpte;		/* Global PTE */
    int cm;				/* clear pg_m flag */
{
	register struct pte *pte;
	register int i, j;
	register struct proc *p;

	/* if the SMS is currently not attached	*/
	/* to any process then return.		*/
	if(sp->sm_ccount == 0 || sp->sm_caddr == NULL)
		return;
	p = sp->sm_caddr;
	/* NOTE(review): the panic strings below say "distmpte" --	*/
	/* presumably a typo for "distsmpte"; left as-is since they	*/
	/* are runtime strings.						*/
	if(p->p_sm == (struct p_sm *) NULL) {
		panic("distmpte: p_sm1");
	}
	/* find this segment's slot in the first attached process	*/
	for(i=0; i < sminfo.smseg; i++)
		if(p->p_sm[i].sm_p == sp)
			break;
	if(i >= sminfo.smseg)
		panic("distsmpte");
	/* if requested, clear pg_m bit in global PTE */
	if (cm) {
		dpte->pg_m = 0;
		distcl(dpte);
	}
	/* walk the linked list of attached processes, copying the	*/
	/* relevant pte fields into each one's page table.		*/
	while(p){
		pte = p->p_p0br + p->p_sm[i].sm_spte+smp;
		/* this panic should eventually go away */
		if (pte->pg_v && dpte->pg_fod) {
			panic("distsmpte: PG_V && PG_FOD");
		}
		p->p_flag |= SPTECHG;
		/* NOTE: If SM ever has FFOD, this will change */
		if (dpte->pg_fod) {
			/* fill-on-demand: propagate only fod/fileno	*/
			/* CAREFUL: I'm incrementing 'pte' */
			for (j=0; j < CLSIZE; j++, pte++) {
				((struct fpte *) pte)->pg_fod = 
					((struct fpte *) dpte)->pg_fod;
				((struct fpte *) pte)->pg_fileno = 
					((struct fpte *) dpte)->pg_fileno;
			}
		} else {
			/* real pages: propagate pfnum/valid/alloc bits	*/
			/* CAREFUL: I'm incrementing 'pte' */
			for (j=0; j < CLSIZE; j++, pte++) {
				pte->pg_pfnum = (dpte + j)->pg_pfnum;
				pte->pg_v = (dpte + j)->pg_v;
				pte->pg_alloc = (dpte + j)->pg_alloc; 
				if (cm)
					pte->pg_m = 0;
			}
		}
		/* now for next proc in linked list	*/
		if((p = p->p_sm[i].sm_link) == NULL)
			break;
		if(p->p_sm == (struct p_sm *) NULL) {
			panic("distmpte: p_sm2");
		}
		/* re-locate the segment's slot index for the new proc	*/
		for(i=0; i < sminfo.smseg; i++)
			if(p->p_sm[i].sm_p == sp)
				break;
		if(i >= sminfo.smseg)
			panic("distsmpte #2");
	}
}

/*  
 *  DIRTYSM -- checks for dirty ptes in process space.
 *  Returns 1 if any process attached to the segment has a dirty
 *  pte cluster at offset smp, else 0.
 */
dirtysm(sp,smp)
register struct smem *sp;	/* pointer to smem structure */
register size_t smp;		/* offset into SMS */
{
register struct proc *p;	/* Proc pointer */
register struct pte *pte;	/* pointer to Proc's pte */
register int i;		
#ifdef mips
	XPRINTF(XPR_VM,"enter dirtysm",0,0,0,0);
#endif mips

	/* if the SMS is currently not attached to any process then return */
	if(sp->sm_ccount == 0)
		return (0);
	/* SMS offset must be on cluster boundary */
	if (smp % CLSIZE)
		panic("dirtysm: smp");
	/* find SMS of first attached proc */
        p = sp->sm_caddr;
	if(p->p_sm == (struct p_sm *) NULL) {
		panic("dirtysm: p_sm1");
	}
	for(i = 0; i < sminfo.smseg; i++)
		if(p->p_sm[i].sm_p == sp)
			break;
	if(i >= sminfo.smseg)
		panic("dirtysm: no SMS");
	/*
	 * follow attached proc list, looking for associated dirty
	 * PTEs.  If one is found, then return (1), else return (0)
	 */
	while(p){
#ifdef vax
		pte = p->p_p0br + p->p_sm[i].sm_spte+smp;
#endif vax
#ifdef mips
		/* get the pte's */
		/* NOTE(review): nothing on the mips path sets `pte',	*/
		/* so dirtycl() below would read an uninitialized	*/
		/* pointer if this were compiled for mips -- confirm.	*/
#endif mips
		if (dirtycl(pte))
			return(1);
		/* now for next proc in linked list	*/
		if((p = p->p_sm[i].sm_link) == NULL)
			break;
		if(p->p_sm == (struct p_sm *) NULL) {
			panic("dirtysm: p_sm2");
		}
		/* find next proc's SMS */
		for(i=0; i < sminfo.smseg; i++)
			if(p->p_sm[i].sm_p == sp)
				break;
		if(i >= sminfo.smseg)
			panic("dirtysm: no SMS #2");
	}
	return (0);
}
#endif vax

#ifdef vax
/*
 * Release page tables of process p.  If the process is in context, then
 * "vmemfree" is notified that the released page frames should be placed
 * on the "ucmap" free list until after context switch.
 */
vrelpt(p)
	register struct proc *p;
{
	register int a;

	if (p->p_szpt == 0)
		return;
	a = btokmx(p->p_p0br);
	/* a negative count tells vmemfree to defer the frames (see	*/
	/* the header comment) when we are releasing our own tables.	*/
	if (u.u_procp == p)
		(void) vmemfree(&Usrptmap[a],-(p->p_szpt));
	else
		(void) vmemfree(&Usrptmap[a], p->p_szpt);
	rmfree(kernelmap, (long)p->p_szpt, (long)a);
}
#endif vax

#ifdef mips
/*
 * Release data and stack page tables of process p.
 */
vrelpt(p)
	register struct proc *p;
{
	register int a;

	XPRINTF(XPR_VM,"enter vrelpt",0,0,0,0);
	if (p->p_datapt) {
		a = btokmx(p->p_databr);
		(void) vmemfree(&Usrptmap[a], p->p_datapt);
		rmfree(kernelmap, (long)p->p_datapt, (long)a);
	}
	if (p->p_stakpt) {
		a = btokmx(p->p_stakbr);
		(void) vmemfree(&Usrptmap[a], p->p_stakpt);
		rmfree(kernelmap, (long)p->p_stakpt, (long)a);
	}
	p->p_datapt = 0;
	p->p_stakpt = 0;
	p->p_databr = (struct pte *)0;
	p->p_stakbr = (struct pte *)0;
}
#endif mips

/* exchange macros used by vpasspt below: Xu/Xup swap u-area pcb	*/
/* fields (int / pte pointer), Xp/Xpp swap proc fields likewise.	*/
#define	Xu(a)	t = up->u_pcb.a; up->u_pcb.a = uq ->u_pcb.a; uq->u_pcb.a = t;
#define	Xup(a)	tp = up->u_pcb.a; up->u_pcb.a = uq ->u_pcb.a; uq->u_pcb.a = tp;
#define	Xp(a)	t = p->a; p->a = q->a; q->a = t;
#define	Xpp(a)	tp = p->a; p->a = q->a; q->a = tp;
/*
 * Pass the page tables of process p to process q.
 * Used during vfork().  P and q are not symmetric;
 * p is the giver and q the receiver; after calling vpasspt
 * p will be ``cleaned out''.  Thus before vfork() we call vpasspt
 * with the child as q and give it our resources; after vfork() we
 * call vpasspt with the child as p to steal our resources back.
 * We are cognizant of whether we are p or q because we have to
 * be careful to keep our u. area and restore the other u. area from
 * umap after we temporarily put our u. area in both p and q's page tables.
 */
#ifdef mips
/*
 * This mips version is symmetric since the u-area is not mapped in the
 * page tables that are being swapped. Note that the umap argument is
 * not used for the mips version.
 * TODO: should the processes exchange tlbpids? They are exchanging mappings
 * except for the u-block which probably has the G bit set.
 */
#endif mips
vpasspt(p, q, up, uq, umap)
	register struct proc *p, *q;
	register struct user *up, *uq;
	struct pte *umap;
{
	int t;			/* temp for Xu/Xp integer swaps */
	int s;			/* saved interrupt priority */
	struct pte *tp;		/* temp for Xup/Xpp pointer swaps */
	register int i;
#ifdef mips
	XPRINTF(XPR_VM,"enter vpasspt",0,0,0,0);
#endif mips
#ifdef vax
	s = spl7();	/* conservative, and slightly paranoid */
#endif vax
#ifdef mips
	s = splhigh();	/* conservative, and slightly paranoid */
#endif mips
	lock(LOCK_RQ);
#ifdef vax
	Xu(pcb_szpt); Xu(pcb_p0lr); Xu(pcb_p1lr);
	Xup(pcb_p0br); Xup(pcb_p1br);
	/*
	 * The u. area is contained in the process' p1 region.
	 * Thus we map the current u. area into the process virtual space
	 * of both sets of page tables we will deal with so that it
	 * will stay with us as we rearrange memory management.
	 */
	for (i = 0; i < UPAGES; i++)
		if (up == &u)
			q->p_addr[i] = p->p_addr[i];
		else
			p->p_addr[i] = q->p_addr[i];
#ifdef vax
	mtpr(TBIA, 0);
#endif
	/*
	 * Now have u. double mapped, and have flushed
	 * any stale translations to new u. area.
	 * Switch the page tables.
	 */
	Xpp(p_p0br); Xp(p_szpt); Xpp(p_addr);
#ifdef vax
	mtpr(P0BR, u.u_pcb.pcb_p0br);
	mtpr(P1BR, u.u_pcb.pcb_p1br);
	mtpr(P0LR, u.u_pcb.pcb_p0lr &~ AST_CLR);
	mtpr(P1LR, u.u_pcb.pcb_p1lr);
#endif
	/*
	 * Now running on the ``other'' set of page tables.
	 * Flush translation to insure that we get correct u.
	 * Resurrect the u. for the other process in the other
	 * (our old) set of page tables.  Thus the other u. has moved
	 * from its old (our current) set of page tables to our old
	 * (its current) set of page tables, while we have kept our
	 * u. by mapping it into the other page table and then keeping
	 * the other page table.
	 */
#ifdef vax
	mtpr(TBIA, 0);
#endif
	for (i = 0; i < UPAGES; i++) {
		int pf;
		struct pte *pte;
		if (up == &u) {
			pf = umap[i].pg_pfnum;
			pte = &q->p_addr[i];
			pte->pg_pfnum = pf;
		} else {
			pf = umap[i].pg_pfnum;
			pte = &p->p_addr[i];
			pte->pg_pfnum = pf;
		}
	}
#ifdef vax
	mtpr(TBIA, 0);
#endif
#endif vax
#ifdef mips
        /* p_addr is not going anywhere because it does not point into
         * the text or data or stack page maps. We do not swap tlbpids,
         * rather we let parent and child fault in separate mappings.
         */
        Xpp(p_textbr); Xpp(p_databr); Xpp(p_stakbr);
        Xp(p_textpt); Xp(p_datapt); Xp(p_stakpt);
#endif mips
	unlock(LOCK_RQ);
	splx(s);
}

/*
 * Compute number of pages to be allocated to the u. area
 * and data and stack area page tables, which are stored on the
 * disk immediately after the u. area.
 */
/*ARGSUSED*/
vusize(p, utl)
register struct proc *p;
struct user *utl;		/* unused (see ARGSUSED) */
{
#ifdef vax
	register int tsz = p->p_tsize / NPTEPG;
	/*
	 * We do not need page table space on the disk for page
	 * table pages wholly containing text.  This is well
	 * understood in the code in vmswap.c.
	 */
	return (clrnd(UPAGES +
	    clrnd(ctopt(p->p_tsize + p->p_dsize
					+ p->p_smsize	/* SHMEM */
					+ p->p_ssize+UPAGES)) - tsz));
#endif vax
#ifdef mips
XPRINTF(XPR_VM,"enter vusize",0,0,0,0);
        /*
         * We do not need page table space on the disk for page
         * table pages wholly containing text.  This is well
         * understood in the code in vmswap.c.
         */
        return (clrnd(UPAGES) + clrnd(ctopt(p->p_dsize))
                + clrnd(ctopt(p->p_ssize)));
#endif mips
}

/*
 * Get u area for process p.  If an old u area is given,
 * then copy the new area from the old, else
 * swap in as specified in the proc structure.
 *
 * Since argument map/newu is potentially shared
 * when an old u. is provided we have to be careful not
 * to block after beginning to use them in this case.
 * (This is not true when called from swapin() with no old u.)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -