
📄 vm_page.c

📁 Source code of Digital's Unix operating system for the VAX, version 4.2
💻 C
📖 Page 1 of 3
	}
	else if (type == CSMEM) {	/* SHMEM */
		sp->sm_rssize += CLSIZE;
#ifdef vax
		distsmpte(sp,
				vtotp(p, v) -
				p->p_sm[smindex].sm_spte, pte, PG_NOCLRM);
#endif vax
	}
	else
		p->p_rssize += CLSIZE;
	/*
	 * Two cases: either fill on demand (zero, or from file or text)
	 * or from swap space.
	 */
	if (opte.pg_fod) {
#ifdef mips
		/*
		 * must force page to swap, set pg_swapm for text
		 * so page is not writable, set pg_m for data to
		 * force to swap and avoid unnecessary tlbmod's
		 */
		/*
		 * CSMEM must be treated like text, in case the segment is
		 * read only (MIPS only)
		 */
		if (type == CTEXT)
			pte->pg_swapm = 1;
		else if (type == CSMEM)
			pte->pg_swapm = 1;
		else
			pte->pg_m = 1;
#endif mips
#ifdef vax
		pte->pg_m = 1;
#endif vax
		if (fileno == PG_FZERO || fileno == PG_FTEXT) {
			/*
			 * Flush any previous text page use of this
			 * swap device block.
			 */
			si = splimp();
			if (type == CTEXT) {
				c = mfind(swapdev, bnswap, p->p_textp->x_gptr);
				if (c)
					munhash(swapdev, bnswap, p->p_textp->x_gptr);
			}
			splx(si);
			/*
			 * If zero fill, short-circuit hard work
			 * by just clearing pages.
			 */
			if (fileno == PG_FZERO) {
				pgtrace(TR_ZFOD);
				for (i = 0; i < CLSIZE; i++)
					clearseg(pf+i);
				if (type != CTEXT)
					cnt.v_zfod += CLSIZE;
				splx(sk);
				goto skipswap;
			}
			pgtrace(TR_EXFOD);
			cnt.v_exfod += CLSIZE;
		} else
			panic("pagein vread");
		/*
		 * Fill from gnode.  Try to find adjacent
		 * pages to bring in also.
		 */
		/* only use if data paged in from file system */
		if (type == CDATA && !nobufcache) {
			use_buffer_cache = 1;
			klmax = KLMAX/2;	/* 8 clicks == 8192 bytes */
		}
		if (type == CSMEM)
			panic("pagein: SHMEM fodkluster");
		v = fodkluster(p, v, pte, &klsize, dev, &bn, klmax);
		bncache = bn;
		splx(sk);
		/*
		 * Blocks of an executable may still be in the buffer
		 * cache, so we explicitly flush them out to disk
		 * so that the proper data will be paged in.
		 */
#ifdef notdef
		/* this is done in kern_exec once and for all */
		{
		int ret;
		ret =
		blkflush(dev, bn, (long)klsize*CLSIZE*NBPG, p->p_textp->x_gptr);
		}
#endif notdef
#ifdef TRACE
		if (type != CTEXT)
			trace(TR_XFODMISS, dev, bn);
#endif
	} else {
		if (opte.pg_pfnum)
			panic("pagein pfnum");
		pgtrace(TR_SWAPIN);
		/*
		 * Fill from swap area.  Try to find adjacent
		 * pages to bring in also.
		 */
		if (type != CSMEM)	/* SHMEM */
			v = kluster(p, v, pte, B_READ, &klsize,
		    		(type == CTEXT) ? kltxt :
		    		((p->p_flag & SSEQL) ? klseql : klin), bn);
		splx(sk);
		/* THIS COULD BE COMPUTED INCREMENTALLY... */
		bncache = bn = vtod(p, v, &u.u_dmap, &u.u_smap);
	}
	distcl(pte);
	if (type == CSMEM)	/* SHMEM */
#ifdef vax
		swap(sp, bn, ptob(vtotp(p, v) - p->p_sm[smindex].sm_spte),
				ctob(CLSIZE), B_READ, B_PGIN|B_SMEM, dev, 0);
#endif vax
#ifdef mips
		swap(sp, bn, ptob(v - p->p_sm[smindex].sm_saddr),
				ctob(CLSIZE), B_READ, B_PGIN|B_SMEM, dev, 0);
#endif mips
	else {
		/* make pagein use the BUFFER CACHE!!! - rr */
		/* for data only, text is reclaimed out of vm (I hope!) */
		/* this is checked above */
		if (use_buffer_cache) {
			struct gnode *gp = p->p_textp->x_gptr;
			struct buf *bp = NULL;
			int size = klsize*ctob(CLSIZE);

			if (ISLOCAL(gp->g_mp))
				bp = bread(dev, bn, size, 0);
			else
				bp = bread(dev, bn, size, gp);
			if (bp->b_flags & B_ERROR) {
				printf("buf_pagein got B_ERROR dev %x bn %d size %d gp %x\n",
					dev, bn, size, gp);
				brelse(bp);
				goto swapit;
			}
			buf_pagein_cnt++;
			buf_pagein_bytes += size;
			bcopy(bp->b_un.b_addr, ptob(v), size);
			brelse(bp);
		}
		else {
swapit:
			swap(p, bn, ptob(v), klsize * ctob(CLSIZE),
	    			B_READ, B_PGIN, dev, 0);
		}
	}
#ifdef TRACE
	trace(TR_PGINDONE, vsave, u.u_procp->p_pid);
#endif
	/*
	 * Instrumentation.
	 */
	u.u_ru.ru_majflt++;
	cnt.v_pgin++;
	cnt.v_pgpgin += klsize * CLSIZE;
#ifdef PGINPROF
	a = vmtime(otime, olbolt, oicr) / 100;
	pgintime += a;
	if (a >= 0)
		vmfltmon(pmon, a, pmonmin, pres, NPMON);
#endif
skipswap:
	/*
	 * Fix page table entries.
	 *
	 * Only page requested in is validated, and rest of pages
	 * can be ``reclaimed''.  This allows system to reclaim prepaged pages
	 * quickly if they are not used and memory is tight.
	 */
	pte = vtopte(p, vsave);
	if (opte.pg_fod)
		pte->pg_m = 1;
	if (pte->pg_pfnum == 0)
		panic("pagein: pfnum = 0");
	pte->pg_v = 1;
	distcl(pte);
	if (type == CTEXT) {
#ifdef vax
		distpte(p->p_textp, vtotp(p, vsave), pte);
#endif vax
		if (opte.pg_fod)
			p->p_textp->x_flag |= XWRIT;
		wakeup((caddr_t)p->p_textp);
	}
	if (type == CSMEM) {	/* SHMEM */
#ifdef vax
		distsmpte(sp,
				vtotp(p, vsave) -
				p->p_sm[smindex].sm_spte, pte, PG_NOCLRM);
#endif vax
		wakeup((caddr_t)&sp->sm_flag);
	}
#ifdef mips
	if (type == CSMEM)
		XPRINTF(XPR_SM, "pagein: fill pte 0x%x\n", *(int *)pte, 0, 0, 0);
#endif mips
	/*
	 * Memall returned page(s) locked.  Unlock all
	 * pages in cluster.  If locking pages for raw i/o
	 * leave the page which was required to be paged in locked,
	 * but still unlock others.
	 * If text pages, hash into the cmap situation table.
	 */
	pte = vtopte(p, v);
	for (i = 0; i < klsize; i++) {
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (v != vsave && fileno == PG_FTEXT) {
			pte->pg_m = 1;
			distcl(pte);
#ifdef vax
			if (type == CTEXT)
				distpte(p->p_textp, vtotp(p, v), pte);
#endif vax
		}
		c->c_intrans = 0;
		if (type == CTEXT && c->c_blkno == 0 && bncache && !nohash &&
				((p->p_textp->x_gptr->g_flag & GTRC) == 0)) {
			mhash(c, dev, bncache);
			bncache += CLBYTES / DEV_BSIZE;
		}
		if (v != vsave || !dlyu)
			MUNLOCK(c);
		if (v != vsave && type != CTEXT &&
				type != CSMEM &&	/* SHMEM */
				preptofree &&
				opte.pg_fod == 0) {
			/*
			 * Throw pre-paged data/stack pages at the
			 * bottom of the free list.
			 */
			p->p_rssize -= CLSIZE;
			memfree(pte, CLSIZE, 0);
		}
#ifdef mips
		newptes(p, v, CLSIZE);
#endif mips
#ifdef vax
		newptes(pte, v, CLSIZE);
#endif vax
		v += CLSIZE;
		pte += CLSIZE;
	}
	/*
	 * All done.
	 */
	p->p_flag &= ~SPAGE;
	/*
	 * If process is declared fifo, memory is tight,
	 * and this was a data page-in, free memory
	 * klsdist pagein clusters away from the current fault.
	 */
	if ((p->p_flag&SSEQL) && freemem < lotsfree && type == CDATA) {
		int k = (vtodp(p, vsave) / CLSIZE) / klseql;

		dpageout(p, (k - klsdist) * klseql * CLSIZE, klout*CLSIZE);
		dpageout(p, (k + klsdist) * klseql * CLSIZE, klout*CLSIZE);
	}
}
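pagein() above resolves a fault in one of three ways: a zero-fill page is simply cleared, a fill-on-demand page backed by an executable is read through fodkluster() (and, for local data, the buffer cache), and anything else is read back from the swap device via kluster() and swap(). The standalone sketch below condenses that three-way decision; it is an illustration only, and every name in it (fake_pte, FILL_ZERO, fill_page, PAGE_SIZE) is invented rather than taken from this file.

/*
 * Illustrative sketch only -- not part of vm_page.c.  All names below are
 * invented; the logic mirrors the zero-fill / file-fill / swap-fill split
 * made by pagein() above.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 512			/* stand-in for NBPG */

enum fill_src { FILL_ZERO, FILL_FILE, FILL_SWAP };

struct fake_pte {
	int fod;			/* fill on demand, like pg_fod */
	enum fill_src src;		/* where the contents come from */
};

/* Decide how to satisfy a fault: the same three-way split as pagein(). */
static void fill_page(const struct fake_pte *pte, char *page)
{
	if (pte->fod && pte->src == FILL_ZERO) {
		memset(page, 0, PAGE_SIZE);	/* clearseg() analogue */
		printf("zero fill (ZFOD)\n");
	} else if (pte->fod) {
		printf("read from the executable/file system\n");	/* fodkluster()/bread() path */
	} else {
		printf("read from the swap device\n");			/* kluster()/swap() path */
	}
}

int main(void)
{
	char page[PAGE_SIZE];
	struct fake_pte zero = { 1, FILL_ZERO };
	struct fake_pte text = { 1, FILL_FILE };
	struct fake_pte swapped = { 0, FILL_SWAP };

	fill_page(&zero, page);
	fill_page(&text, page);
	fill_page(&swapped, page);
	return 0;
}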
/*
 * Take away n pages of data space
 * starting at data page dp.
 * Used to take pages away from sequential processes.
 * Mimics pieces of code in pageout() below.
 */
dpageout(p, dp, n)
	struct proc *p;
	int dp, n;
{
	register struct cmap *c;
	int i, klsize;
	register struct pte *pte;
	unsigned v;
	daddr_t daddr;

#ifdef mips
	XPRINTF(XPR_VM, "enter dpageout", 0, 0, 0, 0);
#endif mips
	if (dp < 0) {
		n += dp;
		dp = 0;
	}
	if (dp + n > p->p_dsize)
		n = p->p_dsize - dp;
	for (i = 0; i < n; i += CLSIZE, dp += CLSIZE) {
		pte = dptopte(p, dp);
		if (pte->pg_fod || pte->pg_pfnum == 0)
			continue;
		c = &cmap[pgtocm(pte->pg_pfnum)];
		if (c->c_lock || c->c_free)
			continue;
		if (pte->pg_v) {
#ifdef mips
			flushpte(p, v, CLSIZE, c->c_type);
#endif mips
			pte->pg_v = 0;
			if (anycl(pte, pg_m))
				pte->pg_m = 1;
			distcl(pte);
		}
		if (dirtycl(pte)) {
			if (bswlist.av_forw == NULL)
				continue;
			MLOCK(c);
#ifdef mips
			flushpte(p, v, CLSIZE, c->c_type);
			pte->pg_swapm = 0;
#endif mips
			pte->pg_m = 0;
			distcl(pte);
			p->p_poip++;
			v = kluster(p, dptov(p, dp), pte, B_WRITE,
				&klsize, klout, (daddr_t)0);
			/* THIS ASSUMES THAT p == u.u_procp */
			daddr = vtod(p, v, &u.u_dmap, &u.u_smap);
			swap(p, daddr, ptob(v), klsize * ctob(CLSIZE),
			    B_WRITE, B_DIRTY, swapdev, pte->pg_pfnum);
		} else {
			if (c->c_gone == 0)
				p->p_rssize -= CLSIZE;
			memfree(pte, CLSIZE, 0);
			cnt.v_seqfree += CLSIZE;
		}
	}
}
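For each resident, unlocked cluster it visits, dpageout() either starts a push to swap (when the cluster is dirty) or frees the pages outright. The following sketch illustrates that per-page policy only; mock_page and take_page are invented stand-ins for the kernel's cmap and pte structures.

/*
 * Illustrative sketch only -- invented names.  It mirrors dpageout()'s
 * per-cluster policy: skip pages that are locked or not resident, push
 * dirty pages to swap, free clean ones directly.
 */
#include <stdio.h>

struct mock_page {
	int resident;			/* pg_pfnum != 0 */
	int locked;			/* c_lock */
	int dirty;			/* pg_m */
};

static void take_page(struct mock_page *pg, int idx)
{
	if (!pg->resident || pg->locked) {
		printf("page %d: skipped\n", idx);
		return;
	}
	if (pg->dirty) {
		/* the real code starts a swap write here (swap(..., B_DIRTY, ...));
		 * the page is freed only after the push completes */
		printf("page %d: push to swap\n", idx);
		pg->dirty = 0;
	} else {
		printf("page %d: free immediately\n", idx);	/* memfree() */
		pg->resident = 0;
	}
}

int main(void)
{
	struct mock_page pages[] = {
		{ 1, 0, 1 },	/* resident and dirty */
		{ 1, 0, 0 },	/* resident and clean */
		{ 0, 0, 0 },	/* not resident */
		{ 1, 1, 1 },	/* locked for i/o */
	};
	int i;

	for (i = 0; i < 4; i++)
		take_page(&pages[i], i);
	return 0;
}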
int	pushes;

#define	FRONT	1
#define	BACK	2

/*
 * The page out daemon, which runs as process 2.
 *
 * As long as there are at least lotsfree pages,
 * this process is not run.  When the number of free
 * pages stays in the range desfree to lotsfree,
 * this daemon runs through the pages in the loop
 * at a rate determined in vmsched().  Pageout manages
 * two hands on the clock.  The front hand moves through
 * memory, clearing the valid bit (simulating a reference bit),
 * and stealing pages from procs that are over maxrss.
 * The back hand travels a distance behind the front hand,
 * freeing the pages that have not been referenced in the time
 * since the front hand passed.  If modified, they are pushed to
 * swap before being freed.
 */
pageout()
{
	register int count;
	register int maxhand = pgtocm(maxfree);
	register int fronthand, backhand;
	register int cpindex, cpident;

#ifdef mips
	XPRINTF(XPR_VM, "enter pageout", 0, 0, 0, 0);
#endif mips
	/*
	 * Set the two clock hands to be separated by a reasonable amount,
	 * but no more than 360 degrees apart.
	 */
	backhand = 0 / CLBYTES;
	fronthand = HANDSPREAD / CLBYTES;
	if (fronthand >= maxhand)
		fronthand = maxhand - 1;
loop:
	/*
	 * Before sleeping, look to see if there are any swap I/O headers
	 * in the ``cleaned'' list that correspond to dirty
	 * pages that have been pushed asynchronously. If so,
	 * empty the list by calling cleanup().
	 *
	 * N.B.: We guarantee never to block while the cleaned list is nonempty.
	 */
#ifdef vax
	(void) splhigh();
#endif vax
#ifdef mips
	XPRINTF(XPR_VM, "pageout going to splbio", 0, 0, 0, 0);
	(void) splbio();
#endif mips
	if (bclnlist != NULL) {
		(void) spl0();
		cleanup();
		goto loop;
	}
	slavehold = 0;
#ifdef mips
	XPRINTF(XPR_VM, "pageout going to sleep", 0, 0, 0, 0);
#endif mips
	sleep((caddr_t)&proc[2], PSWP+1);
#ifdef mips
	XPRINTF(XPR_VM, "pageout waking up", 0, 0, 0, 0);
#endif mips
	if (extracpu) {
		slavehold = 1;
		cpident = cpuindex();
		for (cpindex = 0; cpindex < activecpu; cpindex++) {
			if (cpindex != cpident) {
				cpudata[cpindex].c_runrun++;
				intrcpu(cpindex);
			}
		}
	}
	(void) spl0();
	count = 0;
	pushes = 0;
	while (nscan < desscan && freemem < lotsfree) {
		/*
		 * If checkpage manages to add a page to the free list,
		 * we give ourselves another couple of trips around the loop.
		 */
		if (checkpage(fronthand, FRONT))
			count = 0;
		if (checkpage(backhand, BACK))
			count = 0;
		cnt.v_scan++;
		nscan++;
		if (++fronthand >= maxhand) {
			fronthand = 0;
			cnt.v_rev++;
			if (count > 2) {
				/*
				 * Extremely unlikely, but we went around
				 * the loop twice and didn't get anywhere.
				 * Don't cycle, stop till the next clock tick.
				 */
				goto loop;
			}
			count++;
		}
		if (++backhand >= maxhand)
			backhand = 0;
	}
	goto loop;
}
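The header comment above describes the two-hand clock: the front hand clears the valid bit to simulate a reference bit, and the back hand, a fixed distance behind, frees whatever has not been referenced in the meantime. Below is a toy model of that scan, not the kernel code; NPAGES, SPREAD, and referenced[] are invented stand-ins for the cmap array, HANDSPREAD/CLBYTES, and the per-page valid bits that checkpage() actually manipulates.

/*
 * Illustrative sketch only -- a toy model of the two-hand clock.  All names
 * are invented; the real scan walks cmap entries through checkpage().
 */
#include <stdio.h>

#define NPAGES	16
#define SPREAD	4			/* hand separation, like HANDSPREAD / CLBYTES */

static int referenced[NPAGES];		/* stands in for the pte valid bit */

/* front hand: clear the "reference" bit so a later touch must set it again */
static void front(int hand) { referenced[hand] = 0; }

/* back hand: a still-clear bit means nobody touched the page; free it */
static int back(int hand) { return referenced[hand] == 0; }

int main(void)
{
	int i, freed = 0;
	int fronthand = SPREAD, backhand = 0;

	for (i = 0; i < NPAGES; i++)		/* every page starts out referenced */
		referenced[i] = 1;

	for (i = 0; i < 2 * NPAGES; i++) {	/* two revolutions of the clock */
		front(fronthand);
		if (i == 4)			/* page 6 is touched again between the hands */
			referenced[6] = 1;
		if (back(backhand))
			freed++;
		fronthand = (fronthand + 1) % NPAGES;
		backhand = (backhand + 1) % NPAGES;
	}
	printf("would free %d of %d page visits\n", freed, 2 * NPAGES);
	return 0;
}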
/*
 * An iteration of the clock pointer (hand) around the loop.
 * Look at the page at hand.  If it is a
 * locked (for physical i/o e.g.), system (u., page table)
 * or free, then leave it alone.
 * Otherwise, if we are running the front hand,
 * invalidate the page for simulation of the reference bit.
 * If the proc is over maxrss, we take it.
 * If running the back hand, check whether the page
 * has been reclaimed.  If not, free the page,
 * pushing it to disk first if necessary.
 */
checkpage(hand, whichhand)
	int hand, whichhand;
{
	register struct proc *rp;
	register struct cmap *c;
	register struct cmap *c1, *c2;
	register struct pte *pte;
	struct text *xp;
	struct smem *sp;		/* SHMEM */
	swblk_t daddr;
	unsigned v;			/* Virt page within proc addr space */
	int klsize;
	int lock;			/* Is the segment locked ? */
	int j;
	struct gnode *gp;
	int pagedirty = 0;
	int smpage;
#ifdef mips
	struct user *ppushutl;
#define pushutl (*ppushutl)
#endif mips

#ifdef mips
	XPRINTF(XPR_VM, "enter checkpage", 0, 0, 0, 0);
#endif mips
top:
	/*
	 * Find a process and text pointer for the
	 * page, and a virtual page number in either the
	 * process or the text image.
	 */
	c = &cmap[hand];
	if (c->c_lock || c->c_free)
		return (0);
	switch (c->c_type) {

	case CSYS:
		return (0);

	case CTEXT:
		xp = &text[c->c_ndx];
		rp = xp->x_caddr;
		gp = xp->x_gptr;
		v = tptov(rp, c->c_page);
		pte = tptopte(rp, c->c_page);
		lock = xp->x_flag & (XNOSW|XLOCK);
		break;

	case CDATA:
	case CSTACK:
		rp = &proc[c->c_ndx];
		while (rp->p_flag & SNOVM)
			rp = rp->p_xlink;
		xp = rp->p_textp;
		if (c->c_type == CDATA) {
			v = dptov(rp, c->c_page);
			pte = dptopte(rp, c->c_page);
		} else {
			v = sptov(rp, c->c_page);
			pte = sptopte(rp, c->c_page);
		}
		/*
		 * If the process is being swapped out
		 * or about to exit, do not bother with its
		 * pages
		 */
		lock = rp->p_flag & (SULOCK | SLOCK | SWEXIT);
		break;

	case CSMEM:	/* SHMEM */
		sp = &smem[c->c_ndx];
		rp = sp->sm_caddr;
#ifdef vax
		v = c->c_page;
#endif vax
#ifdef mips
		v = smptov(rp, sp, c->c_page);	/* this is correct, VAX isn't */
#endif mips
		pte = sp->sm_ptaddr + c->c_page;
		lock = sp->sm_flag & SMNOSW;
		break;
	}
	/* if segment locked, then not eligible for paging */
	if (lock)
		return (0);
	if (pte->pg_pfnum != cmtopg(hand)) {
		panic("bad c_page");
	}
	/*
	 * If page is valid; make invalid but reclaimable.
	 * If this pte is not valid, then it must be reclaimable
	 * and we can add it to the free list.
	 */
	if (pte->pg_v) {
#ifdef mips
		flushpte(rp, v, CLSIZE, c->c_type);
#endif mips
		if (whichhand == BACK)
			return (0);
		pte->pg_v = 0;
		if (pte->pg_pfnum == 0)
			panic("pageout: checkpage");
		if (anycl(pte, pg_m))
			pte->pg_m = 1;
		distcl(pte);
#ifdef vax
		if (c->c_type == CTEXT)
			distpte(xp, vtotp(rp, v), pte);
#endif vax
		if (c->c_type == CSMEM) {	/* SHMEM */
#ifdef vax
			distsmpte(sp, v, pte, PG_NOCLRM);
#endif vax
			return (0);
		}
		if ((rp->p_flag & (SSEQL|SUANOM)) == 0 &&
		    rp->p_rssize <= rp->p_maxrss)
			return (0);
	}
	if (c->c_type != CTEXT
			&&  c->c_type != CSMEM) { /* SHMEM */
		/*
		 * Guarantee a minimal investment in data
		 * space for jobs in balance set.
		 */
		if (rp->p_rssize < saferss - rp->p_slptime)
			return (0);
	}
	/*
	 * If the page is currently dirty, we
	 * have to arrange to have it cleaned before it
	 * can be freed.  We mark it clean immediately.
	 * If it is reclaimed while being pushed, then modified
	 * again, we are assured of the correct order of
	 * writes because we lock the page during the write.
	 * This guarantees that a swap() of this process (and
	 * thus this page), initiated in parallel, will,
	 * in fact, push the page after us.
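checkpage() (the rest of which continues on the later pages of this listing) decides, for each page a hand visits, whether to leave it alone, invalidate it, or free it, pushing dirty pages to swap first as the comment above explains. The condensed sketch below is illustrative only; toy_page and visit() are invented stand-ins for the cmap/pte handling, and the text, shared-memory, and maxrss special cases are omitted.

/*
 * Illustrative sketch only -- invented names.  It condenses checkpage()'s
 * policy: the front hand turns the valid bit off; a page found still
 * invalid by either hand is reclaimed, dirty pages being written first.
 */
#include <stdio.h>

#define FRONT	1
#define BACK	2

struct toy_page {
	int valid;	/* pg_v: set again whenever the page is reclaimed/touched */
	int dirty;	/* pg_m */
	int locked;	/* c_lock or a locked segment */
};

/* returns 1 when the visit would put the page on the free list */
static int visit(struct toy_page *pg, int whichhand)
{
	if (pg->locked)
		return 0;		/* locked, system or free: leave it alone */
	if (pg->valid) {
		if (whichhand == BACK)
			return 0;	/* back hand never touches valid pages */
		pg->valid = 0;		/* front hand: simulate the reference bit */
		return 0;		/* (the real code keeps going here when the
					 *  process is over maxrss) */
	}
	/* still invalid when a hand comes by: reclaim it */
	if (pg->dirty)
		printf("push to swap, then free\n");
	else
		printf("free immediately\n");
	return 1;
}

int main(void)
{
	struct toy_page pg = { 1, 1, 0 };

	visit(&pg, FRONT);	/* invalidates the page, frees nothing */
	visit(&pg, BACK);	/* untouched since then: pushed and freed */
	return 0;
}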
