uvm_pager.c

From "OSKIT source code for component-based operating system development" · C source code · 915 lines total · page 1 of 2

C
915
字号
/*	$NetBSD: uvm_pager.c,v 1.38 2000/12/09 23:26:27 chs Exp $	*//* * * Copyright (c) 1997 Charles D. Cranor and Washington University. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *      This product includes software developed by Charles D. Cranor and *      Washington University. * 4. The name of the author may not be used to endorse or promote products *    derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp */#include "opt_uvmhist.h"/* * uvm_pager.c: generic functions used to assist the pagers. 
*/#include <sys/param.h>#include <sys/systm.h>#include <sys/proc.h>#include <sys/malloc.h>#include <sys/pool.h>#include <sys/vnode.h>#define UVM_PAGER#include <uvm/uvm.h>struct pool *uvm_aiobuf_pool;/* * list of uvm pagers in the system */extern struct uvm_pagerops uvm_deviceops;extern struct uvm_pagerops uvm_vnodeops;extern struct uvm_pagerops ubc_pager;struct uvm_pagerops *uvmpagerops[] = {	&aobj_pager,#ifndef OSKIT	&uvm_deviceops,#endif	&uvm_vnodeops,#ifndef OSKIT	&ubc_pager,#endif};/* * the pager map: provides KVA for I/O */vm_map_t pager_map;		/* XXX */simple_lock_data_t pager_map_wanted_lock;boolean_t pager_map_wanted;	/* locked by pager map */static vaddr_t emergva;static boolean_t emerginuse;/* * uvm_pager_init: init pagers (at boot time) */voiduvm_pager_init(){	int lcv;	/*	 * init pager map	 */	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,	 			    PAGER_MAP_SIZE, 0, FALSE, NULL);	simple_lock_init(&pager_map_wanted_lock);	pager_map_wanted = FALSE;	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);	emerginuse = FALSE;	/*	 * init ASYNC I/O queue	 */		TAILQ_INIT(&uvm.aio_done);	/*	 * call pager init functions	 */	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);	    lcv++) {		if (uvmpagerops[lcv]->pgo_init)			uvmpagerops[lcv]->pgo_init();	}}/* * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings * * we basically just map in a blank map entry to reserve the space in the * map and then use pmap_enter() to put the mappings in by hand. */vaddr_tuvm_pagermapin(pps, npages, flags)	struct vm_page **pps;	int npages;	int flags;{	vsize_t size;	vaddr_t kva;	vaddr_t cva;	struct vm_page *pp;	vm_prot_t prot;	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);	/*	 * compute protection.  outgoing I/O only needs read	 * access to the page, whereas incoming needs read/write.	 
*/	prot = VM_PROT_READ;	if (flags & UVMPAGER_MAPIN_READ)		prot |= VM_PROT_WRITE;ReStart:	size = npages << PAGE_SHIFT;	kva = 0;			/* let system choose VA */	if (uvm_map(pager_map, &kva, size, NULL, 	      UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {		if (curproc == uvm.pagedaemon_proc) {			simple_lock(&pager_map_wanted_lock);			if (emerginuse) {				UVM_UNLOCK_AND_WAIT(&emergva,				    &pager_map_wanted_lock, FALSE,				    "emergva", 0);				goto ReStart;			}			emerginuse = TRUE;			simple_unlock(&pager_map_wanted_lock);			kva = emergva;			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);			goto enter;		}		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);			return(0);		}		simple_lock(&pager_map_wanted_lock);		pager_map_wanted = TRUE; 		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE, 		    "pager_map", 0);		goto ReStart;	}enter:	/* got it */	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {		pp = *pps++;		KASSERT(pp->flags & PG_BUSY);		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),		    prot, PMAP_WIRED | ((pp->flags & PG_FAKE) ? prot :					VM_PROT_READ));	}	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);	return(kva);}/* * uvm_pagermapout: remove pager_map mapping * * we remove our mappings by hand and then remove the mapping (waking * up anyone wanting space). */voiduvm_pagermapout(kva, npages)	vaddr_t kva;	int npages;{	vsize_t size = npages << PAGE_SHIFT;	vm_map_entry_t entries;	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);	/*	 * duplicate uvm_unmap, but add in pager_map_wanted handling.	 
*/	if (kva == emergva) {		simple_lock(&pager_map_wanted_lock);		emerginuse = FALSE;		wakeup(&emergva);		simple_unlock(&pager_map_wanted_lock);		entries = NULL;		goto remove;	}	vm_map_lock(pager_map);	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);	simple_lock(&pager_map_wanted_lock);	if (pager_map_wanted) {		pager_map_wanted = FALSE;		wakeup(pager_map);	}	simple_unlock(&pager_map_wanted_lock);	vm_map_unlock(pager_map);remove:	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));	if (entries)		uvm_unmap_detach(entries, 0);	UVMHIST_LOG(maphist,"<- done",0,0,0,0);}/* * uvm_mk_pcluster * * generic "make 'pager put' cluster" function.  a pager can either * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this * generic function, or [3] set it to a pager specific function. * * => caller must lock object _and_ pagequeues (since we need to look *    at active vs. inactive bits, etc.) * => caller must make center page busy and write-protect it * => we mark all cluster pages busy for the caller * => the caller must unbusy all pages (and check wanted/released *    status if it drops the object lock) * => flags: *      PGO_ALLPAGES:  all pages in object are valid targets *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster *      PGO_DOACTCLUST: include active pages in cluster. *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST. *              PG_CLEANCHK is only a hint, but clearing will help reduce *		the number of calls we make to the pmap layer. */struct vm_page **uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)	struct uvm_object *uobj;	/* IN */	struct vm_page **pps, *center;  /* IN/OUT, IN */	int *npages, flags;		/* IN/OUT, IN */	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */{	struct vm_page **ppsp, *pclust;	voff_t lo, hi, curoff;	int center_idx, forward, incr;	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);	/* 	 * center page should already be busy and write protected.  
XXX:	 * suppose page is wired?  if we lock, then a process could	 * fault/block on it.  if we don't lock, a process could write the	 * pages in the middle of an I/O.  (consider an msync()).  let's	 * lock it for now (better to delay than corrupt data?).	 */	/*	 * get cluster boundaries, check sanity, and apply our limits as well.	 */	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);	if ((flags & PGO_ALLPAGES) == 0) {		if (lo < mlo)			lo = mlo;		if (hi > mhi)			hi = mhi;	}	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */#ifdef DIAGNOSTIC		printf("uvm_mk_pcluster uobj %p npages %d lo 0x%llx hi 0x%llx "		       "flags 0x%x\n", uobj, *npages, (long long)lo,		       (long long)hi, flags);#endif		pps[0] = center;		*npages = 1;		return(pps);	}	/*	 * now determine the center and attempt to cluster around the	 * edges	 */	center_idx = (center->offset - lo) >> PAGE_SHIFT;	pps[center_idx] = center;	/* plug in the center page */	ppsp = &pps[center_idx];	*npages = 1;	/*	 * attempt to cluster around the left [backward], and then 	 * the right side [forward].    	 *	 * note that for inactive pages (pages that have been deactivated)	 * there are no valid mappings and PG_CLEAN should be up to date.	 * [i.e. there is no need to query the pmap with pmap_is_modified	 * since there are no mappings].	 */	for (forward  = 0 ; forward <= 1 ; forward++) {		incr = forward ? 
PAGE_SIZE : -PAGE_SIZE;		curoff = center->offset + incr;		for ( ;(forward == 0 && curoff >= lo) ||		       (forward && curoff < hi);		      curoff += incr) {			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */			if (pclust == NULL) {				break;			/* no page */			}			/* handle active pages */			/* NOTE: inactive pages don't have pmap mappings */			if ((pclust->pqflags & PQ_INACTIVE) == 0) {				if ((flags & PGO_DOACTCLUST) == 0) {					/* dont want mapped pages at all */					break;				}				/* make sure "clean" bit is sync'd */				if ((pclust->flags & PG_CLEANCHK) == 0) {					if ((pclust->flags & (PG_CLEAN|PG_BUSY))					   == PG_CLEAN &&					   pmap_is_modified(pclust))						pclust->flags &= ~PG_CLEAN;					/* now checked */					pclust->flags |= PG_CLEANCHK;				}			}			/* is page available for cleaning and does it need it */			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {				break;	/* page is already clean or is busy */			}			/* yes!   enroll the page in our array */			pclust->flags |= PG_BUSY;		/* busy! */			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");			/* XXX: protect wired page?   see above comment. */			pmap_page_protect(pclust, VM_PROT_READ);			if (!forward) {				ppsp--;			/* back up one page */				*ppsp = pclust;			} else {				/* move forward one page */				ppsp[*npages] = pclust;			}			(*npages)++;		}	}		/*	 * done!  return the cluster array to the caller!!!	 */	UVMHIST_LOG(maphist, "<- done",0,0,0,0);	return(ppsp);}/* * uvm_pager_put: high level pageout routine * * we want to pageout page "pg" to backing store, clustering if * possible. * * => page queues must be locked by caller * => if page is not swap-backed, then "uobj" points to the object *	backing it.   this object should be locked by the caller. * => if page is swap-backed, then "uobj" should be NULL. 
* => "pg" should be PG_BUSY (by caller), and !PG_CLEAN *    for swap-backed memory, "pg" can be NULL if there is no page *    of interest [sometimes the case for the pagedaemon] * => "ppsp_ptr" should point to an array of npages vm_page pointers *	for possible cluster building * => flags (first two for non-swap-backed pages) *	PGO_ALLPAGES: all pages in uobj are valid targets *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets *	PGO_SYNCIO: do SYNC I/O (no async) *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range *		  if (!uobj) start is the (daddr_t) of the starting swapblk * => return state: *	1. we return the VM_PAGER status code of the pageout *	2. we return with the page queues unlocked *	3. if (uobj != NULL) [!swap_backed] we return with *		uobj locked _only_ if PGO_PDFREECLUST is set  *		AND result != VM_PAGER_PEND.   in all other cases *		we return with uobj unlocked.   [this is a hack *		that allows the pagedaemon to save one lock/unlock *		pair in the !swap_backed case since we have to *		lock the uobj to drop the cluster anyway] *	4. on errors we always drop the cluster.   thus, if we return *		!PEND, !OK, then the caller only has to worry about *		un-busying the main page (not the cluster pages). *	5. on success, if !PGO_PDFREECLUST, we return the cluster *		with all pages busy (caller must un-busy and check *		wanted/released flags). */intuvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)	struct uvm_object *uobj;	/* IN */	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */	int *npages;			/* IN/OUT */	int flags;			/* IN */	voff_t start, stop;		/* IN, IN */{	int result;	daddr_t swblk;	struct vm_page **ppsp = *ppsp_ptr;	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(ubchist);	/*	 * note that uobj is null  if we are doing a swap-backed pageout.	 * note that uobj is !null if we are doing normal object pageout.	 
* note that the page queues must be locked to cluster.	 */	if (uobj) {	/* if !swap-backed */		/*		 * attempt to build a cluster for pageout using its		 * make-put-cluster function (if it has one).		 */		if (uobj->pgops->pgo_mk_pcluster) {			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,			    npages, pg, flags, start, stop);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?