/*
 * uvm_amap.c — NetBSD UVM anonymous-memory map (amap) implementation.
 * (Recovered from a scraped code listing, page 1 of 2; the original
 * file is 1,067 lines — this chunk ends partway through amap_wipeout.)
 */
/*	$NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_amap.c: amap operations
 */

/*
 * this file contains functions that perform operations on amaps.  see
 * uvm_amap.h for a brief explanation of the role of amaps in uvm.
 */

#undef UVM_AMAP_INLINE		/* enable/disable amap inlines */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#define UVM_AMAP_C		/* ensure disabled inlines are in */
#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * pool for allocation of vm_map structures.  note that the pool has
 * its own simplelock for its protection.  also note that in order to
 * avoid an endless loop, the amap pool's allocator cannot allocate
 * memory from an amap (it currently goes through the kernel uobj, so
 * we are ok).
 */

struct pool uvm_amap_pool;

/*
 * local functions
 */

static struct vm_amap *amap_alloc1 __P((int, int, int));

#ifdef UVM_AMAP_PPREF
/*
 * what is ppref?   ppref is an _optional_ amap feature which is used
 * to keep track of reference counts on a per-page basis.  it is enabled
 * when UVM_AMAP_PPREF is defined.
 *
 * when enabled, an array of ints is allocated for the pprefs.  this
 * array is allocated only when a partial reference is added to the
 * map (either by unmapping part of the amap, or gaining a reference
 * to only a part of an amap).  if the malloc of the array fails
 * (M_NOWAIT), then we set the array pointer to PPREF_NONE to indicate
 * that we tried to do ppref's but couldn't alloc the array so just
 * give up (after all, this is an optional feature!).
 *
 * the array is divided into page sized "chunks."   for chunks of length 1,
 * the chunk reference count plus one is stored in that chunk's slot.
 * for chunks of length > 1 the first slot contains (the reference count
 * plus one) * -1.    [the negative value indicates that the length is
 * greater than one.]   the second slot of the chunk contains the length
 * of the chunk.   here is an example:
 *
 * actual REFS:  2  2  2  2  3  1  1  0  0  0  4  4  0  1  1  1
 *       ppref: -3  4  x  x  4 -2  2 -1  3  x -5  2  1 -2  3  x
 *              <----------><-><----><-------><----><-><------->
 * (x = don't care)
 *
 * this allows us to allow one int to contain the ref count for the whole
 * chunk.    note that the "plus one" part is needed because a reference
 * count of zero is neither positive or negative (need a way to tell
 * if we've got one zero or a bunch of them).
 *
 * here are some in-line functions to help us.
 */

static __inline void pp_getreflen __P((int *, int, int *, int *));
static __inline void pp_setreflen __P((int *, int, int, int));

/*
 * pp_getreflen: get the reference and length for a specific offset
 *
 * => ppref's amap must be locked
 * => *refp gets the decoded reference count for the chunk containing
 *    "offset"; *lenp gets that chunk's length in slots
 */
static __inline void
pp_getreflen(ppref, offset, refp, lenp)
	int *ppref, offset, *refp, *lenp;
{

	if (ppref[offset] > 0) {		/* chunk size must be 1 */
		*refp = ppref[offset] - 1;	/* don't forget to adjust */
		*lenp = 1;
	} else {
		/* negative encoding: chunk length > 1, length in next slot */
		*refp = (ppref[offset] * -1) - 1;
		*lenp = ppref[offset+1];
	}
}

/*
 * pp_setreflen: set the reference and length for a specific offset
 *
 * => ppref's amap must be locked
 * => encodes (ref, len) at "offset" using the scheme described above
 */
static __inline void
pp_setreflen(ppref, offset, ref, len)
	int *ppref, offset, ref, len;
{
	if (len == 1) {
		ppref[offset] = ref + 1;
	} else {
		ppref[offset] = (ref + 1) * -1;
		ppref[offset+1] = len;
	}
}
#endif

/*
 * amap_init: called at boot time to init global amap data structures
 */

void
amap_init()
{
	/*
	 * Initialize the vm_amap pool.
	 */
	pool_init(&uvm_amap_pool, sizeof(struct vm_amap), 0, 0, 0,
	    "amappl", 0, pool_page_alloc_nointr, pool_page_free_nointr,
	    M_UVMAMAP);
}

/*
 * amap_alloc1: internal function that allocates an amap, but does not
 *	init the overlay.
 *
 * => lock on returned amap is init'd
 * => "slots" is the active size, "padslots" extra pre-allocated room,
 *    "waitf" is the malloc wait flag (M_WAITOK/M_NOWAIT)
 * => on any allocation failure, everything acquired so far is unwound
 *    (goto-based cleanup) and NULL is returned
 */
static inline struct vm_amap *
amap_alloc1(slots, padslots, waitf)
	int slots, padslots, waitf;
{
	struct vm_amap *amap;
	int totalslots = slots + padslots;

	amap = pool_get(&uvm_amap_pool, (waitf == M_WAITOK) ?
	    PR_WAITOK : 0);
	if (amap == NULL)
		return(NULL);

	simple_lock_init(&amap->am_l);
	amap->am_ref = 1;
	amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
	amap->am_ppref = NULL;
#endif
	amap->am_maxslot = totalslots;
	amap->am_nslot = slots;
	amap->am_nused = 0;

	amap->am_slots = malloc(totalslots * sizeof(int), M_UVMAMAP,
	    waitf);
	if (amap->am_slots == NULL)
		goto fail1;

	amap->am_bckptr = malloc(totalslots * sizeof(int), M_UVMAMAP, waitf);
	if (amap->am_bckptr == NULL)
		goto fail2;

	amap->am_anon = malloc(totalslots * sizeof(struct vm_anon *),
	    M_UVMAMAP, waitf);
	if (amap->am_anon == NULL)
		goto fail3;

	return(amap);

fail3:
	free(amap->am_bckptr, M_UVMAMAP);
fail2:
	free(amap->am_slots, M_UVMAMAP);
fail1:
	pool_put(&uvm_amap_pool, amap);
	return (NULL);
}

/*
 * amap_alloc: allocate an amap to manage "sz" bytes of anonymous VM
 *
 * => caller should ensure sz is a multiple of PAGE_SIZE
 * => reference count to new amap is set to one
 * => new amap is returned unlocked
 * => only am_anon is zeroed here; am_slots/am_bckptr need no init
 *    until slots are actually used
 */
struct vm_amap *
amap_alloc(sz, padsz, waitf)
	vaddr_t sz, padsz;
	int waitf;
{
	struct vm_amap *amap;
	int slots, padslots;
	UVMHIST_FUNC("amap_alloc"); UVMHIST_CALLED(maphist);

	AMAP_B2SLOT(slots, sz);		/* load slots */
	AMAP_B2SLOT(padslots, padsz);

	amap = amap_alloc1(slots, padslots, waitf);
	if (amap)
		memset(amap->am_anon, 0,
		    (slots + padslots) * sizeof(struct vm_anon *));

	UVMHIST_LOG(maphist,"<- done, amap = 0x%x, sz=%d", amap, sz, 0, 0);
	return(amap);
}

/*
 * amap_free: free an amap
 *
 * => the amap must be locked (mainly for simplelock accounting)
 * => the amap should have a zero reference count and be empty
 *    (DIAGNOSTIC kernels panic otherwise)
 */
void
amap_free(amap)
	struct vm_amap *amap;
{
	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);

#ifdef DIAGNOSTIC
	if (amap->am_ref || amap->am_nused)
		panic("amap_free");
#endif

	free(amap->am_slots, M_UVMAMAP);
	free(amap->am_bckptr, M_UVMAMAP);
	free(amap->am_anon, M_UVMAMAP);
#ifdef UVM_AMAP_PPREF
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
		free(amap->am_ppref, M_UVMAMAP);
#endif
	amap_unlock(amap);	/* mainly for lock debugging */
	pool_put(&uvm_amap_pool, amap);

	UVMHIST_LOG(maphist,"<- done, freed amap = 0x%x", amap, 0, 0, 0);
}

/*
 * amap_extend: extend the size of an amap (if needed)
 *
 * => called from uvm_map when we want to extend an amap to cover
 *    a new mapping (rather than allocate a new one)
 * => amap should be unlocked (we will lock it)
 * => to safely extend an amap it should have a reference count of
 *    one (thus it can't be shared)
 * => XXXCDC: needs a waitflag or failure return value?
 * => XXXCDC: support padding at this level?
 *
 * three cases, cheapest first:
 *   case 1: am_nslot already covers the need -> just adjust pprefs
 *   case 2: am_maxslot (pre-allocated pad) covers it -> bump am_nslot
 *   case 3: reallocate all three arrays at the larger size and copy
 */
void
amap_extend(entry, addsize)
	vm_map_entry_t entry;
	vsize_t addsize;
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slotoff = entry->aref.ar_pageoff;
	int slotmapped, slotadd, slotneed;
#ifdef UVM_AMAP_PPREF
	int *newppref, *oldppref;
#endif
	u_int *newsl, *newbck, *oldsl, *oldbck;
	struct vm_anon **newover, **oldover;
	int slotadded;
	UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "  (entry=0x%x, addsize=0x%x)", entry,addsize,0,0);

	/*
	 * first, determine how many slots we need in the amap.  don't
	 * forget that ar_pageoff could be non-zero: this means that
	 * there are some unused slots before us in the amap.
	 */

	amap_lock(amap);					/* lock! */

	AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
	AMAP_B2SLOT(slotadd, addsize);			/* slots to add */
	slotneed = slotoff + slotmapped + slotadd;

	/*
	 * case 1: we already have enough slots in the map and thus
	 * only need to bump the reference counts on the slots we are
	 * adding.
	 */

	if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
		if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
			amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
		}
#endif
		amap_unlock(amap);
		UVMHIST_LOG(maphist,"<- done (case 1), amap = 0x%x, sltneed=%d",
		    amap, slotneed, 0, 0);
		return;				/* done! */
	}

	/*
	 * case 2: we pre-allocated slots for use and we just need to
	 * bump nslot up to take account for these slots.
	 */
	if (amap->am_maxslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
		if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
			if ((slotoff + slotmapped) < amap->am_nslot)
				amap_pp_adjref(amap, slotoff + slotmapped,
				    (amap->am_nslot - (slotoff + slotmapped)),
				    1);
			pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
			   slotneed - amap->am_nslot);
		}
#endif
		amap->am_nslot = slotneed;
		amap_unlock(amap);
		/*
		 * no need to zero am_anon since that was done at
		 * alloc time and we never shrink an allocation.
		 */
		UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, slotneed=%d",
		    amap, slotneed, 0, 0);
		return;
	}

	/*
	 * case 3: we need to malloc a new amap and copy all the amap
	 * data over from old amap to the new one.
	 *
	 * XXXCDC: could we take advantage of a kernel realloc()?
	 */

	amap_unlock(amap);	/* unlock in case we sleep in malloc */
#ifdef UVM_AMAP_PPREF
	newppref = NULL;
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
		newppref = malloc(slotneed * sizeof(int), M_UVMAMAP, M_NOWAIT);
		if (newppref == NULL) {
			/* give up if malloc fails */
			free(amap->am_ppref, M_UVMAMAP);
			amap->am_ppref = PPREF_NONE;
		}
	}
#endif
	newsl = malloc(slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
	newbck = malloc(slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
	newover = malloc(slotneed * sizeof(struct vm_anon *),
	    M_UVMAMAP, M_WAITOK);
	amap_lock(amap);			/* re-lock! */
#ifdef DIAGNOSTIC
	/*
	 * we dropped the lock around the M_WAITOK mallocs above; the
	 * amap must not have grown to cover slotneed in the meantime
	 * (ref count is 1, so nobody else should have extended it).
	 */
	if (amap->am_maxslot >= slotneed)
		panic("amap_extend: amap changed during malloc");
#endif

	/*
	 * now copy everything over to new malloc'd areas...
	 */

	slotadded = slotneed - amap->am_nslot;

	/* do am_slots */
	oldsl = amap->am_slots;
	memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
	amap->am_slots = newsl;

	/* do am_anon */
	oldover = amap->am_anon;
	memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
	memset(newover + amap->am_nslot, 0,
	    sizeof(struct vm_anon *) * slotadded);
	amap->am_anon = newover;

	/* do am_bckptr */
	oldbck = amap->am_bckptr;
	memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
	memset(newbck + amap->am_nslot, 0,
	    sizeof(int) * slotadded); /* XXX: needed? */
	amap->am_bckptr = newbck;

#ifdef UVM_AMAP_PPREF
	/* do ppref */
	oldppref = amap->am_ppref;
	if (newppref) {
		memcpy(newppref, oldppref, sizeof(int) * amap->am_nslot);
		memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
		amap->am_ppref = newppref;
		if ((slotoff + slotmapped) < amap->am_nslot)
			amap_pp_adjref(amap, slotoff + slotmapped,
			    (amap->am_nslot - (slotoff + slotmapped)), 1);
		pp_setreflen(newppref, amap->am_nslot, 1, slotadded);
	}
#endif

	/* update master values */
	amap->am_nslot = slotneed;
	amap->am_maxslot = slotneed;

	/* unlock */
	amap_unlock(amap);

	/* and free */
	free(oldsl, M_UVMAMAP);
	free(oldbck, M_UVMAMAP);
	free(oldover, M_UVMAMAP);
#ifdef UVM_AMAP_PPREF
	if (oldppref && oldppref != PPREF_NONE)
		free(oldppref, M_UVMAMAP);
#endif
	UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
	    amap, slotneed, 0, 0);
}

/*
 * amap_share_protect: change protection of anons in a shared amap
 *
 * for shared amaps, given the current data structure layout, it is
 * not possible for us to directly locate all maps referencing the
 * shared anon (to change the protection).  in order to protect data
 * in shared maps we use pmap_page_protect().  [this is useful for IPC
 * mechanisms like map entry passing that may want to write-protect
 * all mappings of a shared amap.]  we traverse am_anon or am_slots
 * depending on the current state of the amap.
 *
 * => entry's map and amap must be locked by the caller
 * => picks the cheaper of two traversals: the entry's slot range
 *    (am_anon, may have NULL holes) vs the amap's used-slot list
 *    (am_slots, dense but covers the whole amap)
 */
void
amap_share_protect(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slots, lcv, slot, stop;

	AMAP_B2SLOT(slots, (entry->end - entry->start));
	stop = entry->aref.ar_pageoff + slots;

	if (slots < amap->am_nused) {
		/* cheaper to traverse am_anon */
		for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
			if (amap->am_anon[lcv] == NULL)
				continue;
			if (amap->am_anon[lcv]->u.an_page != NULL)
				pmap_page_protect(amap->am_anon[lcv]->u.an_page,
						  prot);
		}
		return;
	}

	/* cheaper to traverse am_slots */
	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		slot = amap->am_slots[lcv];
		if (slot < entry->aref.ar_pageoff || slot >= stop)
			continue;
		if (amap->am_anon[slot]->u.an_page != NULL)
			pmap_page_protect(amap->am_anon[slot]->u.an_page, prot);
	}
	return;
}

/*
 * amap_wipeout: wipeout all anon's in an amap; then free the amap!
 *
 * => called from amap_unref when the final reference to an amap is
 *	discarded (i.e. when reference count == 1)
 * => the amap should be locked (by the caller)
 *
 * NOTE(review): this listing is page 1 of 2 — the remainder of
 * amap_wipeout (and the rest of the file) is not shown here, so the
 * function body below is incomplete past the anon loop.
 */
void
amap_wipeout(amap)
	struct vm_amap *amap;
{
	int lcv, slot;
	struct vm_anon *anon;
	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);

	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		int refs;

		slot = amap->am_slots[lcv];
		anon = amap->am_anon[slot];

		if (anon == NULL || anon->an_ref == 0)
			panic("amap_wipeout: corrupt amap");

		simple_lock(&anon->an_lock); /* lock anon */
		UVMHIST_LOG(maphist,"  processing anon 0x%x, ref=%d", anon,
		    anon->an_ref, 0, 0);

		refs = --anon->an_ref;
		simple_unlock(&anon->an_lock);
		if (refs == 0) {
			/*
			 * we had the last reference to a vm_anon. free it.
			 */
			uvm_anfree(anon);
		}
	}

/* (end of page 1 of the scraped listing — remainder of amap_wipeout
 *  and the rest of uvm_amap.c continue on page 2) */