/*
 * uvm_aobj.c — page 1 of 3 of a 1,553-line file, extracted from the
 * "OSKIT component-based operating system" source distribution via a
 * web code viewer.  Viewer chrome has been replaced with this note.
 */
/*	$NetBSD: uvm_aobj.c,v 1.37 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include "opt_uvmhist.h"

/*
 * NOTE(review): the original include list pulled in <sys/kernel.h> twice
 * (once before and once after <sys/pool.h>); the duplicate is dropped.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */
struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */
LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */
struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_device *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */
struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */
struct pool uvm_aobj_pool;

/*
 * local functions
 */
static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */
struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */
static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */
static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */
	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */
	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */
	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */
		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
* * => for normal use, flags are always zero * => for the kernel object, the flags are: *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once) *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ") */struct uvm_object *uao_create(size, flags)	vsize_t size;	int flags;{	static struct uvm_aobj kernel_object_store; /* home of kernel_object */	static int kobj_alloced = 0;			/* not allocated yet */	int pages = round_page(size) >> PAGE_SHIFT;	struct uvm_aobj *aobj;	/*	 * malloc a new aobj unless we are asked for the kernel object	 */	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */		if (kobj_alloced)			panic("uao_create: kernel object already allocated");		aobj = &kernel_object_store;		aobj->u_pages = pages;		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */		/* we are special, we never die */		aobj->u_obj.uo_refs = UVM_OBJ_KERN;		kobj_alloced = UAO_FLAG_KERNOBJ;	} else if (flags & UAO_FLAG_KERNSWAP) {		aobj = &kernel_object_store;		if (kobj_alloced != UAO_FLAG_KERNOBJ)		    panic("uao_create: asked to enable swap on kernel object");		kobj_alloced = UAO_FLAG_KERNSWAP;	} else {	/* normal object */		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);		aobj->u_pages = pages;		aobj->u_flags = 0;		/* normal object */		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */	}	/* 	 * allocate hash/array if necessary 	 * 	 * note: in the KERNSWAP case no need to worry about locking since 	 * we are still booting we should be the only thread around. 	 */

/*
 * NOTE(review): the extraction is truncated above — uao_create() and the
 * remainder of uvm_aobj.c (pages 2-3 of 3, 1,553 lines total) are not
 * included in this chunk.  Viewer keyboard-shortcut help removed.
 */