
📄 vm_fault.c

📁 open bsd vm device design
💻 C
📖 Page 1 of 2
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_fault.c	8.5 (Berkeley) 1/9/95
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
{
	vm_object_t		first_object;
	vm_offset_t		first_offset;
	vm_map_entry_t		entry;
	register vm_object_t	object;
	register vm_offset_t	offset;
	register vm_page_t	m;
	vm_page_t		first_m;
	vm_prot_t		prot;
	int			result;
	boolean_t		wired;
	boolean_t		su;
	boolean_t		lookup_still_valid;
	boolean_t		page_exists;
	vm_page_t		old_m;
	vm_object_t		next_object;

	cnt.v_faults++;		/* needs lock XXX */

/*
 *	Recovery actions
 */
#define	FREE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_free(m);				\
	vm_page_unlock_queues();			\
}

#define	RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP(m);					\
	vm_page_lock_queues();				\
	vm_page_activate(m);				\
	vm_page_unlock_queues();			\
}

#define	UNLOCK_MAP	{				\
	if (lookup_still_valid) {			\
		vm_map_lookup_done(map, entry);		\
		lookup_still_valid = FALSE;		\
	}						\
}

#define	UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
	if (object != first_object) {			\
		vm_object_lock(first_object);		\
		FREE_PAGE(first_m);			\
		first_object->paging_in_progress--;	\
		vm_object_unlock(first_object);		\
	}						\
	UNLOCK_MAP;					\
}

#define	UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(first_object);		\
}

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry,
			&first_object, &first_offset,
			&prot, &wired, &su)) != KERN_SUCCESS) {
		return(result);
	}
	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	vm_object_lock(first_object);

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *		Note that we cannot hold any locks during the
	 *		pager access or when waiting for memory, so
	 *		we use a busy page then.
	 *
	 *		Note also that we aren't as concerned about
	 *		more than one thread attempting to pager_data_unlock
	 *		the same page at once, so we don't hold the page
	 *		as busy then, but do record the highest unlock
	 *		value so far.  [Unlock requests may also be delivered
	 *		out of order.]
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 *	3)	To prevent another thread from racing us down the
	 *		shadow chain and entering a new page in the top
	 *		object before we do, we must keep a busy page in
	 *		the top object while following the shadow chain.
	 *
	 *	4)	We must increment paging_in_progress on any object
	 *		for which we have a busy page, to prevent
	 *		vm_object_collapse from removing the busy page
	 *		without our noticing.
	 */
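	/*
	 *	The search loop below ends in one of three ways: the
	 *	page is found resident (after waiting out PG_BUSY if
	 *	need be) and is claimed; a pager somewhere along the
	 *	shadow chain supplies its contents; or the chain runs
	 *	out and the page is zero-filled in the top object.
	 */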
	/*
	 *	Search for the page at object/offset.
	 */

	object = first_object;
	offset = first_offset;

	/*
	 *	See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 *	If the page is being brought in,
			 *	wait for it and then retry.
			 */
			if (m->flags & PG_BUSY) {
#ifdef DOTHREADS
				int	wait_result;

				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				thread_block();
				wait_result = current_thread()->wait_result;
				vm_object_deallocate(first_object);
				if (wait_result != THREAD_AWAKENED)
					return(KERN_SUCCESS);
				goto RetryFault;
#else
				PAGE_ASSERT_WAIT(m, !change_wiring);
				UNLOCK_THINGS;
				cnt.v_intrans++;
				thread_block();
				vm_object_deallocate(first_object);
				goto RetryFault;
#endif
			}

			/*
			 *	Remove the page from the pageout daemon's
			 *	reach while we play with it.
			 */
			vm_page_lock_queues();
			if (m->flags & PG_INACTIVE) {
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				m->flags &= ~PG_INACTIVE;
				cnt.v_inactive_count--;
				cnt.v_reactivated++;
			}

			if (m->flags & PG_ACTIVE) {
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				m->flags &= ~PG_ACTIVE;
				cnt.v_active_count--;
			}
			vm_page_unlock_queues();

			/*
			 *	Mark page busy for other threads.
			 */
			m->flags |= PG_BUSY;
			break;
		}

		if (((object->pager != NULL) &&
				(!change_wiring || wired))
		    || (object == first_object)) {

			/*
			 *	Allocate a new page for this object/offset
			 *	pair.
			 */

			m = vm_page_alloc(object, offset);

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}

		if (object->pager != NULL && (!change_wiring || wired)) {
			int rv;

			/*
			 *	Now that we have a busy page, we can
			 *	release the object lock.
			 */
			vm_object_unlock(object);

			/*
			 *	Call the pager to retrieve the data, if any,
			 *	after releasing the lock on the map.
			 */
			UNLOCK_MAP;
			cnt.v_pageins++;
			rv = vm_pager_get(object->pager, m, TRUE);

			/*
			 *	Reacquire the object lock to preserve our
			 *	invariant.
			 */
			vm_object_lock(object);

			/*
			 *	Found the page.
			 *	Leave it busy while we play with it.
			 */
			if (rv == VM_PAGER_OK) {
				/*
				 *	Relookup in case pager changed page.
				 *	Pager is responsible for disposition
				 *	of old page if moved.
				 */
				m = vm_page_lookup(object, offset);

				cnt.v_pgpgin++;
				m->flags &= ~PG_FAKE;
				m->flags |= PG_CLEAN;
				pmap_clear_modify(VM_PAGE_TO_PHYS(m));
				break;
			}

			/*
			 * IO error or page outside the range of the pager:
			 * cleanup and return an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return(KERN_PROTECTION_FAILURE); /* XXX */
			}

			/*
			 * rv == VM_PAGER_FAIL:
			 *
			 * Page does not exist at this object/offset.
			 * Free the bogus page (waking up anyone waiting
			 * for it) and continue on to the next object.
			 *
			 * If this is the top-level object, we must
			 * leave the busy page to prevent another
			 * thread from rushing past us, and inserting
			 * the page in that object at the same time
			 * that we are.
			 */
			if (object != first_object) {
				FREE_PAGE(m);
				/* note that `m' is not used after this */
			}
		}

		/*
		 * We get here if the object has no pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;
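		/*
		 *	Not resident here and no pager could supply it:
		 *	look in the backing ("shadow") object, if any.
		 *	shadow_offset translates this object's offsets
		 *	into offsets within its shadow.
		 */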
		/*
		 *	Move on to the next object.  Lock the next
		 *	object before unlocking the current one.
		 */
		offset += object->shadow_offset;
		next_object = object->shadow;
		if (next_object == NULL) {
			/*
			 *	If there's no object left, fill the page
			 *	in the top object with zeros.
			 */
			if (object != first_object) {
				object->paging_in_progress--;
				vm_object_unlock(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
				vm_object_lock(object);
			}
			first_m = NULL;

			vm_page_zero_fill(m);
			cnt.v_zfod++;
			m->flags &= ~PG_FAKE;
			break;
		}
		else {
			vm_object_lock(next_object);
			if (object != first_object)
				object->paging_in_progress--;
			vm_object_unlock(object);
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_BUSY)) != PG_BUSY)
		panic("vm_fault: active, inactive or !busy after main loop");

	/*
	 *	PAGE HAS BEEN FOUND.
	 *	[Loop invariant still holds -- the object lock
	 *	is held.]
	 */

	old_m = m;	/* save page that would be copied */

	/*
	 *	If the page is being written, but isn't
	 *	already owned by the top-level object,
	 *	we have to copy it into a new page owned
	 *	by the top-level object.
	 */

	if (object != first_object) {
		/*
		 *	We only really need to copy if we
		 *	want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 *	If we try to collapse first_object at this
			 *	point, we may deadlock when we try to get
			 *	the lock on an intermediate object (since we
			 *	have the bottom object locked).  We can't
			 *	unlock the bottom object, because the page
			 *	we found may move (by collapse) if we do.
			 *
			 *	Instead, we first copy the page.  Then, when
			 *	we have no more use for the bottom object,
			 *	we unlock it and try to collapse.
			 *
			 *	Note that we copy the page even if we didn't
			 *	need to... that's the breaks.
			 */

			/*
			 *	We already have an empty page in
			 *	first_object - use it.
			 */

			vm_page_copy(m, first_m);
			first_m->flags &= ~PG_FAKE;

			/*
			 *	If another map is truly sharing this
			 *	page with us, we have to flush all
			 *	uses of the original page, since we
			 *	can't distinguish those which want the
			 *	original from those which need the
			 *	new copy.
			 *
			 *	XXX If we know that only one map has
			 *	access to this page, then we could
			 *	avoid the pmap_page_protect() call.
			 */
			vm_page_lock_queues();
			vm_page_activate(m);
			vm_page_deactivate(m);
			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
			vm_page_unlock_queues();

			/*
			 *	We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
			vm_object_unlock(object);

			/*
			 *	Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;

			/*
			 *	Now that we've gotten the copy out of the
			 *	way, let's try to collapse the top object.
			 */
			vm_object_lock(object);

			/*
			 *	But we have to play ugly games with
			 *	paging_in_progress to do that...
			 */
			object->paging_in_progress--;
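For context: vm_fault() is the machine-independent end of fault handling. On a hardware fault, the machine-dependent trap handler works out the faulting address and access type, chooses the kernel map or the faulting process's map, and calls vm_fault(); on KERN_SUCCESS the page is resident and entered in the physical map, and the faulting instruction can simply be restarted. Below is a minimal sketch of such a caller, assuming the usual 4.4BSD-era declarations (kernel_map, trunc_page(), struct proc's p_vmspace); the function handle_fault() and its argument list are hypothetical, not part of this file.

/*
 * Illustrative sketch only, not part of vm_fault.c: roughly how a
 * machine-dependent trap handler might hand a fault to vm_fault().
 * handle_fault() and its arguments are hypothetical names.
 */
int
handle_fault(p, va, write_access, usermode)
	struct proc	*p;
	vm_offset_t	va;
	boolean_t	write_access;
	boolean_t	usermode;
{
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;

	/*
	 * Kernel-space addresses fault against kernel_map; user
	 * addresses against the faulting process's own map.
	 */
	map = usermode ? &p->p_vmspace->vm_map : kernel_map;

	/*
	 * vm_fault() expects the address truncated to a page
	 * boundary and the permissions being demanded.
	 */
	ftype = write_access ? (VM_PROT_READ | VM_PROT_WRITE)
			     : VM_PROT_READ;

	rv = vm_fault(map, trunc_page(va), ftype, FALSE);
	return (rv == KERN_SUCCESS ? 0 : rv);
}

The change_wiring argument is FALSE for an ordinary fault like this; it is TRUE only on the wiring path (vm_fault_wire(), later in this file), where pages are faulted in so they can be wired down.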
