uvm_map.c

来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 2,449 行 · 第 1/5 页

C
2,449
字号
/*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*//*  * Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1991, 1993, The Regents of the University of California.   * * All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *	This product includes software developed by Charles D. Cranor, *      Washington University, the University of California, Berkeley and  *      its contributors. * 4. Neither the name of the University nor the names of its contributors *    may be used to endorse or promote products derived from this software *    without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.  
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. *  * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. *  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. *  * Carnegie Mellon requests users of this software to return to * *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU *  School of Computer Science *  Carnegie Mellon University *  Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. 
*/

#include "opt_ddb.h"
#include "opt_uvmhist.h"
#include "opt_sysv.h"

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#define UVM_MAP
#include <uvm/uvm.h>

#ifdef DDB
#include <uvm/uvm_ddb.h>
#endif

/* event counters, registered via UVMCNT_INIT() in uvm_map_init() */
struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;

/*
 * pool for vmspace structures.
 */

struct pool uvm_vmspace_pool;

/*
 * pool for dynamically-allocated map entries.
 */

struct pool uvm_map_entry_pool;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

/*
 * macros
 */

/*
 * uvm_map_entry_link: insert entry into a map's doubly-linked entry list,
 * directly after "after_where", bumping the map's entry count.
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
} while (0)

/*
 * uvm_map_entry_unlink: remove entry from a map's entry list and drop
 * the map's entry count.  (does not free the entry itself)
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
} while (0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 * only overwrites the hint if it currently equals "check", so a stale
 * saver cannot clobber a newer hint.
 *
 * => map need not be locked (protected by hint_lock).
 */
#define SAVE_HINT(map,check,value) do { \
	simple_lock(&(map)->hint_lock); \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
	simple_unlock(&(map)->hint_lock); \
} while (0)

/*
 * VM_MAP_RANGE_CHECK: check and correct range: clamps [start,end) to the
 * map's valid VA range and collapses inverted ranges to empty.
 *
 * => map must at least be read locked
 */
#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map)) 		\
		start = vm_map_min(map);        \
	if (end > vm_map_max(map))              \
		end = vm_map_max(map);          \
	if (start > end)                        \
		start = end;                    \
} while (0)

/*
 * local prototypes
 */

static vm_map_entry_t	uvm_mapent_alloc __P((vm_map_t));
static void		uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
static void		uvm_mapent_free __P((vm_map_entry_t));
static void		uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void		uvm_map_reference_amap __P((vm_map_entry_t, int));
static void		uvm_map_unreference_amap __P((vm_map_entry_t, int));

/*
 * local inlines
 */

/*
 * uvm_mapent_alloc: allocate a map entry
 *
 * the kernel map and interrupt-safe maps take entries from the static
 * kentry_free list (flagged UVM_MAP_STATIC); all other maps allocate
 * from uvm_map_entry_pool with PR_WAITOK and so may sleep.
 *
 * => XXX: static pool for kernel map?
 */

static __inline vm_map_entry_t
uvm_mapent_alloc(map)
	vm_map_t map;
{
	vm_map_entry_t me;
	int s;
	UVMHIST_FUNC("uvm_mapent_alloc");
	UVMHIST_CALLED(maphist);

	if ((map->flags & VM_MAP_INTRSAFE) == 0 &&
	    map != kernel_map && kernel_map != NULL /* XXX */) {
		me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
		me->flags = 0;
		/* me can't be null, wait ok */
	} else {
		s = splimp();	/* protect kentry_free list with splimp */
		simple_lock(&uvm.kentry_lock);
		me = uvm.kentry_free;
		if (me) uvm.kentry_free = me->next;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
		if (!me)
	panic("mapent_alloc: out of static map entries, check MAX_KMAPENT");
		me->flags = UVM_MAP_STATIC;
	}

	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", 
		me, ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map)
		? TRUE : FALSE, 0, 0);
	return(me);
}

/*
 * uvm_mapent_free: free map entry
 *
 * => XXX: static pool for kernel map?
*/

static __inline void
uvm_mapent_free(me)
	vm_map_entry_t me;
{
	int s;
	UVMHIST_FUNC("uvm_mapent_free");
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", 
		me, me->flags, 0, 0);

	/*
	 * entries flagged UVM_MAP_STATIC go back on the kentry_free
	 * list (mirrors the split in uvm_mapent_alloc); all others
	 * return to the pool.
	 */
	if ((me->flags & UVM_MAP_STATIC) == 0) {
		pool_put(&uvm_map_entry_pool, me);
	} else {
		s = splimp();	/* protect kentry_free list with splimp */
		simple_lock(&uvm.kentry_lock);
		me->next = uvm.kentry_free;
		uvm.kentry_free = me;
		simple_unlock(&uvm.kentry_lock);
		splx(s);
	}
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 *
 * copies every byte of *src up to (but not including) the
 * uvm_map_entry_stop_copy marker field into *dst.
 */

static __inline void
uvm_mapent_copy(src, dst)
	vm_map_entry_t src;
	vm_map_entry_t dst;
{
	memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) - ((char*)src));
}

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * clears the entry's wired count, then unwires the underlying pages
 * over [entry->start, entry->end).
 *
 * => map should be locked by caller
 */

static __inline void
uvm_map_entry_unwire(map, entry)
	vm_map_t map;
	vm_map_entry_t entry;
{
	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}

/*
 * wrapper for calling amap_ref(): derives the page count from the
 * entry's VA range.
 */

static __inline void
uvm_map_reference_amap(entry, flags)
	vm_map_entry_t entry;
	int flags;
{
    amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	     (entry->end - entry->start) >> PAGE_SHIFT, flags);
}

/*
 * wrapper for calling amap_unref(): derives the page count from the
 * entry's VA range.
 */

static __inline void
uvm_map_unreference_amap(entry, flags)
	vm_map_entry_t entry;
	int flags;
{
    amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	     (entry->end - entry->start) >> PAGE_SHIFT, flags);
}

/*
 * uvm_map_init: init mapping system at boot time.   note that we allocate
 * and init the static pool of vm_map_entry_t's for the kernel here.
 */

void
uvm_map_init() 
{
	/* static storage: boot-time kernel map entries and history buffers */
	static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
	static struct uvm_history_ent maphistbuf[100];
	static struct uvm_history_ent pdhistbuf[100];
#endif
	int lcv;

	/*
	 * first, init logging system.
*/
	UVMHIST_FUNC("uvm_map_init");
	UVMHIST_INIT_STATIC(maphist, maphistbuf);
	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
	UVMCNT_INIT(uvm_map_call,  UVMCNT_CNT, 0,
	    "# uvm_map() successful calls", 0);
	UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
	UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
	    0);
	UVMCNT_INIT(uvm_mlk_call,  UVMCNT_CNT, 0, "# map lookup calls", 0);
	UVMCNT_INIT(uvm_mlk_hint,  UVMCNT_CNT, 0, "# map lookup hint hits", 0);

	/*
	 * now set up static pool of kernel map entrys ...
	 * (threads the static array onto uvm.kentry_free for
	 * uvm_mapent_alloc to consume)
	 */

	simple_lock_init(&uvm.kentry_lock);
	uvm.kentry_free = NULL;
	for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
		kernel_map_entry[lcv].next = uvm.kentry_free;
		uvm.kentry_free = &kernel_map_entry[lcv];
	}

	/*
	 * initialize the map-related pools.
	 */
	pool_init(&uvm_vmspace_pool, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
	pool_init(&uvm_map_entry_pool, sizeof(struct vm_map_entry),
	    0, 0, 0, "vmmpepl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_VMMAP);
}

/*
 * clippers
 */

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address, if it doesn't we split the entry.
 * 
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void uvm_map_clip_start(map, entry, start)
	vm_map_t       map;
	vm_map_entry_t entry;
	vaddr_t    start;
{
	vm_map_entry_t new_entry;
	vaddr_t new_adj;	/* #bytes split off the front */

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
*/
	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	/* new_entry keeps [old start, start); entry keeps [start, end) */
	new_entry->end = start; 
	new_adj = start - new_entry->start;
	if (entry->object.uvm_obj)
		entry->offset += new_adj;	/* shift start over */
	entry->start = start;

	if (new_entry->aref.ar_amap) {
		amap_splitref(&new_entry->aref, &entry->aref, new_adj);
	}

	uvm_map_entry_link(map, entry->prev, new_entry);

	/* the new entry adds one reference to the backing object/submap */
	if (UVM_ET_ISSUBMAP(entry)) {
		/* ... unlikely to happen, but play it safe */
		 uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) && 
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address, if it does't we split the reference
 * 
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(map, entry, end)
	vm_map_t	map;
	vm_map_entry_t	entry;
	vaddr_t	end;
{
	vm_map_entry_t	new_entry;
	vaddr_t new_adj; /* #bytes we move start forward */

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */

	new_entry = uvm_mapent_alloc(map);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */

	/* entry keeps [start, end); new_entry takes [end, old end) */
	new_entry->start = entry->end = end;
	new_adj = end - entry->start;
	if (new_entry->object.uvm_obj)
		new_entry->offset += new_adj;

	if (entry->aref.ar_amap)
		amap_splitref(&entry->aref, &new_entry->aref, new_adj);

	uvm_map_entry_link(map, entry, new_entry);

	/* the new entry adds one reference to the backing object/submap */
	if (UVM_ET_ISSUBMAP(entry)) {
		/* ...
unlikely to happen, but play it safe */
	 	uvm_map_reference(new_entry->object.sub_map);
	} else {
		if (UVM_ET_ISOBJ(entry) &&
		    entry->object.uvm_obj->pgops &&
		    entry->object.uvm_obj->pgops->pgo_reference)
			entry->object.uvm_obj->pgops->pgo_reference(
			    entry->object.uvm_obj);
	}
}

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset> 		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *	 
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.   note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?