uvm_glue.c

From "OSKIT source code for component-based operating system development" · C source · 625 lines total · page 1 of 2

C
625
字号
/*	$NetBSD: uvm_glue.c,v 1.43 2000/11/25 06:27:59 chs Exp $	*//*  * Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1991, 1993, The Regents of the University of California.   * * All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright *    notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimer in the *    documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software *    must display the following acknowledgement: *	This product includes software developed by Charles D. Cranor, *      Washington University, the University of California, Berkeley and  *      its contributors. * 4. Neither the name of the University nor the names of its contributors *    may be used to endorse or promote products derived from this software *    without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.  
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. *  * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. *  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. *  * Carnegie Mellon requests users of this software to return to * *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU *  School of Computer Science *  Carnegie Mellon University *  Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. 
*/#include "opt_uvmhist.h"#include "opt_sysv.h"/* * uvm_glue.c: glue functions */#include <sys/param.h>#include <sys/systm.h>#include <sys/proc.h>#include <sys/resourcevar.h>#include <sys/buf.h>#include <sys/user.h>#ifdef SYSVSHM#include <sys/shm.h>#endif#include <uvm/uvm.h>#include <machine/cpu.h>/* * local prototypes */static void uvm_swapout __P((struct proc *));/* * XXXCDC: do these really belong here? */unsigned maxdmap = MAXDSIZ;	/* kern_resource.c: RLIMIT_DATA max */unsigned maxsmap = MAXSSIZ;	/* kern_resource.c: RLIMIT_STACK max */int readbuffers = 0;		/* allow KGDB to read kern buffer pool */				/* XXX: see uvm_kernacc *//* * uvm_kernacc: can the kernel access a region of memory * * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c) */boolean_tuvm_kernacc(addr, len, rw)	caddr_t addr;	size_t len;	int rw;{	boolean_t rv;	vaddr_t saddr, eaddr;	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;	saddr = trunc_page((vaddr_t)addr);	eaddr = round_page((vaddr_t)addr + len);	vm_map_lock_read(kernel_map);	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);	vm_map_unlock_read(kernel_map);	/*	 * XXX there are still some things (e.g. the buffer cache) that	 * are managed behind the VM system's back so even though an	 * address is accessible in the mind of the VM system, there may	 * not be physical pages where the VM thinks there is.  This can	 * lead to bogus allocation of pages in the kernel address space	 * or worse, inconsistencies at the pmap level.  We only worry	 * about the buffer cache for now.	 */	if (!readbuffers && rv && (eaddr > (vaddr_t)buffers &&			     saddr < (vaddr_t)buffers + MAXBSIZE * nbuf))		rv = FALSE;	return(rv);}/* * uvm_useracc: can the user access it? * * - called from physio() and sys___sysctl(). */boolean_tuvm_useracc(addr, len, rw)	caddr_t addr;	size_t len;	int rw;{	vm_map_t map;	boolean_t rv;	vm_prot_t prot = rw == B_READ ? 
VM_PROT_READ : VM_PROT_WRITE;	/* XXX curproc */	map = &curproc->p_vmspace->vm_map;	vm_map_lock_read(map);	rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),	    round_page((vaddr_t)addr + len), prot);	vm_map_unlock_read(map);	return(rv);}#ifdef KGDB/* * Change protections on kernel pages from addr to addr+len * (presumably so debugger can plant a breakpoint). * * We force the protection change at the pmap level.  If we were * to use vm_map_protect a change to allow writing would be lazily- * applied meaning we would still take a protection fault, something * we really don't want to do.  It would also fragment the kernel * map unnecessarily.  We cannot use pmap_protect since it also won't * enforce a write-enable request.  Using pmap_enter is the only way * we can ensure the change takes place properly. */voiduvm_chgkprot(addr, len, rw)	caddr_t addr;	size_t len;	int rw;{	vm_prot_t prot;	paddr_t pa;	vaddr_t sva, eva;	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;	eva = round_page((vaddr_t)addr + len);	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {		/*		 * Extract physical address for the page.		 * We use a cheezy hack to differentiate physical		 * page 0 from an invalid mapping, not that it		 * really matters...		 */		if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)			panic("chgkprot: invalid page");		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);	}}#endif/* * vslock: wire user memory for I/O * * - called from physio and sys___sysctl * - XXXCDC: consider nuking this (or making it a macro?) 
*/intuvm_vslock(p, addr, len, access_type)	struct proc *p;	caddr_t	addr;	size_t	len;	vm_prot_t access_type;{	vm_map_t map;	vaddr_t start, end;	int rv;	map = &p->p_vmspace->vm_map;	start = trunc_page((vaddr_t)addr);	end = round_page((vaddr_t)addr + len);	rv = uvm_fault_wire(map, start, end, access_type);	return (rv);}/* * vslock: wire user memory for I/O * * - called from physio and sys___sysctl * - XXXCDC: consider nuking this (or making it a macro?) */voiduvm_vsunlock(p, addr, len)	struct proc *p;	caddr_t	addr;	size_t	len;{	uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),		round_page((vaddr_t)addr + len));}/* * uvm_fork: fork a virtual address space * * - the address space is copied as per parent map's inherit values * - a new "user" structure is allocated for the child process *	[filled in by MD layer...] * - if specified, the child gets a new user stack described by *	stack and stacksize * - NOTE: the kernel stack may be at a different location in the child *	process, and thus addresses of automatic variables may be invalid *	after cpu_fork returns in the child process.  We do nothing here *	after cpu_fork returns. * - XXXCDC: we need a way for this to return a failure value rather *   than just hang */voiduvm_fork(p1, p2, shared, stack, stacksize, func, arg)	struct proc *p1, *p2;	boolean_t shared;	void *stack;	size_t stacksize;	void (*func) __P((void *));	void *arg;{	struct user *up = p2->p_addr;	int rv;	if (shared == TRUE) {		p2->p_vmspace = NULL;		uvmspace_share(p1, p2);			/* share vmspace */	} else		p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */	/*	 * Wire down the U-area for the process, which contains the PCB	 * and the kernel stack.  Wired state is stored in p->p_flag's	 * P_INMEM bit rather than in the vm_map_entry's wired count	 * to prevent kernel_map fragmentation.	 *	 * Note the kernel stack gets read/write accesses right off	 * the bat.	 
*/	rv = uvm_fault_wire(kernel_map, (vaddr_t)up,	    (vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE);	if (rv != KERN_SUCCESS)		panic("uvm_fork: uvm_fault_wire failed: %d", rv);	/*	 * p_stats currently points at a field in the user struct.  Copy	 * parts of p_stats, and zero out the rest.	 */	p2->p_stats = &up->u_stats;	memset(&up->u_stats.pstat_startzero, 0,	       ((caddr_t)&up->u_stats.pstat_endzero -		(caddr_t)&up->u_stats.pstat_startzero));	memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,	       ((caddr_t)&up->u_stats.pstat_endcopy -		(caddr_t)&up->u_stats.pstat_startcopy));		/*

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?