uvm_mmap.c

来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 1,306 行 · 第 1/2 页

C
1,306
字号
	/*
	 * NOTE(review): tail of a status->errno switch; the function it
	 * belongs to begins before this chunk.
	 */
	case KERN_PAGES_LOCKED:	/* XXXCDC: uvm doesn't return this */
		return (EBUSY);
	default:
		return (EINVAL);
	}
	/*NOTREACHED*/
}

/*
 * sys_munmap: unmap a users memory
 *
 * => p/v/retval is the old-style syscall triple: calling process,
 *    pointer to the marshalled argument struct, result slot (retval
 *    is not used here).
 * => returns 0 on success, or EINVAL on a bad/wrapping range or when
 *    part of the range is not currently mapped.
 */

int
sys_munmap(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_munmap_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_map_t map;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args...
	 */

	addr = (vaddr_t) SCARG(uap, addr);
	size = (vsize_t) SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t) round_page(size);

	/* reject lengths so large they go negative when viewed as int */
	if ((int)size < 0)
		return (EINVAL);
	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (vm_min_address > 0 && addr < vm_min_address)
		return (EINVAL);
	if (addr > addr + size)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;

	vm_map_lock(map);	/* lock map so we can checkprot */

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}

	/*
	 * doit!
	 */

	(void) uvm_unmap_remove(map, addr, addr + size, &dead_entries);

	vm_map_unlock(map);	/* and unlock */

	/* drop references on the removed entries outside the map lock */
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);

	return (0);
}

/*
 * sys_mprotect: the mprotect system call
 *
 * => returns 0 on success, EACCES if the requested protection exceeds
 *    what the mapping allows, EINVAL otherwise.
 */

int
sys_mprotect(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_mprotect_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(int) len;
		syscallarg(int) prot;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int rv;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	/* mask off any bits beyond the valid protection flags */
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t) round_page(size);
	if ((int)size < 0)
		return (EINVAL);

	/*
	 * doit
	 */
#ifdef OSKIT
	{
	    extern vm_map_t kmem_map;	/* kernel malloc submap */
	    vm_map_t map;

	    /*
	     * We want to use mprotect in kernel malloc area.  To do so,
	     * if addr is in the submap (kmem_map), we have to pass kmem_map
	     * instead.  Otherwise, we will fail because uvm_map_protect
	     * returns KERN_INVALID_ARGUMENT.
	     */
	    if (/*p == &proc0 */
		p->p_vmspace == &vmspace0
		&& kmem_map->header.start <= (vaddr_t)addr
		&& (vaddr_t)addr < kmem_map->header.end) {
		map = kmem_map;
	    } else {
		map = &p->p_vmspace->vm_map;
	    }
	    rv = uvm_map_protect(map, addr, addr+size, prot, FALSE);
	}
#else
	rv = uvm_map_protect(&p->p_vmspace->vm_map,
			   addr, addr+size, prot, FALSE);
#endif

	/* map the VM return codes onto errno values */
	if (rv == KERN_SUCCESS)
		return (0);
	if (rv == KERN_PROTECTION_FAILURE)
		return (EACCES);
	return (EINVAL);
}

/*
 * sys_minherit: the minherit system call
 *
 * => sets the fork-inheritance attribute for the page-aligned range;
 *    returns 0, EACCES, or EINVAL (errno-style, like the handlers above).
 */

int
sys_minherit(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_minherit_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t) round_page(size);

	if ((int)size < 0)
		return (EINVAL);

	switch (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
			 inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

/*
 * sys_madvise: give advice about memory usage.
 */
/* ARGSUSED */
int
sys_madvise(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_madvise_args /* {
		syscallarg(caddr_t) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, rv;;	/* NOTE(review): stray extra ';' -- harmless
				 * empty statement, left as-is */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t) round_page(size);

	/*
	 * NOTE(review): this handler uses (ssize_t)/<= where the others use
	 * (int)/< -- so here a zero-length request is also rejected.
	 */
	if ((ssize_t)size <= 0)
		return (EINVAL);

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		rv = uvm_map_advice(&p->p_vmspace->vm_map, addr, addr + size,
		    advice);
		break;

	case MADV_WILLNEED:
		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		/*
		 * XXX IMPLEMENT ME.
		 * Should invent a "weak" mode for uvm_fault()
		 * which would only do the PGO_LOCKED pgo_get().
		 */
		return (0);

	case MADV_DONTNEED:
		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */
		rv = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_DEACTIVATE);
		break;

	case MADV_FREE:
		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */
		rv = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
		    PGO_FREE);
		break;

	case MADV_SPACEAVAIL:
		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */
		return (EINVAL);

	default:
		return (EINVAL);
	}

	/* translate the VM return code into an errno value */
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_NO_SPACE:
		return (EAGAIN);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_FAILURE:
		return (EIO);
	}

	return (EINVAL);
}

/*
 * sys_mlock: memory lock
 *
 * => wires the page-aligned range into physical memory.
 * => returns 0, EINVAL (wrap), EAGAIN (global or per-process wired-page
 *    limit exceeded), ENOMEM, or a suser() error when the platform has
 *    no pmap_wired_count and root is required.
 */

int
sys_mlock(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_mlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t) round_page(size);

	/* disallow wrap-around. */
	if (addr + (int)size < addr)
		return (EINVAL);

	/* system-wide wired-page ceiling */
	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return (EAGAIN);

#ifdef pmap_wired_count
	/* enforce the per-process RLIMIT_MEMLOCK resource limit */
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (EAGAIN);
#else
	/* no way to count wired pages: restrict mlock to the superuser */
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
	    0);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * sys_munlock: unlock wired pages
 *
 * => mirror image of sys_mlock: unwires the page-aligned range.
 */

int
sys_munlock(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_munlock_args /* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t) round_page(size);

	/* disallow wrap-around. */
	if (addr + (int)size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	/* TRUE => make the range pageable again (unwire) */
	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
	    0);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * sys_mlockall: lock all pages mapped into an address space.
 *
 * => flags must be a non-empty combination of MCL_CURRENT/MCL_FUTURE.
 */

int
sys_mlockall(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_mlockall_args /* {
		syscallarg(int) flags;
	} */ *uap = v;
	int error, flags;

	flags = SCARG(uap, flags);

	if (flags == 0 ||
	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
		return (EINVAL);

#ifndef pmap_wired_count
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
#endif

	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	switch (error) {
	case KERN_SUCCESS:
		error = 0;
		break;
	case KERN_NO_SPACE:	/* XXX overloaded */
		error = ENOMEM;
		break;
	default:
		/*
		 * "Some or all of the memory could not be locked when
		 * the call was made."
		 */
		error = EAGAIN;
	}

	return (error);
}

/*
 * sys_munlockall: unlock all pages mapped into an address space.
 *
 * => cannot fail: clears MCL_* state and unwires everything.
 */

int
sys_munlockall(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{

	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
	return (0);
}

/*
 * uvm_mmap: internal version of mmap
 *
 * - used by sys_mmap, exec, and sysv shm
 * - handle is a vnode pointer or NULL for MAP_ANON (XXX: not true,
 *	sysv shm uses "named anonymous memory")
 *   (under OSKIT, handle is instead an oskit_iunknown_t COM object)
 * - caller must page-align the file offset
 * - *addr is in/out: the suggested address on entry, the chosen
 *   address on successful return
 * - locklimit bounds the wired size when VM_MAP_WIREFUTURE is set
 *   (0 = no per-process limit)
 * - returns 0 or an errno value (ENOMEM/EAGAIN/EACCES/EINVAL)
 */

int
uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
	vm_map_t map;
	vaddr_t *addr;
	vsize_t size;
	vm_prot_t prot, maxprot;
	int flags;
	caddr_t handle;		/* XXX: VNODE? */
	voff_t foff;
	vsize_t locklimit;
{
	struct uvm_object *uobj;
#ifndef OSKIT
	struct vnode *vp;
#endif
	int retval;
	int advice = UVM_ADV_NORMAL;
	uvm_flag_t uvmflag = 0;

	/*
	 * check params
	 */

	if (size == 0)
		return(0);

	if (foff & PAGE_MASK)
		return(EINVAL);

	if ((prot & maxprot) != prot)
		return(EINVAL);

	/*
	 * for non-fixed mappings, round off the suggested address.
	 * for fixed mappings, check alignment and zap old mappings.
	 */

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);	/* round */
	} else {

		if (*addr & PAGE_MASK)
			return(EINVAL);
		uvmflag |= UVM_FLAG_FIXED;
		(void) uvm_unmap(map, *addr, *addr + size);	/* zap! */
	}

	/*
	 * handle anon vs. non-anon mappings.   for non-anon mappings attach
	 * to underlying vm object.
	 */

	if (flags & MAP_ANON) {
		foff = UVM_UNKNOWN_OFFSET;
		uobj = NULL;
		if ((flags & MAP_SHARED) == 0)
			/* XXX: defer amap create */
			uvmflag |= UVM_FLAG_COPYONW;
		else
			/* shared: create amap now */
			uvmflag |= UVM_FLAG_OVERLAY;

	} else {
#ifdef OSKIT
		/* OSKIT backs file mappings with a blkio COM interface */
	    	oskit_iunknown_t *iunknown = (oskit_iunknown_t*)handle;
		uobj = oskit_blkio_attach(iunknown);
		if (uobj == NULL)
			return(ENOMEM);
#else
		vp = (struct vnode *) handle;	/* get vnode */
		if (vp->v_type != VCHR) {
			/* regular file: private mappings lose write maxprot */
			uobj = uvn_attach((void *) vp, (flags & MAP_SHARED) ?
			   maxprot : (maxprot & ~VM_PROT_WRITE));

			/* XXX for now, attach doesn't gain a ref */
			VREF(vp);
		} else {
			/* character device: map through the device pager */
			uobj = udv_attach((void *) &vp->v_rdev,
			    (flags & MAP_SHARED) ?
			    maxprot : (maxprot & ~VM_PROT_WRITE), foff, size);
			advice = UVM_ADV_RANDOM;
		}

		if (uobj == NULL)
			return((vp->v_type == VREG) ? ENOMEM : EINVAL);
#endif
		if ((flags & MAP_SHARED) == 0)
			uvmflag |= UVM_FLAG_COPYONW;
	}

	/*
	 * set up mapping flags
	 */

	uvmflag = UVM_MAPFLAG(prot, maxprot,
			(flags & MAP_SHARED) ? UVM_INH_SHARE : UVM_INH_COPY,
			advice, uvmflag);

	/*
	 * do it!
	 */

	retval = uvm_map(map, addr, size, uobj, foff, 0, uvmflag);

	if (retval == KERN_SUCCESS) {
		/*
		 * POSIX 1003.1b -- if our address space was configured
		 * to lock all future mappings, wire the one we just made.
		 */
		if (prot == VM_PROT_NONE) {
			/*
			 * No more work to do in this case.
			 */
			return (0);
		}

		vm_map_lock(map);

		if (map->flags & VM_MAP_WIREFUTURE) {
			/* check global and (if countable) per-map limits */
			if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
#ifdef pmap_wired_count
			    || (locklimit != 0 && (size +
			         ptoa(pmap_wired_count(vm_map_pmap(map)))) >
			        locklimit)
#endif
			) {
				retval = KERN_RESOURCE_SHORTAGE;
				vm_map_unlock(map);
				/* unmap the region! */
				(void) uvm_unmap(map, *addr, *addr + size);
				goto bad;
			}

			/*
			 * uvm_map_pageable() always returns the map
			 * unlocked.
			 */
			retval = uvm_map_pageable(map, *addr, *addr + size,
			    FALSE, UVM_LK_ENTER);
			if (retval != KERN_SUCCESS) {
				/* unmap the region! */
				(void) uvm_unmap(map, *addr, *addr + size);
				goto bad;
			}
			return (0);
		}

		vm_map_unlock(map);
		return (0);
	}

	/*
	 * errors: first detach from the uobj, if any.
	 */

	if (uobj)
		uobj->pgops->pgo_detach(uobj);

	/* common exit: map VM status codes onto errno values */
 bad:
	switch (retval) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return(ENOMEM);
	case KERN_RESOURCE_SHORTAGE:
		return (EAGAIN);
	case KERN_PROTECTION_FAILURE:
		return(EACCES);
	}
	return(EINVAL);
}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?