
📄 vm_mmap.c

📁 OpenBSD VM device design
💻 C
📖 Page 1 of 2
 */
#if 0
    if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
        return (EINVAL);
#endif
    /* returns nothing but KERN_SUCCESS anyway */
    (void) vm_map_remove(map, addr, addr + size);
    return (0);
}

void
munmapfd(p, fd)
    struct proc *p;
    int fd;
{
#ifdef DEBUG
    if (mmapdebug & MDB_FOLLOW)
        printf("munmapfd(%d): fd %d\n", p->p_pid, fd);
#endif

    /*
     * XXX should vm_deallocate any regions mapped to this file
     */
    p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

int
mprotect(p, uap, retval)
    struct proc *p;
    struct mprotect_args /* {
        syscallarg(caddr_t) addr;
        syscallarg(int) len;
        syscallarg(int) prot;
    } */ *uap;
    register_t *retval;
{
    vm_offset_t addr;
    vm_size_t size;
    register vm_prot_t prot;

#ifdef DEBUG
    if (mmapdebug & MDB_FOLLOW)
        printf("mprotect(%d): addr %x len %x prot %d\n",
            p->p_pid, SCARG(uap, addr), SCARG(uap, len), SCARG(uap, prot));
#endif

    addr = (vm_offset_t)SCARG(uap, addr);
    if ((addr & PAGE_MASK) || SCARG(uap, len) < 0)
        return (EINVAL);
    size = (vm_size_t)SCARG(uap, len);
    prot = SCARG(uap, prot) & VM_PROT_ALL;

    switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
        FALSE)) {
    case KERN_SUCCESS:
        return (0);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    }
    return (EINVAL);
}

/* ARGSUSED */
int
madvise(p, uap, retval)
    struct proc *p;
    struct madvise_args /* {
        syscallarg(caddr_t) addr;
        syscallarg(int) len;
        syscallarg(int) behav;
    } */ *uap;
    register_t *retval;
{

    /* Not yet implemented */
    return (EOPNOTSUPP);
}

/* ARGSUSED */
int
mincore(p, uap, retval)
    struct proc *p;
    struct mincore_args /* {
        syscallarg(caddr_t) addr;
        syscallarg(int) len;
        syscallarg(char *) vec;
    } */ *uap;
    register_t *retval;
{

    /* Not yet implemented */
    return (EOPNOTSUPP);
}

int
mlock(p, uap, retval)
    struct proc *p;
    struct mlock_args /* {
        syscallarg(caddr_t) addr;
        syscallarg(size_t) len;
    } */ *uap;
    register_t *retval;
{
    vm_offset_t addr;
    vm_size_t size;
    int error;
    extern int vm_page_max_wired;

#ifdef DEBUG
    if (mmapdebug & MDB_FOLLOW)
        printf("mlock(%d): addr %x len %x\n",
            p->p_pid, SCARG(uap, addr), SCARG(uap, len));
#endif
    addr = (vm_offset_t)SCARG(uap, addr);
    if ((addr & PAGE_MASK) ||
        SCARG(uap, addr) + SCARG(uap, len) < SCARG(uap, addr))
        return (EINVAL);
    size = round_page((vm_size_t)SCARG(uap, len));
    if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
        return (EAGAIN);
#ifdef pmap_wired_count
    if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
        p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
        return (EAGAIN);
#else
    if (error = suser(p->p_ucred, &p->p_acflag))
        return (error);
#endif

    error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
    return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

int
munlock(p, uap, retval)
    struct proc *p;
    struct munlock_args /* {
        syscallarg(caddr_t) addr;
        syscallarg(size_t) len;
    } */ *uap;
    register_t *retval;
{
    vm_offset_t addr;
    vm_size_t size;
    int error;

#ifdef DEBUG
    if (mmapdebug & MDB_FOLLOW)
        printf("munlock(%d): addr %x len %x\n",
            p->p_pid, SCARG(uap, addr), SCARG(uap, len));
#endif
    addr = (vm_offset_t)SCARG(uap, addr);
    if ((addr & PAGE_MASK) ||
        SCARG(uap, addr) + SCARG(uap, len) < SCARG(uap, addr))
        return (EINVAL);
#ifndef pmap_wired_count
    if (error = suser(p->p_ucred, &p->p_acflag))
        return (error);
#endif
    size = round_page((vm_size_t)SCARG(uap, len));

    error = vm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
    return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
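/*
 * A hypothetical call site for the internal entry point below
 * (illustrative names, not from this file): the mmap system call
 * would hand it the process map, an address hint, and the backing
 * vnode, or NULL for MAP_ANON:
 *
 *     error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot,
 *         maxprot, flags, (caddr_t)vp, foff);
 */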
/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
    register vm_map_t map;
    register vm_offset_t *addr;
    register vm_size_t size;
    vm_prot_t prot, maxprot;
    register int flags;
    caddr_t handle;     /* XXX should be vp */
    vm_offset_t foff;
{
    register vm_pager_t pager;
    boolean_t fitit;
    vm_object_t object;
    struct vnode *vp = NULL;
    int type;
    int rv = KERN_SUCCESS;

    if (size == 0)
        return (0);

    if ((flags & MAP_FIXED) == 0) {
        fitit = TRUE;
        *addr = round_page(*addr);
    } else {
        fitit = FALSE;
        (void)vm_deallocate(map, *addr, size);
    }

    /*
     * Lookup/allocate pager.  All except an unnamed anonymous lookup
     * gain a reference to ensure continued existence of the object.
     * (XXX the exception is to appease the pageout daemon)
     */
    if (flags & MAP_ANON)
        type = PG_DFLT;
    else {
        vp = (struct vnode *)handle;
        if (vp->v_type == VCHR) {
            type = PG_DEVICE;
            handle = (caddr_t)vp->v_rdev;
        } else
            type = PG_VNODE;
    }
    pager = vm_pager_allocate(type, handle, size, prot, foff);
    if (pager == NULL)
        return (type == PG_DEVICE ? EINVAL : ENOMEM);
    /*
     * Find object and release extra reference gained by lookup
     */
    object = vm_object_lookup(pager);
    vm_object_deallocate(object);

    /*
     * Anonymous memory.
     */
    if (flags & MAP_ANON) {
        rv = vm_allocate_with_pager(map, addr, size, fitit,
                                    pager, foff, TRUE);
        if (rv != KERN_SUCCESS) {
            if (handle == NULL)
                vm_pager_deallocate(pager);
            else
                vm_object_deallocate(object);
            goto out;
        }
        /*
         * Don't cache anonymous objects.
         * Loses the reference gained by vm_pager_allocate.
         * Note that object will be NULL when handle == NULL,
         * this is ok since vm_allocate_with_pager has made
         * sure that these objects are uncached.
         */
        (void) pager_cache(object, FALSE);
#ifdef DEBUG
        if (mmapdebug & MDB_MAPIT)
            printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n",
                curproc->p_pid, *addr, size, pager);
#endif
    }
    /*
     * Must be a mapped file.
     * Distinguish between character special and regular files.
     */
    else if (vp->v_type == VCHR) {
        rv = vm_allocate_with_pager(map, addr, size, fitit,
                                    pager, foff, FALSE);
        /*
         * Uncache the object and lose the reference gained
         * by vm_pager_allocate().  If the call to
         * vm_allocate_with_pager() was successful, then we
         * gained an additional reference ensuring the object
         * will continue to exist.  If the call failed then
         * the deallocate call below will terminate the
         * object which is fine.
         */
        (void) pager_cache(object, FALSE);
        if (rv != KERN_SUCCESS)
            goto out;
    }
    /*
     * A regular file
     */
    else {
#ifdef DEBUG
        if (object == NULL)
            printf("vm_mmap: no object: vp %x, pager %x\n",
                vp, pager);
#endif
        /*
         * Map it directly.
         * Allows modifications to go out to the vnode.
         */
        if (flags & MAP_SHARED) {
            rv = vm_allocate_with_pager(map, addr, size,
                                        fitit, pager,
                                        foff, FALSE);
            if (rv != KERN_SUCCESS) {
                vm_object_deallocate(object);
                goto out;
            }
            /*
             * Don't cache the object.  This is the easiest way
             * of ensuring that data gets back to the filesystem
             * because vnode_pager_deallocate() will fsync the
             * vnode.  pager_cache() will lose the extra ref.
             */
            if (prot & VM_PROT_WRITE)
                pager_cache(object, FALSE);
            else
                vm_object_deallocate(object);
        }
        /*
         * Copy-on-write of file.  Two flavors.
         * MAP_COPY is true COW, you essentially get a snapshot of
         * the region at the time of mapping.  MAP_PRIVATE means only
         * that your changes are not reflected back to the object.
         * Changes made by others will be seen.
         */
        else {
            vm_map_t tmap;
            vm_offset_t off;

            /* locate and allocate the target address space */
            rv = vm_map_find(map, NULL, (vm_offset_t)0,
                             addr, size, fitit);
            if (rv != KERN_SUCCESS) {
                vm_object_deallocate(object);
                goto out;
            }
            tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS,
                                 VM_MIN_ADDRESS + size, TRUE);
            off = VM_MIN_ADDRESS;
            rv = vm_allocate_with_pager(tmap, &off, size,
                                        TRUE, pager,
                                        foff, FALSE);
            if (rv != KERN_SUCCESS) {
                vm_object_deallocate(object);
                vm_map_deallocate(tmap);
                goto out;
            }
            /*
             * (XXX)
             * MAP_PRIVATE implies that we see changes made by
             * others.  To ensure that we need to guarantee that
             * no copy object is created (otherwise original
             * pages would be pushed to the copy object and we
             * would never see changes made by others).  We
             * totally sleaze it right now by marking the object
             * internal temporarily.
             */
            if ((flags & MAP_COPY) == 0)
                object->flags |= OBJ_INTERNAL;
            rv = vm_map_copy(map, tmap, *addr, size, off,
                             FALSE, FALSE);
            object->flags &= ~OBJ_INTERNAL;
            /*
             * (XXX)
             * My oh my, this only gets worse...
             * Force creation of a shadow object so that
             * vm_map_fork will do the right thing.
             */
            if ((flags & MAP_COPY) == 0) {
                vm_map_t tmap;
                vm_map_entry_t tentry;
                vm_object_t tobject;
                vm_offset_t toffset;
                vm_prot_t tprot;
                boolean_t twired, tsu;

                tmap = map;
                vm_map_lookup(&tmap, *addr, VM_PROT_WRITE,
                              &tentry, &tobject, &toffset,
                              &tprot, &twired, &tsu);
                vm_map_lookup_done(tmap, tentry);
            }
            /*
             * (XXX)
             * Map copy code cannot detect sharing unless a
             * sharing map is involved.  So we cheat and write
             * protect everything ourselves.
             */
            vm_object_pmap_copy(object, foff, foff + size);
            vm_object_deallocate(object);
            vm_map_deallocate(tmap);
            if (rv != KERN_SUCCESS)
                goto out;
        }
#ifdef DEBUG
        if (mmapdebug & MDB_MAPIT)
            printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n",
                curproc->p_pid, *addr, size, pager);
#endif
    }

    /*
     * Correct protection (default is VM_PROT_ALL).
     * If maxprot is different from prot, we must set both explicitly.
     */
    rv = KERN_SUCCESS;
    if (maxprot != VM_PROT_ALL)
        rv = vm_map_protect(map, *addr, *addr + size, maxprot, TRUE);
    if (rv == KERN_SUCCESS && prot != maxprot)
        rv = vm_map_protect(map, *addr, *addr + size, prot, FALSE);
    if (rv != KERN_SUCCESS) {
        (void) vm_deallocate(map, *addr, size);
        goto out;
    }
    /*
     * Shared memory is also shared with children.
     */
    if (flags & MAP_SHARED) {
        rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
            (void) vm_deallocate(map, *addr, size);
            goto out;
        }
    }
out:
#ifdef DEBUG
    if (mmapdebug & MDB_MAPIT)
        printf("vm_mmap: rv %d\n", rv);
#endif
    switch (rv) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}
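For context, here is a minimal userland sketch (not part of the listing above) that exercises the system calls implemented in this file: it maps a file with MAP_SHARED, revokes write access with mprotect(), wires and unwires the pages with mlock()/munlock(), and unmaps. The path /tmp/example and the one-page length are illustrative assumptions, and error handling is abbreviated.

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
    size_t len = 4096;          /* one page on most systems; illustrative */
    int fd = open("/tmp/example", O_RDWR);      /* hypothetical file */

    if (fd == -1)
        err(1, "open");

    /* MAP_SHARED: stores reach the vnode (the regular-file,
     * MAP_SHARED branch of vm_mmap above). */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        err(1, "mmap");
    p[0] = 'x';

    /* The vm_map_protect() path: revoke write access. */
    if (mprotect(p, len, PROT_READ) == -1)
        err(1, "mprotect");

    /* The vm_map_pageable() path: wire, then unwire, the pages.
     * mlock() may fail with EAGAIN against RLIMIT_MEMLOCK. */
    if (mlock(p, len) == 0)
        (void)munlock(p, len);

    /* The vm_map_remove() path. */
    if (munmap(p, len) == -1)
        err(1, "munmap");
    close(fd);
    return (0);
}

Note that at this vintage madvise() and mincore() simply return EOPNOTSUPP, so a portable caller has to treat them as optional hints rather than rely on them.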
