vm_map.c

open bsd vm device design (C)
Page 1 of 5
			else
				pmap_protect(map->pmap, current->start,
					current->end,
					current->protection & MASK(entry));
#undef	max
#undef	MASK
		}

		current = current->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vm_map_fork.
 */
int
vm_map_inherit(map, start, end, new_inheritance)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_inherit_t	new_inheritance;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t	temp_entry;

	switch (new_inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else
		entry = temp_entry->next;

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->inheritance = new_inheritance;

		entry = entry->next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_pageable:
 *
 *	Sets the pageability of the specified address
 *	range in the target map.  Regions specified
 *	as not pageable require locked-down physical
 *	memory and physical page maps.
 *
 *	The map must not be locked, but a reference
 *	must remain to the map throughout the call.
 */
int
vm_map_pageable(map, start, end, new_pageable)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register boolean_t	new_pageable;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		start_entry;
	register vm_offset_t	failed;
	int			rv;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	/*
	 *	Only one pageability change may take place at one
	 *	time, since vm_fault assumes it will be called
	 *	only once for each wiring/unwiring.  Therefore, we
	 *	have to make sure we're actually changing the pageability
	 *	for the entire region.  We do so before making any changes.
	 */
	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}
	entry = start_entry;

	/*
	 *	Actions are rather different for wiring and unwiring,
	 *	so we have two separate cases.
	 */
	if (new_pageable) {

		vm_map_clip_start(map, entry, start);

		/*
		 *	Unwiring.  First ensure that the range to be
		 *	unwired is really wired down and that there
		 *	are no holes.
		 */
		while ((entry != &map->header) && (entry->start < end)) {

		    if (entry->wired_count == 0 ||
			(entry->end < end &&
			 (entry->next == &map->header ||
			  entry->next->start > entry->end))) {
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		    }
		    entry = entry->next;
		}

		/*
		 *	Now decrement the wiring count for each region.
		 *	If a region becomes completely unwired,
		 *	unwire its physical pages and mappings.
		 */
		vm_map_set_recursive(&map->lock);

		entry = start_entry;
		while ((entry != &map->header) && (entry->start < end)) {
		    vm_map_clip_end(map, entry, end);

		    entry->wired_count--;
		    if (entry->wired_count == 0)
			vm_fault_unwire(map, entry->start, entry->end);

		    entry = entry->next;
		}
		vm_map_clear_recursive(&map->lock);
	}

	else {
		/*
		 *	Wiring.  We must do this in two passes:
		 *
		 *	1.  Holding the write lock, we create any shadow
		 *	    or zero-fill objects that need to be created.
		 *	    Then we clip each map entry to the region to be
		 *	    wired and increment its wiring count.  We
		 *	    create objects before clipping the map entries
		 *	    to avoid object proliferation.
		 *
		 *	2.  We downgrade to a read lock, and call
		 *	    vm_fault_wire to fault in the pages for any
		 *	    newly wired area (wired_count is 1).
		 *
		 *	Downgrading to a read lock for vm_fault_wire avoids
		 *	a possible deadlock with another thread that may have
		 *	faulted on one of the pages to be wired (it would mark
		 *	the page busy, blocking us, then in turn block on the
		 *	map lock that we hold).  Because of problems in the
		 *	recursive lock package, we cannot upgrade to a write
		 *	lock in vm_map_lookup.  Thus, any actions that require
		 *	the write lock must be done beforehand.  Because we
		 *	keep the read lock on the map, the copy-on-write status
		 *	of the entries we modify here cannot change.
		 */

		/*
		 *	Pass 1.
		 */
		while ((entry != &map->header) && (entry->start < end)) {
		    if (entry->wired_count == 0) {

			/*
			 *	Perform actions of vm_map_lookup that need
			 *	the write lock on the map: create a shadow
			 *	object for a copy-on-write region, or an
			 *	object for a zero-fill region.
			 *
			 *	We don't have to do this for entries that
			 *	point to sharing maps, because we won't hold
			 *	the lock on the sharing map.
			 */
			if (!entry->is_a_map) {
			    if (entry->needs_copy &&
				((entry->protection & VM_PROT_WRITE) != 0)) {

				vm_object_shadow(&entry->object.vm_object,
						&entry->offset,
						(vm_size_t)(entry->end
							- entry->start));
				entry->needs_copy = FALSE;
			    }
			    else if (entry->object.vm_object == NULL) {
				entry->object.vm_object =
				    vm_object_allocate((vm_size_t)(entry->end
							- entry->start));
				entry->offset = (vm_offset_t)0;
			    }
			}
		    }
		    vm_map_clip_start(map, entry, start);
		    vm_map_clip_end(map, entry, end);
		    entry->wired_count++;

		    /*
		     * Check for holes
		     */
		    if (entry->end < end &&
			(entry->next == &map->header ||
			 entry->next->start > entry->end)) {
			/*
			 *	Found one.  Object creation actions
			 *	do not need to be undone, but the
			 *	wired counts need to be restored.
			 */
			while (entry != &map->header && entry->end > start) {
			    entry->wired_count--;
			    entry = entry->prev;
			}
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		    }
		    entry = entry->next;
		}

		/*
		 *	Pass 2.
		 */

		/*
		 * HACK HACK HACK HACK
		 *
		 * If we are wiring in the kernel map or a submap of it,
		 * unlock the map to avoid deadlocks.  We trust that the
		 * kernel threads are well-behaved, and therefore will
		 * not do anything destructive to this region of the map
		 * while we have it unlocked.  We cannot trust user threads
		 * to do the same.
		 *
		 * HACK HACK HACK HACK
		 */
		if (vm_map_pmap(map) == kernel_pmap) {
		    vm_map_unlock(map);		/* trust me ... */
		}
		else {
		    vm_map_set_recursive(&map->lock);
		    lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);
		}

		rv = 0;
		entry = start_entry;
		while (entry != &map->header && entry->start < end) {
		    /*
		     * If vm_fault_wire fails for any page we need to
		     * undo what has been done.  We decrement the wiring
		     * count for those pages which have not yet been
		     * wired (now) and unwire those that have (later).
		     *
		     * XXX this violates the locking protocol on the map,
		     * needs to be fixed.
		     */
		    if (rv)
			entry->wired_count--;
		    else if (entry->wired_count == 1) {
			rv = vm_fault_wire(map, entry->start, entry->end);
			if (rv) {
			    failed = entry->start;
			    entry->wired_count--;
			}
		    }
		    entry = entry->next;
		}

		if (vm_map_pmap(map) == kernel_pmap) {
		    vm_map_lock(map);
		}
		else {
		    vm_map_clear_recursive(&map->lock);
		}
		if (rv) {
		    vm_map_unlock(map);
		    (void) vm_map_pageable(map, start, failed, TRUE);
		    return(rv);
		}
	}

	vm_map_unlock(map);

	return(KERN_SUCCESS);
}

/*
 * vm_map_clean
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_clean(map, start, end, syncio, invalidate)
	vm_map_t	map;
	vm_offset_t	start;
	vm_offset_t	end;
	boolean_t	syncio;
	boolean_t	invalidate;
{
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_offset_t offset;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return(KERN_INVALID_ADDRESS);
	}

	/*
	 * Make a first pass to check for holes.
	 */
	for (current = entry; current->start < end; current = current->next) {
		if (current->is_sub_map) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		     current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current->start < end; current = current->next) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->is_a_map) {
			register vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.share_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_object_lock(object);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
			vm_object_lock(object);
		}
		/*
		 * Flush pages if writing is allowed.
		 * XXX should we continue on an error?
		 */
		if ((current->protection & VM_PROT_WRITE) &&
		    !vm_object_page_clean(object, offset, offset+size,
					  syncio, FALSE)) {
			vm_object_unlock(object);
			vm_map_unlock_read(map);
			return(KERN_FAILURE);
		}
		if (invalidate)
			vm_object_page_remove(object, offset, offset+size);
		vm_object_unlock(object);
		start += size;
	}

	vm_map_unlock_read(map);
	return(KERN_SUCCESS);
}

/*
 *	vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
void
vm_map_entry_unwire(map, entry)
	vm_map_t		map;
	register vm_map_entry_t	entry;
{
	vm_fault_unwire(map, entry->start, entry->end);
	entry->wired_count = 0;
}

/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
void
vm_map_entry_delete(map, entry)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
{
	if (entry->wired_count != 0)
		vm_map_entry_unwire(map, entry);

	vm_map_entry_unlink(map, entry);
	map->size -= entry->end - entry->start;

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_deallocate(entry->object.share_map);
	else
		vm_object_deallocate(entry->object.vm_object);

	vm_map_entry_dispose(map, entry);
}

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 *
 *	When called with a sharing map, removes pages from
 *	that region from all physical maps.
 */
int
vm_map_delete(map, start, end)
	register vm_map_t	map;
	vm_offset_t		start;
	register vm_offset_t	end;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		first_entry;

	/*
	 *	Find the start of the region, and clip it
	 */

	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 *	Fix the lookup hint now, rather than each
		 *	time though the loop.
		 */
		SAVE_HINT(map, entry->prev);
	}

	/*
	 *	Save the free space hint
	 */

	if (map->first_free->start >= start)
		map->first_free = entry->prev;

	/*
	 *	Step through all entries in this region
	 */

	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t		next;
		register vm_offset_t	s, e;
		register vm_object_t	object;

		vm_map_clip_end(map, entry, end);

		next = entry->next;
		s = entry->start;
		e = entry->end;

		/*
		 *	Unwire before removing addresses from the pmap;
		 *	otherwise, unwiring will put the entries back in
		 *	the pmap.
		 */

		object = entry->object.vm_object;
		if (entry->wired_count != 0)
			vm_map_entry_unwire(map, entry);

		/*
		 *	If this is a sharing map, we must remove
		 *	*all* references to this data, since we can't
		 *	find all of the physical maps which are sharing
		 *	it.
		 */

		if (object == kernel_object || object == kmem_object)
			vm_object_page_remove(object, entry->offset,
					entry->offset + (e - s));
		else if (!map->is_main_map)
			vm_object_pmap_remove(object,
					 entry->offset,
					 entry->offset + (e - s));
		else
			pmap_remove(map->pmap, s, e);

		/*
		 *	Delete the entry (which may delete the object)
		 *	only after removing all pmap entries pointing
		 *	to its pages.  (Otherwise, its page frames may
		 *	be reallocated, and any modify bits will be
		 *	set in the wrong object!)
		 */
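
For reference, vm_map_pageable() above is the primitive that memory lock-down paths build on. The sketch below is an editor-added illustration only, not part of vm_map.c; it assumes the kernel_map global and the KERN_SUCCESS/boolean_t conventions used throughout this file, and the helper name is hypothetical.

/*
 * Hypothetical caller of vm_map_pageable() (illustration only, not part
 * of vm_map.c).  The map is passed unlocked, as the function's header
 * comment requires; new_pageable == FALSE wires the range, TRUE unwires.
 */
static int
with_wired_range(start, end)
	vm_offset_t	start, end;
{
	int	rv;

	rv = vm_map_pageable(kernel_map, start, end, FALSE);	/* wire */
	if (rv != KERN_SUCCESS)
		return(rv);

	/* ... touch memory that must stay resident ... */

	return(vm_map_pageable(kernel_map, start, end, TRUE));	/* unwire */
}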
