
vm_map.c

open bsd vm device design · C · Page 1 of 5
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return(KERN_SUCCESS);
}

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(map, start, end)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register int		result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);

	return(result);
}
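/*
 * Illustrative use (a hypothetical caller, not code from this file):
 * the range handed to vm_map_remove() is expected to be page-aligned,
 * so a caller would typically write something like
 *
 *	(void) vm_map_remove(map, trunc_page(addr),
 *	    round_page(addr + size));
 *
 * using the usual BSD trunc_page()/round_page() rounding macros.
 */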
/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(map, start, end, protection)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	protection;
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		return(FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header) {
			return(FALSE);
		}

		/*
		 *	No holes allowed!
		 */
		if (start < entry->start) {
			return(FALSE);
		}

		/*
		 * Check protection associated with entry.
		 */
		if ((entry->protection & protection) != protection) {
			return(FALSE);
		}

		/* go to next entry */
		start = entry->end;
		entry = entry->next;
	}
	return(TRUE);
}
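/*
 * Note that vm_map_copy() below depends on this routine: it checks
 * the source range for VM_PROT_READ and the destination range for
 * VM_PROT_WRITE before copying.  Because the loop above advances
 * with start = entry->end, any hole in the region fails the check.
 */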
/*
 *	vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
	vm_map_t		src_map, dst_map;
	register vm_map_entry_t	src_entry, dst_entry;
{
	vm_object_t	temp_object;

	if (src_entry->is_sub_map || dst_entry->is_sub_map)
		return;

	if (dst_entry->object.vm_object != NULL &&
	    (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
		printf("vm_map_copy_entry: copying over permanent data!\n");

	/*
	 *	If our destination map was wired down,
	 *	unwire it now.
	 */
	if (dst_entry->wired_count != 0)
		vm_map_entry_unwire(dst_map, dst_entry);

	/*
	 *	If we're dealing with a sharing map, we
	 *	must remove the destination pages from
	 *	all maps (since we cannot know which maps
	 *	this sharing map belongs in).
	 */
	if (dst_map->is_main_map)
		pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end);
	else
		vm_object_pmap_remove(dst_entry->object.vm_object,
			dst_entry->offset,
			dst_entry->offset +
				(dst_entry->end - dst_entry->start));

	if (src_entry->wired_count == 0) {

		boolean_t	src_needs_copy;

		/*
		 *	If the source entry is marked needs_copy,
		 *	it is already write-protected.
		 */
		if (!src_entry->needs_copy) {

			boolean_t	su;

			/*
			 *	If the source entry has only one mapping,
			 *	we can just protect the virtual address
			 *	range.
			 */
			if (!(su = src_map->is_main_map)) {
				simple_lock(&src_map->ref_lock);
				su = (src_map->ref_count == 1);
				simple_unlock(&src_map->ref_lock);
			}

			if (su) {
				pmap_protect(src_map->pmap,
					src_entry->start,
					src_entry->end,
					src_entry->protection & ~VM_PROT_WRITE);
			}
			else {
				vm_object_pmap_copy(src_entry->object.vm_object,
					src_entry->offset,
					src_entry->offset +
						(src_entry->end - src_entry->start));
			}
		}

		/*
		 *	Make a copy of the object.
		 */
		temp_object = dst_entry->object.vm_object;
		vm_object_copy(src_entry->object.vm_object,
				src_entry->offset,
				(vm_size_t)(src_entry->end -
					    src_entry->start),
				&dst_entry->object.vm_object,
				&dst_entry->offset,
				&src_needs_copy);

		/*
		 *	If we didn't get a copy-object now, mark the
		 *	source map entry so that a shadow will be created
		 *	to hold its changed pages.
		 */
		if (src_needs_copy)
			src_entry->needs_copy = TRUE;

		/*
		 *	The destination always needs to have a shadow
		 *	created.
		 */
		dst_entry->needs_copy = TRUE;

		/*
		 *	Mark the entries copy-on-write, so that write-enabling
		 *	the entry won't make copy-on-write pages writable.
		 */
		src_entry->copy_on_write = TRUE;
		dst_entry->copy_on_write = TRUE;

		/*
		 *	Get rid of the old object.
		 */
		vm_object_deallocate(temp_object);

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
			dst_entry->end - dst_entry->start, src_entry->start);
	}
	else {
		/*
		 *	Of course, wired down pages can't be set copy-on-write.
		 *	Cause wired pages to be copied into the new
		 *	map by simulating faults (the new pages are
		 *	pageable)
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
	}
}
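/*
 * Summary of the copy-on-write path above: the source pages are
 * write-protected (unless needs_copy already implies it), both
 * entries are marked copy_on_write, and needs_copy is set wherever
 * a shadow object must still be created -- always for the
 * destination, and for the source when vm_object_copy() could not
 * produce a copy object immediately.  The first write fault then
 * builds the shadow.  Wired pages can never take that fault, so
 * vm_fault_copy_entry() copies them eagerly instead.
 */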
/*
 *	vm_map_copy:
 *
 *	Perform a virtual memory copy from the source
 *	address map/range to the destination map/range.
 *
 *	If src_destroy or dst_alloc is requested,
 *	the source and destination regions should be
 *	disjoint, not only in the top-level map, but
 *	in the sharing maps as well.  [The best way
 *	to guarantee this is to use a new intermediate
 *	map to make copies.  This also reduces map
 *	fragmentation.]
 */
int
vm_map_copy(dst_map, src_map,
	    dst_addr, len, src_addr,
	    dst_alloc, src_destroy)
	vm_map_t	dst_map;
	vm_map_t	src_map;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
	boolean_t	dst_alloc;
	boolean_t	src_destroy;
{
	register vm_map_entry_t	src_entry;
	register vm_map_entry_t	dst_entry;
	vm_map_entry_t	tmp_entry;
	vm_offset_t	src_start;
	vm_offset_t	src_end;
	vm_offset_t	dst_start;
	vm_offset_t	dst_end;
	vm_offset_t	src_clip;
	vm_offset_t	dst_clip;
	int		result;
	boolean_t	old_src_destroy;

	/*
	 *	XXX While we figure out why src_destroy screws up,
	 *	we'll do it by explicitly vm_map_delete'ing at the end.
	 */
	old_src_destroy = src_destroy;
	src_destroy = FALSE;

	/*
	 *	Compute start and end of region in both maps
	 */
	src_start = src_addr;
	src_end = src_start + len;
	dst_start = dst_addr;
	dst_end = dst_start + len;

	/*
	 *	Check that the region can exist in both source
	 *	and destination.
	 */
	if ((dst_end < dst_start) || (src_end < src_start))
		return(KERN_NO_SPACE);

	/*
	 *	Lock the maps in question -- we avoid deadlock
	 *	by ordering lock acquisition by map value
	 */
	if (src_map == dst_map) {
		vm_map_lock(src_map);
	}
	else if ((long) src_map < (long) dst_map) {
		vm_map_lock(src_map);
		vm_map_lock(dst_map);
	} else {
		vm_map_lock(dst_map);
		vm_map_lock(src_map);
	}

	result = KERN_SUCCESS;

	/*
	 *	Check protections... source must be completely readable and
	 *	destination must be completely writable.  [Note that if we're
	 *	allocating the destination region, we don't have to worry
	 *	about protection, but instead about whether the region
	 *	exists.]
	 */
	if (src_map->is_main_map && dst_map->is_main_map) {
		if (!vm_map_check_protection(src_map, src_start, src_end,
					VM_PROT_READ)) {
			result = KERN_PROTECTION_FAILURE;
			goto Return;
		}

		if (dst_alloc) {
			/* XXX Consider making this a vm_map_find instead */
			if ((result = vm_map_insert(dst_map, NULL,
					(vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
				goto Return;
		}
		else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
					VM_PROT_WRITE)) {
			result = KERN_PROTECTION_FAILURE;
			goto Return;
		}
	}

	/*
	 *	Find the start entries and clip.
	 *
	 *	Note that checking protection asserts that the
	 *	lookup cannot fail.
	 *
	 *	Also note that we wait to do the second lookup
	 *	until we have done the first clip, as the clip
	 *	may affect which entry we get!
	 */
	(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
	src_entry = tmp_entry;
	vm_map_clip_start(src_map, src_entry, src_start);

	(void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
	dst_entry = tmp_entry;
	vm_map_clip_start(dst_map, dst_entry, dst_start);

	/*
	 *	If both source and destination entries are the same,
	 *	retry the first lookup, as it may have changed.
	 */
	if (src_entry == dst_entry) {
		(void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
		src_entry = tmp_entry;
	}

	/*
	 *	If source and destination entries are still the same,
	 *	a null copy is being performed.
	 */
	if (src_entry == dst_entry)
		goto Return;

	/*
	 *	Go through entries until we get to the end of the
	 *	region.
	 */
	while (src_start < src_end) {
		/*
		 *	Clip the entries to the endpoint of the entire region.
		 */
		vm_map_clip_end(src_map, src_entry, src_end);
		vm_map_clip_end(dst_map, dst_entry, dst_end);

		/*
		 *	Clip each entry to the endpoint of the other entry.
		 */
		src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
		vm_map_clip_end(src_map, src_entry, src_clip);

		dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
		vm_map_clip_end(dst_map, dst_entry, dst_clip);

		/*
		 *	Both entries now match in size and relative endpoints.
		 *
		 *	If both entries refer to a VM object, we can
		 *	deal with them now.
		 */
		if (!src_entry->is_a_map && !dst_entry->is_a_map) {
			vm_map_copy_entry(src_map, dst_map, src_entry,
						dst_entry);
		}
		else {
			register vm_map_t	new_dst_map;
			vm_offset_t		new_dst_start;
			vm_size_t		new_size;
			vm_map_t		new_src_map;
			vm_offset_t		new_src_start;

			/*
			 *	We have to follow at least one sharing map.
			 */
			new_size = (dst_entry->end - dst_entry->start);

			if (src_entry->is_a_map) {
				new_src_map = src_entry->object.share_map;
				new_src_start = src_entry->offset;
			}
			else {
				new_src_map = src_map;
				new_src_start = src_entry->start;
				vm_map_set_recursive(&src_map->lock);
			}

			if (dst_entry->is_a_map) {
				vm_offset_t	new_dst_end;

				new_dst_map = dst_entry->object.share_map;
				new_dst_start = dst_entry->offset;

				/*
				 *	Since the destination sharing entries
				 *	will be merely deallocated, we can
				 *	do that now, and replace the region
				 *	with a null object.  [This prevents
				 *	splitting the source map to match
				 *	the form of the destination map.]
				 *	Note that we can only do so if the
				 *	source and destination do not overlap.
				 */
				new_dst_end = new_dst_start + new_size;

				if (new_dst_map != new_src_map) {
					vm_map_lock(new_dst_map);
					(void) vm_map_delete(new_dst_map,
							new_dst_start,
							new_dst_end);
					(void) vm_map_insert(new_dst_map,
							NULL,
							(vm_offset_t) 0,
							new_dst_start,
							new_dst_end);
					vm_map_unlock(new_dst_map);
				}
			}
			else {
				new_dst_map = dst_map;
				new_dst_start = dst_entry->start;
				vm_map_set_recursive(&dst_map->lock);
			}

			/*
			 *	Recursively copy the sharing map.
			 */
			(void) vm_map_copy(new_dst_map, new_src_map,
				new_dst_start, new_size, new_src_start,
				FALSE, FALSE);

			if (dst_map == new_dst_map)
				vm_map_clear_recursive(&dst_map->lock);
			if (src_map == new_src_map)
				vm_map_clear_recursive(&src_map->lock);
		}

		/*
		 *	Update variables for next pass through the loop.
		 */
		src_start = src_entry->end;
		src_entry = src_entry->next;
		dst_start = dst_entry->end;
		dst_entry = dst_entry->next;

		/*
		 *	If the source is to be destroyed, here is the
		 *	place to do it.
		 */
		if (src_destroy && src_map->is_main_map &&
						dst_map->is_main_map)
			vm_map_entry_delete(src_map, src_entry->prev);
	}

	/*
	 *	Update the physical maps as appropriate
	 */
	if (src_map->is_main_map && dst_map->is_main_map) {
		if (src_destroy)
			pmap_remove(src_map->pmap, src_addr, src_addr + len);
	}

	/*
	 *	Unlock the maps
	 */
	Return: ;
	if (old_src_destroy)
		vm_map_delete(src_map, src_addr, src_addr + len);

	vm_map_unlock(src_map);
	if (src_map != dst_map)
		vm_map_unlock(dst_map);

	return(result);
}

/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(vm1)
	register struct vmspace *vm1;
{
	register struct vmspace *vm2;
	vm_map_t	old_map = &vm1->vm_map;
	vm_map_t	new_map;
	vm_map_entry_t	old_entry;
	vm_map_entry_t	new_entry;
	pmap_t		new_pmap;
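
The deadlock-avoidance idiom in vm_map_copy above (take both map locks in one fixed global order, here ascending address order) is worth seeing in isolation. What follows is a minimal user-space sketch of the same idea, using POSIX mutexes in place of the kernel's map locks; the toy_map structure and every name in it are invented for illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a vm_map: just a lock plus some state. */
struct toy_map {
	pthread_mutex_t	lock;
	int		nentries;
};

/*
 * Acquire two map locks in ascending address order, the same rule
 * vm_map_copy uses.  A map paired with itself is locked only once.
 */
static void
lock_pair(struct toy_map *a, struct toy_map *b)
{
	if (a == b)
		pthread_mutex_lock(&a->lock);
	else if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void
unlock_pair(struct toy_map *a, struct toy_map *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int
main(void)
{
	struct toy_map m1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct toy_map m2 = { PTHREAD_MUTEX_INITIALIZER, 0 };

	/*
	 * Whatever order callers name the maps in, the locks are
	 * taken in one global order, so concurrent "copies" in the
	 * directions m1->m2 and m2->m1 cannot deadlock on each other.
	 */
	lock_pair(&m2, &m1);
	m1.nentries++;
	m2.nentries++;
	unlock_pair(&m2, &m1);

	printf("m1: %d entries, m2: %d entries\n", m1.nentries, m2.nentries);
	return (0);
}

Because every thread that needs both locks acquires them in the same ascending-address order, no cycle of waiters can form; the equal-pointer case is checked first so a single map is locked exactly once, just as vm_map_copy does with its (long) src_map < (long) dst_map comparison.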
