
📄 vm_map.c

📁 OpenBSD VM device design
💻 C
	map->first_free = new_entry;
	return(KERN_SUCCESS);
}

/*
 *	SAVE_HINT:
 *
 *	Saves the specified entry as the hint for
 *	future lookups.  Performs necessary interlocks.
 */
#define	SAVE_HINT(map,value) \
		simple_lock(&(map)->hint_lock); \
		(map)->hint = (value); \
		simple_unlock(&(map)->hint_lock);

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(map, address, entry)
	register vm_map_t	map;
	register vm_offset_t	address;
	vm_map_entry_t		*entry;		/* OUT */
{
	register vm_map_entry_t		cur;
	register vm_map_entry_t		last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	simple_lock(&map->hint_lock);
	cur = map->hint;
	simple_unlock(&map->hint_lock);

	if (cur == &map->header)
		cur = cur->next;

	if (address >= cur->start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = &map->header;
		if ((cur != last) && (cur->end > address)) {
			*entry = cur;
			return(TRUE);
		}
	}
	else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->next;
		cur = map->header.next;
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->end > address) {
			if (address >= cur->start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT(map, cur);
				return(TRUE);
			}
			break;
		}
		cur = cur->next;
	}
	*entry = cur->prev;
	SAVE_HINT(map, *entry);
	return(FALSE);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(map, start, length, addr)
	register vm_map_t map;
	register vm_offset_t start;
	vm_size_t length;
	vm_offset_t *addr;
{
	register vm_map_entry_t entry, next;
	register vm_offset_t end;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * Look for the first possible address; if there's already
	 * something at this address, we have to start after it.
	 */
	if (start == map->min_offset) {
		if ((entry = map->first_free) != &map->header)
			start = entry->end;
	} else {
		vm_map_entry_t tmp;

		if (vm_map_lookup_entry(map, start, &tmp))
			start = tmp->end;
		entry = tmp;
	}

	/*
	 * Look through the rest of the map, trying to fit a new region in
	 * the gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address;
		 * if so, we lose.  Otherwise, if this is the last entry, or
		 * if the proposed new region fits before the next entry, we
		 * win.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;
		if (next == &map->header || next->start >= end)
			break;
	}
	SAVE_HINT(map, entry);
	*addr = start;
	return (0);
}

/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.
 *	The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 */
int
vm_map_find(map, object, offset, addr, length, find_space)
	vm_map_t	map;
	vm_object_t	object;
	vm_offset_t	offset;
	vm_offset_t	*addr;		/* IN/OUT */
	vm_size_t	length;
	boolean_t	find_space;
{
	register vm_offset_t	start;
	int			result;

	start = *addr;
	vm_map_lock(map);
	if (find_space) {
		if (vm_map_findspace(map, start, length, addr)) {
			vm_map_unlock(map);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, object, offset, start, start + length);
	vm_map_unlock(map);
	return (result);
}

/*
 *	vm_map_simplify_entry:	[ internal use only ]
 *
 *	Simplify the given map entry by:
 *		removing extra sharing maps
 *		[XXX maybe later] merging with a neighbor
 */
void
vm_map_simplify_entry(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
#ifdef	lint
	map++;
#endif

	/*
	 *	If this entry corresponds to a sharing map, then
	 *	see if we can remove the level of indirection.
	 *	If it's not a sharing map, then it points to
	 *	a VM object, so see if we can merge with either
	 *	of our neighbors.
	 */
	if (entry->is_sub_map)
		return;
	if (entry->is_a_map) {
#if	0
		vm_map_t	my_share_map;
		int		count;

		my_share_map = entry->object.share_map;
		simple_lock(&my_share_map->ref_lock);
		count = my_share_map->ref_count;
		simple_unlock(&my_share_map->ref_lock);

		if (count == 1) {
			/* Can move the region from
			 * entry->start to entry->end (+ entry->offset)
			 * in my_share_map into place of entry.
			 * Later.
			 */
		}
#endif
	}
	else {
		/*
		 *	Try to merge with our neighbors.
		 *
		 *	Conditions for merge are:
		 *
		 *	1.  entries are adjacent.
		 *	2.  both entries point to objects
		 *	    with null pagers.
		 *
		 *	If a merge is possible, we replace the two
		 *	entries with a single entry, then merge
		 *	the two objects into a single object.
		 *
		 *	Now, all that is left to do is write the
		 *	code!
		 */
	}
}

/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(map, entry, start)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	start;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	See if we can simplify this entry first
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 *	Split off the front portion --
	 *	note that we must insert the new
	 *	entry BEFORE this one, so that
	 *	this entry has the specified starting
	 *	address.
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if (endaddr < entry->end) \
		_vm_map_clip_end(map, entry, endaddr); \
}

/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(map, entry, end)
	register vm_map_t	map;
	register vm_map_entry_t	entry;
	register vm_offset_t	end;
{
	register vm_map_entry_t	new_entry;

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	if (entry->is_a_map || entry->is_sub_map)
		vm_map_reference(new_entry->object.share_map);
	else
		vm_object_reference(new_entry->object.vm_object);
}

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}

/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(map, start, end, submap)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	vm_map_t		submap;
{
	vm_map_entry_t		entry;
	register int		result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    (!entry->is_a_map) &&
	    (entry->object.vm_object == NULL) &&
	    (!entry->copy_on_write)) {
		entry->is_a_map = FALSE;
		entry->is_sub_map = TRUE;
		vm_map_reference(entry->object.sub_map = submap);
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return(result);
}

/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(map, start, end, new_prot, set_max)
	register vm_map_t	map;
	register vm_offset_t	start;
	register vm_offset_t	end;
	register vm_prot_t	new_prot;
	register boolean_t	set_max;
{
	register vm_map_entry_t		current;
	vm_map_entry_t			entry;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	/*
	 *	Make a first pass to check for protection
	 *	violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->is_sub_map) {
			vm_map_unlock(map);
			return(KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return(KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 *	Go back and fix up protections.
	 *	[Note that clipping is not necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t	old_prot;

		vm_map_clip_end(map, current, end);

		old_prot = current->protection;
		if (set_max)
			current->protection =
				(current->max_protection = new_prot) &
					old_prot;
		else
			current->protection = new_prot;

		/*
		 *	Update physical map if necessary.
		 *	Worry about copy-on-write here -- CHECK THIS XXX
		 */
		if (current->protection != old_prot) {

#define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
#define	max(a,b)	((a) > (b) ? (a) : (b))

			if (current->is_a_map) {
				vm_map_entry_t	share_entry;
				vm_offset_t	share_end;

				vm_map_lock(current->object.share_map);
				(void) vm_map_lookup_entry(
						current->object.share_map,
						current->offset,
						&share_entry);
				share_end = current->offset +
					(current->end - current->start);
				while ((share_entry !=
					&current->object.share_map->header) &&
					(share_entry->start < share_end)) {

					pmap_protect(map->pmap,
						(max(share_entry->start,
							current->offset) -
							current->offset +
							current->start),
						min(share_entry->end,
							share_end) -
						current->offset +
						current->start,
						current->protection &
							MASK(share_entry));

					share_entry = share_entry->next;
				}
				vm_map_unlock(current->object.share_map);
			}
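
The lookup routine above is the heart of the map's fast path: entries sit on a sorted, circular doubly linked list behind a sentinel header, and a single cached hint lets lookups that cluster around the same address skip most of the linear scan. The standalone sketch below reproduces just that hint-then-scan structure in user space; struct range_map, struct range_entry and range_lookup are names invented for this illustration (no locking, none of the kernel types), not part of vm_map.c.

#include <assert.h>
#include <stdbool.h>

/* Hypothetical, simplified analogue of a map entry: a half-open address
 * range [start, end) on a circular doubly linked list whose sentinel
 * header carries no range of its own.  Entries are sorted by start. */
struct range_entry {
	struct range_entry *prev, *next;
	unsigned long start, end;
};

struct range_map {
	struct range_entry header;	/* list sentinel */
	struct range_entry *hint;	/* entry found by the last lookup */
};

/*
 * Find the entry containing addr, or the one immediately preceding it.
 * Returns true iff addr is actually inside *entryp, mirroring the
 * boolean_t result of vm_map_lookup_entry (locking omitted here).
 */
static bool
range_lookup(struct range_map *map, unsigned long addr,
    struct range_entry **entryp)
{
	struct range_entry *cur = map->hint, *last;

	if (cur == &map->header)
		cur = cur->next;

	if (addr >= cur->start) {
		/* Common case: the hinted entry already covers addr. */
		last = &map->header;
		if (cur != last && cur->end > addr) {
			*entryp = cur;
			return true;
		}
	} else {
		/* addr lies before the hint: rescan from the list head. */
		last = cur->next;
		cur = map->header.next;
	}

	for (; cur != last; cur = cur->next) {
		if (cur->end > addr) {
			if (addr >= cur->start) {
				*entryp = map->hint = cur;	/* save hint */
				return true;
			}
			break;		/* addr falls in the gap before cur */
		}
	}
	*entryp = map->hint = cur->prev;	/* preceding entry */
	return false;
}

int
main(void)
{
	struct range_map m;
	struct range_entry a = { .start = 0x1000, .end = 0x2000 };
	struct range_entry b = { .start = 0x8000, .end = 0x9000 };
	struct range_entry *hit;

	/* Build the circular list: header <-> a <-> b <-> header. */
	m.header.start = m.header.end = 0;
	m.header.next = &a;	a.prev = &m.header;
	a.next = &b;		b.prev = &a;
	b.next = &m.header;	m.header.prev = &b;
	m.hint = &m.header;

	assert(range_lookup(&m, 0x1800, &hit) && hit == &a);	/* inside a */
	assert(!range_lookup(&m, 0x3000, &hit) && hit == &a);	/* gap after a */
	assert(range_lookup(&m, 0x8800, &hit) && hit == &b);	/* inside b */
	return 0;
}

Caching a single hint instead of a richer index is cheap to maintain under the map lock, and it pays off exactly when the comment in vm_map_lookup_entry says it does: consecutive lookups usually land in the entry found last time. The cost is a full linear walk whenever that locality breaks down.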

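vm_map_findspace's first-fit walk is just as compact: clamp the requested start into the map's range, step past whatever is already mapped there, then try each gap in address order until length bytes fit before the next entry (or after the last one), failing on wrap-around or on running past max_offset. The sketch below renders the same first-fit idea over a sorted array of half-open ranges instead of the map's entry list and hint; struct used_range and find_space are invented for the example and assume non-overlapping, sorted input.

#include <stddef.h>
#include <stdio.h>

/* A half-open, already-allocated range [start, end).  The array passed to
 * find_space is assumed sorted by start and non-overlapping. */
struct used_range {
	unsigned long start, end;
};

/*
 * First-fit search in the spirit of vm_map_findspace: find the lowest
 * address >= lo where length bytes fit between existing ranges and still
 * end at or below hi.  Returns 0 and stores the address in *addr on
 * success, 1 if there is no room (including arithmetic wrap-around).
 */
static int
find_space(const struct used_range *r, size_t n, unsigned long lo,
    unsigned long hi, unsigned long length, unsigned long *addr)
{
	unsigned long start = lo;
	size_t i = 0;

	if (start > hi)
		return 1;

	/* Skip every range that begins at or below the candidate start;
	 * if start lands inside one, bump it past that range's end. */
	while (i < n && r[i].start <= start) {
		if (r[i].end > start)
			start = r[i].end;
		i++;
	}

	for (;; start = r[i++].end) {
		unsigned long end = start + length;

		/* Overflow, or past the top of the map: no space. */
		if (end < start || end > hi)
			return 1;
		/* Fits before the next range, or after the last one. */
		if (i == n || r[i].start >= end)
			break;
	}
	*addr = start;
	return 0;
}

int
main(void)
{
	/* Two allocations inside a map spanning [0x1000, 0x10000). */
	struct used_range used[] = {
		{ 0x1000, 0x3000 },
		{ 0x4000, 0x5000 },
	};
	unsigned long addr;

	/* The 0x1000-byte hole at 0x3000 is the first fit. */
	if (find_space(used, 2, 0x1000, 0x10000, 0x1000, &addr) == 0)
		printf("0x%lx\n", addr);	/* 0x3000 */

	/* A 0x2000-byte request skips that hole and lands after 0x5000. */
	if (find_space(used, 2, 0x1000, 0x10000, 0x2000, &addr) == 0)
		printf("0x%lx\n", addr);	/* 0x5000 */
	return 0;
}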