📄 vm_map.c
Font size:
/*
 * NOTE(review): this is the body of vm_map_fork(); the function header and
 * local declarations (vm1, vm2, new_map, old_map, new_pmap, old_entry,
 * new_entry) appear above this chunk — confirm against the full file.
 */
vm_map_lock(old_map);

/*
 * Allocate a fresh vmspace spanning the same range, then bulk-copy the
 * non-map tail of the old vmspace (from vm_startcopy through the end of
 * the structure) into it.
 */
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
    old_map->entries_pageable);
bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
    (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
new_pmap = &vm2->vm_pmap;	/* XXX */
new_map = &vm2->vm_map;		/* XXX */

/*
 * Walk every entry of the parent's map and replicate it into the child
 * map according to the entry's inheritance attribute.
 */
old_entry = old_map->header.next;

while (old_entry != &old_map->header) {
	if (old_entry->is_sub_map)
		panic("vm_map_fork: encountered a submap");

	switch (old_entry->inheritance) {
	case VM_INHERIT_NONE:
		/* Not inherited -- the child simply gets no entry here. */
		break;

	case VM_INHERIT_SHARE:
		/*
		 * If we don't already have a sharing map:
		 */

		if (!old_entry->is_a_map) {
			vm_map_t	new_share_map;
			vm_map_entry_t	new_share_entry;

			/*
			 * Create a new sharing map
			 */

			new_share_map = vm_map_create(NULL,
					old_entry->start,
					old_entry->end,
					TRUE);
			new_share_map->is_main_map = FALSE;

			/*
			 * Create the only sharing entry from the
			 * old task map entry.
			 */

			new_share_entry =
				vm_map_entry_create(new_share_map);
			*new_share_entry = *old_entry;
			new_share_entry->wired_count = 0;

			/*
			 * Insert the entry into the new sharing
			 * map
			 */

			vm_map_entry_link(new_share_map,
				new_share_map->header.prev,
				new_share_entry);

			/*
			 * Fix up the task map entry to refer
			 * to the sharing map now.
			 */

			old_entry->is_a_map = TRUE;
			old_entry->object.share_map = new_share_map;
			old_entry->offset = old_entry->start;
		}

		/*
		 * Clone the entry, referencing the sharing map.
		 */

		new_entry = vm_map_entry_create(new_map);
		*new_entry = *old_entry;
		new_entry->wired_count = 0;
		vm_map_reference(new_entry->object.share_map);

		/*
		 * Insert the entry into the new map -- we
		 * know we're inserting at the end of the new
		 * map.
		 */

		vm_map_entry_link(new_map, new_map->header.prev,
			new_entry);

		/*
		 * Update the physical map
		 */

		pmap_copy(new_map->pmap, old_map->pmap,
			new_entry->start,
			(old_entry->end - old_entry->start),
			old_entry->start);
		break;

	case VM_INHERIT_COPY:
		/*
		 * Clone the entry and link into the map.
*/
		new_entry = vm_map_entry_create(new_map);
		*new_entry = *old_entry;
		new_entry->wired_count = 0;
		/* The child gets its own (initially empty) backing object. */
		new_entry->object.vm_object = NULL;
		new_entry->is_a_map = FALSE;
		vm_map_entry_link(new_map, new_map->header.prev,
			new_entry);
		if (old_entry->is_a_map) {
			int	check;

			check = vm_map_copy(new_map,
					old_entry->object.share_map,
					new_entry->start,
					(vm_size_t)(new_entry->end -
						new_entry->start),
					old_entry->offset,
					FALSE, FALSE);
			if (check != KERN_SUCCESS)
				printf("vm_map_fork: copy in share_map region failed\n");
		}
		else {
			vm_map_copy_entry(old_map, new_map,
				old_entry, new_entry);
		}
		break;
	}
	old_entry = old_entry->next;
}

new_map->size = old_map->size;
vm_map_unlock(old_map);

return(vm2);
}

/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(var_map, vaddr, fault_type, out_entry,
				object, offset, out_prot, wired, single_use)
	vm_map_t	*var_map;	/* IN/OUT */
	register vm_offset_t	vaddr;
	register vm_prot_t	fault_type;

	vm_map_entry_t	*out_entry;	/* OUT */
	vm_object_t	*object;	/* OUT */
	vm_offset_t	*offset;	/* OUT */
	vm_prot_t	*out_prot;	/* OUT */
	boolean_t	*wired;		/* OUT */
	boolean_t	*single_use;	/* OUT */
{
	vm_map_t	share_map;
	vm_offset_t	share_offset;
	register vm_map_entry_t	entry;
	register vm_map_t	map = *var_map;
	register vm_prot_t	prot;
	register boolean_t	su;

	/* Restart target when a lock upgrade fails or a submap is entered. */
	RetryLookup: ;

	/*
	 *	Lookup the faulting address.
*/

	vm_map_lock_read(map);

/* Unlock-and-return helper, valid only while the read lock is held. */
#define	RETURN(why) \
		{ \
		vm_map_unlock_read(map); \
		return(why); \
		}

	/*
	 *	If the map has an interesting hint, try it before calling
	 *	full blown lookup routine.
	 */

	simple_lock(&map->hint_lock);
	entry = map->hint;
	simple_unlock(&map->hint_lock);

	*out_entry = entry;

	if ((entry == &map->header) ||
	    (vaddr < entry->start) || (vaddr >= entry->end)) {
		vm_map_entry_t	tmp_entry;

		/*
		 *	Entry was either not a valid hint, or the vaddr
		 *	was not contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
			RETURN(KERN_INVALID_ADDRESS);

		entry = tmp_entry;
		*out_entry = entry;
	}

	/*
	 *	Handle submaps: restart the lookup in the submap, which
	 *	becomes the map returned through var_map.
	 */

	if (entry->is_sub_map) {
		vm_map_t	old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 *	Check whether this task is allowed to have
	 *	this page.
	 */

	prot = entry->protection;
	if ((fault_type & (prot)) != fault_type)
		RETURN(KERN_PROTECTION_FAILURE);

	/*
	 *	If this page is not pageable, we have to get
	 *	it for all possible accesses.
	 *	(The assignment inside the condition is intentional.)
	 */

	if (*wired = (entry->wired_count != 0))
		prot = fault_type = entry->protection;

	/*
	 *	If we don't already have a VM object, track
	 *	it down.  (Intentional assignment-in-condition again.)
	 */

	if (su = !entry->is_a_map) {
		share_map = map;
		share_offset = vaddr;
	}
	else {
		vm_map_entry_t	share_entry;

		/*
		 *	Compute the sharing map, and offset into it.
		 */

		share_map = entry->object.share_map;
		share_offset = (vaddr - entry->start) + entry->offset;

		/*
		 *	Look for the backing store object and offset
		 */

		vm_map_lock_read(share_map);

		if (!vm_map_lookup_entry(share_map, share_offset,
					&share_entry)) {
			vm_map_unlock_read(share_map);
			RETURN(KERN_INVALID_ADDRESS);
		}
		entry = share_entry;
	}

	/*
	 *	If the entry was copy-on-write, we either ...
	 */

	if (entry->needs_copy) {
		/*
		 *	If we want to write the page, we may as well
		 *	handle that now since we've got the sharing
		 *	map locked.
		 *
		 *	If we don't need to write the page, we just
		 *	demote the permissions allowed.
*/

		if (fault_type & VM_PROT_WRITE) {
			/*
			 *	Make a new object, and place it in the
			 *	object chain.  Note that no new references
			 *	have appeared -- one just moved from the
			 *	share map to the new object.
			 */

			/*
			 * Upgrade the read lock to exclusive; on failure
			 * the lock was lost, so restart the whole lookup.
			 */
			if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
					(void *)0, curproc)) {
				if (share_map != map)
					vm_map_unlock_read(map);
				goto RetryLookup;
			}

			vm_object_shadow(
				&entry->object.vm_object,
				&entry->offset,
				(vm_size_t) (entry->end - entry->start));

			entry->needs_copy = FALSE;

			lockmgr(&share_map->lock, LK_DOWNGRADE,
				(void *)0, curproc);
		}
		else {
			/*
			 *	We're attempting to read a copy-on-write
			 *	page -- don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}

	/*
	 *	Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL) {

		if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
				(void *)0, curproc)) {
			if (share_map != map)
				vm_map_unlock_read(map);
			goto RetryLookup;
		}

		entry->object.vm_object = vm_object_allocate(
				(vm_size_t)(entry->end - entry->start));
		entry->offset = 0;
		lockmgr(&share_map->lock, LK_DOWNGRADE,
			(void *)0, curproc);
	}

	/*
	 *	Return the object/offset from this entry.  If the entry
	 *	was copy-on-write or empty, it has been fixed up.
	 */

	*offset = (share_offset - entry->start) + entry->offset;
	*object = entry->object.vm_object;

	/*
	 *	Return whether this is the only map sharing this data.
	 */

	if (!su) {
		simple_lock(&share_map->ref_lock);
		su = (share_map->ref_count == 1);
		simple_unlock(&share_map->ref_lock);
	}

	*out_prot = prot;
	*single_use = su;

	return(KERN_SUCCESS);

#undef	RETURN
}

/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(map, entry)
	register vm_map_t	map;
	vm_map_entry_t		entry;
{
	/*
	 *	If this entry references a map, unlock it first.
*/

	if (entry->is_a_map)
		vm_map_unlock_read(entry->object.share_map);

	/*
	 *	Unlock the main-level map
	 */

	vm_map_unlock_read(map);
}

/*
 *	Routine:	vm_map_simplify
 *	Purpose:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify(map, start)
	vm_map_t	map;
	vm_offset_t	start;
{
	vm_map_entry_t	this_entry;
	vm_map_entry_t	prev_entry;

	vm_map_lock(map);
	/*
	 * Merge this_entry into its predecessor only when the two are
	 * physically adjacent, identical in every attribute, and map
	 * contiguous offsets of the same backing object.
	 */
	if (
		(vm_map_lookup_entry(map, start, &this_entry)) &&
		((prev_entry = this_entry->prev) != &map->header) &&

		(prev_entry->end == start) &&
		(map->is_main_map) &&

		(prev_entry->is_a_map == FALSE) &&
		(prev_entry->is_sub_map == FALSE) &&

		(this_entry->is_a_map == FALSE) &&
		(this_entry->is_sub_map == FALSE) &&

		(prev_entry->inheritance == this_entry->inheritance) &&
		(prev_entry->protection == this_entry->protection) &&
		(prev_entry->max_protection == this_entry->max_protection) &&
		(prev_entry->wired_count == this_entry->wired_count) &&

		(prev_entry->copy_on_write == this_entry->copy_on_write) &&
		(prev_entry->needs_copy == this_entry->needs_copy) &&

		(prev_entry->object.vm_object == this_entry->object.vm_object) &&
		((prev_entry->offset + (prev_entry->end - prev_entry->start))
		     == this_entry->offset)
	) {
		if (map->first_free == this_entry)
			map->first_free = prev_entry;

		SAVE_HINT(map, prev_entry);
		vm_map_entry_unlink(map, this_entry);
		prev_entry->end = this_entry->end;
		/* Drop this_entry's reference on the now-shared object. */
		vm_object_deallocate(this_entry->object.vm_object);
		vm_map_entry_dispose(map, this_entry);
	}
	vm_map_unlock(map);
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(map, full)
	register vm_map_t	map;
	boolean_t		full;
{
	register vm_map_entry_t	entry;
	extern int indent;

	iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n",
		(map->is_main_map ?
"Task" : "Share"),
		(int) map, (int) (map->pmap), map->ref_count, map->nentries,
		map->timestamp);

	if (!full && indent)
		return;

	indent += 2;

	/* Dump each entry, recursing into share maps / backing objects. */
	for (entry = map->header.next; entry != &map->header;
				entry = entry->next) {
		iprintf("map entry 0x%x: start=0x%x, end=0x%x, ",
			(int) entry, (int) entry->start, (int) entry->end);
		if (map->is_main_map) {
			static char *inheritance_name[4] =
				{ "share", "copy", "none", "donate_copy"};
			printf("prot=%x/%x/%s, ",
				entry->protection,
				entry->max_protection,
				inheritance_name[entry->inheritance]);
			if (entry->wired_count != 0)
				printf("wired, ");
		}

		if (entry->is_a_map || entry->is_sub_map) {
			printf("share=0x%x, offset=0x%x\n",
				(int) entry->object.share_map,
				(int) entry->offset);
			/*
			 * Only recurse when the previous entry does not
			 * reference the same share map (avoids reprinting).
			 */
			if ((entry->prev == &map->header) ||
			    (!entry->prev->is_a_map) ||
			    (entry->prev->object.share_map !=
			     entry->object.share_map)) {
				indent += 2;
				vm_map_print(entry->object.share_map, full);
				indent -= 2;
			}

		}
		else {
			printf("object=0x%x, offset=0x%x",
				(int) entry->object.vm_object,
				(int) entry->offset);
			if (entry->copy_on_write)
				printf(", copy (%s)",
				       entry->needs_copy ? "needed" : "done");
			printf("\n");

			if ((entry->prev == &map->header) ||
			    (entry->prev->is_a_map) ||
			    (entry->prev->object.vm_object !=
			     entry->object.vm_object)) {
				indent += 2;
				vm_object_print(entry->object.vm_object,
						full);
				indent -= 2;
			}
		}
	}
	indent -= 2;
}
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -