📄 vm_page.c
字号:
/*
 *	vm_page_remove:
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */
void
vm_page_remove(mem)
	register vm_page_t	mem;
{
	register struct pglist	*bucket;
	int			spl;

	VM_PAGE_CHECK(mem);

	/* If the page was never entered in the tables, there is nothing to undo. */
	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 *	Remove from the object/offset hash table.
	 *
	 *	The bucket chains are also touched at interrupt level,
	 *	so block interrupts (splimp) and take the bucket lock
	 *	around the unlink.
	 */
	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	spl = splimp();
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);
	(void) splx(spl);

	/*
	 *	Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */
	mem->object->resident_page_count--;

	/* Page is no longer findable via object/offset. */
	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */
vm_page_t
vm_page_lookup(object, offset)
	register vm_object_t	object;
	register vm_offset_t	offset;
{
	register vm_page_t	mem;
	register struct pglist	*bucket;
	int			spl;

	/*
	 *	Search the hash table for this object/offset pair,
	 *	holding the bucket lock (and blocking interrupts)
	 *	while walking the chain.
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	spl = splimp();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			/* Found it: release the bucket before returning. */
			simple_unlock(&bucket_lock);
			splx(spl);
			return(mem);
		}
	}

	/* Not resident in this object at this offset. */
	simple_unlock(&bucket_lock);
	splx(spl);
	return(NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
	register vm_page_t	mem;
	register vm_object_t	new_object;
	vm_offset_t		new_offset;
{
	/* Moving a page within its own object is a no-op. */
	if (mem->object == new_object)
		return;

	vm_page_lock_queues();	/* keep page from moving out from under pageout daemon */
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Returns NULL if the free list is empty.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset)
	vm_object_t	object;
	vm_offset_t	offset;
{
	register vm_page_t	mem;
	int			spl;

	/*
	 *	Take a page off the head of the free list, with
	 *	interrupts blocked and the free-queue lock held.
	 */
	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (vm_page_queue_free.tqh_first == NULL) {
		/* No free pages at all -- caller must cope. */
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return(NULL);
	}

	mem = vm_page_queue_free.tqh_first;
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	/* Initialize the page and enter it into the object at the offset. */
	VM_PAGE_INIT(mem, object, offset);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup(&vm_pages_needed);

	return (mem);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t	mem;
{
	/* Unhook from the object/offset hash and the object's page list. */
	vm_page_remove(mem);

	/* Pull the page off whichever paging queue it is on,
	 * keeping the queue counters in step with the flag bits. */
	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	/*
	 *	Only pages backed by real memory go back on the free
	 *	list; fictitious pages have no physical frame to reuse.
	 */
	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	/*
	 *	Only the first wiring takes the page off the paging
	 *	queues; later wirings just bump the count.
	 */
	if (mem->wire_count == 0) {
		if (mem->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
			cnt.v_active_count--;
			mem->flags &= ~PG_ACTIVE;
		}
		if (mem->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
			cnt.v_inactive_count--;
			mem->flags &= ~PG_INACTIVE;
		}
		cnt.v_wire_count++;
	}
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t	mem;
{
	VM_PAGE_CHECK(mem);

	/* NOTE(review): no underflow check -- callers are assumed to
	 * balance each unwire against a prior vm_page_wire(). */
	mem->wire_count--;

	/* Last wiring gone: put the page back on the active queue. */
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/*
	 *	Only move active pages -- ignore locked or already
	 *	inactive ones.
	 */
	if (m->flags & PG_ACTIVE) {
		/* Clear the hardware reference bit so future use is noticed. */
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags &= ~PG_ACTIVE;
		m->flags |= PG_INACTIVE;
		cnt.v_active_count--;
		cnt.v_inactive_count++;

		/* Sync PG_CLEAN with the pmap modify bit, then mark the
		 * page for laundering if it is dirty. */
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
			m->flags &= ~PG_CLEAN;
		if (m->flags & PG_CLEAN)
			m->flags &= ~PG_LAUNDRY;
		else
			m->flags |= PG_LAUNDRY;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	if (m->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
		cnt.v_inactive_count--;
		m->flags &= ~PG_INACTIVE;
	}

	/* Wired pages stay off the paging queues entirely. */
	if (m->wire_count == 0) {
		if (m->flags & PG_ACTIVE)
			panic("vm_page_activate: already active");

		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		cnt.v_active_count++;
	}
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t
vm_page_zero_fill(m)
	vm_page_t	m;
{
	VM_PAGE_CHECK(m);

	/* The page's previous contents are gone, so it is dirty. */
	m->flags &= ~PG_CLEAN;
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return(TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another.
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t	src_m;
	vm_page_t	dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	/* Destination now holds data not yet written to backing store. */
	dest_m->flags &= ~PG_CLEAN;
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -