/*
 * vm_object.c -- virtual memory object management (excerpt).
 * (Web-scrape banner removed; filename reconstructed from page header.)
 */
if (object->pager == pager) { TAILQ_REMOVE(bucket, entry, hash_links); free((caddr_t)entry, M_VMOBJHASH); break; } }}/* * vm_object_cache_clear removes all objects from the cache. * */voidvm_object_cache_clear(){ register vm_object_t object; /* * Remove each object in the cache by scanning down the * list of cached objects. */ vm_object_cache_lock(); while ((object = vm_object_cached_list.tqh_first) != NULL) { vm_object_cache_unlock(); /* * Note: it is important that we use vm_object_lookup * to gain a reference, and not vm_object_reference, because * the logic for removing an object from the cache lies in * lookup. */ if (object != vm_object_lookup(object->pager)) panic("vm_object_cache_clear: I'm sooo confused."); pager_cache(object, FALSE); vm_object_cache_lock(); } vm_object_cache_unlock();}boolean_t vm_object_collapse_allowed = TRUE;/* * vm_object_collapse: * * Collapse an object with the object backing it. * Pages in the backing object are moved into the * parent, and the backing object is deallocated. * * Requires that the object be locked and the page * queues be unlocked. * */voidvm_object_collapse(object) register vm_object_t object;{ register vm_object_t backing_object; register vm_offset_t backing_offset; register vm_size_t size; register vm_offset_t new_offset; register vm_page_t p, pp; if (!vm_object_collapse_allowed) return; while (TRUE) { /* * Verify that the conditions are right for collapse: * * The object exists and no pages in it are currently * being paged out (or have ever been paged out). */ if (object == NULL || object->paging_in_progress != 0 || object->pager != NULL) return; /* * There is a backing object, and */ if ((backing_object = object->shadow) == NULL) return; vm_object_lock(backing_object); /* * ... * The backing object is not read_only, * and no pages in the backing object are * currently being paged out. * The backing object is internal. 
*/ if ((backing_object->flags & OBJ_INTERNAL) == 0 || backing_object->paging_in_progress != 0) { vm_object_unlock(backing_object); return; } /* * The backing object can't be a copy-object: * the shadow_offset for the copy-object must stay * as 0. Furthermore (for the 'we have all the * pages' case), if we bypass backing_object and * just shadow the next object in the chain, old * pages from that object would then have to be copied * BOTH into the (former) backing_object and into the * parent object. */ if (backing_object->shadow != NULL && backing_object->shadow->copy != NULL) { vm_object_unlock(backing_object); return; } /* * We know that we can either collapse the backing * object (if the parent is the only reference to * it) or (perhaps) remove the parent's reference * to it. */ backing_offset = object->shadow_offset; size = object->size; /* * If there is exactly one reference to the backing * object, we can collapse it into the parent. */ if (backing_object->ref_count == 1) { /* * We can collapse the backing object. * * Move all in-memory pages from backing_object * to the parent. Pages that have been paged out * will be overwritten by any of the parent's * pages that shadow them. */ while ((p = backing_object->memq.tqh_first) != NULL) { new_offset = (p->offset - backing_offset); /* * If the parent has a page here, or if * this page falls outside the parent, * dispose of it. * * Otherwise, move it as planned. */ if (p->offset < backing_offset || new_offset >= size) { vm_page_lock_queues(); vm_page_free(p); vm_page_unlock_queues(); } else { pp = vm_page_lookup(object, new_offset); if (pp != NULL && !(pp->flags & PG_FAKE)) { vm_page_lock_queues(); vm_page_free(p); vm_page_unlock_queues(); } else { if (pp) { /* may be someone waiting for it */ PAGE_WAKEUP(pp); vm_page_lock_queues(); vm_page_free(pp); vm_page_unlock_queues(); } vm_page_rename(p, object, new_offset); } } } /* * Move the pager from backing_object to object. 
* * XXX We're only using part of the paging space * for keeps now... we ought to discard the * unused portion. */ if (backing_object->pager) { object->pager = backing_object->pager; object->paging_offset = backing_offset + backing_object->paging_offset; backing_object->pager = NULL; } /* * Object now shadows whatever backing_object did. * Note that the reference to backing_object->shadow * moves from within backing_object to within object. */ object->shadow = backing_object->shadow; object->shadow_offset += backing_object->shadow_offset; if (object->shadow != NULL && object->shadow->copy != NULL) { panic("vm_object_collapse: we collapsed a copy-object!"); } /* * Discard backing_object. * * Since the backing object has no pages, no * pager left, and no object references within it, * all that is necessary is to dispose of it. */ vm_object_unlock(backing_object); simple_lock(&vm_object_list_lock); TAILQ_REMOVE(&vm_object_list, backing_object, object_list); vm_object_count--; simple_unlock(&vm_object_list_lock); free((caddr_t)backing_object, M_VMOBJ); object_collapses++; } else { /* * If all of the pages in the backing object are * shadowed by the parent object, the parent * object no longer has to shadow the backing * object; it can shadow the next one in the * chain. * * The backing object must not be paged out - we'd * have to check all of the paged-out pages, as * well. */ if (backing_object->pager != NULL) { vm_object_unlock(backing_object); return; } /* * Should have a check for a 'small' number * of pages here. */ for (p = backing_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { new_offset = (p->offset - backing_offset); /* * If the parent has a page here, or if * this page falls outside the parent, * keep going. * * Otherwise, the backing_object must be * left in the chain. */ if (p->offset >= backing_offset && new_offset < size && ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE))) { /* * Page still needed. 
* Can't go any further. */ vm_object_unlock(backing_object); return; } } /* * Make the parent shadow the next object * in the chain. Deallocating backing_object * will not remove it, since its reference * count is at least 2. */ object->shadow = backing_object->shadow; vm_object_reference(object->shadow); object->shadow_offset += backing_object->shadow_offset; /* * Backing object might have had a copy pointer * to us. If it did, clear it. */ if (backing_object->copy == object) { backing_object->copy = NULL; } /* Drop the reference count on backing_object. * Since its ref_count was at least 2, it * will not vanish; so we don't need to call * vm_object_deallocate. */ backing_object->ref_count--; vm_object_unlock(backing_object); object_bypasses ++; } /* * Try again with this object's new backing object. */ }}/* * vm_object_page_remove: [internal] * * Removes all physical pages in the specified * object range from the object's list of pages. * * The object must be locked. */voidvm_object_page_remove(object, start, end) register vm_object_t object; register vm_offset_t start; register vm_offset_t end;{ register vm_page_t p, next; if (object == NULL) return; for (p = object->memq.tqh_first; p != NULL; p = next) { next = p->listq.tqe_next; if ((start <= p->offset) && (p->offset < end)) { pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); vm_page_lock_queues(); vm_page_free(p); vm_page_unlock_queues(); } }}/* * Routine: vm_object_coalesce * Function: Coalesces two objects backing up adjoining * regions of memory into a single object. * * returns TRUE if objects were combined. * * NOTE: Only works at the moment if the second object is NULL - * if it's not, which object do we lock first? 
* * Parameters: * prev_object First object to coalesce * prev_offset Offset into prev_object * next_object Second object into coalesce * next_offset Offset into next_object * * prev_size Size of reference to prev_object * next_size Size of reference to next_object * * Conditions: * The object must *not* be locked. */boolean_tvm_object_coalesce(prev_object, next_object, prev_offset, next_offset, prev_size, next_size) register vm_object_t prev_object; vm_object_t next_object; vm_offset_t prev_offset, next_offset; vm_size_t prev_size, next_size;{ vm_size_t newsize;#ifdef lint next_offset++;#endif if (next_object != NULL) { return(FALSE); } if (prev_object == NULL) { return(TRUE); } vm_object_lock(prev_object); /* * Try to collapse the object first */ vm_object_collapse(prev_object); /* * Can't coalesce if: * . more than one reference * . paged out * . shadows another object * . has a copy elsewhere * (any of which mean that the pages not mapped to * prev_entry may be in use anyway) */ if (prev_object->ref_count > 1 || prev_object->pager != NULL || prev_object->shadow != NULL || prev_object->copy != NULL) { vm_object_unlock(prev_object); return(FALSE); } /* * Remove any pages that may still be in the object from * a previous deallocation. */ vm_object_page_remove(prev_object, prev_offset + prev_size, prev_offset + prev_size + next_size); /* * Extend the object if necessary. 
*/ newsize = prev_offset + prev_size + next_size; if (newsize > prev_object->size) prev_object->size = newsize; vm_object_unlock(prev_object); return(TRUE);}/* * vm_object_print: [ debug ] */voidvm_object_print(object, full) vm_object_t object; boolean_t full;{ register vm_page_t p; extern indent; register int count; if (object == NULL) return; iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", (int) object, (int) object->size, object->resident_page_count, object->ref_count); printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n", (int) object->pager, (int) object->paging_offset, (int) object->shadow, (int) object->shadow_offset); printf("cache: next=0x%x, prev=0x%x\n", object->cached_list.tqe_next, object->cached_list.tqe_prev); if (!full) return; indent += 2; count = 0; for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) { if (count == 0) iprintf("memory:="); else if (count == 6) { printf("\n"); iprintf(" ..."); count = 0; } else printf(","); count++; printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p)); } if (count != 0) printf("\n"); indent -= 2;}