uvm_pager.c
来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 915 行 · 第 1/2 页
C
915 行
/*
 * NOTE(review): this is the tail of the clustered pageout routine
 * (presumably uvm_pager_put) -- its head lies above this excerpt, so
 * "uobj", "pg", "ppsp", "ppsp_ptr", "npages", "flags", "start", "swblk"
 * and "result" are declared there.  Reformatted from a collapsed paste;
 * code tokens are unchanged.
 */
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			/* no clustering: the cluster is just "pg" itself */
			ppsp[0] = pg;
			*npages = 1;
		}
		swblk = 0;		/* XXX: keep gcc happy */
	} else {
		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(ubchist, "put -> %d", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			/* transient error => keep the slot for the retry */
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;

			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT, nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {
			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */
			if (uobj == NULL) {
				if (pg) {
					/* keep slot 0 ("pg"), free the rest */
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				/* retry with a cluster of just "pg" */
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {
			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */
			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *	clustered swap-backed pages (except for "pg" if !NULL)
 *	"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *	[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT,
					    0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */
#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */
		} else {
			/* un-busy the page and drop our ownership of it */
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

#ifndef OSKIT

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	/* b_private points at the top-level ("master") buf of the nest */
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		/* propagate the child's error to the master buf */
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		/* last child finished: complete the master buf */
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 * => hands the buf to the aiodone daemon; the real completion work
 *    happens later in thread context via uvm_aio_aiodone().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];	/* VLA sized by the buf */
	struct uvm_object *uobj;
	int s, i;
	boolean_t release, write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	/*
	 * NOTE(review): this routine assumes npages >= 1 (i.e.
	 * b_bufsize >= PAGE_SIZE); if npages were 0, "swap" (and possibly
	 * "uobj") would be read uninitialized below -- confirm callers
	 * guarantee this.
	 */

	/* failed read => pages must be freed (marked PG_RELEASED below) */
	release = (bp->b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	write = (bp->b_flags & B_READ) == 0;

	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	/* recover the vm_page pointers from the pager mapping, then unmap */
	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data +
		    (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			/*
			 * first page decides the locking mode: object-backed
			 * clusters share one object lock; swap-backed pages
			 * are locked per-page below.
			 */
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */
		if (release) {
			pg->flags |= PG_RELEASED;
			continue;
		}
		KASSERT(!write || (pgs[i]->flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE
		 * or this was a write, mark the page PG_CLEAN and not
		 * PG_FAKE.
		 */
		if (pgs[i]->flags & PG_FAKE || write) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		if (pg->wire_count == 0) {
			uvm_pageactivate(pg);
		}
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

	/* done with the buf: return it to the pool at splbio */
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}
#endif /*OSKIT*/

/*
 * uvm_errno2vmerror: translate unix errno values to VM_PAGER_* codes.
 *
 * => errno: a unix error number (0 means success)
 * => returns the corresponding VM_PAGER_* code; unrecognized errors
 *    map to VM_PAGER_ERROR.
 */

int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?