📄 uvm_vnode.c
		} else {

			/*
			 * freeing: nuke all mappings so we can sync
			 * PG_CLEAN bit with no race
			 */

			if ((pp->flags & PG_CLEAN) != 0 &&
			    (flags & PGO_FREE) != 0 &&
			    (pp->pqflags & PQ_ACTIVE) != 0)
				pmap_page_protect(pp, VM_PROT_NONE);
			if ((pp->flags & PG_CLEAN) != 0 &&
			    pmap_is_modified(pp))
				pp->flags &= ~(PG_CLEAN);
			pp->flags |= PG_CLEANCHK;

			needs_clean = ((pp->flags & PG_CLEAN) == 0);
		}

		/*
		 * if we don't need a clean... load ppnext and dispose of pp
		 */

		if (!needs_clean) {
			if (by_list)
				ppnext = TAILQ_NEXT(pp, listq);
			else {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
			}

			if (flags & PGO_DEACTIVATE) {
				if ((pp->pqflags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(pp, VM_PROT_NONE);
					uvm_pagedeactivate(pp);
				}
			} else if (flags & PGO_FREE) {
				if (pp->flags & PG_BUSY) {
					pp->flags |= PG_RELEASED;
				} else {
					pmap_page_protect(pp, VM_PROT_NONE);
					uvm_pagefree(pp);
				}
			}
			/* ppnext is valid so we can continue... */
			continue;
		}

		/*
		 * pp points to a page in the locked object that we are
		 * working on.  if it is !PG_CLEAN, !PG_BUSY and we asked
		 * for cleaning (PGO_CLEANIT), we clean it now.
		 *
		 * let uvm_pager_put attempt a clustered pageout.
		 * note: locked: uobj and page queues.
		 */

		wasclean = FALSE;
		pp->flags |= PG_BUSY;		/* we 'own' page now */
		UVM_PAGE_OWN(pp, "uvn_flush");
		pmap_page_protect(pp, VM_PROT_READ);
		pp_version = pp->version;
ReTry:
		ppsp = pps;
		npages = sizeof(pps) / sizeof(struct vm_page *);

		/* locked: page queues, uobj */
		result = uvm_pager_put(uobj, pp, &ppsp, &npages,
		    flags | PGO_DOACTCLUST, start, stop);
		/* unlocked: page queues, uobj */

		/*
		 * at this point nothing is locked.  if we did an async I/O
		 * it is remotely possible for the async i/o to complete and
		 * the page "pp" be freed or what not before we get a chance
		 * to relock the object.  in order to detect this, we have
		 * saved the version number of the page in "pp_version".
		 */

		/* relock! */
		simple_lock(&uobj->vmobjlock);
		uvm_lock_pageq();

		/*
		 * VM_PAGER_AGAIN: given the structure of this pager, this
		 * can only happen when we are doing async I/O and can't
		 * map the pages into kernel memory (pager_map) due to lack
		 * of vm space.  if this happens we drop back to sync I/O.
		 */

		if (result == VM_PAGER_AGAIN) {

			/*
			 * it is unlikely, but page could have been released
			 * while we had the object lock dropped.  we ignore
			 * this now and retry the I/O.  we will detect and
			 * handle the released page after the syncio I/O
			 * completes.
			 */
#ifdef DIAGNOSTIC
			if (flags & PGO_SYNCIO)
				panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)");
#endif
			flags |= PGO_SYNCIO;
			goto ReTry;
		}

		/*
		 * the cleaning operation is now done.  finish up.  note that
		 * on error (!OK, !PEND) uvm_pager_put drops the cluster for us.
		 * if success (OK, PEND) then uvm_pager_put returns the cluster
		 * to us in ppsp/npages.
		 */

		/*
		 * for pending async i/o if we are not deactivating/freeing
		 * we can move on to the next page.
		 */

		if (result == VM_PAGER_PEND &&
		    (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {

			/*
			 * no per-page ops: refresh ppnext and continue
			 */

			if (by_list) {
				if (pp->version == pp_version)
					ppnext = TAILQ_NEXT(pp, listq);
				else
					ppnext = TAILQ_FIRST(&uobj->memq);
			} else {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
			}
			continue;
		}

		/*
		 * need to look at each page of the I/O operation.  we defer
		 * processing "pp" until the last trip through this "for" loop
		 * so that we can load "ppnext" for the main loop after we
		 * play with the cluster pages [thus the "npages + 1" in the
		 * loop below].
		 */
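		/*
		 * Illustrative note (not in the original source): with, say,
		 * npages == 3 and pp == ppsp[1], the loop below runs lcv =
		 * 0,1,2,3.  Trips 0 and 2 handle the cluster pages ppsp[0]
		 * and ppsp[2], trip 1 is skipped because it is pp itself,
		 * and the final trip (lcv == npages) processes pp and
		 * reloads ppnext for the outer loop.
		 */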
		for (lcv = 0 ; lcv < npages + 1 ; lcv++) {

			/*
			 * handle ppnext for outside loop, and saving pp
			 * until the end.
			 */

			if (lcv < npages) {
				if (ppsp[lcv] == pp)
					continue; /* skip pp until the end */
				ptmp = ppsp[lcv];
			} else {
				ptmp = pp;

				/* set up next page for outer loop */
				if (by_list) {
					if (pp->version == pp_version)
						ppnext = TAILQ_NEXT(pp, listq);
					else
						ppnext = TAILQ_FIRST(
						    &uobj->memq);
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj,
						    curoff);
				}
			}

			/*
			 * verify the page wasn't moved while obj was
			 * unlocked
			 */
			if (result == VM_PAGER_PEND && ptmp->uobject != uobj)
				continue;

			/*
			 * unbusy the page if I/O is done.  note that for
			 * pending I/O it is possible that the I/O op
			 * finished before we relocked the object (in
			 * which case the page is no longer busy).
			 */

			if (result != VM_PAGER_PEND) {
				if (ptmp->flags & PG_WANTED) {
					/* still holding object lock */
					wakeup(ptmp);
				}
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				if (ptmp->flags & PG_RELEASED) {
					uvm_unlock_pageq();
					if (!uvn_releasepg(ptmp, NULL)) {
						UVMHIST_LOG(maphist,
						    "released %p",
						    ptmp, 0,0,0);
						return (TRUE);
					}
					uvm_lock_pageq();
					continue;
				} else {
					if ((flags & PGO_WEAK) == 0 &&
					    !(result == VM_PAGER_ERROR &&
					      curproc == uvm.pagedaemon_proc)) {
						ptmp->flags |=
						    (PG_CLEAN|PG_CLEANCHK);
						if ((flags & PGO_FREE) == 0) {
							pmap_clear_modify(ptmp);
						}
					}
				}
			}

			/*
			 * dispose of page
			 */

			if (flags & PGO_DEACTIVATE) {
				if ((pp->pqflags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(ptmp, VM_PROT_NONE);
					uvm_pagedeactivate(ptmp);
				}
			} else if (flags & PGO_FREE) {
				if (result == VM_PAGER_PEND) {
					if ((ptmp->flags & PG_BUSY) != 0)
						/* signal for i/o done */
						ptmp->flags |= PG_RELEASED;
				} else {
					if (result != VM_PAGER_OK) {
						printf("uvn_flush: obj=%p, "
						    "offset=0x%llx. error %d\n",
						    pp->uobject,
						    (long long)pp->offset,
						    result);
						printf("uvn_flush: WARNING: "
						    "changes to page may be "
						    "lost!\n");
						retval = FALSE;
					}
					pmap_page_protect(ptmp, VM_PROT_NONE);
					uvm_pagefree(ptmp);
				}
			}
		}		/* end of "lcv" for loop */
	}			/* end of "pp" for loop */

	uvm_unlock_pageq();
	if ((flags & PGO_CLEANIT) && all && wasclean &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
	    (vp->v_flag & VONWORKLST)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}

	if (need_iosync) {
		UVMHIST_LOG(maphist," <<DOING IOSYNC>>",0,0,0,0);

		/*
		 * XXX this doesn't use the new two-flag scheme,
		 * but to use that, all i/o initiators will have to change.
		 */

		s = splbio();
		while (vp->v_numoutput != 0) {
			UVMHIST_LOG(ubchist, "waiting for vp %p num %d",
			    vp, vp->v_numoutput,0,0);
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput,
			    &uvn->u_obj.vmobjlock,
			    FALSE, "uvn_flush",0);
			simple_lock(&uvn->u_obj.vmobjlock);
		}
		splx(s);
	}

	/* return, with object locked! */
	UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0);
	return(retval);
}

/*
 * uvn_cluster
 *
 * we are about to do I/O in an object at offset.  this function is called
 * to establish a range of offsets around "offset" in which we can cluster
 * I/O.
 *
 * - currently doesn't matter if obj locked or not.
 */

static void
uvn_cluster(uobj, offset, loffset, hoffset)
	struct uvm_object *uobj;
	voff_t offset;
	voff_t *loffset, *hoffset;	/* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *)uobj;

	*loffset = offset;
	*hoffset = min(offset + MAXBSIZE, round_page(uvn->u_size));
}
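/*
 * Illustrative example (not part of the original file): the clamping done by
 * uvn_cluster() above, assuming PAGE_SIZE = 0x1000 and MAXBSIZE = 0x10000
 * (the real values come from the machine/param headers).
 *
 *   offset = 0x3000, u_size = 0x8800:
 *	*loffset = 0x3000
 *	*hoffset = min(0x3000 + 0x10000, round_page(0x8800))
 *	         = min(0x13000, 0x9000) = 0x9000
 *   i.e. the cluster is cut off at the page-rounded end of the vnode, so we
 *   never cluster I/O past EOF.
 *
 *   offset = 0x0, u_size = 0x100000:
 *	*hoffset = min(0x10000, 0x100000) = 0x10000
 *   i.e. a cluster is never wider than MAXBSIZE bytes.
 */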
/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked!  we will _unlock_ it before starting I/O.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(uobj, pps, npages, flags)
	struct uvm_object *uobj;
	struct vm_page **pps;
	int npages, flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	error = VOP_PUTPAGES(vp, pps, npages, flags, NULL);
	return uvm_errno2vmerror(error);
}

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;		/* IN/OUT */
	int *npagesp;			/* IN (OUT if PGO_LOCKED) */
	int centeridx;
	vm_prot_t access_type;
	int advice, flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;
	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);
	return uvm_errno2vmerror(error);
}

#endif /*OSKIT*/

/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned page will be BUSY.
 */

void
uvn_findpages(uobj, offset, npagesp, pps, flags)
	struct uvm_object *uobj;
	voff_t offset;
	int *npagesp;
	struct vm_page **pps;
	int flags;
{
	int i, rv, npages;

	rv = 0;
	npages = *npagesp;
	for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
		rv += uvn_findpage(uobj, offset, &pps[i], flags);
	}
	*npagesp = rv;
}
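/*
 * Illustrative sketch (not part of the original file) of the calling
 * convention visible in uvn_findpages()/uvn_findpage() above: the caller
 * holds uobj->vmobjlock, passes an array whose NULL slots mean "find or
 * allocate this page", and gets back PG_BUSY pages with *npagesp set to the
 * number of slots actually filled.  The function name, the flags value 0
 * (allocate and sleep as needed) and the fixed count of 4 are assumptions
 * made for this example only.
 */
#if 0
static void
example_get_four_pages(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pgs /* at least 4 slots */)
{
	int npages = 4;

	memset(pgs, 0, 4 * sizeof(struct vm_page *)); /* NULL => find/alloc */
	simple_lock(&uobj->vmobjlock);
	uvn_findpages(uobj, trunc_page(offset), &npages, pgs, 0);
	/* pgs[0..npages-1] now hold BUSY pages; the caller must unbusy them */
	simple_unlock(&uobj->vmobjlock);
}
#endif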
static int
uvn_findpage(uobj, offset, pgp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pgp;
	int flags;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
#ifndef OSKIT
			/* XXX??? */
			if (uvmexp.vnodepages >
			    (uvmexp.active + uvmexp.inactive + uvmexp.wired +
			     uvmexp.free) * 7 / 8) {
				pg = NULL;
			} else
#endif
			{
				pg = uvm_pagealloc(uobj, offset, NULL, 0);
			}
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				simple_unlock(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				simple_lock(&uobj->vmobjlock);
				continue;
			}
			uvmexp.vnodepages++;
			UVMHIST_LOG(ubchist, "alloced",0,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			simple_lock(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found",0,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

#ifndef OSKIT
/*
 * uvm_vnp_setsize: grow or shrink a vnode uvn
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 *
 * called from:
 *  => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos])
 *  => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write)
 *  => ffs_balloc [XXX: why? doesn't WRITE handle?]
 *  => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr
 *  => union fs: union_newsize
 */

void
uvm_vnp_setsize(vp, newsize)
	struct vnode *vp;
	voff_t newsize;
{
	struct uvm_vnode *uvn = &vp->v_uvm;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	simple_lock(&uvn->u_obj.vmobjlock);

	UVMHIST_LOG(ubchist, "old 0x%x new 0x%x", uvn->u_size, newsize, 0,0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	if (uvn->u_size > newsize && uvn->u_size != VSIZENOTSET) {
		(void) uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE);
	}
	uvn->u_size = newsize;
	simple_unlock(&uvn->u_obj.vmobjlock);
}
#endif /*OSKIT*/

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

#ifndef OSKIT
void
uvm_vnp_zerorange(vp, off, len)
	struct vnode *vp;
	off_t off;
	size_t len;
{
	void *win;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, 0);
		off += bytelen;
		len -= bytelen;
	}
}
#endif /*OSKIT*/
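/*
 * Illustrative sketch (not part of the original file) of the caller pattern
 * described in the uvm_vnp_setsize() comment above: a truncate or write path
 * updates the filesystem's own notion of the file size and then informs UVM.
 * Growing only records the new size; shrinking additionally flushes the pages
 * beyond the new EOF with PGO_FREE.  "fs_update_size" and "new_size" are
 * placeholders, not code from any particular filesystem.
 */
#if 0
	/* inside a filesystem truncate routine, holding a reference to vp */
	fs_update_size(vp, new_size);	/* fs-specific metadata update */
	uvm_vnp_setsize(vp, new_size);	/* sync the uvn's u_size; frees pages
					   past new_size when shrinking */
#endif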