vfs_subr.c
来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 2,733 行 · 第 1/5 页
C
2,733 行
 * initialize uvm_object within vnode. */
	uobj = &vp->v_uvm.u_obj;
	uobj->pgops = &uvm_vnodeops;
	TAILQ_INIT(&uobj->memq);
	vp->v_uvm.u_size = VSIZENOTSET;

	if (mp && error != EDEADLK)
		vfs_unbusy(mp);
	return (0);
}

/*
 * This is really just the reverse of getnewvnode(). Needed for
 * VFS_VGET functions who may need to push back a vnode in case
 * of a locking race.
 *
 * The caller must hold the sole reference (v_usecount == 1); the
 * vnode is detached from any mount point, marked VBAD, and put back
 * at the head of the free (or hold) list so the slot is reused soon.
 */
void
ungetnewvnode(vp)
	struct vnode *vp;
{
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 1)
		panic("ungetnewvnode: busy vnode");
#endif
	vp->v_usecount--;
	insmntque(vp, NULL);	/* remove from its mount's vnode list */
	vp->v_type = VBAD;

	simple_lock(&vp->v_interlock);
	/*
	 * Insert at head of LRU list.  A vnode with v_holdcnt > 0 goes
	 * on the hold list; an unreferenced one on the free list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_HEAD(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * Move a vnode from one mount queue to another.
 * Passing mp == NULL simply removes the vnode from its current queue.
 */
void
insmntque(vp, mp)
	struct vnode *vp;
	struct mount *mp;
{

#ifdef DIAGNOSTIC
	if ((mp != NULL) &&
	    (mp->mnt_flag & MNT_UNMOUNT) &&
	    !(mp->mnt_flag & MNT_SOFTDEP) &&
	    vp->v_tag != VT_VFS) {
		panic("insmntque into dying filesystem");
	}
#endif

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}
#endif /*OSKIT*/

/*
 * Update outstanding I/O count and do wakeup if requested.
 * Decrements vp->v_numoutput for the completed buffer and, if someone
 * is sleeping with VBWAIT set, wakes them on &vp->v_numoutput.
 * NOTE(review): appears to be the I/O-completion side of the
 * v_numoutput accounting used by vflushbuf() below — confirm callers.
 */
void
vwakeup(bp)
	struct buf *bp;
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		if (--vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput, vp %p", vp);
		if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}

#ifndef OSKIT
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 *
 * flags may contain V_SAVE, in which case dirty data is synced out
 * (VOP_FSYNC) before the buffers are invalidated.  slpflag/slptimeo are
 * passed to tsleep() while waiting for B_BUSY buffers.  Returns 0, EIO
 * if the pager flush fails, or a tsleep()/VOP_FSYNC() error.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s, error, rv;
	int flushflags = PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO|
		(flags & V_SAVE ? PGO_CLEANIT : 0);

	/* XXXUBC this doesn't look at flags or slp* */
	if (vp->v_type == VREG) {
		simple_lock(&uobj->vmobjlock);
		rv = (uobj->pgops->pgo_flush)(uobj, 0, 0, flushflags);
		simple_unlock(&uobj->vmobjlock);
		if (!rv) {
			return EIO;
		}
	}
	if (flags & V_SAVE) {
		/* Push dirty data to disk before invalidating anything. */
		error = VOP_FSYNC(vp, cred, FSYNC_WAIT|FSYNC_RECLAIM, 0, 0, p);
		if (error)
			return (error);
#ifdef DIAGNOSTIC
		s = splbio();
		if (vp->v_numoutput > 0 || !LIST_EMPTY(&vp->v_dirtyblkhd))
			panic("vinvalbuf: dirty bufs, vp %p", vp);
		splx(s);
#endif
	}

	s = splbio();

restart:
	/*
	 * Walk the clean list; sleep on busy buffers (marking B_WANTED)
	 * and rescan from the top after any sleep, since the lists may
	 * have changed while we slept.
	 */
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
			    "vinvalbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		/*
		 * XXX Since there are no node locks for NFS, I believe
		 * there is a slight chance that a delayed write will
		 * occur while sleeping just above, so check for it.
		 */
		if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
#ifdef DEBUG
			printf("buffer still DELWRI\n");
#endif
			bp->b_flags |= B_BUSY | B_VFLUSH;
			VOP_BWRITE(bp);
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

#ifdef DIAGNOSTIC
	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
		panic("vinvalbuf: flush failed, vp %p", vp);
#endif

	splx(s);

	return (0);
}

/*
 * Destroy any in core blocks past the truncation length.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
 *
 * lbn is the first logical block to destroy; buffers with
 * b_lblkno < lbn are left alone.  Returns 0, EIO on pager flush
 * failure, or a tsleep() error.
 */
int
vtruncbuf(vp, lbn, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t lbn;
	int slpflag, slptimeo;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s, error, rv;

	s = splbio();
	if (vp->v_type == VREG) {
		simple_lock(&uobj->vmobjlock);
		rv = (uobj->pgops->pgo_flush)(uobj,
		    round_page(lbn << vp->v_mount->mnt_fs_bshift),
		    vp->v_uvm.u_size, PGO_FREE);
		simple_unlock(&uobj->vmobjlock);
		if (!rv) {
			splx(s);
			return EIO;
		}
	}

restart:
	/* Same sleep-and-rescan discipline as vinvalbuf() above. */
	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if (bp->b_lblkno < lbn)
			continue;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep(bp, slpflag | (PRIBIO + 1),
			    "vtruncbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
			goto restart;
		}
		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
		brelse(bp);
	}

	splx(s);

	return (0);
}

/*
 * Write out (but do not invalidate) all dirty buffers of a vnode.
 * If sync is non-zero, wait for all writes to complete and complain
 * about (and retry) any buffers that became dirty again.
 */
void
vflushbuf(vp, sync)
	struct vnode *vp;
	int sync;
{
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct buf *bp, *nbp;
	int s;

	if (vp->v_type == VREG) {
		int flags = PGO_CLEANIT|PGO_ALLPAGES|
			(sync ? PGO_SYNCIO : 0);

		simple_lock(&uobj->vmobjlock);
		(uobj->pgops->pgo_flush)(uobj, 0, 0, flags);
		simple_unlock(&uobj->vmobjlock);
	}

loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty, bp %p", bp);
		bp->b_flags |= B_BUSY | B_VFLUSH;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 */
		if (bp->b_vp == vp || sync == 0)
			(void) bawrite(bp);
		else
			(void) bwrite(bp);
		goto loop;
	}
	if (sync == 0) {
		splx(s);
		return;
	}
	/* Sleep until all writes started above have drained. */
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "vflushbuf", 0);
	}
	splx(s);
	if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}
#endif /*OSKIT*/

/*
 * Associate a buffer with a vnode.
 * Takes a hold reference on the vnode and puts the buffer on the
 * vnode's clean list; sets b_dev for block/character device vnodes.
 */
void
bgetvp(vp, bp)
	struct vnode *vp;
	struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free, bp %p", bp);
	VHOLD(vp);
	s = splbio();
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 * Removes the buffer from the vnode's buffer list, takes the vnode
 * off the syncer worklist when its last dirty buffer is gone, and
 * releases the hold reference taken in bgetvp().
 */
void
brelvp(bp)
	struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == NULL)
		panic("brelvp: vp NULL, bp %p", bp);
	s = splbio();
	vp = bp->b_vp;
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	if (vp->v_type != VREG &&
	    (vp->v_flag & VONWORKLST) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;
	HOLDRELE(vp);
	splx(s);
}

#ifndef OSKIT
/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * This function must be called at splbio().
 */
void
reassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;

	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if ((bp->b_flags & B_DELWRI) == 0) {
		listheadp = &newvp->v_cleanblkhd;
		/* Last dirty buffer gone: drop vnode from syncer worklist. */
		if (newvp->v_type != VREG &&
		    (newvp->v_flag & VONWORKLST) &&
		    LIST_FIRST(&newvp->v_dirtyblkhd) == NULL) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	} else {
		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			/*
			 * Pick the syncer delay by vnode type: directories,
			 * mounted-filesystem metadata and plain files each
			 * get their own writeback delay.
			 */
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
				break;
			}
			if (!newvp->v_mount ||
			    (newvp->v_mount->mnt_flag & MNT_ASYNC) == 0)
				vn_syncer_add_to_worklist(newvp, delay);
		}
	}
	bufinsvn(bp, listheadp);
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VBLK));
}

/*
 * Create a vnode for a character device.
 * Used for kernfs and some console handling.
 */
int
cdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{

	return (getdevvp(dev, vpp, VCHR));
}

/*
 * Create a vnode for a device.
 * Used by bdevvp (block device) for root file system etc.,
 * and by cdevvp (character device) for console and kernfs.
 */
int
getdevvp(dev, vpp, type)
	dev_t dev;
	struct vnode **vpp;
	enum vtype type;
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	/* NODEV is not an error: return success with no vnode. */
	if (dev == NODEV) {
		*vpp = NULLVP;
		return (0);
	}
	error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = type;
	/* If an alias already exists for this device, use it instead. */
	if ((nvp = checkalias(vp, dev, NULL)) != 0) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev ||
		    nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		/* vget() failure means the vnode changed; rescan. */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	/*
	 * No usable alias found: give the new vnode its own specinfo
	 * and link it onto the device hash chain.
	 */
	if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		nvp->v_speclockf = NULL;
		*vpp = nvp;
		if (vp != NULLVP) {
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?