📄 vnode.c
		return (EROFS);

	/*
	 * Although it is possible to call wrapfs_bypass(), we'll do
	 * a direct call to reduce overhead
	 */
	ldvp = WRAPFS_VP_TO_LOWERVP(dvp);
	vp = lvp = NULL;

	CNP_BEFORE(ap->a_dvp);
#ifdef FIST_FILTER_NAME
	error = VOP_LOOKUP(ldvp, &lvp, lowercnp);
#else /* not FIST_FILTER_NAME */
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
#endif /* not FIST_FILTER_NAME */
#ifdef FIST_FILTER_NAME
	thiscnp->cn_flags = lowercnp->cn_flags;
#endif /* FIST_FILTER_NAME */
	CNP_AFTER(ap->a_dvp);

#ifdef FIST_FILTER_NAME
	/*
	if (!error && (thiscnp->cn_flags & MAKEENTRY) &&
	    thiscnp->cn_nameiop != CREATE) {
		fist_dprint(1, "LK cache_enter\n");
		cache_enter(ap->a_dvp, *(ap->a_vpp), thiscnp);
	}
	*/
#endif /* FIST_FILTER_NAME */

	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * Rely only on the PDIRUNLOCK flag which should be carefully
	 * tracked by underlying filesystem.
	 */
	if (cnp->cn_flags & PDIRUNLOCK)
		VOP_UNLOCK(dvp, LK_THISLAYER, p);
	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = wrapfs_node_create(dvp->v_mount, lvp, &vp);
			if (error == 0)
				*ap->a_vpp = vp;
		}
	}

	print_location();
	return (error);
}

/*
 * Setattr call. Disallow write attempts if the layer is mounted read-only.
 */
int
wrapfs_setattr(ap)
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	int error = 0;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
#ifdef FIST_FILTER_DATA
			error = wrapfs_fill_zeros(vp, vap, ap->a_cred, ap->a_p);
			if (error)
				return error;
#endif /* FIST_FILTER_DATA */
		}
	}

	print_location();
	return wrapfs_bypass((struct vop_generic_args *)ap);
}

/*
 * We handle getattr only to change the fsid.
 */
static int
wrapfs_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	vnode_t *vp = ap->a_vp;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	if ((error = wrapfs_bypass((struct vop_generic_args *)ap)) != 0)
		return (error);
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];	/* ??? */
#ifdef FIST_FILTER_DATA
	if (vp->v_object) {
		if (ap->a_vap->va_size > vp->v_object->un_pager.vnp.vnp_size)
			vnode_pager_setsize(vp, ap->a_vap->va_size);
	}
#endif /* FIST_FILTER_DATA */

	print_location();
	return (0);
}
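/*
 * NOTE (editorial sketch, not part of the original vnode.c): the handlers in
 * this file rely on the FiST/nullfs-style wrapfs node layout and macros,
 * normally declared in the wrapfs header.  The block below only restates
 * what the uses in this file imply (wrapfs_hash, wrapfs_lowervp,
 * wrapfs_lock, VP_TO_WRAPFS(), WRAPFS_VP_TO_LOWERVP()); the exact
 * definitions in a given wrapfs build may differ.
 */
#if 0	/* illustrative sketch only */
struct wrapfs_node {
	LIST_ENTRY(wrapfs_node) wrapfs_hash;	/* hash chain (see wrapfs_inactive) */
	struct vnode	*wrapfs_lowervp;	/* vnode in the lower layer */
	struct lock	 wrapfs_lock;		/* per-layer lock when not shared */
};

#define	VP_TO_WRAPFS(vp)		((struct wrapfs_node *)(vp)->v_data)
#define	WRAPFS_VP_TO_LOWERVP(vp)	(VP_TO_WRAPFS(vp)->wrapfs_lowervp)
#endif	/* illustrative sketch */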
/*
 * Handle to disallow write access if mounted read-only.
 */
static int
wrapfs_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Disallow write attempts on read-only layers;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}

	print_location();
	return wrapfs_bypass((struct vop_generic_args *)ap);
}

/*
 * We must handle open to be able to catch MNT_NODEV and friends.
 */
static int
wrapfs_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *lvp = WRAPFS_VP_TO_LOWERVP(ap->a_vp);

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	if ((vp->v_mount->mnt_flag & MNT_NODEV) &&
	    (lvp->v_type == VBLK || lvp->v_type == VCHR))
		return ENXIO;

	print_location();
	return wrapfs_bypass((struct vop_generic_args *)ap);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
wrapfs_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct proc *p = ap->a_p;
	struct wrapfs_node *np = VP_TO_WRAPFS(vp);
	struct vnode *lvp;
	int error;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	if (flags & LK_THISLAYER) {
		if (vp->v_vnlock != NULL)
			return 0;	/* lock is shared across layers */
		error = lockmgr(&np->wrapfs_lock, flags & ~LK_THISLAYER,
		    &vp->v_interlock, p);
		return (error);
	}

	if (vp->v_vnlock != NULL) {
		/*
		 * The lower level has exported a struct lock to us. Use
		 * it so that all vnodes in the stack lock and unlock
		 * simultaneously. Note: we don't DRAIN the lock as DRAIN
		 * decommissions the lock - just because our vnode is
		 * going away doesn't mean the struct lock below us is.
		 * LK_EXCLUSIVE is fine.
		 */
		if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
			fist_dprint(2, "wrapfs_lock: avoiding LK_DRAIN\n");
			return (lockmgr(vp->v_vnlock,
			    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE,
			    &vp->v_interlock, p));
		}
		return (lockmgr(vp->v_vnlock, flags, &vp->v_interlock, p));
	}
	/*
	 * To prevent race conditions involving doing a lookup
	 * on "..", we have to lock the lower node, then lock our
	 * node. Most of the time it won't matter that we lock our
	 * node (as any locking would need the lower one locked
	 * first). But we can LK_DRAIN the upper lock as a step
	 * towards decommissioning it.
	 */
	lvp = WRAPFS_VP_TO_LOWERVP(vp);
	if (lvp == NULL)
		return (lockmgr(&np->wrapfs_lock, flags, &vp->v_interlock, p));
	if (flags & LK_INTERLOCK) {
		VI_UNLOCK(vp);
		flags &= ~LK_INTERLOCK;
	}
	if ((flags & LK_TYPE_MASK) == LK_DRAIN) {
		error = VOP_LOCK(lvp,
		    (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE, p);
	} else
		error = VOP_LOCK(lvp, flags, p);
	if (error)
		return (error);
	error = lockmgr(&np->wrapfs_lock, flags, &vp->v_interlock, p);
	if (error)
		VOP_UNLOCK(lvp, 0, p);

	print_location();
	return (error);
}
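/*
 * NOTE (editorial sketch, not part of the original vnode.c): the v_vnlock
 * branches in wrapfs_lock() above and wrapfs_unlock() below assume that,
 * when the lower filesystem exports a struct lock, the wrapfs vnode was
 * created with its v_vnlock pointing at the lower vnode's lock.  The helper
 * name below is hypothetical; in the FiST templates this assignment lives
 * in the node-allocation code, not in this file.
 */
#if 0	/* illustrative sketch only */
static void
wrapfs_share_lowerlock(struct vnode *vp, struct vnode *lowervp)
{
	/*
	 * Point our v_vnlock at the lower layer's lock (NULL if the lower
	 * filesystem keeps its lock private), so locking any layer locks
	 * the entire stack through the same struct lock.
	 */
	vp->v_vnlock = lowervp->v_vnlock;
}
#endif	/* illustrative sketch */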
/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
wrapfs_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct proc *p = ap->a_p;
	struct wrapfs_node *np = VP_TO_WRAPFS(vp);
	struct vnode *lvp;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	if (vp->v_vnlock != NULL) {
		if (flags & LK_THISLAYER)
			return 0;	/* the lock is shared across layers */
		flags &= ~LK_THISLAYER;
		return (lockmgr(vp->v_vnlock, flags | LK_RELEASE,
		    &vp->v_interlock, p));
	}
	lvp = WRAPFS_VP_TO_LOWERVP(vp);
	if (lvp == NULL)
		return (lockmgr(&np->wrapfs_lock, flags | LK_RELEASE,
		    &vp->v_interlock, p));
	if ((flags & LK_THISLAYER) == 0) {
		if (flags & LK_INTERLOCK) {
			VI_UNLOCK(vp);
			flags &= ~LK_INTERLOCK;
		}
		VOP_UNLOCK(lvp, flags, p);
	} else
		flags &= ~LK_THISLAYER;
	ap->a_flags = flags;

	print_location();
	return (lockmgr(&np->wrapfs_lock, flags | LK_RELEASE,
	    &vp->v_interlock, p));
}

static int
wrapfs_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	if (vp->v_vnlock != NULL)
		return (lockstatus(vp->v_vnlock, p));

	print_location();
	return (lockstatus(&VP_TO_WRAPFS(vp)->wrapfs_lock, p));
}

/*
 * There is no way to tell that someone issued remove/rmdir operation
 * on the underlying filesystem. For now we just have to release lowervp
 * as soon as possible.
 */
static int
wrapfs_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	struct wrapfs_node *xp = VP_TO_WRAPFS(vp);
	struct vnode *lowervp = xp->wrapfs_lowervp;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	lockmgr(&wrapfs_hashlock, LK_EXCLUSIVE, NULL, p);
	LIST_REMOVE(xp, wrapfs_hash);
	lockmgr(&wrapfs_hashlock, LK_RELEASE, NULL, p);

	xp->wrapfs_lowervp = NULLVP;
	if (vp->v_vnlock != NULL) {
		vp->v_vnlock = &xp->wrapfs_lock;	/* we no longer share the lock */
	} else
		VOP_UNLOCK(vp, LK_THISLAYER, p);

	if (lowervp) {
		vput(lowervp);
		/*
		 * Now it is safe to drop references to the lower vnode.
		 * VOP_INACTIVE() will be called by vrele() if necessary.
		 */
		vrele(lowervp);
	}

	print_location();
	return (0);
}

/*
 * We can free memory in wrapfs_inactive, but we do this
 * here. (Possible to guard vp->v_data to point somewhere)
 */
static int
wrapfs_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	void *vdata = vp->v_data;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n", __FUNCTION__, __FILE__, __LINE__);

	vp->v_data = NULL;
	FREE(vdata, M_WRAPFSNODE);

	print_location();
	return (0);
}

static int
wrapfs_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_WRAPFS, vp=%p, lowervp=%p\n", vp, WRAPFS_VP_TO_LOWERVP(vp));
	if (vp->v_vnlock != NULL) {
		printf("\tvnlock: ");
		lockmgr_printinfo(vp->v_vnlock);
	} else {
		printf("\twrapfs_lock: ");
		lockmgr_printinfo(&VP_TO_WRAPFS(vp)->wrapfs_lock);
	}
	printf("\n");
	return (0);
}

#ifdef FIST_FILTER_DATA
/*
 * if FIST_FILTER_DATA, then we need to create 2 objects, 1 for
 * lower level vnode, another for wrapfs vnode
 */
static int
wrapfs_createvobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;