vnode.c
	FIST_OP_LOOKUP_POSTCALL;
#if 0
	if ((thiscnp->cn_flags & PARAMASK) != (lowercnp->cn_flags & PARAMASK)) {
		char buf[256];
		strcpy(buf, fist_cn_flags(thiscnp->cn_flags));
		panic("LK: this 0x%x <%s>, lower 0x%x <%s>",
		      thiscnp->cn_flags, buf,
		      lowercnp->cn_flags, fist_cn_flags(lowercnp->cn_flags));
	}
#endif
#ifdef FIST_FILTER_NAME
	thiscnp->cn_flags = lowercnp->cn_flags;
#endif /* FIST_FILTER_NAME */
#if 0
	if (had_flags)
		thiscnp->cn_flags |= had_flags;
	else
		thiscnp->cn_flags &= ~had_flags;
#endif
	CNP_AFTER(ap->a_dvp);
#ifdef FIST_FILTER_NAME
	if (!error && (thiscnp->cn_flags & MAKEENTRY) &&
	    thiscnp->cn_nameiop != CREATE) {
		fist_dprint(1, "LK cache_enter\n");
		cache_enter(ap->a_dvp, *(ap->a_vpp), thiscnp);
	}
#endif /* FIST_FILTER_NAME */
	fist_dprint(1, "LK1 %d (dvp=0x%x)\n", counter, ap->a_dvp);

	/* Creating or renaming in a read-only layer must fail with EROFS. */
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	/*
	 * We must do the same locking and unlocking at this layer as
	 * is done in the layers below us.  We could figure this out
	 * based on the error return and the LASTCN, LOCKPARENT, and
	 * LOCKLEAF flags.  However, it is more expedient to just find
	 * out the state of the lower level vnodes and set ours to the
	 * same state.
	 */
	dvp = ap->a_dvp;
	vp = *ap->a_vpp;
	if (dvp == vp)
		return (error);
	if (!VOP_ISLOCKED(dvp, NULL)) {
		unlockargs.a_vp = dvp;
		unlockargs.a_flags = 0;
		unlockargs.a_p = p;
		vop_nounlock(&unlockargs);
	}
	if (vp != NULLVP && VOP_ISLOCKED(vp, NULL)) {
		lockargs.a_vp = vp;
		lockargs.a_flags = LK_SHARED;
		lockargs.a_p = p;
		vop_nolock(&lockargs);
	}
	return (error);
}

/*
 * Setattr call.  Disallow write attempts if the layer is mounted read-only.
 */
static int
wrapfs_setattr(ap)
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n",
		    __FUNCTION__, __FILE__, __LINE__);

	/* Changing attributes on a read-only mount is not allowed. */
	if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t) VNOVAL ||
	     vap->va_gid != (gid_t) VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t) VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);
		case VREG:
		case VLNK:
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (wrapfs_bypass((struct vop_generic_args *) ap));
}

/*
 * We handle getattr only to change the fsid.
 */
static int
wrapfs_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;

	fist_dprint(4, "WRAPFS_GETATTR: vp=0x%x\n", (int) ap->a_vp);

	if ((error = wrapfs_bypass((struct vop_generic_args *) ap)) != 0) {
		printf("bypass getattr returned error %d\n", error);
		return (error);
	}
	return (0);
}
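/*
 * A minimal sketch, not part of the original wrapfs sources: the
 * read-only tests in wrapfs_lookup() and wrapfs_setattr() above all
 * reduce to the same check against the mount flags.  A helper like the
 * following (name and placement are assumptions) could factor it out.
 */
#if 0
static __inline int
wrapfs_is_rdonly(struct vnode *vp)
{
	/* non-zero when the vnode's mount is read-only */
	return ((vp->v_mount->mnt_flag & MNT_RDONLY) != 0);
}
#endif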
static int
wrapfs_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n",
		    __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Disallow write attempts on read-only layers, unless the file
	 * is a socket, fifo, or a block or character device resident
	 * on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (wrapfs_bypass((struct vop_generic_args *) ap));
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
wrapfs_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n",
		    __FUNCTION__, __FILE__, __LINE__);

	vop_nolock(ap);
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	return (wrapfs_bypass((struct vop_generic_args *) ap));
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
wrapfs_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/* struct vnode *vp = ap->a_vp; */

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n",
		    __FUNCTION__, __FILE__, __LINE__);

	vop_nounlock(ap);
	ap->a_flags &= ~LK_INTERLOCK;
	return (wrapfs_bypass((struct vop_generic_args *) ap));
}

static int
wrapfs_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct wrapfs_node *xp = VP_TO_WRAPFS(vp);
	struct vnode *lowervp = xp->wrapfs_lowervp;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n",
		    __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim, so that until then our
	 * wrapfs_node is in the cache and reusable.  We still have to
	 * tell the lower layer the vnode is now inactive, though.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing the lowervp and
	 * then trying to reactivate it with capabilities (v_id), like
	 * they do in the name lookup cache code.  That's too much work
	 * for now.
	 */
	VOP_INACTIVE(lowervp, ap->a_p);
	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

static int
wrapfs_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct wrapfs_node *xp = VP_TO_WRAPFS(vp);
	struct vnode *lowervp = xp->wrapfs_lowervp;

	fist_dprint(4, "FXN=%s FILE=%s LINE=%d\n",
		    __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	xp->wrapfs_lowervp = NULLVP;
	LIST_REMOVE(xp, wrapfs_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);		/* release our reference to the lower vnode */
	return (0);
}

static int
wrapfs_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	printf("\ttag VT_WRAPFS, vp=%p, lowervp=%p\n",
	       vp, WRAPFS_VP_TO_LOWERVP(vp));
	return (0);
}

/****************************************************************************/

/* Verify that lowervp has a v_object; create it if needed. */
void
wrapfs_verify_lower_object(vnode_t *vp, char *fxn)
{
	vnode_t *lowervp = WRAPFS_VP_TO_LOWERVP(vp);

	/* allocate a duplicate vnode pager for the lower vp if needed */
	if (lowervp->v_object) {
		return;
	}
	lowervp->v_object = vm_pager_allocate(vp->v_object->type, lowervp,
					      vp->v_object->size, VM_PROT_ALL, 0LL);
	fist_dprint(2, "VERIFY_OBJECT: fxn=%s size=%d obj=0x%x lowervp=0x%x vp=0x%x\n",
		    fxn, vp->v_object->size, (int) lowervp->v_object,
		    (int) lowervp, (int) vp);
}
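/*
 * A minimal usage sketch, not from the original file: a paging entry
 * point would call wrapfs_verify_lower_object() to guarantee that the
 * lower vnode has a VM object before handing the operation down.  The
 * function below is hypothetical and only illustrates the call pattern.
 */
#if 0
static int
wrapfs_getpages_sketch(struct vop_getpages_args *ap)
{
	/* make sure lowervp->v_object exists before any paging I/O */
	wrapfs_verify_lower_object(ap->a_vp, "wrapfs_getpages_sketch");
	return (wrapfs_bypass((struct vop_generic_args *) ap));
}
#endif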
/*
 * Create a page in the vnode and insert data into it.
 * This is used to synchronize data between the VM and the read/write
 * interface: data that was written using write() gets pages containing
 * the same data, so that subsequent mmap() ops get valid data.
 */
void
wrapfs_fill_page(vnode_t *vp, char *buf, long long offset)
{
	vm_page_t pp;
	vm_offset_t kva;
	caddr_t ca;

	fist_dprint(4, "FILL_PAGE: vp=0x%x, buf=0x%x [%d,%d,%d], offset=0x%x\n",
		    (int) vp, (int) buf, buf[0], buf[1], buf[2], (int) offset);
	fist_dprint(1, "FILL_PAGE: vp=0x%x, buf=0x%x [%d,%d,%d], offset=0x%x\n",
		    (int) vp, (int) buf, buf[0], buf[1], buf[2], (int) offset);

	pp = vm_page_grab(vp->v_object, OFF_TO_IDX(offset),
			  VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (!pp) {
		printf("vm_page_grab returned NULL for offset 0x%x!\n",
		       (int) offset);
		return;
	}
	/* map the page into kernel address space, copy the data in, unmap */
	kva = vm_pager_map_page(pp);
	ca = (caddr_t) kva;
	bcopy(buf, ca, PAGE_SIZE);
	vm_pager_unmap_page(kva);
	vm_page_set_validclean(pp, 0, PAGE_SIZE);
	pp->flags &= ~PG_BUSY;
}

void
wrapfs_fill_lowerpage(vnode_t *lowervp, char *buf, long long offset)
{
	vm_page_t pp;
	vm_offset_t kva;
	caddr_t ca;

	fist_dprint(4, "FILL_LOWERPAGE: lowervp=0x%x, buf=0x%x [%d,%d,%d], offset=0x%x\n",
		    (int) lowervp, (int) buf, buf[0], buf[1], buf[2], (int) offset);
	fist_dprint(1, "FILL_LOWERPAGE: vp=0x%x, buf=0x%x [%d,%d,%d], offset=0x%x\n",
		    (int) lowervp, (int) buf, buf[0], buf[1], buf[2], (int) offset);

	pp = vm_page_grab(lowervp->v_object, OFF_TO_IDX(offset),
			  VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (!pp) {
		printf("vm_page_grab2 returned NULL for offset 0x%x!\n",
		       (int) offset);
		return;
	}
	kva = vm_pager_map_page(pp);
	ca = (caddr_t) kva;
	bcopy(buf, ca, PAGE_SIZE);
	vm_pager_unmap_page(kva);
	vm_page_set_validclean(pp, 0, PAGE_SIZE);
	pp->flags &= ~PG_BUSY;
}

static int
wrapfs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	/* easy mappings */
	vnode_t *vp = ap->a_vp;
	uio_t *uiop = ap->a_uio;
	int ioflag = ap->a_ioflag;
	cred_t *cr = ap->a_cred;

	int error = EPERM;
	vnode_t *hidden_vp;
	uio_t temp_uio;
	iovec_t *temp_iovec;
	caddr_t current_base;
	int i, bytes_read;
	int num_pages, resid;
	long long start_loffset, end_loffset;
	long long cleartext_start_loffset, cleartext_end_loffset, current_loffset;

	fist_dprint(4, "fist_wrapfs_read vp %x\n", (int) vp);
#ifdef FIST_DEBUG
	fist_print_uios("fist_wrapfs_read", uiop);
#endif /* FIST_DEBUG */

	/* round the requested range out to whole pages */
	cleartext_start_loffset = uiop->uio_offset;
	cleartext_end_loffset = uiop->uio_offset + uiop->uio_resid;
	start_loffset = cleartext_start_loffset & ~(PAGE_SIZE - 1);
	end_loffset = cleartext_end_loffset & ~(PAGE_SIZE - 1);
	/*
	 * If the end offset is not a multiple of PAGE_SIZE, the above
	 * formula loses one page; adjust for it.
	 */
	if (cleartext_end_loffset > end_loffset)
		end_loffset += PAGE_SIZE;
	resid = end_loffset - start_loffset;
	num_pages = resid >> PAGE_SHIFT;

	fist_dprint(6, "READ: so=%d eo=%d cs=%d es=%d res=%d np=%d ps=%d\n",
		    (int) start_loffset, (int) end_loffset,
		    (int) cleartext_start_loffset, (int) cleartext_end_loffset,
		    resid, num_pages, PAGE_SIZE);
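	/*
	 * Worked example of the rounding above (assuming PAGE_SIZE is 4096
	 * and PAGE_SHIFT is 12): a read of 200 bytes at offset 4000 covers
	 * cleartext bytes 4000..4199.  Then start_loffset = 4000 & ~4095 = 0,
	 * and 4200 & ~4095 = 4096 is bumped by one page to 8192, so
	 * resid = 8192 and num_pages = 2: both underlying pages must be
	 * fetched even though only 200 cleartext bytes were requested.
	 */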