📄 vnode.c
		(int) cleartext_end_loffset, resid, num_pages, PAGE_SIZE);

    temp_iovec = kmem_zalloc(num_pages * sizeof(iovec_t));
    for (i = 0; i < num_pages; i++) {
        temp_iovec[i].iov_len = PAGE_SIZE;
        temp_iovec[i].iov_base = kmem_zalloc(PAGE_SIZE);
        fist_dprint(6, "READ allocated %d address 0x%x\n",
                    i, temp_iovec[i].iov_base);
    }

    temp_uio.uio_iov = temp_iovec;
    temp_uio.uio_iovcnt = num_pages;
    temp_uio.uio_offset = start_loffset;
    temp_uio.uio_segflg = UIO_SYSSPACE;
    temp_uio.uio_rw = uiop->uio_rw;
    temp_uio.uio_procp = uiop->uio_procp;
    temp_uio.uio_resid = resid;

    hidden_vp = WRAPFS_VP_TO_LOWERVP(vp);

    /*
     * pass operation to hidden filesystem, and return status
     */
    error = VOP_READ(hidden_vp, &temp_uio, ioflag, cr);
    if (error) {
        fist_dprint(4, "VOP_READ in read returned error - not good\n");
        /* XXX to be checked */
        goto out_free;
    }

    current_loffset = start_loffset;
    for (i = 0; i < num_pages; i++) {
        bytes_read = PAGE_SIZE - temp_iovec[i].iov_len;
        if (bytes_read == 0)
            break;
        temp_iovec[i].iov_base -= bytes_read;
        current_base = temp_iovec[i].iov_base;
        /* decode the page/block */
        wrapfs_decode_block(current_base, current_base, bytes_read,
                            vp, vp->v_mount);
        /*
         * save the original size, for kmem_free.
         * no need for it w/ wrapfs; size is always PAGE_SIZE, hence this line
         * is commented out:
         * temp_iovec[i].iov_len = uiop->uio_iov[i].iov_len;
         */

        /* treat first and last iovec separately, not all data in them is needed */
        if (current_loffset + bytes_read > cleartext_end_loffset) {
            bytes_read = cleartext_end_loffset - current_loffset;
        }
        if (i == 0) {
            bytes_read -= cleartext_start_loffset - start_loffset;
            current_loffset += cleartext_start_loffset - start_loffset;
            current_base += cleartext_start_loffset - start_loffset;
        }
        if ((error = fist_uiomove(current_base, bytes_read, UIO_READ, uiop)))
            /*
             * XXX: we have to see the exact semantics of returning with an
             * EFAULT from read
             */
            break;
        current_loffset += bytes_read;
    }

out_free:
    for (i = 0; i < num_pages; i++) {
        fist_dprint(6, "READ freeing %d address 0x%x\n",
                    i, temp_iovec[i].iov_base);
        kmem_free(temp_iovec[i].iov_base);
    }
    kmem_free(temp_iovec);

#ifdef FIST_DEBUG
    fist_print_uios("fist_wrapfs_read (END)", uiop);
#endif /* FIST_DEBUG */
    print_location();
    return (error);
}


static int
wrapfs_write(ap)
    struct vop_write_args /* {
        struct vnode *a_vp;
        struct uio *a_uio;
        int a_ioflag;
        struct ucred *a_cred;
    } */ *ap;
{
    /* easy mappings */
    vnode_t *vp = ap->a_vp;
    uio_t *uiop = ap->a_uio;
    int ioflag = ap->a_ioflag;
    cred_t *cr = ap->a_cred;
    struct proc *p = curproc;   /* XXX */

    int error = EPERM;
    vnode_t *hidden_vp;
    vattr_t va;
    uio_t temp_uio;
    iovec_t *temp_iovec;
    iovec_t *free_iovec;        /* for freeing allocated memory */
    int i;
    caddr_t current_base;
    int resid, bytes_read, num_pages, first_page_bytes, real_first_page;
    long long start_loffset, end_loffset, real_start_loffset;
    long long cleartext_start_loffset, cleartext_end_loffset, current_loffset;
    int hidden_ioflag = (ioflag & ~IO_APPEND);

    fist_dprint(4, "fist_wrapfs_write vp=0x%x ioflag=0x%x offset=0x%x resid=%d iovcnt=%x\n",
                (int) vp, ioflag, (int) uiop->uio_offset,
                uiop->uio_resid, uiop->uio_iovcnt);

#ifdef FIST_DEBUG
    fist_print_uios("fist_wrapfs_write (START)", uiop);
#endif /* FIST_DEBUG */

    hidden_vp = WRAPFS_VP_TO_LOWERVP(vp);

    /* we don't want anybody to do updates while we write, so lock the vnode */
#ifdef DO_WLOCK
    VREF(hidden_vp);            /* XXX: is this needed? */
    vn_lock(hidden_vp, LK_EXCLUSIVE | LK_RETRY | LK_CANRECURSE, p);
#endif

    /* get the attributes, length is necessary for correct updates */
    if ((error = VOP_GETATTR(hidden_vp, &va, cr, p))) {
        fist_dprint(4, "VOP_GETATTR returned error - not good\n");
        /* XXX to be checked */
        goto out;
    }

    /* just in case someone tries to pull a fast one */
    if (uiop->uio_resid == 0) {
        error = 0;
        goto out;
    }

    cleartext_start_loffset = uiop->uio_offset;
    cleartext_end_loffset = uiop->uio_offset + uiop->uio_resid;
    if (ioflag & IO_APPEND) {
        fist_dprint(6, "WRITE: turning off append flag\n");
        cleartext_start_loffset += va.va_size;
        cleartext_end_loffset += va.va_size;
    }
    start_loffset = MIN(cleartext_start_loffset, va.va_size) & ~(PAGE_SIZE - 1);
    real_start_loffset = cleartext_start_loffset & ~(PAGE_SIZE - 1);
    first_page_bytes = MIN(cleartext_start_loffset, va.va_size) - start_loffset;
    /* must use this to avoid shifting a quad w/ gcc */
    real_first_page = (int) (real_start_loffset - start_loffset) >> PAGE_SHIFT;
    end_loffset = cleartext_end_loffset & ~(PAGE_SIZE - 1);
    ASSERT(first_page_bytes <= PAGE_SIZE);
    /*
     * if not multiple of PAGE_SIZE, then the above formula loses one page.
     * adjust for it
     */
    if (cleartext_end_loffset > end_loffset)
        end_loffset += PAGE_SIZE;
    resid = end_loffset - start_loffset;
    num_pages = resid >> PAGE_SHIFT;
    if (num_pages == 1)
        first_page_bytes = PAGE_SIZE;

    temp_iovec = kmem_zalloc(num_pages * sizeof(iovec_t));
    free_iovec = kmem_zalloc(num_pages * sizeof(iovec_t));
    for (i = 0; i < num_pages; i++) {
        temp_iovec[i].iov_len = free_iovec[i].iov_len = PAGE_SIZE;
        /* we need the pages to be zeroed out */
        temp_iovec[i].iov_base = free_iovec[i].iov_base = kmem_zalloc(PAGE_SIZE);
    }

    fist_dprint(6, "WRITE: so=%d eo=%d cso=%d ceo=%d rso=%d res=%d np=%d rfp=%d\n",
                (int) start_loffset,
                (int) end_loffset,
                (int) cleartext_start_loffset,
                (int) cleartext_end_loffset,
                (int) real_start_loffset,
                resid,
                num_pages,
                real_first_page);

    current_loffset = start_loffset;

    /* read first block XXX check length of file */
    temp_uio.uio_iov = temp_iovec;
    temp_uio.uio_iovcnt = 1;
    temp_uio.uio_offset = start_loffset;
    temp_uio.uio_segflg = UIO_SYSSPACE;
    temp_uio.uio_rw = UIO_READ;
    temp_uio.uio_procp = uiop->uio_procp;
    temp_uio.uio_resid = first_page_bytes;

    fist_print_uios("WRITE (before VOP_READ 1)", &temp_uio);
    error = VOP_READ(hidden_vp, &temp_uio, hidden_ioflag, cr);
    if (error) {
        fist_dprint(5, "VOP_READ returned error - not good\n");
        /* XXX to be checked */
        goto out_free;
    }
    fist_print_uios("WRITE (after VOP_READ 1)", &temp_uio);

    bytes_read = PAGE_SIZE - temp_iovec[0].iov_len;
    temp_iovec[0].iov_base -= bytes_read;
    temp_iovec[0].iov_len = PAGE_SIZE;
    /* decode block read */
    wrapfs_decode_block(temp_iovec[0].iov_base, temp_iovec[0].iov_base,
                        bytes_read, vp, vp->v_mount);

    /*
     * if num_pages == 1, we already read the page... don't clobber it
     * if num_pages > 1, then we must read the last page, and decode it
     * completely, before clobbering it.
     * XXX: if end offset is on page boundary, we don't have to do this.
     */
    if (num_pages > 1) {
        /* read last block XXX check length of file */
        temp_uio.uio_iov = temp_iovec + (num_pages - 1);
        temp_uio.uio_iovcnt = 1;
        temp_uio.uio_offset = end_loffset - PAGE_SIZE;
        temp_uio.uio_segflg = UIO_SYSSPACE;
        temp_uio.uio_rw = UIO_READ;
        temp_uio.uio_procp = uiop->uio_procp;
        temp_uio.uio_resid = PAGE_SIZE;

        fist_print_uios("WRITE (before VOP_READ 2)", &temp_uio);
        error = VOP_READ(hidden_vp, &temp_uio, hidden_ioflag, cr);
        fist_print_uios("WRITE (after VOP_READ 3)", &temp_uio);
        if (error) {
            fist_dprint(4, "VOP_READ returned error - not good\n");
            /* XXX to be checked */
            goto out_free;
        }

        bytes_read = PAGE_SIZE - temp_iovec[num_pages - 1].iov_len;
        temp_iovec[num_pages - 1].iov_base -= bytes_read;
        temp_iovec[num_pages - 1].iov_len = PAGE_SIZE;
        /* decode block read */
        wrapfs_decode_block(temp_iovec[num_pages - 1].iov_base,
                            temp_iovec[num_pages - 1].iov_base,
                            bytes_read, vp, vp->v_mount);
    }

    /*
     * Now we are ready to write the bytes within the start/end
     * cleartext offsets in the buffers we allocated.
     */
    for (i = 0; i < num_pages; i++) {
        if (i >= real_first_page) {
            bytes_read = PAGE_SIZE;
            current_base = temp_iovec[i].iov_base;
            if (i == real_first_page) {
#define real_first_page_offset (cleartext_start_loffset - real_start_loffset)
                bytes_read -= real_first_page_offset;
                current_loffset += real_first_page_offset;
                current_base += real_first_page_offset;
#undef real_first_page_offset
            }
            if (current_loffset + bytes_read > cleartext_end_loffset) {
                bytes_read = cleartext_end_loffset - current_loffset;
            }
            if ((error = fist_uiomove(current_base, bytes_read, UIO_WRITE, uiop)))
                break;
        }
        /* update/create VM page with this new/updated data before encoding it */
        wrapfs_fill_page(vp, temp_iovec[i].iov_base, current_loffset);
        /* encode block before writing */
        wrapfs_encode_block(temp_iovec[i].iov_base, temp_iovec[i].iov_base,
                            PAGE_SIZE, vp, vp->v_mount);
#if 0
        if (0)
            wrapfs_fill_lowerpage(hidden_vp, temp_iovec[i].iov_base,
                                  current_loffset);
        if (vp->v_object)
            vm_pager_deallocate(vp->v_object);
#endif
        current_loffset += bytes_read;
    }
    fist_print_uios("WRITE (after for loop 4)", &temp_uio);

    if (va.va_size < end_loffset) {
        if (va.va_size < cleartext_end_loffset)
            resid -= end_loffset - cleartext_end_loffset;
        else
            resid -= end_loffset - va.va_size;
    }

    /* XXX: no need for full initialization here */
    temp_uio.uio_iov = temp_iovec;
    temp_uio.uio_iovcnt = num_pages;
    temp_uio.uio_offset = start_loffset;
    temp_uio.uio_segflg = UIO_SYSSPACE;
    temp_uio.uio_rw = uiop->uio_rw;
    temp_uio.uio_procp = uiop->uio_procp;
    temp_uio.uio_resid = resid;

    /*
     * pass operation to hidden filesystem, and return status
     */
    fist_print_uios("WRITE (before write)", &temp_uio);
    /*
     * XXX: Must do IO_SYNC else zfod pages get written arbitrarily into
     * large files. This slows write performance 5 times!
     */
#ifdef WRITE_SYNC
    error = VOP_WRITE(hidden_vp, &temp_uio, (hidden_ioflag | IO_SYNC), cr);
#else /* not WRITE_SYNC */
    error = VOP_WRITE(hidden_vp, &temp_uio, hidden_ioflag, cr);
#endif /* not WRITE_SYNC */
    fist_print_uios("WRITE (after write)", &temp_uio);

    if (temp_uio.uio_offset < cleartext_end_loffset) {
        /* incomplete write: this case is an error and should not happen */
        uiop->uio_offset = temp_uio.uio_offset;
        uiop->uio_resid = cleartext_end_loffset - temp_uio.uio_offset;
    } else {
        /*
         * we may have written more than what was asked of us to preserve the
         * encoding over a whole page
         */
        uiop->uio_offset = cleartext_end_loffset;
        uiop->uio_resid = 0;
    }
    /* if IO_APPEND was used, return offset of 0 to upper level */
    if (ioflag & IO_APPEND) {
        uiop->uio_offset = 0;
    }

out_free:
    for (i = 0; i < num_pages; i++) {
        fist_dprint(6, "PRINT_BASE1 %d: 0x%x (len=%d)\n",
                    i, temp_iovec[i].iov_base, temp_iovec[i].iov_len);
        fist_dprint(6, "PRINT_BASE2 %d: 0x%x (len=%d)\n",
                    i, free_iovec[i].iov_base, free_iovec[i].iov_len);
        kmem_free(free_iovec[i].iov_base);
    }
    kmem_free(free_iovec);
    kmem_free(temp_iovec);

out:
#ifdef DO_WLOCK
    VOP_UNLOCK(hidden_vp, 0, p);
    vrele(hidden_vp);
#endif
#ifdef FIST_DEBUG
    fist_print_uios("fist_wrapfs_write (END)", uiop);
#endif /* FIST_DEBUG */
    print_location();
    return (error);
}


static int
wrapfs_ioctl(ap)
    struct vop_ioctl_args /* {
        struct vnode *a_vp;
        u_long a_command;
        caddr_t a_data;
        int a_fflag;
        struct ucred *a_cred;
        struct proc *a_p;
    } */ *ap;
{
    int error = EPERM;
    struct vnode *vp = ap->a_vp;
    vnode_t *this_vnode = ap->a_vp;
    struct vnode *lowervp = WRAPFS_VP_TO_LOWERVP(vp);
    int val = 0;
    caddr_t arg = ap->a_data;
    cred_t *cr = curproc->p_cred->pc_ucred;
    vfs_t *this_vfs = this_vnode->v_mount;

    fist_dprint(4, "WRAPFS_IOCTL: vp=0x%x, lowervp=0x%x\n",
                (int) vp, (int) lowervp);

    switch (ap->a_command) {
#ifdef FIST_DEBUG
    case FIST_IOCTL_GET_DEBUG_VALUE:
        val = fist_get_debug_value();
        *(int *) ap->a_data = val;      /* return value via the ioctl data buffer */
        error = 0;
        goto out;
        break;

    case FIST_IOCTL_SET_DEBUG_VALUE:
        val = *(int *) ap->a_data;
        if (val < 0 || val > 20) {
            error = EINVAL;
            goto out;
        }
        fist_dprint(6, "IOCTL: got arg %d\n", val);
        fist_set_debug_value(val);
        error = 0;
        goto out;
        break;
#endif /* FIST_DEBUG */

        /* add non-debugging fist ioctl's here */
FIST_IOCTL_ECLS
    }                           /* end of switch statement */

    /* default to repeating ioctl on lowervp */
    error = VOP_IOCTL(lowervp, ap->a_command, ap->a_data, ap->a_fflag,
                      ap->a_cred, ap->a_p);

out:
    print_location();
    return (error);
}
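The page-alignment arithmetic at the top of wrapfs_write (start_loffset, end_loffset, resid, num_pages) is the part of this code that is easiest to get wrong when porting or modifying the template. The standalone sketch below is not part of vnode.c; it is a minimal userland reproduction of the same mask-and-shift formulas so they can be checked in isolation. The PAGE_SIZE/PAGE_SHIFT values (4096/12), the sample write range, and the omission of the MIN(..., va_size) clamp on the start offset are assumptions made purely for illustration.

/*
 * Standalone sketch (NOT part of vnode.c): reproduces the page-alignment
 * arithmetic used by wrapfs_write.  PAGE_SIZE/PAGE_SHIFT and the sample
 * cleartext write range are assumed values; the real code additionally
 * clamps the start offset to the current file size.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)    /* assumed 4096-byte pages */

int
main(void)
{
    /* assumed example: a 100-byte write at cleartext offset 5000 */
    long long cleartext_start_loffset = 5000;
    long long cleartext_end_loffset = 5000 + 100;

    /* round the start down to a page boundary, as wrapfs_write does */
    long long start_loffset =
        cleartext_start_loffset & ~(long long) (PAGE_SIZE - 1);

    /* round the end down, then add one page if the end is not page-aligned */
    long long end_loffset =
        cleartext_end_loffset & ~(long long) (PAGE_SIZE - 1);
    if (cleartext_end_loffset > end_loffset)
        end_loffset += PAGE_SIZE;

    /* resid covers whole pages; num_pages is how many buffers get allocated */
    int resid = (int) (end_loffset - start_loffset);
    int num_pages = resid >> PAGE_SHIFT;

    printf("start=%lld end=%lld resid=%d pages=%d\n",
           start_loffset, end_loffset, resid, num_pages);
    /* expected output: start=4096 end=8192 resid=4096 pages=1 */
    return 0;
}

Compiled and run on its own, the sketch shows why a 100-byte write at offset 5000 causes wrapfs_write to read, decode, modify, re-encode, and write back one full 4096-byte page: the encoding is applied per page, so partial pages must be rewritten in their entirety.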