vnode.c
              (int) cleartext_end_loffset, (int) real_start_loffset,
              resid, num_pages, real_first_page);

  current_loffset = start_loffset;
  page_loffset = start_loffset;

  /* read first block XXX check length of file */
  temp_uio.uio_iov = temp_iovec;
  temp_uio.uio_iovcnt = 1;
  temp_uio.uio_offset = start_loffset;
  temp_uio.uio_segflg = UIO_SYSSPACE;
  temp_uio.uio_rw = UIO_READ;
  temp_uio.uio_procp = uiop->uio_procp;
  temp_uio.uio_resid = first_page_bytes;

  fist_print_uios("WRITE (before VOP_READ 1)", &temp_uio);
  error = VOP_READ(hidden_vp, &temp_uio, hidden_ioflag, cr);
  if (error) {
    fist_dprint(5, "VOP_READ returned error - not good\n");
    /* XXX to be checked */
    goto out_free;
  }
  fist_print_uios("WRITE (after VOP_READ 1)", &temp_uio);

  bytes_read = PAGE_SIZE - temp_iovec[0].iov_len;
  temp_iovec[0].iov_base -= bytes_read;
  temp_iovec[0].iov_len = PAGE_SIZE;

  /* decode block read */
  wrapfs_decode_block(temp_iovec[0].iov_base, temp_iovec[0].iov_base,
                      bytes_read, vp, vp->v_mount,
                      OFF_TO_IDX(start_loffset));

  /*
   * if num_pages == 1, we already read the page... don't clobber it.
   * if num_pages > 1, then we must read the last page, and decode it
   * completely, before clobbering it.
   * XXX: if the end offset is on a page boundary, we don't have to do this.
   */
  if ((num_pages > 1) && (va.va_size > (end_loffset - PAGE_SIZE))) {
    /* read last block XXX check length of file */
    temp_uio.uio_iov = temp_iovec + (num_pages - 1);
    temp_uio.uio_iovcnt = 1;
    temp_uio.uio_offset = end_loffset - PAGE_SIZE;
    temp_uio.uio_segflg = UIO_SYSSPACE;
    temp_uio.uio_rw = UIO_READ;
    temp_uio.uio_procp = uiop->uio_procp;
    temp_uio.uio_resid = PAGE_SIZE;

    fist_print_uios("WRITE (before VOP_READ 2)", &temp_uio);
    error = VOP_READ(hidden_vp, &temp_uio, hidden_ioflag, cr);
    fist_print_uios("WRITE (after VOP_READ 2)", &temp_uio);
    if (error) {
      fist_dprint(4, "VOP_READ returned error - not good\n");
      /* XXX to be checked */
      goto out_free;
    }

    bytes_read = PAGE_SIZE - temp_iovec[num_pages - 1].iov_len;
    temp_iovec[num_pages - 1].iov_base -= bytes_read;
    temp_iovec[num_pages - 1].iov_len = PAGE_SIZE;

    /* decode block read */
    wrapfs_decode_block(temp_iovec[num_pages - 1].iov_base,
                        temp_iovec[num_pages - 1].iov_base,
                        bytes_read, vp, vp->v_mount,
                        OFF_TO_IDX(end_loffset - PAGE_SIZE));
  }

  /*
   * update the size of the vm object associated with both
   * the upper and lower vnode
   */
  if (cleartext_end_loffset > va.va_size) {
    vnode_pager_setsize(vp, cleartext_end_loffset);
    vnode_pager_setsize(hidden_vp, cleartext_end_loffset);
  }

  /*
   * Now we are ready to write the bytes within the start/end
   * cleartext offsets in the buffers we allocated.
   */
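  /*
   * In outline, the loop below walks every page buffer in
   * [start_loffset, end_loffset).  For pages at or after real_first_page it
   * copies the caller's cleartext bytes in with fist_uiomove, offset by
   * real_first_page_offset on the first such page and truncated at
   * cleartext_end_loffset on the last.  Each buffer is then handed to
   * wrapfs_fill_page for the upper vnode (presumably to keep its cached
   * pages coherent), encoded in place by wrapfs_encode_block, and handed to
   * wrapfs_fill_page for the lower vnode, so that the single page-aligned
   * VOP_WRITE further below can push whole encoded pages to hidden_vp.
   */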
  for (i = 0; i < num_pages; i++) {
    if (i >= real_first_page) {
      bytes_read = PAGE_SIZE;
      current_base = temp_iovec[i].iov_base;
      if (i == real_first_page) {
#define real_first_page_offset (cleartext_start_loffset - real_start_loffset)
        bytes_read -= real_first_page_offset;
        current_loffset += real_first_page_offset;
        current_base += real_first_page_offset;
#undef real_first_page_offset
      }
      if (current_loffset + bytes_read > cleartext_end_loffset) {
        bytes_read = cleartext_end_loffset - current_loffset;
      }
      if ((error = fist_uiomove(current_base, bytes_read, UIO_WRITE, uiop)))
        break;
    }
    wrapfs_fill_page(vp, temp_iovec[i].iov_base, page_loffset);
    wrapfs_encode_block(temp_iovec[i].iov_base, temp_iovec[i].iov_base,
                        PAGE_SIZE, vp, vp->v_mount, OFF_TO_IDX(page_loffset));
    wrapfs_fill_page(hidden_vp, temp_iovec[i].iov_base, page_loffset);
    current_loffset += bytes_read;
    page_loffset += PAGE_SIZE;
  }
  fist_print_uios("WRITE (after for loop 4)", &temp_uio);

  if (va.va_size < end_loffset) {
    if (va.va_size < cleartext_end_loffset)
      resid -= end_loffset - cleartext_end_loffset;
    else
      resid -= end_loffset - va.va_size;
  }

  /* XXX: no need for full initialization here */
  temp_uio.uio_iov = temp_iovec;
  temp_uio.uio_iovcnt = num_pages;
  temp_uio.uio_offset = start_loffset;
  temp_uio.uio_segflg = UIO_SYSSPACE;
  temp_uio.uio_rw = uiop->uio_rw;
  temp_uio.uio_procp = uiop->uio_procp;
  temp_uio.uio_resid = resid;

  /*
   * pass operation to hidden filesystem, and return status
   */
  fist_print_uios("WRITE (before write)", &temp_uio);
  error = VOP_WRITE(hidden_vp, &temp_uio, hidden_ioflag, cr);
  fist_print_uios("WRITE (after write)", &temp_uio);

  if (temp_uio.uio_offset < cleartext_end_loffset) {
    /* incomplete write: this case is an error and should not happen */
    uiop->uio_offset = temp_uio.uio_offset;
    uiop->uio_resid = cleartext_end_loffset - temp_uio.uio_offset;
  } else {
    /*
     * we may have written more than what was asked of us to preserve the
     * encoding over a whole page
     */
    uiop->uio_offset = cleartext_end_loffset;
    uiop->uio_resid = 0;
  }

  /* if IO_APPEND was used, return offset of 0 to upper level */
  if (ioflag & IO_APPEND) {
    uiop->uio_offset = 0;
  }

out_free:
  for (i = 0; i < num_pages; i++) {
    fist_dprint(6, "PRINT_BASE1 %d: 0x%x (len=%d)\n",
                i, temp_iovec[i].iov_base, temp_iovec[i].iov_len);
    fist_dprint(6, "PRINT_BASE2 %d: 0x%x (len=%d)\n",
                i, free_iovec[i].iov_base, free_iovec[i].iov_len);
    kmem_free(free_iovec[i].iov_base);
  }
  kmem_free(free_iovec);
  kmem_free(temp_iovec);

out:
#ifdef FIST_DEBUG
  fist_print_uios("fist_wrapfs_write (END)", uiop);
#endif /* FIST_DEBUG */
  print_location();
  return (error);
}
#endif /* FIST_FILTER_DATA */

static int
wrapfs_ioctl(ap)
     struct vop_ioctl_args /* {
                              struct vnode *a_vp;
                              u_long a_command;
                              caddr_t a_data;
                              int a_fflag;
                              struct ucred *a_cred;
                              struct proc *a_p;
                            } */ *ap;
{
  int error = 0;
  struct vnode *vp = ap->a_vp;
  vnode_t *this_vnode = ap->a_vp;
  struct vnode *lowervp = WRAPFS_VP_TO_LOWERVP(vp);
  int val = 0;
  caddr_t arg = ap->a_data;
  cred_t *cr = curproc->p_cred->pc_ucred;
  vfs_t *this_vfs = this_vnode->v_mount;

  fist_dprint(4, "WRAPFS_IOCTL: vp=0x%x, lowervp=0x%x\n",
              (int) vp, (int) lowervp);

  switch (ap->a_command) {
#ifdef FIST_DEBUG
  case FIST_IOCTL_GET_DEBUG_VALUE:
    val = fist_get_debug_value();
    *(int *) ap->a_data = val;  /* a_data carries an int-sized argument */
    error = 0;
    goto out;
    break;
  case FIST_IOCTL_SET_DEBUG_VALUE:
    val = *(int *) ap->a_data;
    if (val < 0 || val > 20) {
      error = EINVAL;
      goto out;
    }
    fist_dprint(6, "IOCTL: got arg %d\n", val);
    fist_set_debug_value(val);
    error = 0;
    goto out;
    break;
#endif /* FIST_DEBUG */

    /* add non-debugging fist ioctls here */
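    /*
     * A hypothetical sketch of what a non-debugging ioctl case added at
     * this point could look like; FIST_IOCTL_GET_PAGE_SIZE is not defined
     * anywhere in this code and is shown only as an illustration of the
     * pattern used by the debug cases above:
     *
     *   case FIST_IOCTL_GET_PAGE_SIZE:
     *     *(int *) ap->a_data = PAGE_SIZE;
     *     error = 0;
     *     goto out;
     *     break;
     */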
FIST_IOCTL_ECLS

  default:
    /* default to repeating ioctl on lowervp */
    error = VOP_IOCTL(lowervp, ap->a_command, ap->a_data,
                      ap->a_fflag, ap->a_cred, ap->a_p);
  }                             /* end of switch statement */

out:
  print_location();
  return (error);
}

#ifdef FIST_FILTER_DATA
/****************************************************************************/
/*
 * get page routine
 */
#ifndef FIST_COHERENCY
static int
wrapfs_getpages(ap)
     struct vop_getpages_args /* {
                                 struct vnode *a_vp;
                                 vm_page_t *a_m;
                                 int a_count;
                                 int a_reqpage;
                                 vm_ooffset_t a_offset;
                               } */ *ap;
{
  int error = VM_PAGER_ERROR;
  struct vnode *vp = ap->a_vp;
  struct vnode *lowervp = WRAPFS_VP_TO_LOWERVP(vp);
  int reqpage = ap->a_reqpage;
  int bytecount = ap->a_count;
  vm_page_t thispp;
  int i, resid;
  int pagecount = round_page(bytecount) / PAGE_SIZE;
  vm_offset_t thiskva;
  caddr_t thisca;
  uio_t temp_uio;
  iovec_t *free_iovecp, *free_iovarr;
  cred_t *cr = curproc->p_ucred;

  fist_dprint(2, "WRAPFS_GETPAGES: vp=0x%x, lowervp=0x%x, m=0x%x, pages=%d, bytes=%d, reqpage=%d, offset=%d\n", /* XXX: change level to 4 */
              (int) vp, (int) lowervp, (int) ap->a_m,
              pagecount, bytecount, reqpage, (int) ap->a_offset);

  wrapfs_verify_lower_object(vp, cr, curproc, __FUNCTION__);

  /* prepare for a VOP_READ of the lowervp data into temporary buffers,
     to be decoded into this vp's pages below */
  temp_uio.uio_resid = bytecount;
  temp_uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
  temp_uio.uio_segflg = UIO_SYSSPACE;
  temp_uio.uio_rw = UIO_READ;
  temp_uio.uio_procp = curproc;
  temp_uio.uio_iovcnt = pagecount;
  temp_uio.uio_iov = free_iovarr =
    (iovec_t *) kmem_zalloc(pagecount * sizeof(iovec_t));
  free_iovecp = (iovec_t *) kmem_zalloc(pagecount * sizeof(iovec_t));
  if (!temp_uio.uio_iov || !free_iovecp) {
    error = VM_PAGER_AGAIN;
    printf("GetPages: no more memory for temp uio\n");
    goto out;
  }

  /* setup pointers to each page to read */
  for (i = 0; i < pagecount; i++) {
    temp_uio.uio_iov[i].iov_base = free_iovecp[i].iov_base =
      kmem_zalloc(PAGE_SIZE);
    if (!free_iovecp[i].iov_base) {
      error = VM_PAGER_AGAIN;
      printf("GetPages: no more memory for temp iovecs\n");
      goto out;
    }
    temp_uio.uio_iov[i].iov_len = free_iovecp[i].iov_len = PAGE_SIZE;
  }

  /* do the actual VOP_READ */
  error = VOP_READ(lowervp, &temp_uio, (IO_VMIO | IO_SYNC), curproc->p_ucred);
  if (error) {
    printf("GETPAGES: read on behalf of vmio failed with error %d\n", error);
    error = VM_PAGER_ERROR;
    goto out;
  }

  /* if residual is non-zero, do nothing since rest of pages were zalloc'ed */
  resid = temp_uio.uio_resid;
  if (resid > PAGE_SIZE)
    panic("GETPAGES: temp_uio.uio_resid is %d > PAGE_SIZE\n", resid);

  /* copy and decode all the bytes */
  for (i = 0; i < pagecount; i++) {
    thispp = ap->a_m[i];
    thiskva = vm_pager_map_page(thispp);
    thisca = (caddr_t) thiskva;
    wrapfs_decode_block(free_iovecp[i].iov_base, thisca, PAGE_SIZE,
                        vp, vp->v_mount, ap->a_m[i]->pindex);
    vm_pager_unmap_page(thiskva);
  }
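  /*
   * Note that the VOP_READ above lands in the kmem_zalloc'ed temporary
   * buffers rather than in the VM pages themselves: the lower file holds
   * encoded data, so each page's cleartext exists only after
   * wrapfs_decode_block has run over the corresponding buffer.  A short
   * read is tolerated because the buffers were zero-filled at allocation
   * (see the residual check above).
   */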
  /* set status of pages to valid and non-busy as needed */
  for (i = 0; i < pagecount; i++) {
    thispp = ap->a_m[i];
    thispp->valid = VM_PAGE_BITS_ALL;
    if (i == reqpage)
      thispp->flags |= PG_BUSY;         /* requested page must be busy */
    else
      thispp->flags &= ~PG_BUSY;        /* XXX: others need not? */
    fist_print_page("getpages: thispp0", thispp);
  }
  error = VM_PAGER_OK;

out:
  if (free_iovecp) {
    for (i = 0; i < pagecount; i++)
      if (free_iovecp[i].iov_base)
        kmem_free(free_iovecp[i].iov_base);
    kmem_free(free_iovecp);
  }
  if (free_iovarr)
    kmem_free(free_iovarr);
  print_location();
  return (error);
}

/*
 * put page routine
 * XXX: ap->a_offset is always ignored
 */
static int
wrapfs_putpages(ap)
     struct vop_putpages_args /* {
                                 struct vnode *a_vp;
                                 vm_page_t *a_m;
                                 int a_count;
                                 int a_sync;
                                 int *a_rtvals;
                                 vm_ooffset_t a_offset;
                               } */ *ap;
{
  int error;
  struct vnode *vp = ap->a_vp;
  struct vnode *lowervp = WRAPFS_VP_TO_LOWERVP(vp);
  int bytecount = ap->a_count;
  int pagecount = round_page(bytecount) / PAGE_SIZE;
  vm_page_t thispp;
  int i, resid;
  vm_offset_t thiskva;
  caddr_t thisca;
  uio_t temp_uio;
  iovec_t *free_iovecp, *free_iovarr;
  cred_t *cr = curproc->p_ucred;

  fist_dprint(2, "WRAPFS_PUTPAGES: vp=0x%x, lowervp=0x%x, sync=0x%x\n",
              (int) vp, (int) lowervp, ap->a_sync);

  /* prepare for a VOP_WRITE of this vp's page data, encoded into
     temporary buffers, down to the lowervp */
  temp_uio.uio_resid = bytecount;
  temp_uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
  temp_uio.uio_segflg = UIO_SYSSPACE;
  temp_uio.uio_rw = UIO_WRITE;
  temp_uio.uio_procp = curproc;
  temp_uio.uio_iovcnt = pagecount;
  temp_uio.uio_iov = free_iovarr =
    (iovec_t *) kmem_zalloc(pagecount * sizeof(iovec_t));
  free_iovecp = (iovec_t *) kmem_zalloc(pagecount * sizeof(iovec_t));
  if (!temp_uio.uio_iov || !free_iovecp) {
    error = VM_PAGER_AGAIN;
    printf("PutPages: no more memory for temp uio\n");
    goto out;
  }

  /* setup pointers to each page to write */
  for (i = 0; i < pagecount; i++) {
    temp_uio.uio_iov[i].iov_base = free_iovecp[i].iov_base =
      kmem_zalloc(PAGE_SIZE);
    if (!free_iovecp[i].iov_base) {
      error = VM_PAGER_AGAIN;
      printf("PutPages: no more memory for temp iovecs\n");
      goto out;
    }
    temp_uio.uio_iov[i].iov_len = free_iovecp[i].iov_len = PAGE_SIZE;
  }

  /* copy and encode all the bytes */
  for (i = 0; i < pagecount; i++) {
    thispp = ap->a_m[i];
    thiskva = vm_pager_map_page(thispp);
    thisca = (caddr_t) thiskva;
    wrapfs_encode_block(thisca, free_iovecp[i].iov_base, PAGE_SIZE,
                        vp, vp->v_mount, ap->a_m[i]->pindex);
    vm_pager_unmap_page(thiskva);
  }

  /* do the actual VOP_WRITE */
  error = VOP_WRITE(lowervp, &temp_uio, (IO_VMIO | IO_SYNC), curproc->p_ucred);
  if (error) {
    printf("PUTPAGES: write on behalf of vmio failed with error %d\n", error);
    error = VM_PAGER_ERROR;
    goto out;
  }

  /* if residual is non-zero, do nothing since rest of pages were zalloc'ed */
  resid = temp_uio.uio_resid;
  if (resid > PAGE_SIZE)
    panic("PUTPAGES: temp_uio.uio_resid is %d > PAGE_SIZE\n", resid);

  /* set status of pages to valid and non-busy as needed */
  for (i = 0; i < pagecount; i++) {
    thispp = ap->a_m[i];
    thispp->valid = VM_PAGE_BITS_ALL;
    thispp->flags &= ~PG_BUSY;
    fist_print_page("putpages: thispp0", thispp);
  }