📄 direct.c
		memcpy(&first_verf.verifier, &wdata.verf.verifier,
						VERF_SIZE);
		if (wdata.verf.committed != NFS_FILE_SYNC) {
			need_commit = 1;
			if (memcmp(&first_verf.verifier, &wdata.verf.verifier,
					VERF_SIZE))
				goto sync_retry;
		}

		tot_bytes += result;
		wdata.args.offset += result;
		wdata.args.pgbase += result;
		curpage += wdata.args.pgbase >> PAGE_SHIFT;
		wdata.args.pgbase &= ~PAGE_MASK;
		request -= result;
	} while (request != 0);

	/*
	 * Commit data written so far, even in the event of an error
	 */
	if (need_commit) {
		wdata.args.count = tot_bytes;
		wdata.args.offset = file_offset;

		lock_kernel();
		result = NFS_PROTO(inode)->commit(&wdata);
		unlock_kernel();

		if (result < 0 || memcmp(&first_verf.verifier,
					 &wdata.verf.verifier,
					 VERF_SIZE) != 0)
			goto sync_retry;
	}
	result = tot_bytes;

out:
	nfs_end_data_update_defer(inode);

	return result;

sync_retry:
	wdata.args.stable = NFS_FILE_SYNC;
	goto retry;
}

/**
 * nfs_direct_write - For each iov segment, map the user's buffer
 *                    then generate write and commit RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * file_offset: offset in file to begin the operation
 * nr_segs: size of iovec array
 *
 * Upon return, generic_file_direct_IO invalidates any cached pages
 * that non-direct readers might access, so they will pick up these
 * writes immediately.
 */
static int
nfs_direct_write(struct inode *inode, struct nfs_open_context *ctx,
		const struct iovec *iov, loff_t file_offset,
		unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
				file_offset, pages, page_count);
		nfs_free_user_pages(pages, page_count, 0);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}
	return tot_bytes;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * file_offset: offset in file to begin the operation
 * nr_segs: size of iovec array
 *
 */
ssize_t
nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t result = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;

	/*
	 * No support for async yet
	 */
	if (!is_sync_kiocb(iocb))
		return result;

	ctx = (struct nfs_open_context *)file->private_data;
	switch (rw) {
	case READ:
		dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_read(inode, ctx, iov,
				file_offset, nr_segs);
		break;
	case WRITE:
		dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_write(inode, ctx, iov,
				file_offset, nr_segs);
		break;
	default:
		break;
	}
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * count: number of bytes to read
 * pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. So our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t
nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct dentry *dentry = file->f_dentry;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = count,
	};

	dprintk("nfs: direct read(%s/%s, %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long) pos);

	if (!is_sync_kiocb(iocb))
		goto out;
	if (count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	if (mapping->nrpages) {
		retval = filemap_fdatawrite(mapping);
		if (retval == 0)
			retval = filemap_fdatawait(mapping);
		if (retval)
			goto out;
	}

	retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
	if (retval > 0)
		*ppos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * count: number of bytes to write
 * pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	loff_t *ppos = &iocb->ki_pos;
	unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct dentry *dentry = file->f_dentry;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = (char __user *)buf,
		.iov_len = count,
	};

	dfprintk(VFS, "nfs: direct write(%s/%s(%ld), %lu@%lu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		inode->i_ino, (unsigned long) count, (unsigned long) pos);

	if (!is_sync_kiocb(iocb))
		goto out;
	if (count < 0)
		goto out;
	if (pos < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
		goto out;
	if (file->f_error) {
		retval = file->f_error;
		file->f_error = 0;
		goto out;
	}
	retval = -EFBIG;
	if (limit != RLIM_INFINITY) {
		if (pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			goto out;
		}
		if (count > limit - (unsigned long) pos)
			count = limit - (unsigned long) pos;
	}
	retval = 0;
	if (!count)
		goto out;

	if (mapping->nrpages) {
		retval = filemap_fdatawrite(mapping);
		if (retval == 0)
			retval = filemap_fdatawait(mapping);
		if (retval)
			goto out;
	}

	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);
	if (retval > 0)
		*ppos = pos + retval;

out:
	return retval;
}
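
For reference, the path above is driven by applications doing O_DIRECT I/O on NFS files: on kernels of this vintage the NFS read/write file operations hand O_DIRECT requests to nfs_file_direct_read() and nfs_file_direct_write(), which only handle synchronous iocbs and, as the comment above notes, do not support O_APPEND. The user-space sketch below is a minimal illustration, not part of this file; the mount point /mnt/nfs, the file name, and the 4 KiB alignment are assumptions for the example rather than values taken from direct.c.

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BUF_SIZE 4096		/* one page; a conservative O_DIRECT alignment */

int main(void)
{
	void *buf;
	ssize_t n;
	/* /mnt/nfs is assumed to be an NFS mount. */
	int fd = open("/mnt/nfs/data.bin", O_RDWR | O_CREAT | O_DIRECT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* O_DIRECT transfers generally want an aligned user buffer. */
	if (posix_memalign(&buf, BUF_SIZE, BUF_SIZE)) {
		close(fd);
		return 1;
	}
	memset(buf, 'x', BUF_SIZE);

	/* Bypasses the client page cache; expected to reach nfs_file_direct_write(). */
	n = pwrite(fd, buf, BUF_SIZE, 0);
	printf("wrote %zd bytes\n", n);

	/* And the read side is expected to reach nfs_file_direct_read(). */
	n = pread(fd, buf, BUF_SIZE, 0);
	printf("read %zd bytes\n", n);

	free(buf);
	close(fd);
	return 0;
}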
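
The commit handling in nfs_direct_write_seg() at the top of this listing follows NFSv3 write semantics: any reply committed at less than FILE_SYNC means a COMMIT is owed later, and a write verifier that no longer matches the first reply's verifier means the server may have rebooted and discarded unstable data, so the transfer is retried with stable = NFS_FILE_SYNC. The stand-alone sketch below restates only that decision; struct write_reply, WRITEVERF_SIZE, and must_resend_stable() are made up for illustration and are not kernel definitions.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define WRITEVERF_SIZE 8	/* verifier size assumed for this sketch */

enum stable_how { UNSTABLE = 0, DATA_SYNC = 1, FILE_SYNC = 2 };

struct write_reply {
	enum stable_how committed;		/* how the server committed this WRITE */
	unsigned char verf[WRITEVERF_SIZE];	/* server's write verifier */
};

/*
 * Decision taken after each WRITE reply: weaker than FILE_SYNC means a
 * COMMIT is needed later; a verifier change relative to the first reply
 * means unstable data may have been lost, so resend everything stably
 * (the "goto sync_retry" case above).
 */
bool must_resend_stable(const struct write_reply *first,
			const struct write_reply *latest, bool *need_commit)
{
	if (latest->committed != FILE_SYNC) {
		*need_commit = true;
		if (memcmp(first->verf, latest->verf, WRITEVERF_SIZE) != 0)
			return true;
	}
	return false;
}

int main(void)
{
	struct write_reply first = { UNSTABLE, "AAAAAAA" };
	struct write_reply later = { UNSTABLE, "BBBBBBB" };
	bool need_commit = false;
	bool resend = must_resend_stable(&first, &later, &need_commit);

	printf("resend stable: %d, need commit: %d\n", resend, need_commit);
	return 0;
}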