📄 file.c
}

static int nfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	int status;

	lock_kernel();
	status = nfs_updatepage(file, page, offset, copied);
	unlock_kernel();

	unlock_page(page);
	page_cache_release(page);

	return status < 0 ? status : copied;
}

static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
	if (offset != 0)
		return;
	/* Cancel any unstarted writes on this page */
	nfs_wb_page_cancel(page->mapping->host, page);
}

static int nfs_release_page(struct page *page, gfp_t gfp)
{
	/* If PagePrivate() is set, then the page is not freeable */
	return 0;
}

static int nfs_launder_page(struct page *page)
{
	return nfs_wb_page(page->mapping->host, page);
}

const struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.write_begin = nfs_write_begin,
	.write_end = nfs_write_end,
	.invalidatepage = nfs_invalidate_page,
	.releasepage = nfs_release_page,
#ifdef CONFIG_NFS_DIRECTIO
	.direct_IO = nfs_direct_IO,
#endif
	.launder_page = nfs_launder_page,
};

static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct file *filp = vma->vm_file;
	unsigned pagelen;
	int ret = -EINVAL;
	void *fsdata;
	struct address_space *mapping;
	loff_t offset;

	lock_page(page);
	mapping = page->mapping;
	if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping) {
		unlock_page(page);
		return -EINVAL;
	}
	pagelen = nfs_page_length(page);
	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	unlock_page(page);

	/*
	 * we can use mapping after releasing the page lock, because:
	 * we hold mmap_sem on the fault path, which should pin the vma
	 * which should pin the file, which pins the dentry which should
	 * hold a reference on inode.
	 */

	if (pagelen) {
		struct page *page2 = NULL;

		ret = nfs_write_begin(filp, mapping, offset, pagelen,
				0, &page2, &fsdata);
		if (!ret)
			ret = nfs_write_end(filp, mapping, offset, pagelen,
					pagelen, page2, fsdata);
	}
	return ret;
}

static struct vm_operations_struct nfs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = nfs_vm_page_mkwrite,
};

static int nfs_need_sync_write(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx;

	if (IS_SYNC(inode) || (filp->f_flags & O_SYNC))
		return 1;
	ctx = nfs_file_open_context(filp);
	if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
		return 1;
	return 0;
}

static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
	struct inode * inode = dentry->d_inode;
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);

#ifdef CONFIG_NFS_DIRECTIO
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
#endif

	dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%Ld)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		inode->i_ino, (unsigned long) count, (long long) pos);

	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_filp->f_flags & O_APPEND) {
		result = nfs_revalidate_file_size(inode, iocb->ki_filp);
		if (result)
			goto out;
	}

	result = count;
	if (!count)
		goto out;

	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
	/* Return error values for O_SYNC and IS_SYNC() */
	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
		if (err < 0)
			result = err;
	}
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}

static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;

	lock_kernel();
	/* Try local locking first */
	posix_test_lock(filp, fl);
	if (fl->fl_type != F_UNLCK) {
		/* found a conflict */
		goto out;
	}

	if (nfs_have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	unlock_kernel();
	return status;
out_noconflict:
	fl->fl_type = F_UNLCK;
	goto out;
}

static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;

	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(file, fl);
			break;
		default:
			BUG();
	}
	if (res < 0)
		dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
			" - error %d!\n", __FUNCTION__, res);
	return res;
}

static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	nfs_sync_mapping(filp->f_mapping);

	/* NOTE: special case
	 *	If we're signalled while cleaning up locks on process exit, we
	 *	still need to complete the unlock.
	 */
	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	return status;
}

static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
		/* If we were signalled we still need to ensure that
		 * we clean up any state on the server. We therefore
		 * record the lock call as having succeeded in order to
		 * ensure that locks_remove_posix() cleans it out when
		 * the process exits.
		 */
		if (status == -EINTR || status == -ERESTARTSYS)
			do_vfs_lock(filp, fl);
	} else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	if (status < 0)
		goto out;
	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	nfs_zap_caches(inode);
out:
	return status;
}

/*
 * Lock a (portion of) a file
 */
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode * inode = filp->f_mapping->host;

	dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
			inode->i_sb->s_id, inode->i_ino,
			fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);

	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	/* No mandatory locks over NFS */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return do_getlk(filp, cmd, fl);
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}

/*
 * Lock a (portion of) a file
 */
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	dprintk("NFS: nfs_flock(f=%s/%ld, t=%x, fl=%x)\n",
			filp->f_path.dentry->d_inode->i_sb->s_id,
			filp->f_path.dentry->d_inode->i_ino,
			fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* We're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t)filp;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl);
	return do_setlk(filp, cmd, fl);
}

static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
{
	/*
	 * There is no protocol support for leases, so we have no way
	 * to implement them correctly in the face of opens by other
	 * clients.
	 */
	return -EINVAL;
}
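For reference, the lock entry points above are driven by ordinary fcntl() locking from userspace: nfs_lock() routes F_GETLK requests to do_getlk(), unlocks to do_unlk(), and everything else to do_setlk(), and per the comment in do_setlk() a successful lock also acts as a cache coherency point. The sketch below is a minimal userspace illustration of that path under stated assumptions: it is not part of file.c, and the mount path it opens is hypothetical.

/*
 * Minimal userspace sketch (not part of file.c): take and release a
 * whole-file POSIX write lock with fcntl(). On an NFS mount this request
 * reaches nfs_lock() above and is routed to do_setlk()/do_unlk().
 * The path "/mnt/nfs/data" is a hypothetical example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd = open("/mnt/nfs/data", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;	/* exclusive (write) lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;		/* 0 means "to end of file" */

	if (fcntl(fd, F_SETLKW, &fl) < 0) {	/* blocks until granted */
		perror("fcntl(F_SETLKW)");
		close(fd);
		return 1;
	}

	/* ... read or write the locked range here ... */

	fl.l_type = F_UNLCK;	/* unlock goes through do_unlk() above */
	if (fcntl(fd, F_SETLK, &fl) < 0)
		perror("fcntl(F_UNLCK)");

	close(fd);
	return 0;
}

When the filesystem is mounted with the "nolock" option (NFS_MOUNT_NONLM above), the same request is satisfied locally through do_vfs_lock() instead of being sent to the server's lock manager.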