📄 file.c
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct dentry *dentry = file->f_dentry;
#else
	struct dentry *dentry = file->f_path.dentry;
#endif
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate(dentry);
	if (rc) {
		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data,
	struct pagevec *plru_pvec)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, ("Add page cache failed"));
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (!pagevec_add(plru_pvec, page))
			__pagevec_lru_add(plru_pvec);
		data += PAGE_CACHE_SIZE;
	}
	return;
}
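/*
 * Note (illustrative comment, not part of the original file):
 * cifs_readpages() below issues a single SMB READ covering a run of
 * contiguous pages, then hands the raw response to
 * cifs_copy_cache_pages() above. The payload begins past the 4-byte
 * RFC1001 length header plus the DataOffset reported in the READ
 * response, which is why the caller passes
 *
 *	smb_read_data + 4 + le16_to_cpu(pSMBr->DataOffset)
 *
 * as the data pointer.
 */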
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
#else
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
#endif
	pTcon = cifs_sb->tcon;

	pagevec_init(&lru_pvec, 0);
#ifdef CONFIG_CIFS_DEBUG2
	cFYI(1, ("rpages: num pages %d", num_pages));
#endif
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
#ifdef CONFIG_CIFS_DEBUG2
		cFYI(1, ("rpages: read size 0x%x contiguous pages %d",
			 read_size, contig_pages));
#endif
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(
							smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, ("Read error in readpages: %d", rc));
			break;
		} else if (bytes_read > 0) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19)
			task_io_account_read(bytes_read);
#endif
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, ("No bytes read (%d) at offset %lld . "
				 "Cleaning remaining pages from readahead list",
				 bytes_read, offset));
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	pagevec_lru_add(&lru_pvec);

	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

	FreeXid(xid);
	return rc;
}
#endif

static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, ("Bytes read %d", rc));

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	file->f_dentry->d_inode->i_atime =
		current_fs_time(file->f_dentry->d_inode->i_sb);
#else
	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);
#endif

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	cFYI(1, ("readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset));

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
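/*
 * Note (illustrative sketch, not part of the original file): the VFS
 * invokes ->readpage with the page already locked; cifs_readpage()
 * fills it synchronously via cifs_readpage_worker() and unlocks it
 * before returning, so on return the page is either Uptodate or the
 * error is in the return code. A minimal caller might look roughly
 * like this, assuming a locked page from find_or_create_page():
 *
 *	struct page *page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (page)
 *		rc = mapping->a_ops->readpage(file, page);
 *
 * This sketches the calling convention only; it is not kernel code.
 */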
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			read_unlock(&GlobalSMBSeslock);
			return 1;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending the file -
   in the future we could consider allowing refreshing the inode only
   on increases in the file size but this is tricky to do without
   racing with writebehind page caching in the current Linux kernel
   design */
int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return 1;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)
		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
#else
		cifs_sb = CIFS_SB(open_file->pfile->f_dentry->d_inode->i_sb);
#endif
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return 1;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return 1;

		return 0;
	} else
		return 1;
}
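/*
 * Note (illustrative, adapted from the inode revalidation path in
 * fs/cifs/inode.c of the same era; names may differ across kernel
 * versions): callers check is_size_safe_to_change() before trusting a
 * server-reported end of file, roughly:
 *
 *	if (is_size_safe_to_change(cifsInfo,
 *				   le64_to_cpu(pfindData->EndOfFile)))
 *		i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));
 *
 * where pfindData holds the server's query response.
 */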
static int cifs_prepare_write(struct file *file, struct page *page,
	unsigned from, unsigned to)
{
	int rc = 0;
	loff_t i_size;
	loff_t offset;

	cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
	if (PageUptodate(page))
		return 0;

	/* If we are writing a full page it will be up to date,
	   no need to read from the server */
	if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
		SetPageUptodate(page);
		return 0;
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	i_size = i_size_read(page->mapping->host);

	if ((offset >= i_size) ||
	    ((from == 0) && (offset + to) >= i_size)) {
		/*
		 * We don't need to read data beyond the end of the file.
		 * zero it, and set the page uptodate
		 */
		void *kaddr = kmap_atomic(page, KM_USER0);

		if (from)
			memset(kaddr, 0, from);
		if (to < PAGE_CACHE_SIZE)
			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		SetPageUptodate(page);
	} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/* might as well read a page, it is fast enough */
		rc = cifs_readpage_worker(file, page, &offset);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by commit_write so is fine */
	}

	/* we do not need to pass errors back
	   e.g. if we do not have read access to the file
	   because cifs_commit_write will do the right thing.  -- shaggy */

	return 0;
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)
	.readpages = cifs_readpages,
#endif
	.writepage = cifs_writepage,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 14)
	.writepages = cifs_writepages,
#endif
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)
	.set_page_dirty = __set_page_dirty_nobuffers,
#endif
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 14)
	.writepages = cifs_writepages,
#endif
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0)
	.set_page_dirty = __set_page_dirty_nobuffers,
#endif
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};
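/*
 * Note (illustrative, adapted from fs/cifs/inode.c of the same era;
 * exact names may vary): inode setup chooses between the two operation
 * tables based on whether the server's negotiated buffer can hold a
 * full page plus the CIFS header, roughly:
 *
 *	if (pTcon->ses->server->maxBuf <
 *	    PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */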