⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 file.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
	/*
	 * NOTE(review): this is the tail of a function whose signature lies
	 * above this excerpt.  It copies the SMB payload to the caller's
	 * buffer with copy_to_user(), so it is presumably the direct
	 * userspace read path (cifs_user_read) -- confirm against the
	 * full file.
	 */
	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	/* no cifsFileInfo means the file was never successfully opened */
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		cFYI(1, ("attempting read on write only file instance"));
	}

	/* read in chunks of at most cifs_sb->rsize until the request is
	   satisfied or the server returns an error / zero bytes */
	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		rc = -EAGAIN;
		smb_read_data = NULL;
		/* retry loop: -EAGAIN from CIFSSMBRead means the request
		   should be reissued (e.g. after session reconnect) */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			/* reopen a handle invalidated by reconnect, unless a
			   close is already pending on it */
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			if (smb_read_data) {
				/* payload starts after the 4-byte RFC1001
				   length field plus the DataOffset recorded
				   in the read response header */
				if (copy_to_user(current_offset,
						smb_read_data +
						4 /* RFC1001 length field */ +
						le16_to_cpu(pSMBr->DataOffset),
						bytes_read)) {
					rc = -EFAULT;
				}
				/* release the response buffer to whichever
				   pool (small/large) it came from */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(smb_read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(smb_read_data);
				smb_read_data = NULL;
			}
		}
		if (rc || (bytes_read == 0)) {
			/* partial success: report what we already read;
			   nothing read at all: propagate the error */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

/*
 * Read read_size bytes at *poffset into the kernel buffer read_data
 * (kernel-address variant: CIFSSMBRead writes straight into
 * current_offset, no intermediate SMB buffer copy).  Returns the number
 * of bytes read, or a negative errno if nothing could be read.
 * Advances *poffset by the bytes actually read.
 */
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
	loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For windows me and 9x we do not want to request more
		than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			/* NOTE(review): the user-copy variant above passes
			   bytes_read here; total_read is the running sum, so
			   passing it on every iteration looks like it
			   over-counts the byte statistics -- verify against
			   cifs_stats_bytes_read() semantics */
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

/*
 * mmap entry point: revalidate cached inode metadata against the server
 * first, then fall through to the generic page-cache-backed mmap.
 */
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dentry *dentry = file->f_path.dentry;
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate(dentry);
	if (rc) {
		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}

/*
 * Distribute bytes_read bytes of contiguous response data across the
 * pages queued on *pages, adding each filled page to the page cache and
 * batching LRU insertion through *plru_pvec.  A page that cannot be
 * added to the page cache is dropped but the data pointer still
 * advances, keeping later pages aligned with their file offsets.
 */
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data,
	struct pagevec *plru_pvec)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, ("Add page cache failed"));
			/* still consume this page's worth of data so the
			   remaining pages line up with their offsets */
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (!pagevec_add(plru_pvec, page))
			__pagevec_lru_add(plru_pvec);
		data += PAGE_CACHE_SIZE;
	}
	return;
}

/*
 * ->readpages: batch readahead.  Groups adjacent pages from page_list
 * into one SMB read of up to rsize (rounded down to whole pages), then
 * hands the payload to cifs_copy_cache_pages().  Stops early on read
 * error or server EOF, leaving the unread pages on page_list for the
 * caller to clean up.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	pagevec_init(&lru_pvec, 0);
#ifdef CONFIG_CIFS_DEBUG2
		cFYI(1, ("rpages: num pages %d", num_pages));
#endif
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i >  num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
#ifdef CONFIG_CIFS_DEBUG2
		cFYI(1, ("rpages: read size 0x%x  contiguous pages %d",
				read_size, contig_pages));
#endif
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				/* drop the stale response buffer before
				   retrying the read */
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, ("Read error in readpages: %d", rc));
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			/* payload begins after the 4-byte RFC1001 header
			   plus the response's DataOffset */
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
			i +=  bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, ("No bytes read (%d) at offset %lld . "
				 "Cleaning remaining pages from readahead list",
				 bytes_read, offset));
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	pagevec_lru_add(&lru_pvec);

/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

	FreeXid(xid);
	return rc;
}

/*
 * Fill one page with file data via cifs_read() and mark it uptodate,
 * zeroing any tail beyond what was read.  Returns 0 on success or the
 * negative errno from cifs_read().  Also refreshes the cached atime.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, ("Bytes read %d", rc));

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the remainder when the read stopped short of a full page */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);
	return rc;
}

/*
 * ->readpage: single-page read.  Delegates to cifs_readpage_worker()
 * and unlocks the page regardless of the outcome.
 */
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	cFYI(1, ("readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset));

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}

/*
 * Return 1 if any open file on this inode (not pending close) was
 * opened with write access (O_RDWR or O_WRONLY), else 0.  Walks the
 * inode's open-file list under GlobalSMBSeslock.
 */
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			read_unlock(&GlobalSMBSeslock);
			return 1;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return 1;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
			/* since no page cache to corrupt on directio
			we can change size safely */
			return 1;
		}

		/* growing the file is safe; shrinking under a writer is not */
		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return 1;

		return 0;
	} else
		return 1;
}

/*
 * ->prepare_write: make the page uptodate before a partial-page write.
 * Full-page writes and writes wholly beyond EOF skip the server read;
 * otherwise read the page in if we have read access.  Always returns 0
 * (errors are deliberately swallowed; see trailing comment).
 */
static int cifs_prepare_write(struct file *file, struct page *page,
	unsigned from, unsigned to)
{
	int rc = 0;
	loff_t i_size;
	loff_t offset;

	cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
	if (PageUptodate(page))
		return 0;

	/* If we are writing a full page it will be up to date,
	   no need to read from the server */
	if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
		SetPageUptodate(page);
		return 0;
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	i_size = i_size_read(page->mapping->host);

	if ((offset >= i_size) ||
	    ((from == 0) && (offset + to) >= i_size)) {
		/*
		 * We don't need to read data beyond the end of the file.
		 * zero it, and set the page uptodate
		 */
		simple_prepare_write(file, page, from, to);
		SetPageUptodate(page);
	} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/* might as well read a page, it is fast enough */
		rc = cifs_readpage_worker(file, page, &offset);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by commit_write so is fine */
	}

	/* we do not need to pass errors back
	   e.g. if we do not have read access to the file
	   because cifs_commit_write will do the right thing.  -- shaggy */
	return 0;
}

/* address_space operations for servers that can handle page-sized reads */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -