⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 file.c

📁 Linux内核自带的cifs模块
💻 C
📖 第 1 页 / 共 5 页
字号:
	/* Pre-2.6.18 writeback_control: explicit start/end instead of
	 * range_start/range_end — restrict the scan to the given range. */
	if (wbc->start || wbc->end) {
		index = wbc->start >> PAGE_CACHE_SHIFT;
		end = wbc->end >> PAGE_CACHE_SHIFT;
		is_range = 1;
		scanned = 1;
	}
#endif /* 2.6.17 */
retry:
	/* Walk dirty pages in index order, batching up to PAGEVEC_SIZE
	 * consecutive pages per SMB write. */
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;	/* index (in pvec) of first page of the batch */
		next = 0;	/* expected page->index of the next page */
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			/* Block for the first page of a batch; for later pages
			 * just try-lock so we never deadlock mid-batch. */
			if (first < 0)
				lock_page(page);
			else if (TestSetPageLocked(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
			if (!wbc->range_cyclic && page->index > end) {
#else
			if (unlikely(is_range) && (page->index > end)) {
#endif /* 2.6.17 */
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			/* Page starts beyond EOF — nothing to write. */
			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this?  pages are held by pvec
			 */
			page_cache_get(page);

			/* Clamp the final page's length to EOF. */
			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			/* Stop batching before exceeding the negotiated
			 * write size. */
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2.  We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
			if (!open_file) {
				cERROR(1, ("No writable handles for inode"));
				rc = -EBADF;
			} else {
				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   1);
				atomic_dec(&open_file->wrtPending);
				if (rc || bytes_written < bytes_to_write) {
					cERROR(1, ("Write2 ret %d, wrote %d",
						  rc, bytes_written));
					/* BB what if continued retry is
					   requested via mount flags? */
					set_bit(AS_EIO, &mapping->flags);
				} else {
					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
				}
			}
			/* Release every page of the batch, whether the
			 * write succeeded or not. */
			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				success rc but too little data written? */
				/* BB investigate retry logic on temporary
				server crash cases and how recovery works
				when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		}
		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
#else
	if (!is_range)
#endif /* 2.6.17 */
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}
#endif /* KERNEL_VERSION > 2.6.14 */

/*
 * Write a single dirty page to the server via cifs_partialpagewrite().
 * Pre-2.5 kernels pass no writeback_control argument.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
static int cifs_writepage(struct page* page)
#else
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
#endif
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page)) {
		cFYI(1, ("ppw - page not up to date"));
	}

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate?
*/
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);

	FreeXid(xid);
	return rc;
}

/*
 * commit_write address_space op: after prepare_write/copy, push the data
 * written into [offset, to) of the page.  Extends i_size if the write ends
 * beyond the current end of file.  If the page is not uptodate the data is
 * written through to the server immediately via cifs_write(); otherwise the
 * page is simply marked dirty for later writeback.
 */
static int cifs_commit_write(struct file *file, struct page *page,
	unsigned offset, unsigned to)
{
	int xid;
	int rc = 0;
	struct inode *inode = page->mapping->host;
	loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	char *page_data;

	xid = GetXid();
	cFYI(1, ("commit write for page %p up to position %lld for %d",
		 page, position, to));
	/* i_lock guards the i_size read/update pair. */
	spin_lock(&inode->i_lock);
	if (position > inode->i_size) {
		i_size_write(inode, position);
	}
	spin_unlock(&inode->i_lock);
	if (!PageUptodate(page)) {
		position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
		/* can not rely on (or let) writepage write this data */
		if (to < offset) {
			cFYI(1, ("Illegal offsets, can not copy from %d to %d",
				offset, to));
			FreeXid(xid);
			return rc;
		}
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well	leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file, page_data + offset, to-offset,
				&position);
		/* positive rc == bytes written: success from the caller's
		 * point of view */
		if (rc > 0)
			rc = 0;
		/* else if (rc < 0) should we set writebehind rc?
*/
		kunmap(page);
	} else {
		set_page_dirty(page);
	}

	FreeXid(xid);
	return rc;
}

/*
 * fsync: flush all dirty pages of the inode to the server.  On success the
 * recorded write-behind error is cleared.  NOTE(review): datasync is only
 * logged, not honored separately — metadata vs data sync is not
 * distinguished here.
 */
int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int xid;
	int rc = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct inode *inode = file->f_dentry->d_inode;
#else
	struct inode *inode = file->f_path.dentry->d_inode;
#endif

	xid = GetXid();

	cFYI(1, ("Sync file - name: %s datasync: 0x%x",
		dentry->d_name.name, datasync));

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc == 0)
		CIFS_I(inode)->write_behind_rc = 0;
	FreeXid(xid);
	return rc;
}

/* Unfinished sync_page implementation, kept commented out. */
/* static void cifs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode *inode;
	unsigned long index = page->index;
	unsigned int rpages = 0;
	int rc = 0;

	cFYI(1, ("sync page %p",page));
	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return; */

/*	fill in rpages then
	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*	cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));

#if 0
	if (rc < 0)
		return rc;

	return 0;
#endif
} */

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
int cifs_flush(struct file *file, fl_owner_t id)
#else
int cifs_flush(struct file *file)
#endif /* 2.6.17 */
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct inode *inode = file->f_dentry->d_inode;
#else
	struct inode *inode = file->f_path.dentry->d_inode;
#endif
	int rc = 0;

	/* Rather than do the steps manually:
	   lock the inode for writing
	   loop through pages looking for write behind data (dirty pages)
	   coalesce into contiguous 16K (or smaller) chunks to write to server
	   send to server (prefer in parallel)
	   deal with writebehind errors
	   unlock inode for writing
	   filemapfdatawrite appears easier for the time being */

	rc = filemap_fdatawrite(inode->i_mapping);
	if (!rc) /* reset wb rc if we were able to write out dirty pages */
		CIFS_I(inode)->write_behind_rc = 0;

	cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));

	return rc;
}

/*
 * Read up to read_size bytes at *poffset directly into a userspace buffer,
 * issuing CIFSSMBRead requests of at most cifs_sb->rsize each and copying
 * the SMB payload out with copy_to_user().  Advances *poffset by the bytes
 * read.  Returns total bytes read, or a negative error if nothing was read.
 */
ssize_t cifs_user_read(struct file *file, char __user *read_data,
	size_t read_size, loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read = 0;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct cifsFileInfo *open_file;
	char *smb_read_data;
	char __user *current_offset;
	struct smb_com_read_rsp *pSMBr;

	xid = GetXid();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
#else
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
#endif
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		cFYI(1, ("attempting read on write only file instance"));
	}
	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		/* Never ask the server for more than the negotiated rsize. */
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		rc = -EAGAIN;
		smb_read_data = NULL;
		/* Retry loop: -EAGAIN means the request should be reissued,
		 * reopening the handle first if it was invalidated. */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			if (smb_read_data) {
				/* Payload starts after the RFC1001 length
				 * field at the response's DataOffset. */
				if (copy_to_user(current_offset,
						smb_read_data +
						4 /* RFC1001 length field */ +
						le16_to_cpu(pSMBr->DataOffset),
						bytes_read)) {
					rc = -EFAULT;
				}
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(smb_read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(smb_read_data);
				smb_read_data = NULL;
			}
		}
		if (rc || (bytes_read == 0)) {
			/* Report partial progress if any, else the error. */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

/*
 * Kernel-buffer variant of the read path: same structure as
 * cifs_user_read() but targets a kernel buffer (no copy_to_user).
 */
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
	loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
#else
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
#endif
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For windows me and 9x we do not want to request more
		than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -