
📄 read.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 2
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request failed: since generic_file_read
 * only checks the return value of inode->i_op->readpage() which is always 0
 * for async RPC, we set the error bit of the page to 1 when an error occurs,
 * and make nfs_readpage transmit requests synchronously when encountering this.
 * This is only a small problem, though, since we now retry all operations
 * within the RPC code when root squashing is suspected.
 */

#include <linux/config.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static void nfs_readpage_result_partial(struct nfs_read_data *, int);
static void nfs_readpage_result_full(struct nfs_read_data *, int);

static kmem_cache_t *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

static struct nfs_read_data *nfs_readdata_alloc(void)
{
	struct nfs_read_data   *p;
	p = (struct nfs_read_data *)mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

static __inline__ void nfs_readdata_free(struct nfs_read_data *p)
{
	mempool_free(p, nfs_rdata_mempool);
}

static void nfs_readdata_release(struct rpc_task *task)
{
	struct nfs_read_data   *data = (struct nfs_read_data *)task->tk_calldata;
	nfs_readdata_free(data);
}

static
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
	loff_t i_size = i_size_read(inode);
	unsigned long idx;

	if (i_size <= 0)
		return 0;
	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (page->index > idx)
		return 0;
	if (page->index != idx)
		return PAGE_CACHE_SIZE;
	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}

static
int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	unsigned int	rsize = NFS_SERVER(inode)->rsize;
	unsigned int	count = PAGE_CACHE_SIZE;
	int		result;
	struct nfs_read_data *rdata;

	rdata = nfs_readdata_alloc();
	if (!rdata)
		return -ENOMEM;

	memset(rdata, 0, sizeof(*rdata));
	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
	rdata->cred = ctx->cred;
	rdata->inode = inode;
	INIT_LIST_HEAD(&rdata->pages);
	rdata->args.fh = NFS_FH(inode);
	rdata->args.context = ctx;
	rdata->args.pages = &page;
	rdata->args.pgbase = 0UL;
	rdata->args.count = rsize;
	rdata->res.fattr = &rdata->fattr;

	dprintk("NFS: nfs_readpage_sync(%p)\n", page);

	/*
	 * This works now because the socket layer never tries to DMA
	 * into this buffer directly.
	 */
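	/*
	 * Each pass below issues one synchronous READ of at most rsize
	 * bytes, advancing pgbase and shrinking count, until the page is
	 * full, the server reports EOF, or a read returns zero bytes.
	 */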
	do {
		if (count < rsize)
			rdata->args.count = count;
		rdata->res.count = rdata->args.count;
		rdata->args.offset = page_offset(page) + rdata->args.pgbase;

		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
			NFS_SERVER(inode)->hostname,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			(unsigned long long)rdata->args.pgbase,
			rdata->args.count);

		lock_kernel();
		result = NFS_PROTO(inode)->read(rdata);
		unlock_kernel();

		/*
		 * Even if we had a partial success we can't mark the page
		 * cache valid.
		 */
		if (result < 0) {
			if (result == -EISDIR)
				result = -EINVAL;
			goto io_error;
		}
		count -= result;
		rdata->args.pgbase += result;
		/* Note: result == 0 should only happen if we're caching
		 * a write that extends the file and punches a hole.
		 */
		if (rdata->res.eof != 0 || result == 0)
			break;
	} while (count);
	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;

	if (count)
		memclear_highpage_flush(page, rdata->args.pgbase, count);
	SetPageUptodate(page);
	if (PageError(page))
		ClearPageError(page);
	result = 0;

io_error:
	unlock_page(page);
	nfs_readdata_free(rdata);
	return result;
}

static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_lock_request(new);
	nfs_list_add_request(new, &one_request);
	nfs_pagein_one(&one_request, inode);
	return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	nfs_clear_request(req);
	nfs_release_request(req);
	nfs_unlock_request(req);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct inode		*inode;

	data->req	  = req;
	data->inode	  = inode = req->wb_context->dentry->d_inode;
	data->cred	  = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;

	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_readdata_release;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}
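/*
 * Illustrative example (values assumed, not from this source): with a
 * 4096-byte page and rsize mounted at 1024, nfs_pagein_multi below
 * allocates four nfs_read_data structures, one per 1024-byte READ, and
 * sets wb_complete to that count so the completion path (not shown in
 * this excerpt) can tell when the last reply for the page has arrived.
 */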
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for(;;) {
		data = nfs_readdata_alloc();
		if (!data)
			goto out_bad;
		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	atomic_set(&req->wb_complete, requests);
	ClearPageError(page);
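The end-of-file arithmetic in nfs_page_length above decides how much of a page is backed by real file data. A minimal user-space sketch of the same computation, assuming a 4 KiB page size for illustration (in the kernel this comes from PAGE_CACHE_SHIFT):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                      /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

/* Mirrors nfs_page_length(): bytes of page 'index' that lie below i_size. */
static unsigned int page_length(long long i_size, unsigned long index)
{
	unsigned long last;

	if (i_size <= 0)
		return 0;
	last = (i_size - 1) >> PAGE_CACHE_SHIFT;  /* index of the last page */
	if (index > last)
		return 0;                         /* page wholly past EOF */
	if (index != last)
		return PAGE_CACHE_SIZE;           /* interior page: full */
	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));  /* tail page */
}

int main(void)
{
	/* A 10000-byte file spans pages 0..2; page 2 holds 10000 - 8192 = 1808 bytes. */
	printf("%u %u %u\n",
	       page_length(10000, 0),   /* 4096 */
	       page_length(10000, 2),   /* 1808 */
	       page_length(10000, 3));  /* 0    */
	return 0;
}

A zero return is what lets nfs_readpage_async above short-circuit to nfs_return_empty_page instead of issuing a READ on the wire.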
