⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 write.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
			/* (continued) tail of nfs_writeback_done_full — the
			 * function head is outside this chunk; this is the
			 * per-request completion loop for a full-page WRITE. */
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			/* RPC-level failure: mark the page bad and record the
			 * error on the open context so fsync()/close() see it. */
			nfs_set_pageerror(page);
			nfs_context_set_write_error(req->wb_context, task->tk_status);
			dprintk(", error = %d\n", task->tk_status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			/* Unstable write: remember the server's verifier so a
			 * later COMMIT reply can be checked against it, and
			 * queue the request on the commit list. */
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
			goto next;
		}
		/* Set the PG_uptodate flag? */
		nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};

/*
 * This function is called when the WRITE call is complete.
 *
 * Handles the version-specific post-processing, the "server committed
 * less than we asked for" quirk, and short writes (which are resent
 * by restarting the RPC task).  Returns 0, a protocol error from
 * ->write_done(), or -EAGAIN when the call was restarted.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		/* Rate-limits the complaint to once per 300 seconds. */
		static unsigned long    complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long    complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}


#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/* rpc_release callback for COMMIT tasks: return the data struct to the
 * commit mempool. */
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page		*first;
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->path.dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  FLUSH_SYNC selects a
	 * synchronous RPC task; otherwise the commit runs async. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 *
 * On allocation failure every request on @head is put back on the
 * commit list (with its accounting restored) and -ENOMEM is returned.
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page         *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		/* Re-mark for commit; undo the unstable-page accounting
		 * that nfs_mark_request_commit() just re-added. */
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 *
 * Compares the verifier in the COMMIT reply against the verifier
 * stored with each request at WRITE time; a mismatch means the server
 * rebooted and the data must be rewritten.
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		/* The page is no longer "unstable" whatever the outcome. */
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			nfs_context_set_write_error(req->wb_context, task->tk_status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			/* Set the PG_uptodate flag */
			nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
					req->wb_bytes);
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

/* Scan the whole inode for requests awaiting COMMIT and send one
 * commit call for them.  Returns the number of requests scanned, or a
 * negative error from nfs_commit_list(). */
int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#else
/* NFSv2-only build: there is no COMMIT operation. */
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

/* Wait for all outstanding write requests in the wbc range, then scan
 * and commit (or, with FLUSH_INVALIDATE, cancel) the remainder.  Loops
 * until the range is clean or an error occurs; returns 0 or a negative
 * error.  Called with the commit machinery above, under inode->i_lock
 * for the scanning parts. */
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	pgoff_t idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			pgoff_t l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			/* If pgoff_t is wider than unsigned int and the
			 * count overflowed, fall back to npages == 0,
			 * meaning "to the end of the file". */
			if (sizeof(npages) != sizeof(l_npages) &&
					(pgoff_t)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&inode->i_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&inode->i_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&inode->i_lock);
			continue;
		}
		/* COMMIT is always for the whole inode, so pick up any
		 * other requests outside the range too. */
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&inode->i_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&inode->i_lock);

	} while (ret >= 0);
	spin_unlock(&inode->i_lock);
	return ret;
}

/* One writeback pass: push dirty pages, then wait/commit.  On any
 * failure the inode is re-marked dirty so the data is not lost. */
static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	int ret;

	ret = nfs_writepages(mapping, wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, wbc, how);
	if (ret < 0)
		goto out;
	return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}

/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = __nfs_write_mapping(mapping, &wbc, how);
	if (ret < 0)
		return ret;
	wbc.sync_mode = WB_SYNC_ALL;
	return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

/* Flush the inode but leave unstable writes uncommitted. */
int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

/* Cancel any outstanding write request for @page (e.g. on truncate or
 * invalidation).  Requests already queued for COMMIT are left alone;
 * everything else is removed and the page's dirty state cancelled. */
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		/* Request is locked by a writeback in flight: wait for it
		 * to finish and retry. */
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

/* Write back a single locked page and wait for it, with @how
 * controlling the commit behaviour.  Re-dirties the inode on error. */
static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

/* Module init: create the slab cache and mempools used by the write
 * path, and size the congestion threshold from available memory.
 *
 * NOTE(review): the error paths return -ENOMEM without destroying the
 * cache/pool created by an earlier step; presumably the module init
 * caller tears everything down on failure — verify against the caller.
 */
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

/* Module exit: tear down the pools and cache created above.
 * mempool_destroy/kmem_cache_destroy handle their arguments in
 * creation-reverse order here. */
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -