⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 write.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

/* Minimum element reserves for the write/commit mempools created at init. */
#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

/*
 * Allocate a zeroed nfs_write_data from the commit mempool for a
 * COMMIT call.  Returns NULL if the mempool allocation fails.
 */
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

/*
 * RCU callback: release a commit nfs_write_data, including any
 * separately-allocated page vector, back to the commit mempool.
 */
static void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);

	/* pagevec only points outside the inline array for large requests */
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

/*
 * Schedule a commit nfs_write_data for freeing after an RCU-bh grace
 * period (the RPC task record is reachable via RCU).
 */
void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}

/*
 * Allocate a zeroed nfs_write_data able to hold @pagecount pages.
 * Small requests use the inline page_array; larger ones get a
 * kcalloc'd vector.  Returns NULL on allocation failure.
 */
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

/*
 * RCU callback: release a write nfs_write_data, including any
 * separately-allocated page vector, back to the write mempool.
 */
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);

	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

/* Defer freeing of a write nfs_write_data until after an RCU-bh grace period. */
static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

/* void * variant, presumably used as an rpc_call_ops release hook — verify at callers. */
void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

/*
 * Record a write error on the open context.  The write barrier orders
 * the error store before setting NFS_CONTEXT_ERROR_WRITE, so a reader
 * that sees the flag also sees the error value.
 */
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

/*
 * Look up the nfs_page request attached to @page via its page_private
 * pointer, taking a reference on it.  Caller holds inode->i_lock (see
 * nfs_page_find_request below).  Returns NULL if none is attached.
 */
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}

/*
 * Locked wrapper around nfs_page_find_request_locked(): returns a
 * referenced request for @page, or NULL.
 */
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	/* Pages strictly before the last page cannot extend the file */
	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag
   if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	/* Only a write starting at offset 0 ... */
	if (base != 0)
		return;
	/* ... and spanning the whole valid length makes the page uptodate */
	if (count != nfs_page_length(page))
		return;
	SetPageUptodate(page);
}

/*
 * Attach a write request covering @offset/@count of @page, retrying
 * via nfs_wb_page() while nfs_update_request() reports -EBUSY.
 * Grows the file size if needed.  Returns 0 or a negative errno.
 */
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		/* -EBUSY: flush the conflicting request and try again */
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

/* Map writeback_control urgency onto NFS flush priority flags. */
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * NFS congestion control
 */
int nfs_congestion_kb;

/* Thresholds in pages; OFF is 3/4 of ON for hysteresis */
#define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

/*
 * Mark @page as under writeback.  When the tag was newly set, bump the
 * per-server writeback count and flag the backing device congested
 * once it exceeds the ON threshold.  Returns the previous tag state.
 */
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_long_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
	return ret;
}

/*
 * End writeback on @page and clear bdi congestion once the per-server
 * writeback count drops below the OFF threshold.
 */
static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	/* Retry until we either find no request or lock the one we found */
	for(;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 0;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(&inode->i_lock);
	}
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		spin_unlock(&inode->i_lock);
		nfs_unlock_request(req);
		nfs_pageio_complete(pgio);
		return 0;
	}
	/* The page cannot already be under writeback here, since we hold
	 * the locked request attached to it — hence the BUG() */
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	nfs_pageio_add_request(pgio, req);
	return 0;
}

/*
 * Account one VFS writepage call, flush any pending I/O that is not
 * contiguous with @page's index, then flush the page's own request.
 */
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
	struct inode *inode = page->mapping->host;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	nfs_pageio_cond_complete(pgio, page->index);
	return nfs_page_async_flush(pgio, page);
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	/* Report any error recorded on the descriptor itself */
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

/* VFS ->writepage entry point: flush @page, then unlock it. */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}

/* write_cache_pages() callback: @data is the shared pageio descriptor. */
static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
}

/* VFS ->writepages entry point: flush dirty pages of @mapping. */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	/* First request pins the inode; bump change_attr under delegation */
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	/* Last request gone: drop the reference taken by igrab() above,
	 * outside the spinlock */
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/* Put the request's page back on the dirty list for a later retry. */
static void
nfs_redirty_request(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	/* Pageless or commit-pending requests don't count as dirty */
	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return !PageWriteback(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	nfsi->ncommit++;
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index,
			NFS_PAGE_TAG_COMMIT);
	spin_unlock(&inode->i_lock);
	/* Account the page as unstable/reclaimable until committed */
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

/* A reply not committed to stable storage needs a follow-up COMMIT. */
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}

/*
 * Requeue an unstable write: either onto the commit list or back onto
 * the dirty list.  Returns 1 if the request was rescheduled, 0 if not.
 */
static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_redirty_request(req);
		return 1;
	}
	return 0;
}
#else
/* NFSv2 only: no COMMIT protocol, so these are all no-ops. */
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
*/static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages){	struct nfs_inode *nfsi = NFS_I(inode);	struct nfs_page *req;	pgoff_t idx_end, next;	unsigned int		res = 0;	int			error;	if (npages == 0)		idx_end = ~0;	else		idx_end = idx_start + npages - 1;	next = idx_start;	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -