⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 write.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
		/* NOTE(review): this chunk begins mid-function; the head of the
		 * enclosing scan/wait loop is outside the visible region. */
		if (req->wb_index > idx_end)
			break;
		next = req->wb_index + 1;
		/* We must not sleep on the request while holding i_lock:
		 * pin it, drop the lock, wait, then re-take the lock. */
		BUG_ON(!NFS_WBACK_BUSY(req));
		kref_get(&req->wb_kref);
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&inode->i_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

/*
 * nfs_cancel_commit_list - abort every request on @head
 *
 * For each request: drop it from the unstable-page accounting
 * (NR_UNSTABLE_NFS zone counter and the BDI reclaimable stat),
 * clear PG_NEED_COMMIT, remove it from its inode and unlock it.
 */
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
				BDI_RECLAIMABLE);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	/* ncommit is a cheap hint that lets us skip the radix-tree scan
	 * entirely when no commit requests are outstanding. */
	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, dst, idx_start, npages,
				NFS_PAGE_TAG_COMMIT);
		nfsi->ncommit -= res;
	}
	return res;
}
#else
/* NFSv2 has no COMMIT operation: nothing to scan for. */
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	return 0;
}
#endif

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_page		*req, *new = NULL;
	pgoff_t		rqend, end;

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&inode->i_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				/* Someone else holds the request locked:
				 * drop i_lock, wait for it, then retry. */
				spin_unlock(&inode->i_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&inode->i_lock);
			/* Found and locked an existing request; a freshly
			 * allocated one (from a prior iteration) is unneeded. */
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&inode->i_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&inode->i_lock);
			req = new;
			goto zero_page;
		}
		spin_unlock(&inode->i_lock);

		/* No request on the page yet: allocate one outside the lock
		 * and retry the lookup (it may have raced into existence). */
		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = max(end, rqend) - req->wb_offset;
		goto zero_page;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
zero_page:
	/* If this page might potentially be marked as up to date,
	 * then we need to zero any uninitalised data. */
	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
			&& !PageUptodate(req->wb_page))
		zero_user_page(req->wb_page, req->wb_bytes,
				PAGE_CACHE_SIZE - req->wb_bytes,
				KM_USER0);
	return req;
}

/*
 * nfs_flush_incompatible - flush out a conflicting cached write
 *
 * Returns 0 when no conflicting request remains, or the status of
 * nfs_wb_page() on failure.
 */
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode	*inode = page->mapping->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) +offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}

/*
 * nfs_writepage_release - finish writeback of one request
 *
 * On page error, or when the unstable write does not need to be
 * rescheduled, the request is removed from the inode; otherwise it is
 * left for a resend. In all cases page writeback is ended and the
 * request's tag lock is released.
 */
static void nfs_writepage_release(struct nfs_page *req)
{

	if (PageError(req->wb_page)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else if (!nfs_reschedule_unstable_write(req)) {
		/* Set the PG_uptodate flag */
		nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}

/* Map FLUSH_* flags in @how onto an RPC task priority. */
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

/* Run the prepared write RPC with signals masked around rpc_execute(). */
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	/* First pass: allocate one nfs_write_data per wsize-sized slice
	 * up front, so that the dispatch loop below cannot fail. */
	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	/* wb_complete counts outstanding partial writes; the last one to
	 * finish releases the request (see nfs_writeback_done_partial). */
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	/* Second pass: set up and fire one RPC per slice. */
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				   wsize, offset, how);
		offset += wsize;
		nbytes -= wsize;
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	/* Allocation failed part-way: free what we got and redirty the
	 * request so it will be retried later. */
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;
	/* Gather every request's page into the single data->pagevec. */
	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	/* No memory for the RPC data: redirty every queued request. */
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}

/*
 * Initialize the pageio descriptor: use the multi-request flush path
 * when the server's wsize is smaller than a page, else one RPC per batch.
 */
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	int wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		nfs_context_set_write_error(req->wb_context, task->tk_status);
		dprintk(", error = %d\n", task->tk_status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		struct inode *inode = page->mapping->host;

		/* i_lock serializes the PG_NEED_COMMIT/PG_NEED_RESCHED flag
		 * transitions and the wb_verf update. */
		spin_lock(&inode->i_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			/* First unstable reply: remember the server's verifier
			 * so a later COMMIT can be matched against it. */
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			/* Verifier changed between replies: the server
			 * rebooted, so the data must be resent. */
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(&inode->i_lock);
	} else
		dprintk(" OK\n");

out:
	/* Release the request only when the last partial write completes. */
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	struct page		*page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback.
	 */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
		/* NOTE(review): chunk is truncated here; the remainder of
		 * nfs_writeback_done_full lies outside the visible region. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -