⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 write.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/* forward declaration: used by afs_prepare_write() when it must flush a page
 * bound to a conflicting writeback record */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *page);

/*
 * mark a page as having been made dirty and thus needing writeback
 * - AFS pages carry no buffer heads, so the no-buffers variant is used
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * unlink a writeback record because its usage has reached zero
 * - must be called with the wb->vnode->writeback_lock held
 * - if an fsync record (AFS_WBACK_SYNCING) has now reached the front of the
 *   vnode's writeback queue, mark it complete and wake its waiter
 */
static void afs_unlink_writeback(struct afs_writeback *wb)
{
	struct afs_writeback *front;
	struct afs_vnode *vnode = wb->vnode;

	list_del_init(&wb->link);
	if (!list_empty(&vnode->writebacks)) {
		/* if an fsync rises to the front of the queue then wake it
		 * up */
		front = list_entry(vnode->writebacks.next,
				   struct afs_writeback, link);
		if (front->state == AFS_WBACK_SYNCING) {
			_debug("wake up sync");
			front->state = AFS_WBACK_COMPLETE;
			wake_up(&front->waitq);
		}
	}
}

/*
 * free a writeback record
 * - drops the record's reference on the key it was holding
 */
static void afs_free_writeback(struct afs_writeback *wb)
{
	_enter("");
	key_put(wb->key);
	kfree(wb);
}

/*
 * dispose of a reference to a writeback record
 * - the record is unlinked under writeback_lock when the count hits zero,
 *   but actually freed only after the lock has been dropped
 */
void afs_put_writeback(struct afs_writeback *wb)
{
	struct afs_vnode *vnode = wb->vnode;

	_enter("{%d}", wb->usage);

	spin_lock(&vnode->writeback_lock);
	if (--wb->usage == 0)
		afs_unlink_writeback(wb);
	else
		wb = NULL;	/* still referenced elsewhere; don't free */
	spin_unlock(&vnode->writeback_lock);
	if (wb)
		afs_free_writeback(wb);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 * - fetches [start, start+len) of the page's data from the server
 * - an ENOENT reply means the file vanished server-side: the vnode is marked
 *   deleted and -ESTALE is returned instead
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 unsigned start, unsigned len, struct page *page)
{
	int ret;

	_enter(",,%u,%u", start, len);

	ASSERTCMP(start + len, <=, PAGE_SIZE);

	ret = afs_vnode_fetch_data(vnode, key, start, len, page);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare a page for being written to
 * - [offset, to) is the part the caller is about to overwrite; the rest of
 *   the page must be made valid, either by zero-filling (beyond EOF) or by
 *   reading it from the server (before EOF)
 * - a full-page write (offset == 0, to == PAGE_SIZE) needs no preparation
 */
static int afs_prepare_page(struct afs_vnode *vnode, struct page *page,
			    struct key *key, unsigned offset, unsigned to)
{
	unsigned eof, tail, start, stop, len;
	loff_t i_size, pos;
	void *p;
	int ret;

	_enter("");

	if (offset == 0 && to == PAGE_SIZE)
		return 0;

	/* NOTE: KM_USER0-style kmap_atomic() is the pre-2.6.37 atomic-kmap
	 * API used throughout this file */
	p = kmap_atomic(page, KM_USER0);

	i_size = i_size_read(&vnode->vfs_inode);
	pos = (loff_t) page->index << PAGE_SHIFT;
	if (pos >= i_size) {
		/* partial write, page beyond EOF: no server data exists, so
		 * just zero the parts that won't be written */
		_debug("beyond");
		if (offset > 0)
			memset(p, 0, offset);
		if (to < PAGE_SIZE)
			memset(p + to, 0, PAGE_SIZE - to);
		kunmap_atomic(p, KM_USER0);
		return 0;
	}

	if (i_size - pos >= PAGE_SIZE) {
		/* partial write, page entirely before EOF: the whole page is
		 * available from the server */
		_debug("before");
		tail = eof = PAGE_SIZE;
	} else {
		/* partial write, page overlaps EOF: zero everything past the
		 * write/EOF boundary */
		eof = i_size - pos;
		_debug("overlap %u", eof);
		tail = max(eof, to);
		if (tail < PAGE_SIZE)
			memset(p + tail, 0, PAGE_SIZE - tail);
		if (offset > eof)
			memset(p + eof, 0, PAGE_SIZE - eof);
	}

	kunmap_atomic(p, KM_USER0);

	ret = 0;
	if (offset > 0 || eof > to) {
		/* need to fill one or two bits that aren't going to be written
		 * (cover both fillers in one read if there are two) */
		start = (offset > 0) ? 0 : to;
		stop = (eof > to) ?
			eof : offset;
		len = stop - start;
		_debug("wr=%u-%u av=0-%u rd=%u@%u",
		       offset, to, eof, start, len);
		ret = afs_fill_page(vnode, key, start, len, page);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 * - the caller holds the page locked, preventing it from being written out or
 *   modified by anyone else
 * - the page is attached (via page_private) to an afs_writeback record that
 *   groups contiguous dirty pages written under the same key; this function
 *   either joins an existing record, appends to one, allocates a fresh one,
 *   or flushes a conflicting one and retries
 */
int afs_prepare_write(struct file *file, struct page *page,
		      unsigned offset, unsigned to)
{
	struct afs_writeback *candidate, *wb;
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	struct key *key = file->private_data;
	pgoff_t index;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);

	/* allocate a candidate record up front; it is discarded with kfree()
	 * if the page can be merged into an existing record instead */
	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
	if (!candidate)
		return -ENOMEM;
	candidate->vnode = vnode;
	candidate->first = candidate->last = page->index;
	candidate->offset_first = offset;
	candidate->to_last = to;
	candidate->usage = 1;
	candidate->state = AFS_WBACK_PENDING;
	init_waitqueue_head(&candidate->waitq);

	if (!PageUptodate(page)) {
		_debug("not up to date");
		ret = afs_prepare_page(vnode, page, key, offset, to);
		if (ret < 0) {
			kfree(candidate);
			_leave(" = %d [prep]", ret);
			return ret;
		}
	}

try_again:
	index = page->index;
	spin_lock(&vnode->writeback_lock);

	/* see if this page is already pending a writeback under a suitable key
	 * - if so we can just join onto that one */
	wb = (struct afs_writeback *) page_private(page);
	if (wb) {
		if (wb->key == key && wb->state == AFS_WBACK_PENDING)
			goto subsume_in_current_wb;
		goto flush_conflicting_wb;
	}

	if (index > 0) {
		/* see if we can find an already pending writeback that we can
		 * append this page to */
		list_for_each_entry(wb, &vnode->writebacks, link) {
			if (wb->last == index - 1 && wb->key == key &&
			    wb->state == AFS_WBACK_PENDING)
				goto append_to_previous_wb;
		}
	}

	/* no suitable record found: install the candidate */
	list_add_tail(&candidate->link, &vnode->writebacks);
	candidate->key = key_get(key);
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) candidate);
	_leave(" = 0 [new]");
	return 0;

subsume_in_current_wb:
	/* the page already belongs to a compatible record; just widen the
	 * record's dirty byte range if this write extends past its edges */
	_debug("subsume");
	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
	if (index == wb->first && offset < wb->offset_first)
		wb->offset_first = offset;
	if (index == wb->last && to > wb->to_last)
		wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	kfree(candidate);
	_leave(" = 0 [sub]");
	return 0;

append_to_previous_wb:
	/* this page immediately follows an existing compatible record; extend
	 * that record by one page (the page takes a ref on the record) */
	_debug("append into %lx-%lx", wb->first, wb->last);
	wb->usage++;
	wb->last++;
	wb->to_last = to;
	spin_unlock(&vnode->writeback_lock);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
	kfree(candidate);
	_leave(" = 0 [app]");
	return 0;

	/* the page is currently bound to another context, so if it's dirty we
	 * need to flush it before we can use the new context */
flush_conflicting_wb:
	_debug("flush conflict");
	if (wb->state == AFS_WBACK_PENDING)
		wb->state = AFS_WBACK_CONFLICTING;
	spin_unlock(&vnode->writeback_lock);
	if (PageDirty(page)) {
		ret = afs_write_back_from_locked_page(wb, page);
		if (ret < 0) {
			afs_put_writeback(candidate);
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* the page holds a ref on the writeback record */
	afs_put_writeback(wb);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	goto try_again;
}

/*
 * finalise part of a write to a page
 * - updates i_size if this write extends the file (re-checked under
 *   writeback_lock to avoid racing with a concurrent extender)
 * - marks the page up to date and dirty so it will be written back
 */
int afs_commit_write(struct file *file, struct page *page,
		     unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	loff_t i_size, maybe_i_size;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);

	maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
	maybe_i_size += to;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->writeback_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->writeback_lock);
	}

	SetPageUptodate(page);
	set_page_dirty(page);
	if
	   (PageDirty(page))
		_debug("dirtied");
	return 0;
}

/*
 * kill all the pages in the given range
 * - called after a failed store operation: each page is marked not-up-to-date
 *   (and errored, if requested) and its writeback bit is cleared
 */
static void afs_kill_pages(struct afs_vnode *vnode, bool error,
			   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv, 0);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);
		/* NOTE(review): nothing in this text advances `first`, so the
		 * do/while below can never terminate — the upstream kernel has
		 * `first += count;` at this point; this looks like a line lost
		 * in extraction rather than the real code — confirm against
		 * fs/afs/write.c upstream */

		for (loop = 0; loop < count; loop++) {
			ClearPageUptodate(pv.pages[loop]);
			if (error)
				SetPageError(pv.pages[loop]);
			end_page_writeback(pv.pages[loop]);
		}

		__pagevec_release(&pv);
	} while (first < last);

	_leave("");
}

/*
 * synchronously write back the locked page and any subsequent non-locked dirty
 * pages also covered by the same writeback record
 * - the primary page must already be locked by the caller; its dirty flag is
 *   transferred to the writeback flag before scanning for followers
 * (NOTE: this function is truncated in this view — the remainder is on the
 *  next page of the listing)
 */
static int afs_write_back_from_locked_page(struct afs_writeback *wb,
					   struct page *primary_page)
{
	struct page *pages[8], *page;
	unsigned long count;
	unsigned n, offset, to;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (!clear_page_dirty_for_io(primary_page))
		BUG();
	if (test_set_page_writeback(primary_page))
		BUG();

	/* find all consecutive lockable dirty pages, stopping when we find a
	 * page that is not immediately lockable, is not dirty or is missing,
	 * or we reach the end of the range */
	start = primary_page->index;
	if (start >= wb->last)
		goto no_more;
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = wb->last - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(wb->vnode->vfs_inode.i_mapping,
					  start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			/* hole at the start of the batch: drop every ref and
			 * stop extending */
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > wb->last)
				break;
			if (TestSetPageLocked(page))
				break;
			if (!PageDirty(page) ||
			    page_private(page) != (unsigned long) wb) {
				unlock_page(page);
				break;
			}
			if (!clear_page_dirty_for_io(page))

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -