xfs_aops.c

Linux kernel source code | Language: C | Page 1 of 3
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"

#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend. If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}
	if (unlikely(ioend->io_error)) {
		vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
				__FILE__,__LINE__);
	}
	vn_iowake(XFS_I(ioend->io_inode));
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof io_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		mark_inode_dirty_sync(ioend->io_inode);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		xfs_bmap(XFS_I(ioend->io_inode), offset, size,
				BMAPI_UNWRITTEN, NULL, NULL);
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	xfs_inode_t		*ip = XFS_I(inode);
	int			error, nmaps = 1;

	error = xfs_bmap(ip, offset, count,
				flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		xfs_iflags_set(ip, XFS_IMODIFIED);
	return -error;
}

STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we got, then we can end up with a page that only has buffers
 * marked async write and I/O complete on can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
