⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_aops.c

📁 linux2.6.16版本
💻 C
📖 第 1 页 / 共 3 页
字号:
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/* Defined later in this file; counts per-buffer state on a page. */
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);

#if defined(XFS_RW_TRACE)
/*
 * Record a page-level I/O event in the inode's rw trace buffer.
 *
 * Captures the on-disk size, in-core size and page offset (each split
 * into two 32-bit halves so they fit the void* trace slots), together
 * with the per-page delalloc/unmapped/unwritten buffer counts gathered
 * by xfs_count_page_state().  Compiled out entirely (empty macro below)
 * when XFS_RW_TRACE is not defined.
 */
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	/* -1 means "page had no buffers, counts unknown" */
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;		/* tracing not enabled on this inode */

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	/*
	 * The buffer list is chained through b_private (see
	 * xfs_add_to_ioend); grab the next pointer before b_end_io
	 * may recycle the buffer.
	 */
	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, ioend->io_uptodate);
	}
	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Only convert the range if all the I/O succeeded.  VOP_BMAP is
	 * a macro that stores its result into 'error' by name; the value
	 * is not checked here (conversion is best-effort at completion).
	 */
	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	/* mempool allocation cannot fail (it sleeps until memory is free) */
	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	/* hold on the vnode, dropped by vn_iowake() in destroy/cancel */
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	/* pick the workqueue completion handler matching the extent type */
	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);

	return ioend;
}

/*
 * Map a file range to disk blocks via VOP_BMAP, filling *mapp with a
 * single iomap.  Marks the vnode modified for (re)allocating mappings.
 * Returns a negated XFS error code (0 on success), i.e. Linux convention.
 */
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	/* VOP_BMAP is a macro; it writes its result into 'error' */
	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

/*
 * Return non-zero if 'offset' falls inside the range covered by the
 * cached iomap, so the mapping can be reused for the next buffer.
 */
STATIC inline int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	/* non-zero bi_size means the bio is only partially complete */
	if (bio->bi_size)
		return 1;

	ASSERT(ioend);
	ASSERT(atomic_read(&bio->bi_cnt) >= 1);

	/* Toss bio and pass work off to an xfsdatad thread */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		ioend->io_uptodate = 0;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);
	xfs_finish_ioend(ioend);
	return 0;
}

/*
 * Attach the bio to the ioend and submit it for write.  Takes an extra
 * io_remaining hold so the ioend cannot complete until this bio does;
 * drops the reference taken at allocation time in xfs_alloc_ioend_bio().
 */
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

/*
 * Allocate a bio sized for the buffer's block device, halving the
 * vector count on allocation failure until one fits.  Returns with an
 * extra reference held (released by xfs_submit_ioend_bio).
 */
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	/* convert the filesystem block number to a 512-byte sector */
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

/*
 * Mark a mapped, locked buffer as under async writeback and clean.
 * Must not be called on delalloc or unwritten buffers - those must be
 * converted to real mappings first.
 */
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

/*
 * Transition a locked page into writeback state and unlock it.
 * If no buffers were scheduled ('buffers' == 0) the writeback is ended
 * immediately and the page is reported as skipped to the writeback
 * control, so the caller's accounting stays correct.
 */
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}

/* Add a whole buffer_head's data to a bio; returns bytes actually added. */
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we got, then we can end up with a page that only has buffers
 * marked async write and I/O complete on can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * bufferheads, and then the second one submit them for I/O.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * Pack contiguous buffers into one bio; whenever a buffer
		 * is discontiguous with the previous one, or the bio is
		 * full, submit it and start a new one via 'retry'.
		 */
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				/* not contiguous - flush and restart */
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				/* bio full - flush and restart */
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		/* drop the initial io_remaining hold from xfs_alloc_ioend */
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	/* walk the ioend chain, unwinding every buffer on each one */
	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		/* release the vnode hold taken in xfs_alloc_ioend */
		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	/*
	 * Start a new ioend when there is none yet, the caller forces one,
	 * or the extent type changed; chain it after the previous ioend
	 * via io_list so xfs_submit_ioend can walk them all.
	 */
	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		/* append to the current ioend's buffer list (b_private link) */
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

/*
 * Map the buffer at 'offset' to its on-disk block using the given iomap.
 * Converts the iomap's basic-block (512-byte) address to a filesystem
 * block number for b_blocknr, then locks the buffer and flips it from
 * delay/unwritten state to mapped.
 * NOTE(review): this function is truncated at the end of this chunk;
 * the remainder (including its closing brace) continues past the
 * visible source.
 */
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	/* sectors-per-block shift: fs block bits minus basic-block bits */
	sector_shift = block_bits - BBSHIFT;
	bn = (iomapp->iomap_bn >> sector_shift) +
	      ((offset - iomapp->iomap_offset) >> block_bits);

	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -