
xfs_aops.c

Linux kernel source code
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;
	error = xfs_bmap(XFS_I(inode), offset, size,
			     create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	error = xfs_bmap(XFS_I(inode), offset, 0,
				BMAPI_DEVICE, &iomap, &maps);
	if (error)
		return -error;

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
								xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	vn_trace_entry(XFS_I(inode), __FUNCTION__,
			(inst_t *)__return_address);
	xfs_rwlock(ip, VRWLOCK_READ);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_rwunlock(ip, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};
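
Note: the xfs_address_space_operations table above is the interface the rest of the kernel uses for all page cache I/O on XFS regular files. As a rough illustrative sketch of how such a table is put to use (the helper name below is hypothetical; in XFS the assignment happens during in-core inode setup, outside this file):

/*
 * Illustrative sketch only: attach an address_space_operations table to an
 * inode's page cache mapping.  The function name is made up for this example;
 * only the a_ops assignment reflects the real mechanism.
 */
static void example_set_aops(struct inode *inode)
{
	/*
	 * After this, readpage/writepage/direct_IO calls on the inode's
	 * mapping dispatch through xfs_address_space_operations.
	 */
	inode->i_mapping->a_ops = &xfs_address_space_operations;
}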
