
xfs_aops.c
linux-2.6.15.6
C
Page 1 of 3
out:
	unlock_page(page);
	return NULL;
}

STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_ioend_t		*ioend;
	loff_t			offset;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	ioend = xfs_alloc_ioend(inode);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = ioend;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &ioend->io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, ioend,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &ioend->io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, ioend,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, ioend,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &ioend->io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, ioend,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	ioend->io_size = (xfs_off_t)nblocks << block_bits;
	ioend->io_offset = offset;
	xfs_finish_ioend(ioend);
	return 0;
}

STATIC void
xfs_submit_page(
	struct page		*page,
	struct writeback_control *wbc,
	struct buffer_head	*bh_arr[],
	int			bh_count,
	int			probed_page,
	int			clear_dirty)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	if (bh_count)
		set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);

	if (bh_count) {
		for (i = 0; i < bh_count; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < bh_count; i++)
			submit_bh(WRITE, bh_arr[i]);

		if (probed_page && clear_dirty)
			wbc->nr_to_write--;	/* Wrote an "extra" page */
	}
}
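/*
 * A worked example of the offset arithmetic used in xfs_map_unwritten()
 * above: the absolute file offset is the page index shifted up by the page
 * size, plus the byte offset within the page, and at the "enough" label the
 * block count is shifted back into a byte length.  The following is a
 * minimal userspace sketch of the same arithmetic, assuming 4096-byte pages
 * (shift 12) and 512-byte blocks (block_bits 9); all names and values below
 * are illustrative and are not part of xfs_aops.c.
 */
#include <stdio.h>

int main(void)
{
	unsigned long	page_index = 3;		/* hypothetical start_page->index */
	unsigned long	p_offset = 512;		/* byte offset within the page */
	int		page_shift = 12;	/* stands in for PAGE_CACHE_SHIFT */
	int		block_bits = 9;		/* 512-byte filesystem blocks */
	unsigned long	nblocks = 16;		/* buffers mapped into the ioend */
	long long	offset, io_size;

	offset = ((long long)page_index << page_shift) + p_offset;
	io_size = (long long)nblocks << block_bits;

	/* prints offset=12800 io_size=8192 */
	printf("offset=%lld io_size=%lld\n", offset, io_size);
	return 0;
}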
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		offset, end_offset;
	int			index = 0;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;

	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	end_offset = max(end_offset, PAGE_CACHE_SIZE);
	end_offset = roundup(end_offset, len);
	page_dirty = end_offset / len;

	offset = 0;
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !(buffer_unwritten(bh) || buffer_delay(bh))) {
			if (startio) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
				page_dirty--;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
		page_dirty--;
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (startio && index) {
		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
	} else {
		unlock_page(page);
	}
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}
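/*
 * The page_dirty accounting in xfs_convert_page() above starts from the
 * number of block-sized buffers covering the page and reaches zero only when
 * every buffer has been moved to a cleanable state, which is what lets
 * xfs_submit_page() be called with clear_dirty set.  Below is a minimal
 * userspace sketch of that initial computation, assuming 4096-byte pages and
 * 512-byte blocks; the helper and values are illustrative, not part of
 * xfs_aops.c.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL		/* stands in for PAGE_CACHE_SIZE */

/* same effect as the kernel's roundup() for these operand types */
static unsigned long ex_roundup(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned long	end_offset = 1536;	/* hypothetical i_size & (EX_PAGE_SIZE - 1) */
	unsigned long	len = 512;		/* 1 << i_blkbits */
	int		page_dirty;

	if (end_offset < EX_PAGE_SIZE)		/* max(end_offset, EX_PAGE_SIZE) */
		end_offset = EX_PAGE_SIZE;
	end_offset = ex_roundup(end_offset, len);
	page_dirty = end_offset / len;

	/* prints page_dirty=8: eight buffers must become cleanable */
	printf("page_dirty=%d\n", page_dirty);
	return 0;
}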
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks (or which block, for
 * that matter) are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling writepage, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap writes but only partially set up by block_prepare_write, the
 * bh states will not agree and only the ones set up by BPW/BCW will have
 * valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags;
	int			page_dirty;

	/* wait for other IO threads? */
	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 */
	len = 1 << inode->i_blkbits;
	p_offset = max(p_offset, PAGE_CACHE_SIZE);
	p_offset = roundup(p_offset, len);
	page_dirty = p_offset / len;
	iomp = NULL;
	p_offset = 0;
	bh = head = page_buffers(page);

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!startio)
				continue;
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_WRITE|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err) {
						goto error;
					}
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty--;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {

			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
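				/*
				 * The probe below sizes the contiguous run
				 * of unmapped buffers so a single call to
				 * xfs_map_blocks() can map space for the
				 * whole run rather than one block at a time.
				 */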
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								     p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty--;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty--;
				}
			}
		}
	} while (offset += len, p_offset += len,
		((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
	}

	if (iomp) {
		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
