/*
 * xfs_aops.c
 *
 * Excerpt from the Linux 2.4.29 kernel source (XFS address-space
 * operations).  C source, 1,300 lines in total; this is part 1 of 3
 * of the extraction.
 */
/*
 * NOTE(review): tail of xfs_map_unwritten() — the function's signature and
 * local declarations precede this chunk, so bh, head, tmp, start_page,
 * iomapp, pb, p_offset, block_bits, nblocks, size, offset, inode, startio
 * and all_bh are presumed declared there.  Comments below describe only
 * what the visible code does.
 */
	/* Map each contiguous unwritten buffer on this page into the extent
	 * and attach the pagebuf (pb) as b_private so I/O completion can
	 * find it for the unwritten->written conversion. */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = pb;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	/* One outstanding-I/O tick per block mapped above. */
	atomic_add(nblocks, &pb->pb_io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);

		/* Whole pages strictly before the extent/EOF boundary. */
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &pb->pb_io_remaining);
			xfs_convert_page(inode, page, iomapp, pb,
						startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		/* Possibly a partial final page at EOF, if the scan above
		 * reached it without stopping. */
		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &pb->pb_io_remaining);
				xfs_convert_page(inode, page, iomapp, pb,
						startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	size = nblocks;		/* NB: using 64bit number here */
	size <<= block_bits;	/* convert fsb's to byte range */

	/* Set up the pagebuf for async data I/O; completion handler
	 * (set just below, continuing on the next chunk line) performs
	 * the unwritten-extent conversion. */
	XFS_BUF_DATAIO(pb);
	XFS_BUF_ASYNC(pb);
	XFS_BUF_SET_SIZE(pb, size);
	XFS_BUF_SET_COUNT(pb, size);
	XFS_BUF_SET_OFFSET(pb, offset);
	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
	XFS_BUF_SET_IODONE_FUNC(pb,
/* NOTE(review): the first statements below complete xfs_map_unwritten(),
 * whose beginning lies before this chunk. */
			linvfs_unwritten_convert);

	/* Drop our initial reference on pb_io_remaining; if every
	 * per-block reference is already gone, run completion now. */
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}
	return 0;
}

/*
 * Submit the locked buffers collected in bh_arr for write-out; when
 * nothing was collected, just unlock the page.
 */
STATIC void
xfs_submit_page(
	struct page		*page,
	struct buffer_head	*bh_arr[],
	int			bh_count)
{
	struct buffer_head	*bh;
	int			i;

	if (bh_count) {
		/* First pass: flag every buffer for async I/O and mark
		 * it uptodate/clean before submission. */
		for (i = 0; i < bh_count; i++) {
			bh = bh_arr[i];
			set_buffer_async_io(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		/* Second pass: queue them all for WRITE. */
		for (i = 0; i < bh_count; i++) {
			refile_buffer(bh_arr[i]);
			submit_bh(WRITE, bh_arr[i]);
		}
	} else {
		unlock_page(page);
	}
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		end, offset;
	pgoff_t			end_index;
	int			i = 0, index = 0;
	int			bbits = inode->i_blkbits;

	/* Only consider buffers up to EOF on the last page of the file. */
	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index < end_index) {
		end = PAGE_CACHE_SIZE;
	} else {
		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
	}

	bh = head = page_buffers(page);
	do {
		offset = i << bbits;
		if (offset >= end)
			break;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		/* Already-mapped ordinary buffers are only written when
		 * the caller asked for the whole page (all_bh). */
		if (buffer_mapped(bh) && all_bh &&
		    !buffer_unwritten(bh) && !buffer_delay(bh)) {
			if (startio) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for, we must
		 * now map this buffer too.
*/
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			/* Unwritten buffer with no completion handler yet:
			 * set up the unwritten->written conversion I/O. */
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh,
					offset, bbits, tmp, startio, all_bh);
		} else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio) {
			bh_arr[index++] = bh;
		} else {
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
	} while (i++, (bh = bh->b_this_page) != head);

	if (startio) {
		xfs_submit_page(page, bh_arr, index);
	} else {
		unlock_page(page);
	}
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	/* Walk forward only while consecutive pages are still delalloc;
	 * the first miss terminates the cluster. */
	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, NULL, startio, all_bh);
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite and but partially setup by block_prepare_write the
 * bh->b_state's will not agree and only ones setup by BPW/BCW will have
 * valid state, thus the whole page must be written out thing.
*/
/*
 * Walk the buffers of a page, mapping unwritten, delalloc and unmapped
 * buffers as needed.  With startio set the collected buffers are
 * submitted for write-out and following pages covered by the same map
 * are clustered.  Returns page_dirty (cleared to 0 once any buffer has
 * been mapped/queued) or a negative errno on failure.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags = startio ? 0 : BMAPI_TRYLOCK;
	int			page_dirty = 1;
	int			delalloc = 0;

	/* Are we off the end of the file ? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	end_offset = min_t(unsigned long long,
			offset + PAGE_CACHE_SIZE, i_size_read(inode));

	bh = head = page_buffers(page);
	iomp = NULL;

	len = bh->b_size;
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		/* Revalidate the cached mapping against this offset; it is
		 * dropped (NULL) once we walk past its extent. */
		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!startio)
				continue;
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_READ|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							startio, unmapped);
					if (err) {
						goto error;
					}
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty = 0;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
*/
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				delalloc = 1;
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page, bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty = 0;
				}
			} else if (startio) {
				/* Mapped and uptodate: queue it unless
				 * someone else already holds the lock. */
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty = 0;
				}
			}
		}
	} while (offset += len, p_offset += len,
			((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_submit_page(page, bh_arr, cnt);

	/* Cluster any following delalloc pages covered by the same map;
	 * for delalloc do not run past the last page of the file. */
	if (iomp) {
		tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		if (delalloc && (tlast > last_index))
			tlast = last_index;
		xfs_cluster_write(inode, page->index + 1,
				iomp, startio, unmapped, tlast);
	}

	return page_dirty;

error:
	/* Unlock anything we had queued before the failure. */
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
*/
	if (err != -EAGAIN) {
		if (!unmapped) {
			block_flushpage(page, 0);
		}
		ClearPageUptodate(page);
	}

	return err;
}

/*
 * NOTE(review): linvfs_get_block_core() is truncated here — its body
 * continues past the end of this chunk; only the signature and opening
 * declarations are visible.
 */
STATIC int
linvfs_get_block_core(
	struct inode		*inode,
	long			iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
/*
 * (End of excerpt — part 1 of 3.  The lines that followed here were
 * code-viewer UI chrome, not source text: keyboard-shortcut help —
 * copy code: Ctrl+C, search code: Ctrl+F, full-screen mode: F11,
 * increase font size: Ctrl+=, decrease font size: Ctrl+-,
 * show shortcuts: "?".)
 */