balloc.c
	 */
	if (!test_opt(sb, RESERVATION))
		rsv->rsv_goal_size = 0;
	else
		rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
	rsv->rsv_alloc_hit = 0;
	block_i->last_alloc_logical_block = 0;
	block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}

/**
 * ext4_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close, on
 * truncate, or at last iput().
 *
 * It is being called in three cases:
 *	ext4_release_file(): last writer closes the file
 *	ext4_clear_inode(): last iput(), when nobody links to this file.
 *	ext4_truncate(): when the block indirect map is about to change.
 */
void ext4_discard_reservation(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext4_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}

/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	unsigned long block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext4_group_desc *desc;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_free_blocks",
			   "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group))
		ext4_error(sb, "ext4_free_blocks",
			   "Freeing blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special.  This is expensive...
		 */
#ifdef CONFIG_JBD2_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}

		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making jbd2_journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making jbd2_journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext4_error(sb, __FUNCTION__,
				   "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			group_freed);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err)
		err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t block, unsigned long count)
{
	struct super_block *sb;
	unsigned long dquot_freed_blocks;

	sb = inode->i_sb;
	if (!sb) {
		printk("ext4_free_blocks: nonexistent device");
		return;
	}
	ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}

/**
 * ext4_test_allocatable()
 * @nr:			given block number (group relative)
 * @bh:			bufferhead containing the bitmap of the given block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext4_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext4_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
					ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext4_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext4_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}

/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find the next
 *			allocatable block in the bitmap.
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
			ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT4_BLOCKS_PER_GROUP.  Aligning up to the
		 * next 64-bit boundary is simple..
		 */
		ext4_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext4_test_allocatable(here, bh))
			return here;
		ext4_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}

/**
 * claim_block()
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead containing the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it
 * _was_ allocated and freed then clear the bit in the bitmap again and
 * return zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext4_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
		ext4_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * ext4_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holding the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of the
 * allocation first, then find the first free bit(s) in the bitmap (within
 * the range), and finally allocate the blocks by claiming the found free
 * bit(s) as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s) from
 *	the file's own reservation window;
 *	otherwise, the allocation range starts at the given goal block and
 *	ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to
 * a new bitmap.  In that case we must release write access to the old one
 * via ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
			struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal, unsigned long *count,
			struct ext4_reserve_window *my_rsv)
{
	ext4_fsblk_t group_first_block;
	ext4_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext4_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT4_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT4_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT4_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
					ext4_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
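
The listing breaks off here, mid-way through ext4_try_to_allocate(). The rule running through ext4_test_allocatable(), bitmap_search_next_usable_block() and claim_block() above — a block may be reused only when it is free in both the live bitmap and the journal's last-committed copy, with a re-check after setting the bit to catch a racing free — can be exercised outside the kernel. Below is a minimal userspace C sketch of that rule, not kernel code: the plain uint8_t arrays standing in for bh->b_data and jh->b_committed_data, the helper names, and the single pthread mutex standing in for sb_bgl_lock() are all illustrative assumptions.

/*
 * Userspace sketch: allocate a bit only if it is clear in BOTH the
 * "live" bitmap and the last-committed copy, mirroring the intent of
 * ext4_test_allocatable() and the re-check in claim_block().
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 64

static uint8_t live[NBITS / 8];      /* stand-in for bh->b_data */
static uint8_t committed[NBITS / 8]; /* stand-in for jh->b_committed_data */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* sb_bgl_lock() stand-in */

static int test_bit(const uint8_t *map, int nr)
{
	return (map[nr >> 3] >> (nr & 7)) & 1;
}

static void set_bit(uint8_t *map, int nr)   { map[nr >> 3] |= 1u << (nr & 7); }
static void clear_bit(uint8_t *map, int nr) { map[nr >> 3] &= ~(1u << (nr & 7)); }

/* Free in both copies?  (cf. ext4_test_allocatable) */
static int allocatable(int nr)
{
	return !test_bit(live, nr) && !test_bit(committed, nr);
}

/* Try to claim a bit; re-check the committed copy under the lock in
 * case a concurrent delete freed it after we last looked (cf. claim_block). */
static int claim(int nr)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!test_bit(live, nr)) {
		set_bit(live, nr);
		if (test_bit(committed, nr)) {
			/* Freed again since we looked, but the free is not
			 * yet committed: reusing it now could corrupt data
			 * after a crash, so back out. */
			clear_bit(live, nr);
		} else {
			ret = 1;
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	set_bit(live, 3);       /* currently allocated */
	set_bit(committed, 5);  /* freed, but the free is not yet committed */

	for (int nr = 0; nr < 8; nr++)
		printf("bit %d: allocatable=%d claimed=%d\n",
		       nr, allocatable(nr), allocatable(nr) && claim(nr));
	return 0;
}

The kernel version avoids one big lock: claim_block() does an atomic test-and-set on the live bitmap first and takes only the buffer state lock for the committed-copy check. The sketch collapses both into a single mutex for clarity.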