
balloc.c
linux kernel source code · C · Page 1 of 4
		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
	    in_range (block, le32_to_cpu(desc->bg_inode_table),
		      sbi->s_itb_per_group) ||
	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
		      sbi->s_itb_per_group))
		ext3_error (sb, "ext3_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = "E3FSBLK", count = %lu",
			    block, count);

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext3_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext3_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special.  This is expensive...
		 */
#ifdef CONFIG_JBD_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;

			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}
		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext3_error(sb, __FUNCTION__,
				"bit already cleared for block "E3FSBLK,
				 block + i);
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			group_freed);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext3_journal_dirty_metadata(handle, gd_bh);
	if (!err) err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext3_std_error(sb, err);
	return;
}

/**
 * ext3_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t block, unsigned long count)
{
	struct super_block *sb;
	unsigned long dquot_freed_blocks;

	sb = inode->i_sb;
	if (!sb) {
		printk ("ext3_free_blocks: nonexistent device");
		return;
	}
	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}

/**
 * ext3_test_allocatable()
 * @nr:			given allocation block (group relative)
 * @bh:			bufferhead contains the bitmap of the given block group
 *
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext3_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext3_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}
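
The rule ext3_test_allocatable() enforces is worth restating: a block may be handed out only if its bit is clear in both the live bitmap and the last-committed copy. The following is a minimal user-space sketch of that rule, not kernel code; test_bit() and the two arrays are simplified stand-ins for ext3_test_bit() and the journal head's b_committed_data.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel bitmap helper. */
static int test_bit(const uint8_t *map, unsigned nr)
{
	return (map[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * Mirror of the ext3_test_allocatable() logic: a block is usable only
 * if it is free in the live bitmap AND in the last-committed copy
 * (when such a copy exists, i.e. a transaction is committing).
 */
static int test_allocatable(unsigned nr, const uint8_t *bitmap,
			    const uint8_t *committed /* may be NULL */)
{
	if (test_bit(bitmap, nr))
		return 0;			/* in use right now */
	if (!committed)
		return 1;			/* nothing mid-commit to protect */
	return !test_bit(committed, nr);	/* must be free in both copies */
}

int main(void)
{
	uint8_t bitmap[8]    = { 0 };		/* live bitmap: all free */
	uint8_t committed[8] = { 0x04 };	/* block 2 allocated at last commit */

	/* Block 2 was freed in the running transaction (live bit clear),
	 * but the delete has not committed, so it must not be reused. */
	printf("block 2 allocatable? %d\n",
	       test_allocatable(2, bitmap, committed));	/* prints 0 */
	printf("block 3 allocatable? %d\n",
	       test_allocatable(3, bitmap, committed));	/* prints 1 */
	return 0;
}
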
/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext3_grpblk_t
bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
					ext3_grpblk_t maxblocks)
{
	ext3_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext3_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext3_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}

/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find next
 *			allocatable block in bitmap.
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext3_grpblk_t
find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
			ext3_grpblk_t maxblocks)
{
	ext3_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
		 * next 64-bit boundary is simple..
		 */
		ext3_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext3_test_allocatable(here, bh))
			return here;
		ext3_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}

/**
 * claim_block()
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead contains the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext3_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext3_test_bit(block, jh->b_committed_data)) {
		ext3_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}
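
Two idioms in find_next_usable_block() are easy to miss: (start + 63) & ~63 rounds the goal up to the next 64-bit boundary to bound the near-goal scan, and memscan(p, 0, n) hunts for a whole zero byte, i.e. eight consecutive free blocks, whose first bit number is the byte offset shifted left by 3. A small user-space illustration follows; align64() and scan_byte() are made-up stand-ins, since memscan() is a kernel helper.

#include <stdio.h>
#include <stddef.h>

/* Round a group-relative block number up to the next 64-bit boundary,
 * exactly as find_next_usable_block() computes end_goal. */
static long align64(long start)
{
	return (start + 63) & ~63L;
}

/* User-space stand-in for the kernel's memscan(): return a pointer to
 * the first occurrence of byte c in the n bytes at p, or p + n. */
static const unsigned char *scan_byte(const unsigned char *p, int c, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] == (unsigned char)c)
			return p + i;
	return p + n;
}

int main(void)
{
	/* First two bytes fully allocated, third byte completely free. */
	unsigned char bitmap[4] = { 0xff, 0xff, 0x00, 0xff };
	const unsigned char *r;

	printf("goal 1  -> end_goal %ld\n", align64(1));	/* 64  */
	printf("goal 64 -> end_goal %ld\n", align64(64));	/* 64  */
	printf("goal 65 -> end_goal %ld\n", align64(65));	/* 128 */

	/* A zero byte means eight consecutive free blocks; the bit number
	 * is the byte offset shifted left by 3, as in the kernel code. */
	r = scan_byte(bitmap, 0, sizeof(bitmap));
	printf("first free byte at bit %ld\n", (long)(r - bitmap) << 3); /* 16 */
	return 0;
}
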
/**
 * ext3_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range.  Set the range of allocation
 * first, then find the first free bit(s) from the bitmap (within the range),
 * and at last, allocate the blocks by claiming the found free bit as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s) from the
 *	file's own reservation window;
 *	Otherwise, the allocation range starts from the given goal block, ends at
 *	the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static ext3_grpblk_t
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
			struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
			unsigned long *count, struct ext3_reserve_window *my_rsv)
{
	ext3_fsblk_t group_first_block;
	ext3_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext3_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT3_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT3_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT3_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
					ext3_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
		grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
		&& ext3_test_allocatable(grp_goal, bitmap_bh)
		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group),
				grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;

fail_access:
	*count = num;
	return -1;
}
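
The window-clipping arithmetic at the top of ext3_try_to_allocate() can be tested in isolation: the reservation window arrives in filesystem-wide block numbers, is translated to group-relative coordinates, clamped when it crosses a group boundary, and the goal survives only if it falls inside the clipped range. Here is a user-space sketch; clip_window() and the geometry in main() are invented for the example, not real ext3 values.

#include <stdio.h>

/* Group-relative allocation range derived from a reservation window,
 * mirroring the clipping at the top of ext3_try_to_allocate(). */
struct range { long start, end, goal; };

static struct range clip_window(long rsv_start, long rsv_end,
				long group_first, long blocks_per_group,
				long grp_goal)
{
	struct range r;

	/* Translate to group-relative block numbers, clamping a window
	 * that starts before or ends after this group. */
	r.start = (rsv_start >= group_first) ? rsv_start - group_first : 0;
	r.end = rsv_end - group_first + 1;
	if (r.end > blocks_per_group)
		r.end = blocks_per_group;

	/* Keep the goal only if it falls inside the clipped window. */
	if (r.start <= grp_goal && grp_goal < r.end)
		r.goal = grp_goal;
	else
		r.goal = -1;	/* goal unusable; search from r.start */
	return r;
}

int main(void)
{
	/* Illustrative geometry: 8192 blocks per group, group starting
	 * at block 16384, window [16380, 16420] crossing into the group. */
	struct range r = clip_window(16380, 16420, 16384, 8192, 100);

	printf("start=%ld end=%ld goal=%ld\n", r.start, r.end, r.goal);
	/* start=0 end=37 goal=-1: the window covers group blocks 0..36,
	 * so goal 100 lies outside it and is dropped. */
	return 0;
}
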
/**
 *	find_next_reservable_window():
 *		find a reservable space within the given range.
 *		It does not allocate the reservation window for now:
 *		alloc_new_reservation() will do the work later.
 *
 *	@search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole filesystem.
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space. The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 *	@size: the target new reservation window size
 *
 *	@group_first_block: the first block we consider to start
 *			the real search from
 *
 *	@last_block:
 *		the maximum block number that our goal reservable space
 *		could start from. This is normally the last block in this
 *		group. The search will end when we find that the start of the
 *		next possible reservable space is out of this boundary.
 *		This could handle the cross boundary reservation window
 *		request.
 *
