
📄 balloc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}

/**
 * ext4_has_free_blocks()
 * @sbi:		in-core super block structure.
 *
 * Check if filesystem has at least 1 free block available for allocation.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
		return 0;
	}
	return 1;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * if the total number of retries exceeds three times, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/**
 * ext4_new_blocks() -- core block(s) allocation function
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block(filesystem wide)
 * @count:		target number of blocks to allocate
 * @errp:		error code
 *
 * ext4_new_blocks uses a goal block to assist allocation.  It tries to
 * allocate block(s) from the block group that contains the goal block first.
 * If that fails, it will try to allocate block(s) from other block groups
 * without any specific goal block.
 *
 */
ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	unsigned long group_no;
	int goal_group;
	ext4_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext4_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block */
	ext4_fsblk_t ret_block;		/* filesystem-wide allocated block */
	int bgi;			/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext4_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	struct ext4_reserve_window_node *my_rsv = NULL;
	struct ext4_block_alloc_info *block_i;
	unsigned short windowsz = 0;
#ifdef EXT4FS_DEBUG
	static int goal_hits, goal_attempts;
#endif
	unsigned long ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk("ext4_new_block: nonexistent device");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	sbi = EXT4_SB(sb);
	es = EXT4_SB(sb)->s_es;
	ext4_debug("goal=%lu.\n", goal);
	/*
	 * Allocate a block from the reservation only when
	 * the filesystem is mounted with reservation (the default, -o reservation),
	 * it's a regular file, and
	 * the desired window size is greater than 0 (one could use the ioctl
	 * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off
	 * reservation on that particular file)
	 */
	block_i = EXT4_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext4_has_free_blocks(sbi)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
	goal_group = group_no;
retry_alloc:
	gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * if there are not enough free blocks to make a new reservation,
	 * turn off reservation for this allocation
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}

	ngroups = EXT4_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * skip this group if the number of
		 * free blocks is less than half of the reservation
		 * window size.
		 */
		if (free_blocks <= (windowsz/2))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal(-1).
		 */
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may have hit a bogus earlier ENOSPC error because the
	 * filesystem is "full" of reservations, while there may indeed
	 * be free blocks available on disk.  In this case, we just forget
	 * about the reservations and do the block allocation as if there
	 * were no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;

allocated:

	ext4_debug("using block group %d(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);

	if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ret_block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group))
		ext4_error(sb, "ext4_new_block",
			    "Allocating block in system zone - "
			    "blocks from %llu, length %lu",
			     ret_block, num);

	performed_allocation = 1;

#ifdef CONFIG_JBD2_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext4_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __FUNCTION__);
			}
		}
	}
	ext4_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	if (ret_block + num - 1 >= ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_new_block",
			    "block(%llu) >= blocks count(%llu) - "
			    "block_group = %lu, es == %p ", ret_block,
			ext4_blocks_count(es), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	ext4_debug("allocating block %lu. Goal hits %d of %d.\n",
			ret_block, goal_hits, goal_attempts);

	spin_lock(sb_bgl_lock(sbi, group_no));
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
	gdp->bg_free_blocks_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext4_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count-num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext4_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}

ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, int *errp)
{
	unsigned long count = 1;

	return ext4_new_blocks(handle, inode, goal, &count, errp);
}

/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	int i;
	unsigned long ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n",
	       EXT4_FREE_BLOCKS_COUNT(es),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}

static inline int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, int group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, int group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}
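The sparse_super logic at the end of the listing (test_root() and ext4_group_sparse()) decides which block groups carry a backup copy of the superblock: groups 0 and 1, plus every group whose number is a power of 3, 5, or 7. The helpers are self-contained, so their behaviour is easy to check outside the kernel. Below is a minimal user-space sketch (hypothetical, not part of balloc.c) that reimplements the two helpers and prints the backup groups for an assumed 1024-group filesystem:

/*
 * User-space sketch: re-implements test_root()/ext4_group_sparse()
 * from the listing above to show which block groups would hold a
 * backup superblock when the sparse_super feature is enabled.
 * The group count below is an arbitrary example value.
 */
#include <stdio.h>

static int test_root(int a, int b)
{
	int num = b;

	/* keep multiplying by b until num meets or passes a */
	while (a > num)
		num *= b;
	return num == a;
}

static int group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;	/* even groups never hold a backup */
	return test_root(group, 7) || test_root(group, 5) ||
	       test_root(group, 3);
}

int main(void)
{
	int ngroups = 1024;	/* hypothetical filesystem size */
	int group;

	for (group = 0; group < ngroups; group++)
		if (group_sparse(group))
			printf("backup superblock in group %d\n", group);
	return 0;
}

For 1024 groups this prints groups 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, 343, 625, and 729; these are exactly the groups for which ext4_bg_has_super() above returns 1 when sparse_super is set (typically the default at mkfs time), and every other group saves the space of the superblock and group-descriptor copies.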
