
📄 ialloc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
 */
	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
				le16_to_cpu(desc->bg_free_blocks_count))
			return group;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++group >= ngroups)
			group = 0;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return group;
	}

	return -1;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext4_group_desc * gdp = NULL;
	struct ext4_super_block * es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i, free = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt (sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __FUNCTION__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %d, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err) goto fail;

	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}

	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}

	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL;
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	if (test_opt(sb, EXTENTS)) {
		EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
		ext4_ext_tree_init(handle, inode);
		if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
			err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
			if (err) goto fail;
			EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS);
			BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "call ext4_journal_dirty_metadata");
			err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
		}
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	unsigned long block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto out;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __FUNCTION__,
			     "inode bitmap error for orphan %lu", ino);
		goto out;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data) ||
			!(inode = iget(sb, ino)) || is_bad_inode(inode) ||
			NEXT_ORPHAN(inode) > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan inode %lu!  e2fsck was run?", ino);
		printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
		printk(KERN_NOTICE "inode=%p\n", inode);
		if (inode) {
			printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
			       is_bad_inode(inode));
			printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
			       NEXT_ORPHAN(inode));
			printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		}
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode && inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
		inode = NULL;
	}
out:
	brelse(bitmap_bh);
	return inode;
}

unsigned long ext4_count_free_inodes (struct super_block * sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	int i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk("group %d: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs (struct super_block * sb)
{
	unsigned long count = 0;
	int i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}
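
The fragment at the top of this page is the tail of find_group_other(): once the parent's group is full, it probes groups at power-of-two offsets from the parent (parent+1, +2, +4, ...) before falling back to a linear scan. A minimal standalone sketch of that probe order; the values of ngroups and parent_group are illustrative (the real code reads them from the superblock):

#include <stdio.h>

int main(void)
{
	int ngroups = 16;		/* example; the real value comes from the superblock */
	int parent_group = 5;		/* example: the parent directory's group */
	int group = parent_group;
	int i;

	/* Same probe order as find_group_other(): offsets 1, 2, 4, 8, ... mod ngroups */
	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		printf("probe group %d\n", group);
	}
	return 0;
}

For ngroups = 16 and parent_group = 5 this probes groups 6, 8, 12, 4, spreading the search quickly across the disk instead of clustering new inodes next to a full group.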
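ext4_new_inode() expects a running journal handle, so a caller such as ext4_create() in namei.c starts the handle, allocates the inode, and stops the handle on failure. A minimal sketch of that calling pattern, not taken from this file: example_create() is a hypothetical caller, and the journal credit count (+3) and error handling are illustrative assumptions.

/* Hypothetical caller, sketching how ext4_new_inode() is typically driven.
 * The journal credit count below is an illustrative assumption. */
static int example_create(struct inode *dir, int mode)
{
	handle_t *handle;
	struct inode *inode;

	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 3);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	inode = ext4_new_inode(handle, dir, mode);	/* allocate the on-disk inode */
	if (IS_ERR(inode)) {
		ext4_journal_stop(handle);
		return PTR_ERR(inode);
	}

	/* ... set up file operations and link the inode into the directory ... */
	ext4_journal_stop(handle);
	return 0;
}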
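ext4_orphan_get() above maps an inode number to a (block group, bitmap bit) pair; inode numbers are 1-based, so inode 1 of group 0 lands on bit 0. A standalone arithmetic sketch of that mapping, using an example inodes-per-group value (the real value comes from the superblock):

#include <stdio.h>

#define INODES_PER_GROUP 8192UL		/* example; a real fs reads this from the superblock */

int main(void)
{
	unsigned long ino = 12345;	/* example inode number */
	unsigned long block_group = (ino - 1) / INODES_PER_GROUP;
	unsigned long bit = (ino - 1) % INODES_PER_GROUP;

	/* Same mapping as ext4_orphan_get(): ino 12345 -> group 1, bit 4152 */
	printf("ino %lu -> group %lu, bit %lu\n", ino, block_group, bit);
	return 0;
}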
