
ialloc.c

linux kernel source code (C)
Page 1 of 2
/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "xattr.h"
#include "acl.h"
#include "group.h"

/*
 * ialloc.c contains the inodes allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb,
				struct buffer_head *bh, int block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __FUNCTION__, "Checksum bad for group %u\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
read_inode_bitmap(struct super_block * sb, unsigned long block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		goto error_out;

	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
		if (!buffer_uptodate(bh)) {
			lock_buffer(bh);
			if (!buffer_uptodate(bh)) {
				ext4_init_inode_bitmap(sb, bh, block_group,
						       desc);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
		}
	} else {
		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
	}
	if (!bh)
		ext4_error(sb, "read_inode_bitmap",
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %llu",
			    block_group, ext4_inode_bitmap(sb, desc));
error_out:
	return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext4_group_desc * gdp;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext4_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext4_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext4_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error (sb, "ext4_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext4_error (sb, "ext4_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			gdp->bg_free_inodes_count = cpu_to_le16(
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
				  le16_to_cpu(gdp->bg_used_dirs_count) - 1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);
		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent)
{
	int ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	int group, best_group = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			best_group = group;
			best_desc = desc;
		}
	}
	return best_group;
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the rest rules look so:
 *
 * It's OK to put directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large debt (max_debt).
 * Parent's group is prefered, if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256

static int find_group_orlov(struct super_block *sb, struct inode *parent)
{
	int parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	int group = -1, i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		int best_group = -1;

		get_random_bytes(&group, sizeof(group));
		parent_group = (unsigned)group % ngroups;
		for (i = 0; i < ngroups; i++) {
			group = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc (sb, group, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			best_group = group;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (best_group >= 0)
			return best_group;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return group;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return group;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent)
{
	int parent_group = EXT4_I(parent)->i_block_group;
	int ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	int group, i;

	/*
	 * Try to place the inode in its parent directory
	 */
	group = parent_group;
	desc = ext4_get_group_desc (sb, group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
			le16_to_cpu(desc->bg_free_blocks_count))
		return group;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	group = (group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
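
The listing above continues on the next page. As an illustrative aside (not part of ialloc.c), the inode-to-bitmap mapping that ext4_free_inode() uses is plain integer arithmetic: inode numbers are 1-based, so (ino - 1) divided by the per-group inode count gives the block group, and the remainder gives the bit offset within that group's inode bitmap. The standalone userspace sketch below reproduces that calculation; the inodes_per_group and ino values are hypothetical examples, not values read from a real superblock.

/* Hypothetical userspace sketch of the (ino - 1) / inodes_per_group mapping. */
#include <stdio.h>

int main(void)
{
	unsigned long inodes_per_group = 8192;	/* example value; the kernel uses EXT4_INODES_PER_GROUP(sb) */
	unsigned long ino = 50000;		/* example inode number */

	/* Inode numbers start at 1, hence the "ino - 1" in ialloc.c. */
	unsigned long block_group = (ino - 1) / inodes_per_group;
	unsigned long bit = (ino - 1) % inodes_per_group;

	printf("inode %lu -> block group %lu, bit %lu\n", ino, block_group, bit);
	return 0;
}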

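Similarly, the trick described in the comment above mark_bitmap_end() — set individual bits only up to the next byte boundary, then fill the remaining whole bytes with memset() — can be sketched outside the kernel. This is a minimal userspace rendition of the same idea under simplified assumptions: set_bit_le() below is a plain stand-in for ext4_set_bit(), and the 64-bit bitmap is purely for illustration.

/* Minimal userspace sketch of the mark_bitmap_end() byte-alignment idea. */
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for ext4_set_bit(): set bit nr in a little-endian bitmap. */
static void set_bit_le(int nr, unsigned char *bitmap)
{
	bitmap[nr >> 3] |= 1u << (nr & 7);
}

static void mark_end(int start_bit, int end_bit, unsigned char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	/* Set bits one at a time only until the next byte boundary... */
	for (i = start_bit; i < ((start_bit + 7) & ~7); i++)
		set_bit_le(i, bitmap);
	/* ...then mark the remaining whole bytes with a single memset(). */
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

int main(void)
{
	unsigned char bitmap[8] = { 0 };	/* a 64-bit bitmap, for illustration only */

	mark_end(20, 64, bitmap);		/* mark bits 20..63 as used */
	for (int i = 0; i < 8; i++)
		printf("%02x ", bitmap[i]);
	printf("\n");				/* prints: 00 00 f0 ff ff ff ff ff */
	return 0;
}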