⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 balloc.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 4 页
字号:
/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "group.h"

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number.
 *
 * Block numbers are made relative to s_first_data_block and then split
 * by EXT4_BLOCKS_PER_GROUP(sb).  Either output pointer may be NULL when
 * the caller only needs the other value.
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	/* do_div() divides blocknr in place: the quotient (group number)
	 * is left in blocknr, the remainder (offset in group) returned. */
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
				int block_group, struct ext4_group_desc *gdp)
{
	unsigned long start;
	int bit, bit_max;
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, __FUNCTION__,
				   "Checksum bad for group %u\n", block_group);
			gdp->bg_free_blocks_count = 0;
			gdp->bg_free_inodes_count = 0;
			gdp->bg_itable_unused = 0;
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		/* Old-style layout: groups with a superblock backup also
		 * carry the full GDT plus the reserved GDT blocks. */
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		/* In a meta block group, only the first, second and last
		 * group of the metagroup hold a group-descriptor block. */
		int group_rel = (block_group -
				 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
				EXT4_DESC_PER_BLOCK(sb);
		if (group_rel == 0 || group_rel == 1 ||
		    (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
			bit_max += 1;
	}

	if (block_group == sbi->s_groups_count - 1) {
		/*
		 * Even though mke2fs always initialize first and last group
		 * if some other tool enabled the EXT4_BG_BLOCK_UNINIT we need
		 * to make sure we calculate the right free blocks
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			le32_to_cpu(sbi->s_es->s_first_data_block) -
			(EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count -1));
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		/* Mark the superblock/GDT area in use. */
		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
			le32_to_cpu(sbi->s_es->s_first_data_block);

		/* Set bits for block and inode bitmaps, and inode table */
		ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
		ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
		for (bit = (ext4_inode_table(sb, gdp) - start),
		     bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		/*
		 * Also if the number of blocks within the group is
		 * less than the blocksize * 8 ( which is the size
		 * of bitmap ), set rest of the block bitmap to 1
		 */
		mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
	}

	/* "- 2" accounts for the block bitmap and inode bitmap blocks
	 * themselves, which were marked in use above. */
	return free_blocks - sbi->s_itb_per_group - 2;
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 *
 * Returns a pointer into the cached descriptor block, or NULL (after
 * calling ext4_error()) if @block_group is out of range or its
 * descriptor block was never loaded.
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
					     unsigned int block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext4_group_desc * desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext4_error (sb, "ext4_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %d, groups_count = %lu",
			    block_group, sbi->s_groups_count);

		return NULL;
	}
	/* Pair with whoever published s_groups_count before us. */
	smp_rmb();

	/* Locate the descriptor block, then the slot inside it. */
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error (sb, "ext4_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %d, group_desc = %lu, desc = %lu",
			     block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/**
 * read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
	struct ext4_group_desc * desc;
	struct buffer_head * bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		/* Uninitialized group: build the bitmap in memory instead
		 * of reading it; the double !buffer_uptodate() check under
		 * the buffer lock avoids re-initializing a bitmap another
		 * task already set up. */
		bh = sb_getblk(sb, bitmap_blk);
		if (!buffer_uptodate(bh)) {
			lock_buffer(bh);
			if (!buffer_uptodate(bh)) {
				ext4_init_block_bitmap(sb, bh, block_group,
						       desc);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
		}
	} else {
		bh = sb_bread(sb, bitmap_blk);
	}
	if (!bh)
		ext4_error (sb, __FUNCTION__,
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %llu",
			    block_group, bitmap_blk);
	return bh;
}

/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 *
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @rb_root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows(start, end).	Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext4_reserve_window_node *rsv, *prev;
	int bad;

restart:
	/* In-order walk of the reservation tree; windows must be strictly
	 * ordered and non-overlapping, so each node is checked against
	 * itself (start < end) and against its predecessor. */
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start:  %llu, end:  %llu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			/* First bad window found in quiet mode: rewalk the
			 * whole tree verbosely so the full map is logged
			 * before we BUG() below. */
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	if (bad)
		BUG();
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (given goal block) could be -1, which means no specific
 * goal block. In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0;
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
			unsigned int group, struct super_block * sb)
{
	ext4_fsblk_t group_first_block, group_last_block;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/* Window entirely outside this group? */
	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	/* A specific goal (>= 0) must fall inside [start, end]; grp_goal
	 * is group-relative, so convert to an absolute block first. */
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}

/**
 * search_reserve_window()
 * @rb_root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext4_reserve_window_node *rsv;

	if (!n)
		return NULL;

	/* Standard rb-tree descent keyed on the window interval. */
	do {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node.  OK, the previous node must be to one
	 * side of the interval containing the goal.  If it's the RHS,
	 * we need to back up one.
	 */
	if (rsv->rsv_start > goal) {
		/* NOTE(review): rb_prev() may return NULL when the goal
		 * precedes every window, matching the "Returns NULL"
		 * contract above via rb_entry() on the NULL node offset —
		 * relies on rsv_node being at a known offset; verify. */
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
	}
	return rsv;
}

/**
 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock hold.
 */
void ext4_rsv_window_add(struct super_block *sb,
		    struct ext4_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext4_fsblk_t start = rsv->rsv_start;

	struct rb_node ** p = &root->rb_node;
	struct rb_node * parent = NULL;
	struct ext4_reserve_window_node *this;

	/* Descend to the insertion point; windows must never overlap,
	 * so hitting an existing interval is a fatal consistency bug. */
	while (*p)
	{
		parent = *p;
		this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			/* Overlap detected: dump the whole map, then die. */
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}

/**
 * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree. Must be called with
 * rsv_lock hold.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext4_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
	/* a valid reservation end block could not be 0 */
	return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}

/**
 * ext4_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the	reservation window structure, and
 * link the window to the ext4 inode structure at last
 *
 * The reservation window structure is only dynamically allocated
 * and linked to ext4 inode the first time the open file
 * needs a new block.
So, before every ext4_new_block(s) call, for * regular files, we should check whether the reservation window * structure exists or not. In the latter case, this function is called. * Fail to do so will result in block reservation being turned off for that * open file. * * This function is called from ext4_get_blocks_handle(), also called * when setting the reservation window size through ioctl before the file * is open for write (needs block allocation). * * Needs truncate_mutex protection prior to call this function. */void ext4_init_block_alloc_info(struct inode *inode){	struct ext4_inode_info *ei = EXT4_I(inode);	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;	struct super_block *sb = inode->i_sb;	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);	if (block_i) {		struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;		rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;		rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;		/*		 * if filesystem is mounted with NORESERVATION, the goal		 * reservation window size is set to zero to indicate		 * block reservation is off

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -