
📄 balloc.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 2
/*
 *  linux/fs/ext2/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/config.h>
#include "ext2.h"
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/buffer_head.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext2_read_super).
 */

#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)

struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long offset;
        struct ext2_group_desc * desc;
        struct ext2_sb_info *sbi = EXT2_SB(sb);

        if (block_group >= sbi->s_groups_count) {
                ext2_error (sb, "ext2_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, sbi->s_groups_count);

                return NULL;
        }

        group_desc = block_group / EXT2_DESC_PER_BLOCK(sb);
        offset = block_group % EXT2_DESC_PER_BLOCK(sb);
        if (!sbi->s_group_desc[group_desc]) {
                ext2_error (sb, "ext2_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                             block_group, group_desc, offset);

                return NULL;
        }

        desc = (struct ext2_group_desc *) sbi->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc + offset;
}

/*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext2_group_desc * desc;
        struct buffer_head * bh = NULL;

        desc = ext2_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
                ext2_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
error_out:
        return bh;
}

/*
 * Set sb->s_dirt here because the superblock was "logically" altered.  We
 * need to recalculate its free blocks count and flush it out.
 */
static int reserve_blocks(struct super_block *sb, int count)
{
        struct ext2_sb_info *sbi = EXT2_SB(sb);
        struct ext2_super_block *es = sbi->s_es;
        unsigned free_blocks;
        unsigned root_blocks;

        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(es->s_r_blocks_count);

        if (free_blocks < count)
                count = free_blocks;

        if (free_blocks < root_blocks + count && !capable(CAP_SYS_RESOURCE) &&
            sbi->s_resuid != current->fsuid &&
            (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
                /*
                 * We are too close to reserve and we are not privileged.
                 * Can we allocate anything at all?
                 */
                if (free_blocks > root_blocks)
                        count = free_blocks - root_blocks;
                else
                        return 0;
        }

        percpu_counter_mod(&sbi->s_freeblocks_counter, -count);
        sb->s_dirt = 1;
        return count;
}

static void release_blocks(struct super_block *sb, int count)
{
        if (count) {
                struct ext2_sb_info *sbi = EXT2_SB(sb);

                percpu_counter_mod(&sbi->s_freeblocks_counter, count);
                sb->s_dirt = 1;
        }
}

static int group_reserve_blocks(struct ext2_sb_info *sbi, int group_no,
        struct ext2_group_desc *desc, struct buffer_head *bh, int count)
{
        unsigned free_blocks;

        if (!desc->bg_free_blocks_count)
                return 0;

        spin_lock(sb_bgl_lock(sbi, group_no));
        free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
        if (free_blocks < count)
                count = free_blocks;
        desc->bg_free_blocks_count = cpu_to_le16(free_blocks - count);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        mark_buffer_dirty(bh);
        return count;
}

static void group_release_blocks(struct super_block *sb, int group_no,
        struct ext2_group_desc *desc, struct buffer_head *bh, int count)
{
        if (count) {
                struct ext2_sb_info *sbi = EXT2_SB(sb);
                unsigned free_blocks;

                spin_lock(sb_bgl_lock(sbi, group_no));
                free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
                desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
                spin_unlock(sb_bgl_lock(sbi, group_no));
                sb->s_dirt = 1;
                mark_buffer_dirty(bh);
        }
}

/* Free given blocks, update quota and i_blocks field */
void ext2_free_blocks (struct inode * inode, unsigned long block,
                       unsigned long count)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head * bh2;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        unsigned long overflow;
        struct super_block * sb = inode->i_sb;
        struct ext2_sb_info * sbi = EXT2_SB(sb);
        struct ext2_group_desc * desc;
        struct ext2_super_block * es = sbi->s_es;
        unsigned freed = 0, group_freed;

        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
                ext2_error (sb, "ext2_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = %lu, count = %lu", block, count);
                goto error_return;
        }

        ext2_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
                      EXT2_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
                      EXT2_BLOCKS_PER_GROUP(sb);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT2_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT2_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }

        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;

        desc = ext2_get_group_desc (sb, block_group, &bh2);
        if (!desc)
                goto error_return;

        if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
            in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
            in_range (block, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group))
                ext2_error (sb, "ext2_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = %lu, count = %lu",
                            block, count);

        for (i = 0, group_freed = 0; i < count; i++) {
                if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                        bit + i, (void *) bitmap_bh->b_data))
                        ext2_error (sb, "ext2_free_blocks",
                                      "bit already cleared for block %lu",
                                      block + i);
                else
                        group_freed++;
        }

        mark_buffer_dirty(bitmap_bh);
        if (sb->s_flags & MS_SYNCHRONOUS)
                sync_dirty_buffer(bitmap_bh);

        group_release_blocks(sb, block_group, desc, bh2, group_freed);
        freed += group_freed;

        if (overflow) {
                block += count;
                count = overflow;
                goto do_more;
        }
error_return:
        brelse(bitmap_bh);
        release_blocks(sb, freed);
        DQUOT_FREE_BLOCK(inode, freed);
}

static int grab_block(spinlock_t *lock, char *map, unsigned size, int goal)
{
        int k;
        char *p, *r;

        if (!ext2_test_bit(goal, map))
                goto got_it;

repeat:
        if (goal) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT2_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                k = (goal + 63) & ~63;
                goal = ext2_find_next_zero_bit(map, k, goal);
                if (goal < k)
                        goto got_it;
                /*
                 * Search in the remainder of the current group.
                 */
        }

        p = map + (goal >> 3);
        r = memscan(p, 0, (size - goal + 7) >> 3);
        k = (r - map) << 3;
        if (k < size) {
                /*
                 * We have succeeded in finding a free byte in the block
                 * bitmap.  Now search backwards to find the start of this
                 * group of free blocks - won't take more than 7 iterations.
                 */
                for (goal = k; goal && !ext2_test_bit (goal - 1, map); goal--)
                        ;
                goto got_it;
        }

        k = ext2_find_next_zero_bit ((u32 *)map, size, goal);
        if (k < size) {
                goal = k;
                goto got_it;
        }
        return -1;
got_it:
        if (ext2_set_bit_atomic(lock, goal, (void *) map))
                goto repeat;
        return goal;
}

/*
 * ext2_new_block uses a goal block to assist allocation.  If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated.  Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
int ext2_new_block(struct inode *inode, unsigned long goal,
                        u32 *prealloc_count, u32 *prealloc_block, int *err)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gdp_bh;     /* bh2 */
        struct ext2_group_desc *desc;
        int group_no;                   /* i */
        int ret_block;                  /* j */
        int group_idx;                  /* k */
        int target_block;               /* tmp */
        int block = 0;
        struct super_block *sb = inode->i_sb;
        struct ext2_sb_info *sbi = EXT2_SB(sb);
        struct ext2_super_block *es = sbi->s_es;
        unsigned group_size = EXT2_BLOCKS_PER_GROUP(sb);
        unsigned prealloc_goal = es->s_prealloc_blocks;
        unsigned group_alloc = 0, es_alloc, dq_alloc;
        int nr_scanned_groups;

        if (!prealloc_goal--)
                prealloc_goal = EXT2_DEFAULT_PREALLOC_BLOCKS - 1;
        if (!prealloc_count || *prealloc_count)
                prealloc_goal = 0;

        if (DQUOT_ALLOC_BLOCK(inode, 1)) {
                *err = -EDQUOT;
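A note on reserve_blocks() above: it enforces ext2's root reserve, so an unprivileged allocation may not dip into the last s_r_blocks_count blocks. Below is a minimal standalone sketch of that policy, not kernel code; grant_blocks() and the "privileged" flag are illustrative stand-ins for the CAP_SYS_RESOURCE / s_resuid / s_resgid checks in the real function.

/*
 * Simplified userspace model of the policy in reserve_blocks above:
 * unprivileged callers are only granted blocks above the root reserve.
 */
#include <stdio.h>

static unsigned grant_blocks(unsigned free_blocks, unsigned root_blocks,
                             unsigned count, int privileged)
{
        if (free_blocks < count)
                count = free_blocks;
        if (!privileged && free_blocks < root_blocks + count) {
                /* Too close to the reserve: hand out only what lies above it. */
                if (free_blocks > root_blocks)
                        count = free_blocks - root_blocks;
                else
                        count = 0;
        }
        return count;
}

int main(void)
{
        /* 120 free blocks, 100 reserved for root, 50 requested. */
        printf("unprivileged: granted %u of 50\n", grant_blocks(120, 100, 50, 0));
        printf("privileged:   granted %u of 50\n", grant_blocks(120, 100, 50, 1));
        return 0;
}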
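ext2_free_blocks() maps an absolute block number to a (block group, bit-within-bitmap) pair and, when a run of blocks crosses a group boundary, frees it group by group via the do_more loop. The following userspace sketch mirrors that arithmetic; the EXAMPLE_* constants are assumed values for a 1 KiB-block filesystem, not read from a real image.

/*
 * Standalone sketch (not kernel code) of the block -> (group, bit) mapping
 * and the group-boundary split performed by ext2_free_blocks above.
 */
#include <stdio.h>

#define EXAMPLE_FIRST_DATA_BLOCK 1UL    /* assumed s_first_data_block */
#define EXAMPLE_BLOCKS_PER_GROUP 8192UL /* assumed bits per bitmap block */

int main(void)
{
        unsigned long block = 8000, count = 500;        /* run crosses group 0 -> 1 */

        while (count) {
                unsigned long group = (block - EXAMPLE_FIRST_DATA_BLOCK) /
                                      EXAMPLE_BLOCKS_PER_GROUP;
                unsigned long bit = (block - EXAMPLE_FIRST_DATA_BLOCK) %
                                    EXAMPLE_BLOCKS_PER_GROUP;
                unsigned long chunk = count, overflow = 0;

                /* Same test as the "overflow" check in ext2_free_blocks. */
                if (bit + chunk > EXAMPLE_BLOCKS_PER_GROUP) {
                        overflow = bit + chunk - EXAMPLE_BLOCKS_PER_GROUP;
                        chunk -= overflow;
                }
                printf("clear %lu bit(s) in group %lu starting at bit %lu\n",
                       chunk, group, bit);

                block += chunk;         /* continue in the next group */
                count = overflow;
        }
        return 0;
}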
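grab_block() searches a group's block bitmap in stages: try the goal bit, scan forward to the next 64-bit boundary, look for an entirely free byte and back up to the start of that free run, and finally accept any free bit. The sketch below models that search order on a small in-memory bitmap with plain C bit operations; it omits the kernel's little-endian ext2_* bitmap helpers and the atomic set-and-retry at got_it, so it illustrates the strategy rather than being a drop-in equivalent.

/*
 * Userspace model (not kernel code) of the search order in grab_block above.
 */
#include <stdio.h>
#include <string.h>

static int test_bit(const unsigned char *map, unsigned bit)
{
        return (map[bit >> 3] >> (bit & 7)) & 1;
}

static unsigned find_next_zero_bit(const unsigned char *map, unsigned limit,
                                   unsigned from)
{
        while (from < limit && test_bit(map, from))
                from++;
        return from;                    /* == limit when nothing was found */
}

static int grab_block_model(unsigned char *map, unsigned size, unsigned goal)
{
        unsigned k;
        const unsigned char *r;

        if (!test_bit(map, goal))                       /* 1: the goal itself */
                goto got_it;

        if (goal) {                                     /* 2: up to next 64-bit boundary */
                k = (goal + 63) & ~63u;
                goal = find_next_zero_bit(map, k, goal);
                if (goal < k)
                        goto got_it;
        }

        /* 3: a fully free byte, then back up to the start of the free run */
        r = memchr(map + (goal >> 3), 0, (size - goal + 7) >> 3);
        if (r) {
                k = (unsigned)(r - (const unsigned char *)map) << 3;
                if (k < size) {
                        for (goal = k; goal && !test_bit(map, goal - 1); goal--)
                                ;
                        goto got_it;
                }
        }

        k = find_next_zero_bit(map, size, goal);        /* 4: any free bit at all */
        if (k >= size)
                return -1;
        goal = k;
got_it:
        map[goal >> 3] |= 1u << (goal & 7);             /* mark it in use */
        return goal;
}

int main(void)
{
        unsigned char map[8];                   /* toy bitmap: 64 blocks */

        memset(map, 0xff, sizeof(map));         /* everything in use... */
        map[4] = 0x00;                          /* ...except blocks 32-39 */

        printf("grabbed %d\n", grab_block_model(map, 64, 5));   /* finds 32 via stage 2 */
        printf("grabbed %d\n", grab_block_model(map, 64, 5));   /* then 33 */
        return 0;
}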
