
extents.c

Linux kernel source code · C · Page 1 of 5
		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;

	BUG_ON(len == 0);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
				(unsigned long) block,
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) ext4_ext_get_actual_len(ex),
				(unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%llu\n",
				(unsigned long) block,
				(unsigned long) cex->ec_block,
				(unsigned long) cex->ec_len,
				cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}
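/*
 * Worked example (annotation, hypothetical numbers): after
 * ext4_ext_put_in_cache(inode, 100, 8, start, EXT4_EXT_CACHE_EXTENT),
 * a lookup of block 104 satisfies 104 >= 100 && 104 < 108, so
 * ext4_ext_in_cache() fills *ex from the cached values and returns
 * EXT4_EXT_CACHE_EXTENT; a lookup of block 108 falls outside the
 * cached range and returns EXT4_EXT_CACHE_NO.
 */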
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}

/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage()
 *
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int ext4_ext_calc_credits_for_insert(struct inode *inode,
						struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given 32-bit logical block (4294967296 blocks), max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
	 * Let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * we need one credit to modify old root, credits for
	 * new root will be added in split accounting
	 */
	needed += 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
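/*
 * Worked example (annotation): with no path supplied, the worst case
 * above works out to needed = 2 (new data blocks) + 1 (grow-in-depth
 * root update) + (5 * 2) + (5 * 2) (index splits across the assumed
 * 5 levels) + 1 (superblock) = 24 credits. The "340" in the comment is
 * roughly how many 12-byte index entries fit in a 4 KB block after the
 * header, which is why 4-5 levels cover the full 2^32-block logical
 * space.
 */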
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		unsigned long num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %lu blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	if (ext4_ext_is_uninitialized(ex))
		uninitialized = 1;
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
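/*
 * Worked example (annotation, hypothetical numbers): truncating at
 * start == 100 when the last extent covers logical blocks 90..119
 * (ee_block == 90, len == 30) gives a == 100 and b == 119. That hits
 * the "remove tail" branch: the extent shrinks to block == 90,
 * num == 10 (blocks 90..99 survive), and ext4_remove_blocks() frees
 * the 20 physical blocks backing 100..119. An extent lying entirely
 * past start is removed whole (num == 0), freeing its slot.
 */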
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
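/*
 * Note (annotation): ext4_ext_remove_space() below walks the tree
 * iteratively, using the path[] array as an explicit stack instead of
 * recursion: i is the current level, sb_bread() descends one level
 * whenever ext4_ext_more_to_rm() reports live entries, and i-- climbs
 * back up once a level is exhausted. p_block snapshots eh_entries
 * before descending, which is what ext4_ext_more_to_rm() compares the
 * current entry count against.
 */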
int ext4_ext_remove_space(struct inode *inode, unsigned long start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %lu\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check_header(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
