
📄 extents.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_tree_changed(inode);
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (test_opt(sb, EXTENTS)) {
		printk("EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!test_opt(sb, EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

/*
 * This function is called by ext4_ext_get_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 */
int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					ext4_fsblk_t iblock,
					unsigned long max_blocks)
{
	struct ext4_extent *ex, newex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	unsigned int allocated, ee_block, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;

	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3);
		if (err)
			goto out;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
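		 * (Inserting ex3 can split a leaf or grow the tree, so the
		 * cached depth, header and extent pointers may be stale;
		 * the code below re-reads the depth and, if it changed,
		 * looks the extent up again with ext4_ext_find_extent().)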
		 */
		newdepth = ext_depth(inode);
		if (newdepth != depth) {
			depth = newdepth;
			path = ext4_ext_find_extent(inode, iblock, NULL);
			if (IS_ERR(path)) {
				err = PTR_ERR(path);
				path = NULL;
				goto out;
			}
			eh = path[depth].p_hdr;
			ex = path[depth].p_ext;
			if (ex2 != &newex)
				ex2 = ex;
		}
		allocated = max_blocks;
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/* ex2: iblock to iblock + maxblocks-1 : initialised */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to Merge towards right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
out:
	return err ?
		err : allocated;
}

int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t iblock,
			unsigned long max_blocks, struct buffer_head *bh_result,
			int create, int extend_disksize)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t goal, newblock;
	int err = 0, depth, ret;
	unsigned long allocated = 0;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
			max_blocks, (unsigned) inode->i_ino);
	mutex_lock(&EXT4_I(inode)->truncate_mutex);

	/* check in cache */
	goal = ext4_ext_in_cache(inode, iblock, &newex);
	if (goal) {
		if (goal == EXT4_EXT_CACHE_GAP) {
			if (!create) {
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				   - le32_to_cpu(newex.ee_block)
				   + ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = le16_to_cpu(newex.ee_len) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	BUG_ON(path[depth].p_ext == NULL && depth != 0);
	eh = path[depth].p_hdr;

	ex = path[depth].p_ext;
	if (ex) {
		unsigned long ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
					ee_block, ee_len, newblock);

			/* Do not put uninitialized extent in the cache */
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
							ee_len, ee_start,
							EXT4_EXT_CACHE_EXTENT);
				goto out;
			}
			if (create == EXT4_CREATE_UNINITIALIZED_EXT)
				goto out;
			if (!create)
				goto out2;

			ret = ext4_ext_convert_to_initialized(handle, inode,
								path, iblock,
								max_blocks);
			if (ret <= 0)
				goto out2;
			else
				allocated = ret;
			goto outnew;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we couldn't try to create block if create flag is zero
	 */
	if (!create) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	/* allocate new block */
	goal = ext4_ext_find_goal(inode, path, iblock);

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
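	 * (The length lives in the 16-bit ee_len field, and the high bit of
	 * that field is borrowed to flag an uninitialized extent, which is
	 * why the uninitialized limit is one block smaller.)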
	 */
	if (max_blocks > EXT_INIT_MAX_LEN &&
	    create != EXT4_CREATE_UNINITIALIZED_EXT)
		max_blocks = EXT_INIT_MAX_LEN;
	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
		 create == EXT4_CREATE_UNINITIALIZED_EXT)
		max_blocks = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
	newex.ee_block = cpu_to_le32(iblock);
	newex.ee_len = cpu_to_le16(max_blocks);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = le16_to_cpu(newex.ee_len);
	else
		allocated = max_blocks;
	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
			goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(allocated);
	if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
		ext4_ext_mark_uninitialized(&newex);
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
	if (err) {
		/* free data blocks we just allocated */
		ext4_free_blocks(handle, inode, ext_pblock(&newex),
					le16_to_cpu(newex.ee_len));
		goto out2;
	}

	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = inode->i_size;

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
outnew:
	__set_bit(BH_New, &bh_result->b_state);

	/* Cache only when it is _not_ an uninitialized extent */
	if (create != EXT4_CREATE_UNINITIALIZED_EXT)
		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
						EXT4_EXT_CACHE_EXTENT);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	__set_bit(BH_Mapped, &bh_result->b_state);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	mutex_unlock(&EXT4_I(inode)->truncate_mutex);

	return err ? err : allocated;
}

void ext4_ext_truncate(struct inode * inode, struct page *page)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	unsigned long last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably first extent we're gonna free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode) + 3;
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);

	mutex_lock(&EXT4_I(inode)->truncate_mutex);
	ext4_ext_invalidate_cache(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
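	 * (The orphan record added earlier in this function is what lets a
	 * crashed, half-finished truncate be completed at the next mount.)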
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
	ext4_journal_stop(handle);
}

/*
 * ext4_ext_writepage_trans_blocks:
 * calculate max number of blocks we could modify
 * in order to allocate new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
	int needed;

	needed = ext4_ext_calc_credits_for_insert(inode, NULL);

	/* caller wants to allocate num blocks, but note it includes sb */
	needed = needed * num - (num - 1);
#ifdef CONFIG_QUOTA
	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif
	return needed;
}

/*
 * preallocate space for a file. This implements ext4's fallocate inode
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handl
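The comment above ext4_fallocate() mentions sys_fallocate and posix_fallocate. Purely as orientation, a minimal user-space sketch that would exercise this preallocation path might look like the following; the file name "testfile" and the 16 MiB size are arbitrary choices, and the program assumes it is run on an extent-enabled ext4 mount. It is not part of extents.c.

/*
 * Hypothetical user-space test program, not part of the kernel listing:
 * preallocate 16 MiB for a file via posix_fallocate(), which on an
 * extent-enabled ext4 filesystem is expected to reach the kernel's
 * fallocate path described in the comment above.
 */
#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* posix_fallocate() returns 0 on success or an errno value. */
	int err = posix_fallocate(fd, 0, 16 * 1024 * 1024);
	if (err != 0)
		fprintf(stderr, "posix_fallocate: %s\n", strerror(err));

	close(fd);
	return err ? EXIT_FAILURE : EXIT_SUCCESS;
}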
