⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 extents.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
	/*
	 * NOTE(review): this is the tail of a function whose head lies above
	 * this chunk (it returns an ext4_ext_path array and walks the extent
	 * tree — presumably ext4_ext_find_extent; confirm against the full
	 * file).  The declarations of path, eh, depth, i, ppos, bh, alloc
	 * and block are not visible here.
	 */
	if (ext4_ext_check_header(inode, eh, depth))
		return ERR_PTR(-EIO);

	/* account possible depth increase */
	if (!path) {
		/* depth + 2 slots: one per level plus room for one grow */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		/* binary-search the index block for the target logical block */
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		/* read the child block the chosen index points to */
		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(inode, eh, i))
			goto err;
	}

	/* leaf level reached (i == 0) */
	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	ext4_ext_show_path(inode, path);

	return path;

err:
	/* release every buffer_head pinned so far; free path only if we
	 * allocated it ourselves */
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 *
 * @handle:  journal handle for this transaction
 * @inode:   inode owning the extent tree
 * @curp:    path element for the index block being modified
 * @logical: first logical block covered by the new index entry
 * @ptr:     physical block the new index entry points to
 *
 * Returns 0 on success or a negative error code; on error the
 * filesystem error handler (ext4_std_error) has already been invoked.
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	/* journal: declare intent to modify this metadata block */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	/* duplicate keys are a tree-corruption bug */
	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	/* len = number of index slots from p_idx up to the block's capacity */
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			/* shift the tail right by one slot to open a gap */
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	/* fill the freed slot; on-disk fields are little-endian */
	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	/* caller guarantees a free slot existed */
	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 *
 * @handle: journal handle
 * @inode:  inode owning the tree
 * @path:   full path from root to the current leaf
 * @newext: extent about to be inserted (determines the split border)
 * @at:     depth of the index block that has a free entry
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf; the last allocated block becomes the leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	/* counting pass only: m = number of extents right of the split point;
	 * the disabled per-entry memmove below was replaced by the single
	 * bulk memmove that follows the loop */
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}

	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf: drop the entries that moved to the new leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		path[depth].p_hdr->eh_entries =
		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		/* oldblock = block created in the previous iteration (or the
		 * new leaf); the index block built here points down at it */
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
				newblock, (unsigned long) le32_to_cpu(border),
				oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		/* same counting-pass-then-bulk-memmove scheme as the leaf
		 * case above */
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 *
 * @handle: journal handle
 * @inode:  inode whose extent tree grows by one level
 * @path:   current path (only the root element is modified here)
 * @newext: extent being inserted (used as allocation hint)
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block; the old root lives in
	 * the inode's i_data area, hence that size */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
	  neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
	  neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	/* the single root index inherits the first key of the old root
	 * (an index key if the old root had depth, else the first
	 * extent's logical block) */
	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 *
 * NOTE(review): this function continues past the end of this chunk;
 * only the visible part is documented here.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path: the split invalidated the cached path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
					    le32_to_cpu(newext->ee_block),
					    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -