xfs_da_btree.c

From "Linux Kernel 2.6.9 for OMAP1710" · C source code · 2,203 lines total · page 1 of 5

C
2,203
Font size
	/*
	 * NOTE(review): this chunk opens mid-way through xfs_da_node_split();
	 * the function's local declarations and the leaf-split dispatch
	 * precede this page of the listing.
	 */
	node = oldblk->bp->data;
	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);

	/*
	 * With V2 the extra block is data or freespace.
	 */
	useextra = state->extravalid && XFS_DIR_IS_V1(state->mp);
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}
	return(0);
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 * (After the swap, node1 always holds the lower hashvals.)
	 */
	if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) &&
	    ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) ||
	     (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) <
	      INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
	ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
	/* Move half the surplus; count > 0 means node1 is the fuller node. */
	count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 * (memmove, not memcpy: source and destination overlap.)
		 */
		if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		INT_MOD(node2->hdr.count, ARCH_CONVERT, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count));

	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)];
		memcpy(btree_d, btree_s, tmp);
		INT_MOD(node1->hdr.count, ARCH_CONVERT, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = INT_GET(node2->hdr.count, ARCH_CONVERT) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count));
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
	blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);

	/*
	 * Adjust the expected index for insertion.
	 * If the insertion point fell past the end of node1 after the
	 * rebalance, it now belongs in node2.
	 */
	if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) {
		blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT);
		blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 * Inserts (newblk->hashval, newblk->blkno) at oldblk->index, shifting
 * later entries up, logs the changed region, and records the node's
 * (possibly new) last hashval in oldblk->hashval for upward propagation.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_mount_t *mp;

	node = oldblk->bp->data;
	mp = state->mp;
	ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
	ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp))
		ASSERT(newblk->blkno >= mp->m_dirleafblk &&
		       newblk->blkno < mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) {
		tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval);
	INT_SET(btree->before, ARCH_CONVERT, newblk->blkno);
	/* Log the shifted entries (tmp bytes) plus the one just inserted. */
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	INT_MOD(node->hdr.count, ARCH_CONVERT, +1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 * Returns 0 on success or an errno from the leaf/node helpers.
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp));

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
#ifndef __KERNEL__
			error = ENOTTY;
#else
			error = xfs_attr_leaf_toosmall(state, &action);
#endif
			if (error)
				return(error);
			if (action == 0)
				return(0);
#ifdef __KERNEL__
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
#endif
			break;
		case XFS_DIR_LEAF_MAGIC:
			ASSERT(XFS_DIR_IS_V1(state->mp));
			error = xfs_dir_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_dir_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			ASSERT(XFS_DIR_IS_V2(state->mp));
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	/* REFERENCED */
	xfs_da_blkinfo_t *blkinfo;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
	ASSERT(INT_ISZERO(oldroot->hdr.info.forw, ARCH_CONVERT));
	ASSERT(INT_ISZERO(oldroot->hdr.info.back, ARCH_CONVERT));

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	blkinfo = bp->data;
	/* A level-1 root points at leaves; deeper roots point at nodes. */
	if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) {
		ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
		       INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC);
	} else {
		ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
	}
	ASSERT(INT_ISZERO(blkinfo->forw, ARCH_CONVERT));
	ASSERT(INT_ISZERO(blkinfo->back, ARCH_CONVERT));
	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC);
	node = (xfs_da_intnode_t *)info;
	count = INT_GET(node->hdr.count, ARCH_CONVERT);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (aribtrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (!INT_ISZERO(info->forw, ARCH_CONVERT));
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			/*
			 * NOTE(review): xfs_da_node_toosmall() continues on the
			 * next page of the original listing; the body is
			 * truncated here.
			 */

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?