⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_da_btree.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
	node1 = blk1->bp->data;	node2 = blk2->bp->data;	/*	 * Figure out how many entries need to move, and in which direction.	 * Swap the nodes around if that makes it simpler.	 */	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {		tmpnode = node1;		node1 = node2;		node2 = tmpnode;	}	ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC);	ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC);	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;	if (count == 0)		return;	tp = state->args->trans;	/*	 * Two cases: high-to-low and low-to-high.	 */	if (count > 0) {		/*		 * Move elements in node2 up to make a hole.		 */		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {			tmp *= (uint)sizeof(xfs_da_node_entry_t);			btree_s = &node2->btree[0];			btree_d = &node2->btree[count];			memmove(btree_d, btree_s, tmp);		}		/*		 * Move the req'd B-tree elements from high in node1 to		 * low in node2.		 */		be16_add(&node2->hdr.count, count);		tmp = count * (uint)sizeof(xfs_da_node_entry_t);		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];		btree_d = &node2->btree[0];		memcpy(btree_d, btree_s, tmp);		be16_add(&node1->hdr.count, -count);	} else {		/*		 * Move the req'd B-tree elements from low in node2 to		 * high in node1.		 */		count = -count;		tmp = count * (uint)sizeof(xfs_da_node_entry_t);		btree_s = &node2->btree[0];		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];		memcpy(btree_d, btree_s, tmp);		be16_add(&node1->hdr.count, count);		xfs_da_log_buf(tp, blk1->bp,			XFS_DA_LOGRANGE(node1, btree_d, tmp));		/*		 * Move elements in node2 down to fill the hole.		 
*/		tmp  = be16_to_cpu(node2->hdr.count) - count;		tmp *= (uint)sizeof(xfs_da_node_entry_t);		btree_s = &node2->btree[count];		btree_d = &node2->btree[0];		memmove(btree_d, btree_s, tmp);		be16_add(&node2->hdr.count, -count);	}	/*	 * Log header of node 1 and all current bits of node 2.	 */	xfs_da_log_buf(tp, blk1->bp,		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));	xfs_da_log_buf(tp, blk2->bp,		XFS_DA_LOGRANGE(node2, &node2->hdr,			sizeof(node2->hdr) +			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));	/*	 * Record the last hashval from each block for upward propagation.	 * (note: don't use the swapped node pointers)	 */	node1 = blk1->bp->data;	node2 = blk2->bp->data;	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);	/*	 * Adjust the expected index for insertion.	 */	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */	}}/* * Add a new entry to an intermediate node. */STATIC voidxfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,			       xfs_da_state_blk_t *newblk){	xfs_da_intnode_t *node;	xfs_da_node_entry_t *btree;	int tmp;	xfs_mount_t *mp;	node = oldblk->bp->data;	mp = state->mp;	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));	ASSERT(newblk->blkno != 0);	if (state->args->whichfork == XFS_DATA_FORK)		ASSERT(newblk->blkno >= mp->m_dirleafblk &&		       newblk->blkno < mp->m_dirfreeblk);	/*	 * We may need to make some room before we insert the new node.	 
*/	tmp = 0;	btree = &node->btree[ oldblk->index ];	if (oldblk->index < be16_to_cpu(node->hdr.count)) {		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);		memmove(btree + 1, btree, tmp);	}	btree->hashval = cpu_to_be32(newblk->hashval);	btree->before = cpu_to_be32(newblk->blkno);	xfs_da_log_buf(state->args->trans, oldblk->bp,		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));	be16_add(&node->hdr.count, 1);	xfs_da_log_buf(state->args->trans, oldblk->bp,		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));	/*	 * Copy the last hash value from the oldblk to propagate upwards.	 */	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);}/*======================================================================== * Routines used for shrinking the Btree. *========================================================================*//* * Deallocate an empty leaf node, remove it from its parent, * possibly deallocating that block, etc... */intxfs_da_join(xfs_da_state_t *state){	xfs_da_state_blk_t *drop_blk, *save_blk;	int action, error;	action = 0;	drop_blk = &state->path.blk[ state->path.active-1 ];	save_blk = &state->altpath.blk[ state->path.active-1 ];	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);	/*	 * Walk back up the tree joining/deallocating as necessary.	 * When we stop dropping blocks, break out.	 */	for (  ; state->path.active >= 2; drop_blk--, save_blk--,		 state->path.active--) {		/*		 * See if we can combine the block with a neighbor.		 
*   (action == 0) => no options, just leave		 *   (action == 1) => coalesce, then unlink		 *   (action == 2) => block empty, unlink it		 */		switch (drop_blk->magic) {		case XFS_ATTR_LEAF_MAGIC:			error = xfs_attr_leaf_toosmall(state, &action);			if (error)				return(error);			if (action == 0)				return(0);			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);			break;		case XFS_DIR2_LEAFN_MAGIC:			error = xfs_dir2_leafn_toosmall(state, &action);			if (error)				return error;			if (action == 0)				return 0;			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);			break;		case XFS_DA_NODE_MAGIC:			/*			 * Remove the offending node, fixup hashvals,			 * check for a toosmall neighbor.			 */			xfs_da_node_remove(state, drop_blk);			xfs_da_fixhashpath(state, &state->path);			error = xfs_da_node_toosmall(state, &action);			if (error)				return(error);			if (action == 0)				return 0;			xfs_da_node_unbalance(state, drop_blk, save_blk);			break;		}		xfs_da_fixhashpath(state, &state->altpath);		error = xfs_da_blk_unlink(state, drop_blk, save_blk);		xfs_da_state_kill_altpath(state);		if (error)			return(error);		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,							 drop_blk->bp);		drop_blk->bp = NULL;		if (error)			return(error);	}	/*	 * We joined all the way to the top.  If it turns out that	 * we only have one entry in the root, make the child block	 * the new root.	 */	xfs_da_node_remove(state, drop_blk);	xfs_da_fixhashpath(state, &state->path);	error = xfs_da_root_join(state, &state->path.blk[0]);	return(error);}/* * We have only one entry in the root.  Copy the only remaining child of * the old root to block 0 as the new root node. 
*/
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	/* REFERENCED */
	xfs_da_blkinfo_t *blkinfo;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC);
	/* the root must have no siblings */
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	blkinfo = bp->data;
	/*
	 * Sanity-check the child's magic: a level-1 root points at leaf
	 * blocks (dir2 leafn or attr leaf); anything deeper points at
	 * another intermediate node.
	 */
	if (be16_to_cpu(oldroot->hdr.level) == 1) {
		ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC ||
		       be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC);
	} else {
		ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC);
	}
	/* the only child must itself have no siblings */
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
	/*
	 * Overwrite the root's buffer with the child's contents, log the
	 * whole block, and give the now-redundant child block back.
	 */
	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
*/
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC);
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			/* no sibling to shift onto: nothing we can do */
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		/*
		 * count = (75% of a node's capacity) - our entries
		 *         - the sibling's entries.
		 * If that is still >= 0, the merged block would be at
		 * most 75% full, i.e. at least 25% free.
		 */
		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	/* neither sibling had enough free space */
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
*/
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	/*
	 * Pick up the last hashval of the bottom block in the path;
	 * how to read it depends on the block type.  An empty block
	 * means there is nothing to propagate.
	 */
	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	/*
	 * Walk from the parent toward the root, rewriting (and logging)
	 * the entry that points at the child whose last hashval changed.
	 * Stop as soon as an ancestor's entry already matches.
	 */
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
		/* this node's last hashval becomes the value to push up */
		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -