
📄 xfs_attr_leaf.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.  Then get
	 * the direction to copy and the number of elements to move.
	 *
	 * "inleaf" is true if the new entry should be inserted into blk1.
	 * If "swap" is also true, then reverse the sense of "inleaf".
	 */
	state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2,
						     &count, &totallen);
	if (swap)
		state->inleaf = !state->inleaf;

	/*
	 * Move any entries required from leaf to leaf:
	 */
	if (count < be16_to_cpu(hdr1->count)) {
		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count = be16_to_cpu(hdr1->count) - count;
		space  = be16_to_cpu(hdr1->usedbytes) - totallen;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf2 is the destination, compact it if it looks tight.
		 */
		max  = be16_to_cpu(hdr2->firstused)
						- sizeof(xfs_attr_leaf_hdr_t);
		max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t);
		if (space > max) {
			xfs_attr_leaf_compact(args->trans, blk2->bp);
		}

		/*
		 * Move high entries from leaf1 to low end of leaf2.
		 */
		xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
				leaf2, 0, count, state->mp);

		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
	} else if (count > be16_to_cpu(hdr1->count)) {
		/*
		 * I assert that since all callers pass in an empty
		 * second buffer, this code should never execute.
		 */

		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count -= be16_to_cpu(hdr1->count);
		space  = totallen - be16_to_cpu(hdr1->usedbytes);
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf1 is the destination, compact it if it looks tight.
		 */
		max  = be16_to_cpu(hdr1->firstused)
						- sizeof(xfs_attr_leaf_hdr_t);
		max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t);
		if (space > max) {
			xfs_attr_leaf_compact(args->trans, blk1->bp);
		}

		/*
		 * Move low entries from leaf2 to high end of leaf1.
		 */
		xfs_attr_leaf_moveents(leaf2, 0, leaf1,
				be16_to_cpu(hdr1->count), count, state->mp);

		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
	}

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	blk1->hashval = be32_to_cpu(
		leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(
		leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 * NOTE: this code depends on the (current) situation that the
	 * second block was originally empty.
	 *
	 * If the insertion point moved to the 2nd block, we must adjust
	 * the index.  We must also track the entry just following the
	 * new entry for use in an "atomic rename" operation, that entry
	 * is always the "old" entry and the "new" entry is what we are
	 * inserting.  The index/blkno fields refer to the "old" entry,
	 * while the index2/blkno2 fields refer to the "new" entry.
	 */
	if (blk1->index > be16_to_cpu(leaf1->hdr.count)) {
		ASSERT(state->inleaf == 0);
		blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
		args->index = args->index2 = blk2->index;
		args->blkno = args->blkno2 = blk2->blkno;
	} else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) {
		if (state->inleaf) {
			args->index = blk1->index;
			args->blkno = blk1->blkno;
			args->index2 = 0;
			args->blkno2 = blk2->blkno;
		} else {
			blk2->index = blk1->index
				    - be16_to_cpu(leaf1->hdr.count);
			args->index = args->index2 = blk2->index;
			args->blkno = args->blkno2 = blk2->blkno;
		}
	} else {
		ASSERT(state->inleaf == 1);
		args->index = args->index2 = blk1->index;
		args->blkno = args->blkno2 = blk1->blkno;
	}
}

/*
 * Examine entries until we reduce the absolute difference in
 * byte usage between the two blocks to a minimum.
 * GROT: Is this really necessary?  With other than a 512 byte blocksize,
 * GROT: there will always be enough room in either block for a new entry.
 * GROT: Do a double-split for this case?
 */
STATIC int
xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
				    xfs_da_state_blk_t *blk1,
				    xfs_da_state_blk_t *blk2,
				    int *countarg, int *usedbytesarg)
{
	xfs_attr_leafblock_t *leaf1, *leaf2;
	xfs_attr_leaf_hdr_t *hdr1, *hdr2;
	xfs_attr_leaf_entry_t *entry;
	int count, max, index, totallen, half;
	int lastdelta, foundit, tmp;

	/*
	 * Set up environment.
	 */
	leaf1 = blk1->bp->data;
	leaf2 = blk2->bp->data;
	hdr1 = &leaf1->hdr;
	hdr2 = &leaf2->hdr;
	foundit = 0;
	totallen = 0;

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.
	 */
	max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count);
	half  = (max+1) * sizeof(*entry);
	half += be16_to_cpu(hdr1->usedbytes) +
		be16_to_cpu(hdr2->usedbytes) +
		xfs_attr_leaf_newentsize(
				state->args->namelen,
				state->args->valuelen,
				state->blocksize, NULL);
	half /= 2;
	lastdelta = state->blocksize;
	entry = &leaf1->entries[0];
	for (count = index = 0; count < max; entry++, index++, count++) {

#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
		/*
		 * The new entry is in the first block, account for it.
		 */
		if (count == blk1->index) {
			tmp = totallen + sizeof(*entry) +
				xfs_attr_leaf_newentsize(
						state->args->namelen,
						state->args->valuelen,
						state->blocksize, NULL);
			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
				break;
			lastdelta = XFS_ATTR_ABS(half - tmp);
			totallen = tmp;
			foundit = 1;
		}

		/*
		 * Wrap around into the second block if necessary.
		 */
		if (count == be16_to_cpu(hdr1->count)) {
			leaf1 = leaf2;
			entry = &leaf1->entries[0];
			index = 0;
		}

		/*
		 * Figure out if next leaf entry would be too much.
		 */
		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
									index);
		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
			break;
		lastdelta = XFS_ATTR_ABS(half - tmp);
		totallen = tmp;
#undef XFS_ATTR_ABS
	}

	/*
	 * Calculate the number of usedbytes that will end up in lower block.
	 * If new entry not in lower block, fix up the count.
	 */
	totallen -= count * sizeof(*entry);
	if (foundit) {
		totallen -= sizeof(*entry) +
				xfs_attr_leaf_newentsize(
						state->args->namelen,
						state->args->valuelen,
						state->blocksize, NULL);
	}

	*countarg = count;
	*usedbytesarg = totallen;
	return(foundit);
}
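The split point chosen by xfs_attr_leaf_figure_balance() minimizes the absolute difference in byte usage between the two leaves, counting the entry about to be inserted. The following user-space sketch is added for illustration and is not part of xfs_attr_leaf.c; the entry sizes, the find_split() helper and its parameters are hypothetical, and the new entry is charged in a single step rather than the two-step accounting the kernel code performs. It shows the same walk-and-break pattern: accumulate bytes entry by entry and stop as soon as the distance from the halfway point starts growing again.

/* Illustrative user-space sketch -- not part of xfs_attr_leaf.c. */
#include <stdio.h>
#include <stdlib.h>

static int find_split(const int *entsize, int nentries, int newsize,
		      int newindex, int *usedbytes)
{
	int half = 0, total = 0, lastdelta, count, tmp;

	for (count = 0; count < nentries; count++)
		half += entsize[count];
	half = (half + newsize) / 2;		/* target bytes for block 1 */

	lastdelta = half + newsize + 1;		/* larger than any possible delta */
	for (count = 0; count < nentries; count++) {
		tmp = total + entsize[count];
		if (count == newindex)
			tmp += newsize;		/* charge the new entry to this side */
		if (abs(half - tmp) > lastdelta)
			break;			/* delta started growing again */
		lastdelta = abs(half - tmp);
		total = tmp;
	}
	*usedbytes = total;
	return count;				/* entries that stay in the first block */
}

int main(void)
{
	int sizes[6] = { 40, 24, 64, 16, 32, 48 };	/* hypothetical entry sizes */
	int used, count;

	count = find_split(sizes, 6, 56, 2, &used);
	printf("keep %d entries (%d bytes) in the first block\n", count, used);
	return 0;
}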
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Check a leaf block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 *
 * GROT: allow for INCOMPLETE entries in calculation.
 */
int
xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_attr_leafblock_t *leaf;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, bytes, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
	leaf = (xfs_attr_leafblock_t *)info;
	count = be16_to_cpu(leaf->hdr.count);
	bytes = sizeof(xfs_attr_leaf_hdr_t) +
		count * sizeof(xfs_attr_leaf_entry_t) +
		be16_to_cpu(leaf->hdr.usedbytes);
	if (bytes > (state->blocksize >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink an attribute list over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, XFS_ATTR_FORK);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		leaf = (xfs_attr_leafblock_t *)info;
		count  = be16_to_cpu(leaf->hdr.count);
		bytes  = state->blocksize - (state->blocksize>>2);
		bytes -= be16_to_cpu(leaf->hdr.usedbytes);
		leaf = bp->data;
		ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
		count += be16_to_cpu(leaf->hdr.count);
		bytes -= be16_to_cpu(leaf->hdr.usedbytes);
		bytes -= count * sizeof(xfs_attr_leaf_entry_t);
		bytes -= sizeof(xfs_attr_leaf_hdr_t);
		xfs_da_brelse(state->args->trans, bp);
		if (bytes >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return(error);
	if (retval) {
		*action = 0;
	} else {
		*action = 1;
	}
	return(0);
}
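xfs_attr_leaf_toosmall() only reports a join candidate when the merged contents would leave at least 25% of a block free: it starts from 75% of the blocksize and subtracts one header, the combined entry table and the combined name/value bytes. A small user-space sketch of that arithmetic follows, added for illustration; the struct, the can_join() helper and the stand-in sizes are hypothetical, not the on-disk layout.

/* Illustrative user-space sketch -- not part of xfs_attr_leaf.c. */
#include <stdbool.h>
#include <stdio.h>

struct leaf_usage {
	int count;		/* number of entries */
	int usedbytes;		/* bytes of name/value data */
};

#define HDR_SIZE	32	/* stand-in for sizeof(xfs_attr_leaf_hdr_t) */
#define ENTRY_SIZE	8	/* stand-in for sizeof(xfs_attr_leaf_entry_t) */

static bool can_join(const struct leaf_usage *a, const struct leaf_usage *b,
		     int blocksize)
{
	int bytes = blocksize - (blocksize >> 2);	/* 75% of the block */

	bytes -= a->usedbytes + b->usedbytes;		/* name/value data */
	bytes -= (a->count + b->count) * ENTRY_SIZE;	/* combined entry table */
	bytes -= HDR_SIZE;				/* one header after the merge */
	return bytes >= 0;				/* fits with >= 25% to spare */
}

int main(void)
{
	struct leaf_usage a = { .count = 20, .usedbytes = 700 };
	struct leaf_usage b = { .count = 15, .usedbytes = 500 };

	printf("join: %s\n", can_join(&a, &b, 4096) ? "yes" : "no");
	return 0;
}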
/*
 * Remove a name from the leaf attribute list structure.
 *
 * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
 * If two leaves are 37% full, when combined they will leave 25% free.
 */
int
xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_hdr_t *hdr;
	xfs_attr_leaf_map_t *map;
	xfs_attr_leaf_entry_t *entry;
	int before, after, smallest, entsize;
	int tablesize, tmp, i;
	xfs_mount_t *mp;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	hdr = &leaf->hdr;
	mp = args->trans->t_mountp;
	ASSERT((be16_to_cpu(hdr->count) > 0)
		&& (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8)));
	ASSERT((args->index >= 0)
		&& (args->index < be16_to_cpu(hdr->count)));
	ASSERT(be16_to_cpu(hdr->firstused) >=
	       ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
	entry = &leaf->entries[args->index];
	ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused));
	ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));

	/*
	 * Scan through free region table:
	 *    check for adjacency of free'd entry with an existing one,
	 *    find smallest free region in case we need to replace it,
	 *    adjust any map that borders the entry table,
	 */
	tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t)
					+ sizeof(xfs_attr_leaf_hdr_t);
	map = &hdr->freemap[0];
	tmp = be16_to_cpu(map->size);
	before = after = -1;
	smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
	entsize = xfs_attr_leaf_entsize(leaf, args->index);
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
		ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
		ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
		if (be16_to_cpu(map->base) == tablesize) {
			be16_add(&map->base,
				 -((int)sizeof(xfs_attr_leaf_entry_t)));
			be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t));
		}

		if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
				== be16_to_cpu(entry->nameidx)) {
			before = i;
		} else if (be16_to_cpu(map->base)
			== (be16_to_cpu(entry->nameidx) + entsize)) {
			after = i;
		} else if (be16_to_cpu(map->size) < tmp) {
			tmp = be16_to_cpu(map->size);
			smallest = i;
		}
	}

	/*
	 * Coalesce adjacent freemap regions,
	 * or replace the smallest region.
	 */
	if ((before >= 0) || (after >= 0)) {
		if ((before >= 0) && (after >= 0)) {
			map = &hdr->freemap[before];
			be16_add(&map->size, entsize);
			be16_add(&map->size,
				 be16_to_cpu(hdr->freemap[after].size));
			hdr->freemap[after].base = 0;
			hdr->freemap[after].size = 0;
		} else if (before >= 0) {
			map = &hdr->freemap[before];
			be16_add(&map->size, entsize);
		} else {
			map = &hdr->freemap[after];
			/* both on-disk, don't endian flip twice */
			map->base = entry->nameidx;
			be16_add(&map->size, entsize);
		}
	} else {
		/*
		 * Replace smallest region (if it is smaller than free'd entry)
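The freemap scan at the heart of xfs_attr_leaf_remove() tries to grow an existing free region that touches the freed name/value range, and otherwise remembers the smallest of the three map slots as a replacement candidate. A simplified user-space sketch of that bookkeeping follows, added for illustration; the layout, the free_region() helper and the example offsets are hypothetical, and endian conversion is omitted.

/* Illustrative user-space sketch -- not part of xfs_attr_leaf.c. */
#include <stdio.h>

#define MAPSIZE	3		/* XFS_ATTR_LEAF_MAPSIZE is 3 */

struct freemap {
	int base;
	int size;
};

static void free_region(struct freemap *map, int base, int size)
{
	int before = -1, after = -1, smallest = 0, i;

	for (i = 0; i < MAPSIZE; i++) {
		if (map[i].base + map[i].size == base)
			before = i;		/* freed range follows this region */
		else if (map[i].base == base + size)
			after = i;		/* freed range precedes this region */
		else if (map[i].size < map[smallest].size)
			smallest = i;		/* replacement candidate */
	}

	if (before >= 0) {
		map[before].size += size;	/* grow the earlier region */
		if (after >= 0) {		/* bridge two regions into one */
			map[before].size += map[after].size;
			map[after].base = map[after].size = 0;
		}
	} else if (after >= 0) {
		map[after].base = base;		/* grow the later region downward */
		map[after].size += size;
	} else if (size > map[smallest].size) {
		map[smallest].base = base;	/* replace the smallest region */
		map[smallest].size = size;
	}
}

int main(void)
{
	struct freemap map[MAPSIZE] = { { 96, 32 }, { 200, 16 }, { 400, 8 } };
	int i;

	free_region(map, 128, 24);		/* adjacent to the first region */
	for (i = 0; i < MAPSIZE; i++)
		printf("map[%d]: base=%d size=%d\n", i, map[i].base, map[i].size);
	return 0;
}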
