
📄 xfs_alloc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
 * Add an allocation trace entry for a free call.
 */
STATIC void
xfs_alloc_trace_free(
	const char	*name,		/* function tag string */
	char		*str,		/* additional string */
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* a.g. relative block number */
	xfs_extlen_t	len,		/* length of extent */
	int		isfl,		/* set if is freelist allocation/free */
	int		line)		/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)mp,
		(void *)(__psunsigned_t)agno,
		(void *)(__psunsigned_t)agbno,
		(void *)(__psunsigned_t)len,
		(void *)(__psint_t)isfl,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}

/*
 * Add an allocation trace entry for modifying an agf.
 */
STATIC void
xfs_alloc_trace_modagf(
	const char	*name,		/* function tag string */
	char		*str,		/* additional string */
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_agf_t	*agf,		/* new agf value */
	int		flags,		/* logging flags for agf */
	int		line)		/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		(void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)mp,
		(void *)(__psint_t)flags,
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
		(void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
}

STATIC void
xfs_alloc_trace_busy(
	const char	*name,		/* function tag string */
	char		*str,		/* additional string */
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* a.g. relative block number */
	xfs_extlen_t	len,		/* length of extent */
	int		slot,		/* perag Busy slot */
	xfs_trans_t	*tp,
	int		trtype,		/* type: add, delete, search */
	int		line)		/* source line number */
{
	ktrace_enter(xfs_alloc_trace_buf,
		(void *)(__psint_t)(trtype | (line << 16)),
		(void *)name,
		(void *)str,
		(void *)mp,
		(void *)(__psunsigned_t)agno,
		(void *)(__psunsigned_t)agbno,
		(void *)(__psunsigned_t)len,
		(void *)(__psint_t)slot,
		(void *)tp,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
#endif	/* XFS_ALLOC_TRACE */

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (error)
		return error;
	/*
	 * If the allocation worked, need to change the agf structure
	 * (and log it), and the superblock.
	 */
	if (args->agbno != NULLAGBLOCK) {
		xfs_agf_t	*agf;	/* allocation group freelist header */
#ifdef XFS_ALLOC_TRACE
		xfs_mount_t	*mp = args->mp;
#endif
		long		slen = (long)args->len;

		ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
		ASSERT(!(args->wasfromfl) || !args->isfl);
		ASSERT(args->agbno % args->alignment == 0);
		if (!(args->wasfromfl)) {
			agf = XFS_BUF_TO_AGF(args->agbp);
			be32_add(&agf->agf_freeblks, -(args->len));
			xfs_trans_agblocks_delta(args->tp,
						 -((long)(args->len)));
			args->pag->pagf_freeblks -= args->len;
			ASSERT(be32_to_cpu(agf->agf_freeblks) <=
				be32_to_cpu(agf->agf_length));
			TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
			xfs_alloc_log_agf(args->tp, args->agbp,
						XFS_AGF_FREEBLKS);
			/* search the busylist for these blocks */
			xfs_alloc_search_busy(args->tp, args->agno,
					args->agbno, args->len);
		}
		if (!args->isfl)
			xfs_trans_mod_sb(args->tp,
				args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
					XFS_TRANS_SB_FDBLOCKS, -slen);
		XFS_STATS_INC(xs_allocx);
		XFS_STATS_ADD(xs_allocb, args->len);
	}
	return 0;
}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	xfs_agblock_t	end;	/* end of allocated extent */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_agblock_t	fend;	/* end block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	int		i;	/* success/failure of operation */
	xfs_agblock_t	maxend;	/* end of maximal extent */
	xfs_agblock_t	minend;	/* end of minimal extent */
	xfs_extlen_t	rlen;	/* length of returned extent */

	ASSERT(args->alignment == 1);
	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO, NULL, 0);
	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find it, return null.
		 */
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Grab the freespace record.
	 */
	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);
	minend = args->agbno + args->minlen;
	maxend = args->agbno + args->maxlen;
	fend = fbno + flen;
	/*
	 * Give up if the freespace isn't long enough for the minimum request.
	 */
	if (fend < minend) {
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 */
	end = XFS_AGBLOCK_MIN(fend, maxend);
	/*
	 * Fix the length according to mod and prod if given.
	 */
	args->len = end - args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	ASSERT(args->agbno + rlen <= fend);
	end = args->agbno + rlen;
	/*
	 * We are allocating agbno for rlen [agbno .. end]
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, NULL, 0);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	TRACE_ALLOC("normal", args);
	args->wasfromfl = 0;
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	TRACE_ALLOC("error", args);
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena;		/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	/*REFERENCED*/
	xfs_agblock_t	ltend;		/* end bno of left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena;		/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
#if defined(DEBUG) && defined(__KERNEL__)
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = random32() & 1;
#endif
	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT, NULL, 0);
	ltlen = 0;
	bno_cur_lt = bno_cur_gt = NULL;
	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;
	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#if defined(DEBUG) && defined(__KERNEL__)
		if (!dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_alloc_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_alloc_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if (!xfs_alloc_compute_aligned(ltbno, ltlen,
					args->alignment, args->minlen,
					&ltbnoa, &ltlena))
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbno, ltlen, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		ltend = ltbno + ltlen;
		ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			TRACE_ALLOC("nominleft", args);
			return 0;
		}
		blen = args->len;
		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltend);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
		TRACE_ALLOC("first", args);
		return 0;
	}
