
xfs_ialloc.c
Linux kernel source code (C)
Page 1 of 3
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"

/*
 * Log specified fields for the inode given by bp and off.
 */
STATIC void
xfs_ialloc_log_di(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* inode buffer */
	int		off,		/* index of inode in buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			ioffset;	/* off in bytes */
	int			last;		/* last byte number */
	xfs_mount_t		*mp;		/* mount point structure */
	static const short	offsets[] = {	/* field offsets */
						/* keep in sync with bits */
		offsetof(xfs_dinode_core_t, di_magic),
		offsetof(xfs_dinode_core_t, di_mode),
		offsetof(xfs_dinode_core_t, di_version),
		offsetof(xfs_dinode_core_t, di_format),
		offsetof(xfs_dinode_core_t, di_onlink),
		offsetof(xfs_dinode_core_t, di_uid),
		offsetof(xfs_dinode_core_t, di_gid),
		offsetof(xfs_dinode_core_t, di_nlink),
		offsetof(xfs_dinode_core_t, di_projid),
		offsetof(xfs_dinode_core_t, di_pad),
		offsetof(xfs_dinode_core_t, di_atime),
		offsetof(xfs_dinode_core_t, di_mtime),
		offsetof(xfs_dinode_core_t, di_ctime),
		offsetof(xfs_dinode_core_t, di_size),
		offsetof(xfs_dinode_core_t, di_nblocks),
		offsetof(xfs_dinode_core_t, di_extsize),
		offsetof(xfs_dinode_core_t, di_nextents),
		offsetof(xfs_dinode_core_t, di_anextents),
		offsetof(xfs_dinode_core_t, di_forkoff),
		offsetof(xfs_dinode_core_t, di_aformat),
		offsetof(xfs_dinode_core_t, di_dmevmask),
		offsetof(xfs_dinode_core_t, di_dmstate),
		offsetof(xfs_dinode_core_t, di_flags),
		offsetof(xfs_dinode_core_t, di_gen),
		offsetof(xfs_dinode_t, di_next_unlinked),
		offsetof(xfs_dinode_t, di_u),
		offsetof(xfs_dinode_t, di_a),
		sizeof(xfs_dinode_t)
	};

	ASSERT(offsetof(xfs_dinode_t, di_core) == 0);
	ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0);
	mp = tp->t_mountp;
	/*
	 * Get the inode-relative first and last bytes for these fields
	 */
	xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last);
	/*
	 * Convert to buffer offsets and log it.
	 */
	ioffset = off << mp->m_sb.sb_inodelog;
	first += ioffset;
	last += ioffset;
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	int		blks_per_cluster;  /* fs blocks per inode cluster */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	xfs_daddr_t	d;		/* disk addr of buffer */
	xfs_agnumber_t	agno;
	int		error;
	xfs_buf_t	*fbuf;		/* new free inodes' buffer */
	xfs_dinode_t	*free;		/* new free inode structure */
	int		i;		/* inode counter */
	int		j;		/* block counter */
	int		nbufs;		/* num bufs of new inodes */
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	int		ninodes;	/* num inodes per buf */
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		version;	/* inode version number to use */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */

	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = XFS_IALLOC_INODES(args.mp);
	if (args.mp->m_maxicount &&
	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
		return XFS_ERROR(ENOSPC);
	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = XFS_BUF_TO_AGI(agbp);
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
			XFS_IALLOC_BLOCKS(args.mp);
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		args.alignment = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	} else
		args.fsbno = NULLFSBLOCK;

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (args.mp->m_sinoalign) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
			   args.mp->m_sb.sb_inoalignmt >=
			   XFS_B_TO_FSBT(args.mp,
				XFS_INODE_CLUSTER_SIZE(args.mp)))
				args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
			args.mp->m_sb.sb_inoalignmt >=
			XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
				args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);
	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	/*
	 * Loop over the new block(s), filling in the inodes.
	 * For small block sizes, manipulate the inodes in buffers
	 * which are multiples of the blocks size.
	 */
	if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
		blks_per_cluster = 1;
		nbufs = (int)args.len;
		ninodes = args.mp->m_sb.sb_inopblock;
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
				   args.mp->m_sb.sb_blocksize;
		nbufs = (int)args.len / blks_per_cluster;
		ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
	}
	/*
	 * Figure out what version number to use in the inodes we create.
	 * If the superblock version has caught up to the one that supports
	 * the new inode format, then use the new inode version.  Otherwise
	 * use the old version so that old kernels will continue to be
	 * able to use the file system.
	 */
	if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb))
		version = XFS_DINODE_VERSION_2;
	else
		version = XFS_DINODE_VERSION_1;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
				     args.agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
					 args.mp->m_bsize * blks_per_cluster,
					 XFS_BUF_LOCK);
		ASSERT(fbuf);
		ASSERT(!XFS_BUF_GETERROR(fbuf));
		/*
		 * Set initial values for the inodes in this buffer.
		 */
		xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
		for (i = 0; i < ninodes; i++) {
			free = XFS_MAKE_IPTR(args.mp, fbuf, i);
			free->di_core.di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_core.di_version = version;
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);
			xfs_ialloc_log_di(tp, fbuf, i,
				XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
		}
		xfs_trans_inode_alloc_buf(tp, fbuf);
	}
	be32_add(&agi->agi_count, newlen);
	be32_add(&agi->agi_freecount, newlen);
	agno = be32_to_cpu(agi->agi_seqno);
	down_read(&args.mp->m_peraglock);
	args.mp->m_perag[agno].pagi_freecount += newlen;
	up_read(&args.mp->m_peraglock);
	agi->agi_newino = cpu_to_be32(newino);
	/*
	 * Insert records describing the new inode chunk into the btree.
	 */
	cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno,
			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		if ((error = xfs_inobt_lookup_eq(cur, thisino,
				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		if ((error = xfs_inobt_insert(cur, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}

STATIC_INLINE xfs_agnumber_t
xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;

	spin_lock(&mp->m_agirotor_lock);
	agno = mp->m_agirotor;
	if (++mp->m_agirotor == mp->m_maxagi)
		mp->m_agirotor = 0;
	spin_unlock(&mp->m_agirotor_lock);

	return agno;
}

/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and then mode.  Return the allocation group buffer.
 */
STATIC xfs_buf_t *			/* allocation group buffer */
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	mode_t		mode,		/* bits set to indicate file type */
	int		okalloc)	/* ok to allocate more space */
{
	xfs_buf_t	*agbp;		/* allocation group header buffer */
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = xfs_ialloc_next_ag(mp);
	else {
		pagno = XFS_INO_TO_AGNO(mp, parent);
		if (pagno >= agcount)
			pagno = 0;
	}
	ASSERT(pagno < agcount);

	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it.  Note we don't look for free inodes, exactly.
	 * Instead, we include whether there is a need to allocate inodes
	 * to mean that blocks must be allocated for them,
	 * if none are currently free.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	down_read(&mp->m_peraglock);
	for (;;) {
		pag = &mp->m_perag[agno];
		if (!pag->pagi_init) {
			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				goto nextag;
			}
		} else
			agbp = NULL;

		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto unlock_nextag;
		}

		/*
		 * Is there enough free space for the file plus a block
		 * of inodes (if we need to allocate some)?
		 */
		ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
		if (ineed && !pag->pagf_init) {
			if (agbp == NULL &&
			    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				goto nextag;
			}
			(void)xfs_alloc_pagf_init(mp, tp, agno, flags);
		}
		if (!ineed || pag->pagf_init) {
			if (ineed && !(longest = pag->pagf_longest))
				longest = pag->pagf_flcount > 0;
			if (!ineed ||
			    (pag->pagf_freeblks >= needspace + ineed &&
			     longest >= ineed &&
			     okalloc)) {
				if (agbp == NULL &&
				    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
					agbp = NULL;
					goto nextag;
				}
				up_read(&mp->m_peraglock);
				return agbp;
			}
		}
unlock_nextag:
		if (agbp)
			xfs_trans_brelse(tp, agbp);
nextag:
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			up_read(&mp->m_peraglock);
			return NULL;
		}
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0) {
				up_read(&mp->m_peraglock);
				return NULL;
			}
			flags = 0;
		}
	}
}

/*
 * Visible inode allocation functions.
 */
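For reference, the field-logging pattern used by xfs_ialloc_log_di() above (a bitmask of dirty fields reduced to one contiguous byte range via an offsets[] table that ends in a sizeof sentinel) can be shown with a small standalone sketch. Everything below (struct demo_inode, demo_offsets, demo_field_range) is hypothetical illustration code, not kernel code; it only mimics what xfs_btree_offsets() is being used for in that function.

#include <stddef.h>
#include <stdio.h>

/* A made-up on-disk record standing in for xfs_dinode_core_t. */
struct demo_inode {
	unsigned short		di_magic;
	unsigned short		di_mode;
	unsigned int		di_uid;
	unsigned int		di_gid;
	unsigned long long	di_size;
};

/* Field offsets, one per bit, plus a sizeof sentinel marking the end. */
static const size_t demo_offsets[] = {
	offsetof(struct demo_inode, di_magic),
	offsetof(struct demo_inode, di_mode),
	offsetof(struct demo_inode, di_uid),
	offsetof(struct demo_inode, di_gid),
	offsetof(struct demo_inode, di_size),
	sizeof(struct demo_inode)
};
#define DEMO_NBITS	5

/*
 * Map a bitmask of dirty fields to the inclusive byte range
 * [*first, *last] covering them, the way xfs_ialloc_log_di() turns
 * XFS_DI_* bits into arguments for xfs_trans_log_buf().
 */
static void
demo_field_range(unsigned fields, size_t *first, size_t *last)
{
	int	lo = -1;
	int	hi = -1;
	int	i;

	for (i = 0; i < DEMO_NBITS; i++) {
		if (fields & (1u << i)) {
			if (lo < 0)
				lo = i;
			hi = i;
		}
	}
	if (lo < 0) {			/* empty mask: nothing to log */
		*first = *last = 0;
		return;
	}
	*first = demo_offsets[lo];
	*last = demo_offsets[hi + 1] - 1;	/* ends just before the next field */
}

int
main(void)
{
	size_t	first, last;

	/* Pretend di_uid (bit 2) and di_size (bit 4) were modified. */
	demo_field_range((1u << 2) | (1u << 4), &first, &last);
	printf("log bytes %zu..%zu\n", first, last);
	return 0;
}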
