⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_qm.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi(NULL, qip, lblkno,
				  maxlblkcnt - lblkno,
				  XFS_BMAPI_METADATA,
				  NULL,
				  0, map, &nmaps, NULL, NULL);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			/* Holes in the quota file contain no dquots. */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			/* First dquot id stored in this extent. */
			firstid = (xfs_dqid_t) map[i].br_startoff *
				XFS_QM_DQPERBLK(mp);
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_baread(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       (int)XFS_QI_DQCHUNKLEN(mp));
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			if ((error = xfs_qm_dqiter_bufs(mp,
						       firstid,
						       map[i].br_startblock,
						       map[i].br_blockcount,
						       flags))) {
				break;
			}
		}

		if (error)
			break;
	} while (nmaps > 0);

	kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));

	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 * Given the inode, and a dquot (either USR or GRP, doesn't matter),
 * this updates its incore copy as well as the buffer copy. This is
 * so that once the quotacheck is done, we can just log all the buffers,
 * as opposed to logging numerous updates to individual dquots.
 *
 * Caller must hold the dquot lock (asserted below).
 */
STATIC void
xfs_qm_quotacheck_dqadjust(
	xfs_dquot_t		*dqp,
	xfs_qcnt_t		nblks,	/* regular (non-realtime) blocks */
	xfs_qcnt_t		rtblks)	/* realtime blocks */
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 */
	if (! XFS_IS_SUSER_DQUOT(dqp)) {
		xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
		xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
	}

	/* Mark dirty so the later bulk flush writes this dquot out. */
	dqp->dq_flags |= XFS_DQ_DIRTY;
}

/*
 * Count the realtime blocks allocated to an inode by summing the block
 * counts of all the extent records in its data fork. Returns 0 on success
 * and the total in *O_rtblks, or an error from reading in the extents.
 */
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)	/* out: total realtime blocks */
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	/* Make sure the in-core extent list is populated before walking it. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* on-disk inode pointer (not used) */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_dquot_t	*udqp, *gdqp;
	xfs_qcnt_t	nblks, rtblks;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	/* di_mode == 0 means the inode is free/unlinked; nothing to count. */
	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * Obtain the locked dquots. In case of an error (eg. allocation
	 * fails for ENOSPC), we return the negative of the error number
	 * to bulkstat, so that it can get propagated to quotacheck() and
	 * making us disable quotas for the file system.
	 */
	if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
		xfs_iput(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_GIVEUP;
		return error;
	}

	rtblks = 0;
	if (! XFS_IS_REALTIME_INODE(ip)) {
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
	} else {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
			xfs_iput(ip, XFS_ILOCK_EXCL);
			if (udqp)
				xfs_qm_dqput(udqp);
			if (gdqp)
				xfs_qm_dqput(gdqp);
			*res = BULKSTAT_RV_GIVEUP;
			return error;
		}
		/* di_nblocks covers both; subtract rt to get regular blocks. */
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
	}
	ASSERT(ip->i_delayed_blks == 0);

	/*
	 * We can't release the inode while holding its dquot locks.
	 * The inode can go into inactive and might try to acquire the dquot
	 * locks. So, just unlock here and do a vn_rele at the end.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(udqp);
		xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
		xfs_qm_dqput(udqp);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gdqp);
		xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
		xfs_qm_dqput(gdqp);
	}
	/*
	 * Now release the inode. This will send it to 'inactive', and
	 * possibly even free blocks.
	 */
	VN_RELE(XFS_ITOV(ip));
	/*
	 * Goto next inode.
	 */
	*res = BULKSTAT_RV_DIDONE;
	return 0;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);

	cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if ((uip = XFS_QI_UQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if ((gip = XFS_QI_GQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		if ((error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust, NULL,
				     structsz, NULL, BULKSTAT_FG_IGET, &done)))
			break;
	} while (! done);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
		goto error_return;
	}
	/*
	 * We've made all the changes that we need to make incore.
	 * Now flush_them down to disk buffers.
	 */
	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	XFS_bflush(mp->m_ddev_targp);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");

 error_return:
	if (error) {
		cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
			"Disabling quotas.",
			mp->m_fsname, error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		(void)xfs_mount_reset_sbqflags(mp);
	} else {
		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
	}
	return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip, 0)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip, 0))) {
				if (uip)
					VN_RELE(XFS_ITOV(uip));
				return XFS_ERROR(error);
			}
		}
	} else {
		/* Pre-quota superblock version: upgrade and log quota fields. */
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				VN_RELE(XFS_ITOV(uip));
			return XFS_ERROR(error);
		}
	}

	XFS_QI_UQIP(mp) = uip;
	XFS_QI_GQIP(mp) = gip;

	return 0;
}

/*
 * Traverse the freelist of dquots and attempt to reclaim a maximum of
 * 'howmany' dquots. This operation races with dqlookup(), and attempts to
 * favor the lookup function ...
 * XXXsup merge this with qm_reclaim_one().
 */
STATIC int
xfs_qm_shake_freelist(
	int howmany)	/* max number of dquots to reclaim */
{
	int		nreclaimed;
	xfs_dqhash_t	*hash;
	xfs_dquot_t	*dqp, *nextdqp;
	int		restarts;
	int		nflushes;

	if (howmany <= 0)
		return 0;

	nreclaimed = 0;
	restarts = 0;
	nflushes = 0;

#ifdef QUOTADEBUG
	cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
#endif
	/* lock order is : hashchainlock, freelistlock, mplistlock */
 tryagain:
	xfs_qm_freelist_lock(xfs_Gqm);

	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
	     ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
	      nreclaimed < howmany); ) {
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			xfs_dqunlock(dqp);
			xfs_qm_freelist_unlock(xfs_Gqm);
			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
				return nreclaimed;
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			goto tryagain;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			nextdqp = dqp->dq_flnext;
			goto off_freelist;
		}

		ASSERT(dqp->MPL_PREVP);
		/*
		 * Try to grab the flush lock. If this dquot is in the process of
		 * getting flushed to disk, we don't want to reclaim it.
		 */
		if (! xfs_qm_dqflock_nowait(dqp)) {
			xfs_dqunlock(dqp);
			dqp = dqp->dq_flnext;
			continue;
		}

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -