⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 xfs_qm.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 5 页
字号:
	 */
	/* Don't block on locks when called for background flushing. */
	nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);

 again:
	xfs_qm_mplist_lock(mp);
	/*
	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
	 * when we have the mplist lock, we know that dquots will be consistent
	 * as long as we have it locked.
	 */
	if (! XFS_IS_QUOTA_ON(mp)) {
		xfs_qm_mplist_unlock(mp);
		return (0);
	}
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		/*
		 * If this is vfs_sync calling, then skip the dquots that
		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
		 * This is very similar to what xfs_sync does with inodes.
		 */
		if (flags & SYNC_BDFLUSH) {
			if (! XFS_DQ_IS_DIRTY(dqp))
				continue;
		}
		if (nowait) {
			/*
			 * Try to acquire the dquot lock. We are NOT out of
			 * lock order, but we just don't want to wait for this
			 * lock, unless somebody wanted us to.
			 */
			if (! xfs_qm_dqlock_nowait(dqp))
				continue;
		} else {
			xfs_dqlock(dqp);
		}
		/*
		 * Now, find out for sure if this dquot is dirty or not.
		 */
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/* XXX a sentinel would be better */
		/*
		 * Snapshot the reclaim counter so we can detect whether the
		 * mplist changed while we dropped its lock for the flush.
		 */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			if (nowait) {
				xfs_dqunlock(dqp);
				continue;
			}
			/*
			 * If we can't grab the flush lock then if the caller
			 * really wanted us to give this our best shot,
			 * see if we can give a push to the buffer before we wait
			 * on the flush lock. At this point, we know that
			 * eventhough the dquot is being flushed,
			 * it has (new) dirty data.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write
		 */
		flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
		xfs_qm_mplist_unlock(mp);
		xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
		error = xfs_qm_dqflush(dqp, flush_flags);
		xfs_dqunlock(dqp);
		if (error && XFS_FORCED_SHUTDOWN(mp))
			return(0);	/* Need to prevent umount failure */
		else if (error)
			return (error);

		xfs_qm_mplist_lock(mp);
		/*
		 * If a reclaim happened while the mplist lock was dropped,
		 * the list may have changed under us: restart the walk from
		 * the top, but give up after XFS_QM_SYNC_MAX_RESTARTS tries
		 * to avoid livelocking against a steady stream of reclaims.
		 */
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
				break;

			xfs_qm_mplist_unlock(mp);
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	return (0);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Tell XQM that we exist as soon as possible.
	 */
	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
		return (error);
	}

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		/* Undo the allocation and drop the back-pointer on failure. */
		kmem_free(qinf, sizeof(xfs_quotainfo_t));
		mp->m_quotainfo = NULL;
		return (error);
	}

	spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin");
	xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
	qinf->qi_dqreclaims = 0;

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	/* dquots per chunk = chunk size in bytes / on-disk dquot size */
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 */
	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
			     XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			     (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
				XFS_DQ_PROJ),
			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
			     &dqp);
	if (! error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		/*
		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
		 * we don't want this dquot cached. We haven't done a
		 * quotacheck yet, and quotacheck doesn't like incore dquots.
		 */
		xfs_qm_dqdestroy(dqp);
	} else {
		/* No id-0 dquot to read defaults from; use compile-time defaults. */
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	return (0);
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	ASSERT(xfs_Gqm != NULL);

	/*
	 * Release the reference that XQM kept, so that we know
	 * when the XQM structure should be freed. We cannot assume
	 * that xfs_Gqm is non-null after this point.
	 */
	xfs_qm_rele_quotafs_ref(mp);

	spinlock_destroy(&qi->qi_pinlock);
	xfs_qm_list_destroy(&qi->qi_dqlist);

	if (qi->qi_uquotaip) {
		XFS_PURGE_INODE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		XFS_PURGE_INODE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi, sizeof(xfs_quotainfo_t));
	mp->m_quotainfo = NULL;
}

/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */

/* ARGSUSED */
/* Initialize an (empty) dquot hash list; 'n' is unused here. */
STATIC void
xfs_qm_list_init(
	xfs_dqlist_t	*list,
	char		*str,
	int		n)
{
	mutex_init(&list->qh_lock, MUTEX_DEFAULT, str);
	list->qh_next = NULL;
	list->qh_version = 0;
	list->qh_nelems = 0;
}

/* Tear down a dquot hash list initialized by xfs_qm_list_init(). */
STATIC void
xfs_qm_list_destroy(
	xfs_dqlist_t	*list)
{
	mutex_destroy(&(list->qh_lock));
}

/*
 * Stripped down version of dqattach. This doesn't attach, or even look at the
 * dquots attached to the inode. The rationale is that there won't be any
 * attached at the time this is called from quotacheck.
 *
 * On success both returned dquots (when non-NULL) are locked.
 */
STATIC int
xfs_qm_dqget_noattach(
	xfs_inode_t	*ip,
	xfs_dquot_t	**O_udqpp,
	xfs_dquot_t	**O_gdqpp)
{
	int		error;
	xfs_mount_t	*mp;
	xfs_dquot_t	*udqp, *gdqp;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	mp = ip->i_mount;
	udqp = NULL;
	gdqp = NULL;

	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		/*
		 * We want the dquot allocated if it doesn't exist.
		 */
		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
					 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
					 &udqp))) {
			/*
			 * Shouldn't be able to turn off quotas here.
			 */
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return (error);
		}
		ASSERT(udqp);
	}

	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		/*
		 * Drop the user dquot lock before taking the group/project
		 * dquot lock to respect lock ordering; reacquired below.
		 */
		if (udqp)
			xfs_dqunlock(udqp);
		error = XFS_IS_GQUOTA_ON(mp) ?
				xfs_qm_dqget(mp, ip,
					     ip->i_d.di_gid, XFS_DQ_GROUP,
					     XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					     &gdqp) :
				xfs_qm_dqget(mp, ip,
					     ip->i_d.di_projid, XFS_DQ_PROJ,
					     XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					     &gdqp);
		if (error) {
			if (udqp)
				xfs_qm_dqrele(udqp);
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return (error);
		}
		ASSERT(gdqp);

		/* Reacquire the locks in the right order */
		if (udqp) {
			if (! xfs_qm_dqlock_nowait(udqp)) {
				/*
				 * Couldn't get udqp without blocking: back
				 * out of gdqp and take them in order.
				 */
				xfs_dqunlock(gdqp);
				xfs_dqlock(udqp);
				xfs_dqlock(gdqp);
			}
		}
	}

	*O_udqpp = udqp;
	*O_gdqpp = gdqp;
#ifdef QUOTADEBUG
	if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
	if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
#endif
	return (0);
}

/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	unsigned long s;
	cred_t		zerocr;
	int		committed;

	tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}
	/* Quota inodes are created with an all-zero credential. */
	memset(&zerocr, 0, sizeof(zerocr));

	if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
				   &zerocr, 0, 1, ip, &committed))) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return (error);
	}

	/*
	 * Keep an extra reference to this quota inode. This inode is
	 * locked exclusively and joined to the transaction already.
	 */
	ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
	VN_HOLD(XFS_ITOV((*ip)));

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	s = XFS_SB_LOCK(mp);
	if (flags & XFS_QMOPT_SBVERSION) {
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		unsigned oldv = mp->m_sb.sb_versionnum;
#endif
		ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		cmn_err(CE_NOTE,
			"Old superblock version %x, converting to %x.",
			oldv, mp->m_sb.sb_versionnum);
#endif
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	XFS_SB_UNLOCK(mp, s);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,
				     NULL))) {
		xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
		return (error);
	}
	return (0);
}

/*
 * Zero the counters, timers and warning counts of every on-disk dquot
 * in the given buffer, repairing any dqblk that fails the sanity check.
 * The counts are rebuilt afresh by quotacheck.
 */
STATIC int
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	xfs_buftrace("RESET DQUOTS", bp);
	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	/* Cross-check the precalculated dquots-per-block constant. */
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(XFS_QM_DQPERBLK(mp) == j);
#endif
	ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
	for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find unitialized dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		/* Step to the next dquot within the block. */
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
	return (0);
}

/*
 * Read `blkcnt` quota blocks starting at `bno`, reset the dquot counters
 * in each and queue a delayed write of the buffer.
 * NOTE(review): this function continues past the end of this chunk;
 * `notcommitted` and `incr` are computed but not used in the visible
 * portion — presumably referenced in the non-visible tail; verify there.
 */
STATIC int
xfs_qm_dqiter_bufs(
	xfs_mount_t	*mp,
	xfs_dqid_t	firstid,
	xfs_fsblock_t	bno,
	xfs_filblks_t	blkcnt,
	uint		flags)
{
	xfs_buf_t	*bp;
	int		error;
	int		notcommitted;
	int		incr;
	int		type;

	ASSERT(blkcnt > 0);
	notcommitted = 0;
	incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
		XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
		if (error)
			break;

		(void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_bdwrite(mp, bp);
		/*
		 * goto the next block.
		 */
		bno++;
		firstid += XFS_QM_DQPERBLK(mp);
	}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -