📄 xfs_dquot.c
	}

	/*
	 * Put the dquot at the beginning of the hash-chain and mp's list
	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
	 */
	ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
	dqp->q_hash = h;
	XQM_HASHLIST_INSERT(h, dqp);

	/*
	 * Attach this dquot to this filesystem's list of all dquots,
	 * kept inside the mount structure in m_quotainfo field
	 */
	xfs_qm_mplist_lock(mp);

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);

	xfs_qm_mplist_unlock(mp);
	XFS_DQ_HASH_UNLOCK(h);
 dqret:
	ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
	xfs_dqtrace_entry(dqp, "DQGET DONE");
	*O_dqpp = dqp;
	return (0);
}

/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "DQPUT");

	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
		xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
		xfs_dqunlock(dqp);
		xfs_qm_freelist_lock(xfs_Gqm);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
			/*
			 * insert at end of the freelist.
			 */
			XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}

			/* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
			   "@@@@@++ Free list (after append) @@@@@+");
			 */
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp);
	xfs_dqtrace_entry(dqp, "DQRELE");

	xfs_dqlock(dqp);

	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	xfs_dquot_t		*dqp,
	uint			flags)
{
	xfs_mount_t		*mp;
	xfs_buf_t		*bp;
	xfs_disk_dquot_t	*ddqp;
	int			error;
	SPLDECL(s);

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "DQFLUSH");

	/*
	 * If not dirty, nada.
	 */
	if (!XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqfunlock(dqp);
		return (0);
	}

	/*
	 * Cant flush a pinned dquot. Wait for it.
	 */
	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk!
	 */
	if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) {
		dqp->dq_flags &= ~(XFS_DQ_DIRTY);
		xfs_dqfunlock(dqp);
		return XFS_ERROR(EIO);
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 * We don't need a transaction envelope because we know that the
	 * the ondisk-dquot has already been allocated for.
	 */
	if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
		xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
		ASSERT(error != ENOENT);
		/*
		 * Quotas could have gotten turned off (ESRCH)
		 */
		xfs_dqfunlock(dqp);
		return (error);
	}

	if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
		xfs_force_shutdown(dqp->q_mount, XFS_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~(XFS_DQ_DIRTY);
	mp = dqp->q_mount;

	/* lsn is 64 bits */
	AIL_LOCK(mp, s);
	dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
	AIL_UNLOCK(mp, s);

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *))
			      xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item));

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (XFS_BUF_ISPINNED(bp)) {
		xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE");
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
	}

	if (flags & XFS_QMOPT_DELWRI) {
		xfs_bdwrite(mp, bp);
	} else if (flags & XFS_QMOPT_ASYNC) {
		xfs_bawrite(mp, bp);
	} else {
		error = xfs_bwrite(mp, bp);
	}
	xfs_dqtrace_entry(dqp, "DQFLUSH END");

	/*
	 * dqp is still locked, but caller is free to unlock it now.
	 */
	return (error);
}

/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
/*ARGSUSED*/
STATIC void
xfs_qm_dqflush_done(
	xfs_buf_t		*bp,
	xfs_dq_logitem_t	*qip)
{
	xfs_dquot_t		*dqp;
	SPLDECL(s);

	dqp = qip->qli_dquot;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
	    qip->qli_item.li_lsn == qip->qli_flush_lsn) {

		AIL_LOCK(dqp->q_mount, s);
		/*
		 * xfs_trans_delete_ail() drops the AIL lock.
		 */
		if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
			xfs_trans_delete_ail(dqp->q_mount,
					     (xfs_log_item_t*)qip, s);
		else
			AIL_UNLOCK(dqp->q_mount, s);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

int
xfs_qm_dqflock_nowait(
	xfs_dquot_t *dqp)
{
	int locked;

	locked = cpsema(&((dqp)->q_flock));

	/* XXX ifdef these out */
	if (locked)
		(dqp)->dq_flags |= XFS_DQ_FLOCKED;
	return (locked);
}

int
xfs_qm_dqlock_nowait(
	xfs_dquot_t *dqp)
{
	return (mutex_trylock(&((dqp)->q_qlock)));
}

void
xfs_dqlock(
	xfs_dquot_t *dqp)
{
	mutex_lock(&(dqp->q_qlock), PINOD);
}

void
xfs_dqunlock(
	xfs_dquot_t *dqp)
{
	mutex_unlock(&(dqp->q_qlock));
	if (dqp->q_logitem.qli_dquot == dqp) {
		/* Once was dqp->q_mount, but might just have been cleared */
		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp,
					(xfs_log_item_t*)&(dqp->q_logitem));
	}
}

void
xfs_dqunlock_nonotify(
	xfs_dquot_t *dqp)
{
	mutex_unlock(&(dqp->q_qlock));
}

void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			xfs_dqlock(d2);
			xfs_dqlock(d1);
		} else {
			xfs_dqlock(d1);
			xfs_dqlock(d2);
		}
	} else {
		if (d1) {
			xfs_dqlock(d1);
		} else if (d2) {
			xfs_dqlock(d2);
		}
	}
}

/*
 * Take a dquot out of the mount's dqlist as well as the hashlist.
 * This is called via unmount as well as quotaoff, and the purge
 * will always succeed unless there are soft (temp) references
 * outstanding.
 *
 * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
 * that we're returning! XXXsup - not cool.
 */
/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp,
	uint		flags)
{
	xfs_dqhash_t	*thishash;
	xfs_mount_t	*mp;

	mp = dqp->q_mount;

	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
	 * However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
		return (1);
	}

	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXXIf we're turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
		xfs_dqflock(dqp);
	}
	ASSERT(dqp->q_pincount == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	thishash = dqp->q_hash;
	XQM_HASHLIST_REMOVE(thishash, dqp);
	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);

	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	XFS_DQ_HASH_UNLOCK(thishash);
	return (0);
}

#ifdef QUOTADEBUG
void
xfs_qm_dqprint(xfs_dquot_t *dqp)
{
	cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
	cmn_err(CE_DEBUG, "---- dquotID  =  %d",
		(int)be32_to_cpu(dqp->q_core.d_id));
	cmn_err(CE_DEBUG, "---- type     =  %s", DQFLAGTO_TYPESTR(dqp));
	cmn_err(CE_DEBUG, "---- fs       =  0x%p", dqp->q_mount);
	cmn_err(CE_DEBUG, "---- blkno    =  0x%x", (int) dqp->q_blkno);
	cmn_err(CE_DEBUG, "---- boffset  =  0x%x", (int) dqp->q_bufoffset);
	cmn_err(CE_DEBUG, "---- blkhlimit =  %Lu (0x%x)",
		be64_to_cpu(dqp->q_core.d_blk_hardlimit),
		(int)be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	cmn_err(CE_DEBUG, "---- blkslimit =  %Lu (0x%x)",
		be64_to_cpu(dqp->q_core.d_blk_softlimit),
		(int)be64_to_cpu(dqp->q_core.d_blk_softlimit));
	cmn_err(CE_DEBUG, "---- inohlimit =  %Lu (0x%x)",
		be64_to_cpu(dqp->q_core.d_ino_hardlimit),
		(int)be64_to_cpu(dqp->q_core.d_ino_hardlimit));
	cmn_err(CE_DEBUG, "---- inoslimit =  %Lu (0x%x)",
		be64_to_cpu(dqp->q_core.d_ino_softlimit),
		(int)be64_to_cpu(dqp->q_core.d_ino_softlimit));
	cmn_err(CE_DEBUG, "---- bcount   =  %Lu (0x%x)",
		be64_to_cpu(dqp->q_core.d_bcount),
		(int)be64_to_cpu(dqp->q_core.d_bcount));
	cmn_err(CE_DEBUG, "---- icount   =  %Lu (0x%x)",
		be64_to_cpu(dqp->q_core.d_icount),
		(int)be64_to_cpu(dqp->q_core.d_icount));
	cmn_err(CE_DEBUG, "---- btimer   =  %d",
		(int)be32_to_cpu(dqp->q_core.d_btimer));
	cmn_err(CE_DEBUG, "---- itimer   =  %d",
		(int)be32_to_cpu(dqp->q_core.d_itimer));
	cmn_err(CE_DEBUG, "---------------------------");
}
#endif

/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 */
void
xfs_qm_dqflock_pushbuf_wait(
	xfs_dquot_t	*dqp)
{
	xfs_buf_t	*bp;

	/*
	 * Check to see if the dquot has been flushed delayed
	 * write. If so, grab its buffer and send it
	 * out immediately. We'll be able to acquire
	 * the flush lock when the I/O completes.
	 */
	bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
			XFS_QI_DQCHUNKLEN(dqp->q_mount),
			XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(dqp->q_mount,
					      (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			xfs_bawrite(dqp->q_mount, bp);
		} else {
			xfs_buf_relse(bp);
		}
	}
	xfs_dqflock(dqp);
}
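Side note on the locking idiom: xfs_dqlock2() above avoids deadlock between two dquots by always taking the lock of the dquot with the lower quota id first, on every code path. The sketch below is a minimal, standalone illustration of that ordered-locking technique only; it is not part of xfs_dquot.c or of XFS, struct fake_dquot and fake_dqlock2() are invented names, and POSIX mutexes stand in for the kernel's q_qlock.

/* Standalone sketch of the "lock both in id order" idiom. */
#include <pthread.h>
#include <stdint.h>

struct fake_dquot {
	uint32_t	id;	/* stands in for q_core.d_id */
	pthread_mutex_t	lock;	/* stands in for q_qlock */
};

static void
fake_dqlock2(struct fake_dquot *d1, struct fake_dquot *d2)
{
	if (d1 && d2) {
		/*
		 * Lower id first on every path, so two threads locking the
		 * same pair can never each hold one lock while waiting for
		 * the other.
		 */
		if (d1->id > d2->id) {
			pthread_mutex_lock(&d2->lock);
			pthread_mutex_lock(&d1->lock);
		} else {
			pthread_mutex_lock(&d1->lock);
			pthread_mutex_lock(&d2->lock);
		}
	} else if (d1) {
		pthread_mutex_lock(&d1->lock);
	} else if (d2) {
		pthread_mutex_lock(&d2->lock);
	}
}

int main(void)
{
	struct fake_dquot a = { .id = 7,  .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_dquot b = { .id = 42, .lock = PTHREAD_MUTEX_INITIALIZER };

	fake_dqlock2(&a, &b);		/* takes a (id 7) first, then b */
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

Because every caller orders the two acquisitions the same way, the pairwise lock acquisition is deadlock-free regardless of which dquot each caller names first.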