📄 xfs_trans_buf.c
字号:
} }
	/*
	 * We never locked this buf ourselves, so we shouldn't
	 * brelse it either. Just get out.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
		*bpp = NULL;
		return XFS_ERROR(EIO);
	}

	/*
	 * Recursive read of a buffer already held by this transaction:
	 * just bump the per-transaction recursion count and hand it back.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	bip->bli_recur++;
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	xfs_buf_item_trace("READ RECUR", bip);
	*bpp = bp;
	return 0;
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		*bpp = NULL;
		return 0;
	}
	if (XFS_BUF_GETERROR(bp) != 0) {
		/* Read failed: stale the buffer so it is not reused as-is. */
		XFS_BUF_SUPER_STALE(bp);
		xfs_buftrace("READ ERROR", bp);
		error = XFS_BUF_GETERROR(bp);
		xfs_ioerror_alert("xfs_trans_read_buf", mp, bp, blkno);
		/*
		 * A metadata read error inside an already-dirty transaction
		 * cannot be backed out, so force the filesystem down.
		 */
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);
		return error;
	}
#ifdef DEBUG
	/*
	 * Error-injection hook: on the configured target, periodically
	 * (every xfs_error_mod requests) simulate a fatal read error.
	 */
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
					SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				cmn_err(CE_DEBUG, "Returning trans error!\n");
				return XFS_ERROR(EIO);
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t*)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buftrace("TRANS READ", bp);
	xfs_buf_item_trace("READ", bip);
	*bpp = bp;
	return 0;

shutdown_abort:
	/*
	 * the theory here is that buffer is good but we're
	 * bailing out because the filesystem is being forcibly
	 * shut down.  So we should leave the b_flags alone since
	 * the buffer's not staled and just get out.
	 */
#if defined(DEBUG)
	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
	/* A buffer must never be both stale and delayed-write here. */
	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
				     (XFS_B_STALE|XFS_B_DELWRI));

	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return XFS_ERROR(EIO);
}

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
*/
void
xfs_trans_brelse(xfs_trans_t *tp,
		 xfs_buf_t *bp)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_t		*lip;
	xfs_log_item_desc_t	*lidp;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
		/*
		 * If there's a buf log item attached to the buffer,
		 * then let the AIL know that the buffer is being
		 * unlocked.
		 */
		if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
			lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
			if (lip->li_type == XFS_LI_BUF) {
				bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
				xfs_trans_unlocked_item(
						bip->bli_item.li_mountp, lip);
			}
		}
		xfs_buf_relse(bp);
		return;
	}

	/* The buffer must belong to this transaction and be a buf item. */
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * Find the item descriptor pointing to this buffer's
	 * log item.  It must be there.
	 */
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
	ASSERT(lidp != NULL);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		xfs_buf_item_trace("RELSE RECUR", bip);
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (lidp->lid_flags & XFS_LID_DIRTY) {
		xfs_buf_item_trace("RELSE DIRTY", bip);
		return;
	}

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("RELSE STALE", bip);
		return;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_buf_item_trace("RELSE", bip);

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_free_item(tp, lidp);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
		/* bip is freed by xfs_buf_item_relse(); NULL it so the
		 * AIL notification below is skipped. */
		bip = NULL;
	}
	XFS_BUF_SET_FSPRIVATE2(bp, NULL);

	/*
	 * If we've still got a buf log item on the buffer, then
	 * tell the AIL that the buffer is being unlocked.
	 */
	if (bip != NULL) {
		xfs_trans_unlocked_item(bip->bli_item.li_mountp,
					(xfs_log_item_t*)bip);
	}

	xfs_buf_relse(bp);
	return;
}

/*
 * Add the locked buffer to the transaction.
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
void
xfs_trans_bjoin(xfs_trans_t *tp,
		xfs_buf_t *bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
*/
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buf_item_trace("BJOIN", bip);
}

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t *tp,
		xfs_buf_t *bp)
{
	xfs_buf_log_item_t	*bip;

	/* Buffer must be locked and owned by this transaction. */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/* Flag the item so IOP_UNLOCK() leaves the buffer locked. */
	bip->bli_flags |= XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD", bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t *tp,
			xfs_buf_t *bp)
{
	xfs_buf_log_item_t	*bip;

	/* Same preconditions as xfs_trans_bhold(), plus the hold flag
	 * must actually be set. */
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD RELEASE", bip);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t *tp,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -