📄 xfs_da_btree.c
字号:
} return off == bno + count;}/* * Make a dabuf. * Used for get_buf, read_buf, read_bufr, and reada_buf. */STATIC intxfs_da_do_buf( xfs_trans_t *trans, xfs_inode_t *dp, xfs_dablk_t bno, xfs_daddr_t *mappedbnop, xfs_dabuf_t **bpp, int whichfork, int caller, inst_t *ra){ xfs_buf_t *bp = NULL; xfs_buf_t **bplist; int error=0; int i; xfs_bmbt_irec_t map; xfs_bmbt_irec_t *mapp; xfs_daddr_t mappedbno; xfs_mount_t *mp; int nbplist=0; int nfsb; int nmap; xfs_dabuf_t *rbp; mp = dp->i_mount; nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1; mappedbno = *mappedbnop; /* * Caller doesn't have a mapping. -2 means don't complain * if we land in a hole. */ if (mappedbno == -1 || mappedbno == -2) { /* * Optimize the one-block case. */ if (nfsb == 1) { xfs_fsblock_t fsb; if ((error = xfs_bmapi_single(trans, dp, whichfork, &fsb, (xfs_fileoff_t)bno))) { return error; } mapp = ↦ if (fsb == NULLFSBLOCK) { nmap = 0; } else { map.br_startblock = fsb; map.br_startoff = (xfs_fileoff_t)bno; map.br_blockcount = 1; nmap = 1; } } else { mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP); nmap = nfsb; if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno, nfsb, XFS_BMAPI_METADATA | XFS_BMAPI_AFLAG(whichfork), NULL, 0, mapp, &nmap, NULL, NULL))) goto exit0; } } else { map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno); map.br_startoff = (xfs_fileoff_t)bno; map.br_blockcount = nfsb; mapp = ↦ nmap = 1; } if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) { error = mappedbno == -2 ? 
0 : XFS_ERROR(EFSCORRUPTED); if (unlikely(error == EFSCORRUPTED)) { if (xfs_error_level >= XFS_ERRLEVEL_LOW) { cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n", (long long)bno); cmn_err(CE_ALERT, "dir: inode %lld\n", (long long)dp->i_ino); for (i = 0; i < nmap; i++) { cmn_err(CE_ALERT, "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n", i, (long long)mapp[i].br_startoff, (long long)mapp[i].br_startblock, (long long)mapp[i].br_blockcount, mapp[i].br_state); } } XFS_ERROR_REPORT("xfs_da_do_buf(1)", XFS_ERRLEVEL_LOW, mp); } goto exit0; } if (caller != 3 && nmap > 1) { bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP); nbplist = 0; } else bplist = NULL; /* * Turn the mapping(s) into buffer(s). */ for (i = 0; i < nmap; i++) { int nmapped; mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock); if (i == 0) *mappedbnop = mappedbno; nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount); switch (caller) { case 0: bp = xfs_trans_get_buf(trans, mp->m_ddev_targp, mappedbno, nmapped, 0); error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO); break; case 1: case 2: bp = NULL; error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp, mappedbno, nmapped, 0, &bp); break; case 3: xfs_baread(mp->m_ddev_targp, mappedbno, nmapped); error = 0; bp = NULL; break; } if (error) { if (bp) xfs_trans_brelse(trans, bp); goto exit1; } if (!bp) continue; if (caller == 1) { if (whichfork == XFS_ATTR_FORK) { XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE, XFS_ATTR_BTREE_REF); } else { XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE, XFS_DIR_BTREE_REF); } } if (bplist) { bplist[nbplist++] = bp; } } /* * Build a dabuf structure. */ if (bplist) { rbp = xfs_da_buf_make(nbplist, bplist, ra); } else if (bp) rbp = xfs_da_buf_make(1, &bp, ra); else rbp = NULL; /* * For read_buf, check the magic number. 
*/ if (caller == 1) { xfs_dir2_data_t *data; xfs_dir2_free_t *free; xfs_da_blkinfo_t *info; uint magic, magic1; info = rbp->data; data = rbp->data; free = rbp->data; magic = be16_to_cpu(info->magic); magic1 = be32_to_cpu(data->hdr.magic); if (unlikely( XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && (magic != XFS_ATTR_LEAF_MAGIC) && (magic != XFS_DIR2_LEAF1_MAGIC) && (magic != XFS_DIR2_LEAFN_MAGIC) && (magic1 != XFS_DIR2_BLOCK_MAGIC) && (magic1 != XFS_DIR2_DATA_MAGIC) && (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { xfs_buftrace("DA READ ERROR", rbp->bps[0]); XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", XFS_ERRLEVEL_LOW, mp, info); error = XFS_ERROR(EFSCORRUPTED); xfs_da_brelse(trans, rbp); nbplist = 0; goto exit1; } } if (bplist) { kmem_free(bplist, sizeof(*bplist) * nmap); } if (mapp != &map) { kmem_free(mapp, sizeof(*mapp) * nfsb); } if (bpp) *bpp = rbp; return 0;exit1: if (bplist) { for (i = 0; i < nbplist; i++) xfs_trans_brelse(trans, bplist[i]); kmem_free(bplist, sizeof(*bplist) * nmap); }exit0: if (mapp != &map) kmem_free(mapp, sizeof(*mapp) * nfsb); if (bpp) *bpp = NULL; return error;}/* * Get a buffer for the dir/attr block. */intxfs_da_get_buf( xfs_trans_t *trans, xfs_inode_t *dp, xfs_dablk_t bno, xfs_daddr_t mappedbno, xfs_dabuf_t **bpp, int whichfork){ return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0, (inst_t *)__return_address);}/* * Get a buffer for the dir/attr block, fill in the contents. */intxfs_da_read_buf( xfs_trans_t *trans, xfs_inode_t *dp, xfs_dablk_t bno, xfs_daddr_t mappedbno, xfs_dabuf_t **bpp, int whichfork){ return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1, (inst_t *)__return_address);}/* * Readahead the dir/attr block. 
*/xfs_daddr_txfs_da_reada_buf( xfs_trans_t *trans, xfs_inode_t *dp, xfs_dablk_t bno, int whichfork){ xfs_daddr_t rval; rval = -1; if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3, (inst_t *)__return_address)) return -1; else return rval;}kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */kmem_zone_t *xfs_dabuf_zone; /* dabuf zone *//* * Allocate a dir-state structure. * We don't put them on the stack since they're large. */xfs_da_state_t *xfs_da_state_alloc(void){ return kmem_zone_zalloc(xfs_da_state_zone, KM_SLEEP);}/* * Kill the altpath contents of a da-state structure. */STATIC voidxfs_da_state_kill_altpath(xfs_da_state_t *state){ int i; for (i = 0; i < state->altpath.active; i++) { if (state->altpath.blk[i].bp) { if (state->altpath.blk[i].bp != state->path.blk[i].bp) xfs_da_buf_done(state->altpath.blk[i].bp); state->altpath.blk[i].bp = NULL; } } state->altpath.active = 0;}/* * Free a da-state structure. */voidxfs_da_state_free(xfs_da_state_t *state){ int i; xfs_da_state_kill_altpath(state); for (i = 0; i < state->path.active; i++) { if (state->path.blk[i].bp) xfs_da_buf_done(state->path.blk[i].bp); } if (state->extravalid && state->extrablk.bp) xfs_da_buf_done(state->extrablk.bp);#ifdef DEBUG memset((char *)state, 0, sizeof(*state));#endif /* DEBUG */ kmem_zone_free(xfs_da_state_zone, state);}#ifdef XFS_DABUF_DEBUGxfs_dabuf_t *xfs_dabuf_global_list;lock_t xfs_dabuf_global_lock;#endif/* * Create a dabuf. 
 */
/* ARGSUSED */
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
{
	xfs_buf_t	*bp;
	xfs_dabuf_t	*dabuf;
	int		i;
	int		off;

	/*
	 * Single-buffer dabufs come from a dedicated zone; multi-buffer
	 * ones are sized by XFS_DA_BUF_SIZE(nbuf) to hold the bps array.
	 */
	if (nbuf == 1)
		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_SLEEP);
	else
		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_SLEEP);
	dabuf->dirty = 0;
#ifdef XFS_DABUF_DEBUG
	/* Record creator and identity for the global duplicate check. */
	dabuf->ra = ra;
	dabuf->target = XFS_BUF_TARGET(bps[0]);
	dabuf->blkno = XFS_BUF_ADDR(bps[0]);
#endif
	if (nbuf == 1) {
		/* One buffer: data points straight at the buffer memory. */
		dabuf->nbuf = 1;
		bp = bps[0];
		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
		dabuf->data = XFS_BUF_PTR(bp);
		dabuf->bps[0] = bp;
	} else {
		/*
		 * Several buffers: allocate a contiguous shadow area and
		 * copy each buffer's contents into it, back to back.
		 * NOTE: the loop increment reads bp set by the PREVIOUS
		 * iteration's body, so off advances by the size of the
		 * buffer just copied.
		 */
		dabuf->nbuf = nbuf;
		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
			dabuf->bps[i] = bp = bps[i];
			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
		}
		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
			bp = bps[i];
			memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
				XFS_BUF_COUNT(bp));
		}
	}
#ifdef XFS_DABUF_DEBUG
	{
		SPLDECL(s);
		xfs_dabuf_t	*p;

		/*
		 * Assert no other live dabuf covers the same disk address
		 * on the same target, then link this one onto the global
		 * doubly-linked debug list under the global spinlock.
		 */
		s = mutex_spinlock(&xfs_dabuf_global_lock);
		for (p = xfs_dabuf_global_list; p; p = p->next) {
			ASSERT(p->blkno != dabuf->blkno ||
			       p->target != dabuf->target);
		}
		dabuf->prev = NULL;
		if (xfs_dabuf_global_list)
			xfs_dabuf_global_list->prev = dabuf;
		dabuf->next = xfs_dabuf_global_list;
		xfs_dabuf_global_list = dabuf;
		mutex_spinunlock(&xfs_dabuf_global_lock, s);
	}
#endif
	return dabuf;
}

/*
 * Un-dirty a dabuf: copy the (logged) shadow data back out to each
 * underlying buffer.  Only the multi-buffer case can be dirty; the
 * single-buffer case aliases the buffer memory directly.
 */
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	int		i;
	int		off;

	if (dabuf->dirty) {
		ASSERT(dabuf->nbuf > 1);
		dabuf->dirty = 0;
		/* off advances by the size of the buffer just written. */
		for (i = off = 0; i < dabuf->nbuf;
				i++, off += XFS_BUF_COUNT(bp)) {
			bp = dabuf->bps[i];
			memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
				XFS_BUF_COUNT(bp));
		}
	}
}

/*
 * Release a dabuf.
*/voidxfs_da_buf_done(xfs_dabuf_t *dabuf){ ASSERT(dabuf); ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); if (dabuf->dirty) xfs_da_buf_clean(dabuf); if (dabuf->nbuf > 1) kmem_free(dabuf->data, BBTOB(dabuf->bbcount));#ifdef XFS_DABUF_DEBUG { SPLDECL(s); s = mutex_spinlock(&xfs_dabuf_global_lock); if (dabuf->prev) dabuf->prev->next = dabuf->next; else xfs_dabuf_global_list = dabuf->next; if (dabuf->next) dabuf->next->prev = dabuf->prev; mutex_spinunlock(&xfs_dabuf_global_lock, s); } memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));#endif if (dabuf->nbuf == 1) kmem_zone_free(xfs_dabuf_zone, dabuf); else kmem_free(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf));}/* * Log transaction from a dabuf. */voidxfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last){ xfs_buf_t *bp; uint f; int i; uint l; int off; ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); if (dabuf->nbuf == 1) { ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0])); xfs_trans_log_buf(tp, dabuf->bps[0], first, last); return; } dabuf->dirty = 1; ASSERT(first <= last); for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) { bp = dabuf->bps[i]; f = off; l = f + XFS_BUF_COUNT(bp) - 1; if (f < first) f =
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -