fsfilt_ext3.c

From the "Lustre 1.6.5 source code" · C source · 1,853 lines total · page 1/5

C
1,853
字号
/* NOTE(review): this chunk begins mid-definition.  The brace-enclosed body
 * below belongs to fsfilt_ext3_cb_func() (its signature lies before this
 * chunk); it is the function registered with journal_callback_set() further
 * down, so it runs on journal transaction commit: it forwards the commit
 * notification (or error) to the stored Lustre handler and frees the
 * slab-allocated callback record. */
{
        struct fsfilt_cb_data *fcb = (struct fsfilt_cb_data *)jcb;

        /* hand the commit event (or error) to the registered handler */
        fcb->cb_func(fcb->cb_obd, fcb->cb_last_rcvd, fcb->cb_data, error);
        OBD_SLAB_FREE(fcb, fcb_cache, sizeof *fcb);
}

/*
 * Register a callback to run when the journal transaction backing @handle
 * commits.  The fsfilt_cb_data record is slab-allocated here and freed by
 * fsfilt_ext3_cb_func() after the callback fires.
 *
 * Returns 0 on success, -ENOMEM if the slab allocation fails.
 */
static int fsfilt_ext3_add_journal_cb(struct obd_device *obd, __u64 last_rcvd,
                                      void *handle, fsfilt_cb_t cb_func,
                                      void *cb_data)
{
        struct fsfilt_cb_data *fcb;

        OBD_SLAB_ALLOC(fcb, fcb_cache, CFS_ALLOC_IO, sizeof *fcb);
        if (fcb == NULL)
                RETURN(-ENOMEM);

        fcb->cb_func = cb_func;
        fcb->cb_obd = obd;
        fcb->cb_last_rcvd = last_rcvd;
        fcb->cb_data = cb_data;

        CDEBUG(D_EXT2, "set callback for last_rcvd: "LPD64"\n", last_rcvd);
        /* NOTE(review): lock_24kernel() is presumably a 2.4-kernel BKL
         * compatibility wrapper (no-op on 2.6) -- confirm against the
         * portability headers. */
        lock_24kernel();
        journal_callback_set(handle, fsfilt_ext3_cb_func,
                             (struct journal_callback *)fcb);
        unlock_24kernel();

        return 0;
}

/*
 * We need to hack the return value for the free inode counts because
 * the current EA code requires one filesystem block per inode with EAs,
 * so it is possible to run out of blocks before we run out of inodes.
 *
 * This can be removed when the ext3 EA code is fixed.
 */
/*
 * statfs wrapper: fill @osfs from the underlying filesystem statistics.
 * When free blocks are scarcer than free inodes, report the free-block
 * count as the free-inode count too (see the hack comment above).
 */
static int fsfilt_ext3_statfs(struct super_block *sb, struct obd_statfs *osfs)
{
        struct kstatfs sfs;
        int rc;

        memset(&sfs, 0, sizeof(sfs));
        rc = ll_do_statfs(sb, &sfs);
        if (!rc && sfs.f_bfree < sfs.f_ffree) {
                /* clamp reported free inodes to the free-block count */
                sfs.f_files = (sfs.f_files - sfs.f_ffree) + sfs.f_bfree;
                sfs.f_ffree = sfs.f_bfree;
        }

        statfs_pack(osfs, &sfs);
        return rc;
}

/* Force a commit of the current journal transaction to disk. */
static int fsfilt_ext3_sync(struct super_block *sb)
{
        return ext3_force_commit(sb);
}

/* Kernels carrying an old extents/mballoc patch lack the interfaces used
 * below -- disable multiblock allocator support in that case. */
#if defined(EXT3_MULTIBLOCK_ALLOCATOR) && (!defined(EXT3_EXT_CACHE_NO) || defined(EXT_CACHE_MARK))
#warning "kernel code has old extents/mballoc patch, disabling"
#undef EXT3_MULTIBLOCK_ALLOCATOR
#endif

#ifndef EXT3_EXTENTS_FL
#define EXT3_EXTENTS_FL                 0x00080000 /* Inode uses extents */
#endif

#ifdef EXT3_MULTIBLOCK_ALLOCATOR
/* The lock guarding the extent tree changed name/type across kernels:
 * rw-semaphore (2.4), plain semaphore (< 2.6.17), mutex (2.6.17+).
 * NOTE(review): these macro bodies already end in ';'. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#define ext3_up_truncate_sem(inode)  up_write(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode)  down_write(&EXT3_I(inode)->truncate_sem);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
#define ext3_up_truncate_sem(inode)  up(&EXT3_I(inode)->truncate_sem);
#define ext3_down_truncate_sem(inode)  down(&EXT3_I(inode)->truncate_sem);
#else
#define ext3_up_truncate_sem(inode)  mutex_unlock(&EXT3_I(inode)->truncate_mutex);
#define ext3_down_truncate_sem(inode)  mutex_lock(&EXT3_I(inode)->truncate_mutex);
#endif

#ifndef EXT_ASSERT
#define EXT_ASSERT(cond)  BUG_ON(!(cond))
#endif

#ifdef EXT3_EXT_HAS_NO_TREE
/* for kernels 2.6.18 and later: the extents API takes the inode directly */
#define ext3_ext_base                   inode
#define ext3_ext_base2inode(inode)      (inode)
#define EXT_DEPTH(inode)                ext_depth(inode)
#define EXT_GENERATION(inode)           ext_generation(inode)
#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
                        ext3_ext_walk_space(inode, block, num, cb, cbdata);
#else
/* older kernels: the extents API works on a struct ext3_extents_tree */
#define ext3_ext_base                   ext3_extents_tree
#define ext3_ext_base2inode(tree)       (tree->inode)
#define fsfilt_ext3_ext_walk_space(tree, block, num, cb, cbdata) \
                        ext3_ext_walk_space(tree, block, num, cb);
#endif

#include <linux/lustre_version.h>

#if EXT3_EXT_MAGIC == 0xf301
/* very old extents patch used different extent field names -- remap them */
#define ee_start e_start
#define ee_block e_block
#define ee_len   e_num
#endif

#ifndef EXT3_BB_MAX_BLOCKS
/* no mballoc available: fall back to the stock ext3 block allocator */
#define ext3_mb_new_blocks(handle, inode, goal, count, aflags, err) \
        ext3_new_blocks(handle, inode, count, goal, err)
#endif

/*
 * Cursor state used while walking the extent tree to map (and optionally
 * allocate) a run of logical blocks:
 *   blocks/created - output arrays, advanced as entries are filled in
 *   start          - next logical block still to be mapped
 *   num            - blocks remaining (init_num is the original total)
 *   create         - nonzero to allocate missing blocks, 0 for lookup only
 */
struct bpointers {
        unsigned long *blocks;
        int *created;
        unsigned long start;
        int num;
        int init_num;
        int create;
};

/*
 * Choose an allocation goal (preferred physical block) for logical @block:
 * prefer continuing from the nearest extent in @path, else the block of
 * the index buffer, else a per-process "colour" offset inside the inode's
 * block group.
 */
static int ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
                              unsigned long block, int *aflags)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned long bg_start;
        unsigned long colour;
        int depth;

        if (path) {
                struct ext3_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext)) {
#if 0
                        /* This prefers to eat into a contiguous extent
                         * rather than find an extent that the whole
                         * request will fit into.  This can fragment data
                         * block allocation and prevents our lovely 1M I/Os
                         * from reaching the disk intact. */
                        if (ex->ee_block + ex->ee_len == block)
                                *aflags |= 1;
#endif
                        return ex->ee_start + (block - ex->ee_block);
                }

                /* the index looks empty -- try starting from the index
                 * block itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK, use the inode's group */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/locks.h>
/* 2.4 kernels: drop any stale buffer-cache alias of @blocknr so old
 * metadata cannot overwrite data later written through the page cache. */
static void ll_unmap_underlying_metadata(struct super_block *sb,
                                         unsigned long blocknr)
{
        struct buffer_head *old_bh;

        old_bh = get_hash_table(sb->s_dev, blocknr, sb->s_blocksize);
        if (old_bh) {
                mark_buffer_clean(old_bh);
                wait_on_buffer(old_bh);
                clear_bit(BH_Req, &old_bh->b_state);
                __brelse(old_bh);
        }
}
#else
#define ll_unmap_underlying_metadata(sb, blocknr) \
        unmap_underlying_metadata((sb)->s_bdev, blocknr)
#endif

#ifndef EXT3_MB_HINT_GROUP_ALLOC
/* mballoc without the allocation-request API: allocate up to *count blocks
 * near the goal from ext3_ext_find_goal().  On return *count holds the
 * number actually allocated and *err any error code. */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        unsigned long pblock, goal;
        int aflags = 0;
        struct inode *inode = ext3_ext_base2inode(base);

        goal = ext3_ext_find_goal(inode, path, block, &aflags);
        aflags |= 2; /* blocks have already been reserved */
        lock_24kernel();
        pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
        unlock_24kernel();
        return pblock;
}
#else
/* mballoc with struct ext3_allocation_request: pass the logical/physical
 * neighbours as hints so the allocator can place new blocks contiguously.
 * NOTE(review): aflags filled by ext3_ext_find_goal() is never copied into
 * the request here -- looks deliberate in this variant, but confirm. */
static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
                                struct ext3_ext_path *path, unsigned long block,
                                unsigned long *count, int *err)
{
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_allocation_request ar;
        unsigned long pblock;
        int aflags;

        /* find neighbouring allocated blocks */
        ar.lleft = block;
        *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
        if (*err)
                return 0;
        ar.lright = block;
        *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
        if (*err)
                return 0;

        /* allocate new block */
        ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
        ar.inode = inode;
        ar.logical = block;
        ar.len = *count;
        ar.flags = EXT3_MB_HINT_DATA;
        pblock = ext3_mb_new_blocks(handle, &ar, err);
        *count = ar.len;
        return pblock;
}
#endif

/*
 * ext3_ext_walk_space() callback: invoked for every cached extent or hole
 * overlapping the range described by the struct bpointers cursor.  Known
 * extents are simply recorded into the caller's arrays; for holes, when
 * bp->create is set, new blocks are allocated under a fresh journal handle
 * and inserted into the extent tree.  Returns EXT_CONTINUE, EXT_REPEAT, or
 * a negative errno.
 *
 * NOTE(review): this definition is truncated at the end of this chunk --
 * the final mapping loop is not closed here.
 */
#ifdef EXT3_EXT_HAS_NO_TREE
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex,
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                  struct ext3_extent *ex,
#endif
                                  void *cbdata)
{
        struct bpointers *bp = cbdata;
#else
static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
                                  struct ext3_ext_path *path,
                                  struct ext3_ext_cache *cex
#ifdef HAVE_EXT_PREPARE_CB_EXTENT
                                  , struct ext3_extent *ex
#endif
                                 )
{
        struct bpointers *bp = base->private;
#endif
        struct inode *inode = ext3_ext_base2inode(base);
        struct ext3_extent nex;
        unsigned long pblock;
        unsigned long tgen;
        int err, i;
        unsigned long count;
        handle_t *handle;

        i = EXT_DEPTH(base);
        EXT_ASSERT(i == path->p_depth);
        EXT_ASSERT(path[i].p_hdr);

        /* an already-known extent needs no allocation -- just map it */
        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                err = EXT_CONTINUE;
                goto map;
        }

        if (bp->create == 0) {
                /* lookup only: report the hole as unmapped (block 0) */
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->created) = 0;
                        bp->created++;
                        *(bp->blocks) = 0;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;
                }
                return EXT_CONTINUE;
        }

        tgen = EXT_GENERATION(base);
        count = ext3_ext_calc_credits_for_insert(base, path);
        /* NOTE(review): the truncate lock is dropped while starting the
         * journal handle, presumably to honour handle-vs-truncate lock
         * ordering; the generation check below catches concurrent tree
         * changes made in the window -- confirm against ldiskfs locking
         * rules. */
        ext3_up_truncate_sem(inode);
        lock_24kernel();
        handle = fsfilt_ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
        unlock_24kernel();
        if (IS_ERR(handle)) {
                ext3_down_truncate_sem(inode);
                return PTR_ERR(handle);
        }
        ext3_down_truncate_sem(inode);

        if (tgen != EXT_GENERATION(base)) {
                /* the tree has changed, so the path can be invalid now --
                 * ask the walker to retry this range */
                lock_24kernel();
                fsfilt_ext3_journal_stop(handle);
                unlock_24kernel();
                return EXT_REPEAT;
        }

        count = cex->ec_len;
        pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
        if (!pblock)
                goto out;
        EXT_ASSERT(count <= cex->ec_len);

        /* insert the newly allocated extent into the tree */
        nex.ee_block = cex->ec_block;
        nex.ee_start = pblock;
        nex.ee_len = count;
        err = ext3_ext_insert_extent(handle, base, path, &nex);
        if (err) {
                CERROR("can't insert extent: %d\n", err);
                /* XXX: export ext3_free_blocks() */
                /*ext3_free_blocks(handle, inode, nex.ee_start, nex.ee_len, 0);*/
                goto out;
        }

        /*
         * Putting the length of the extent we actually inserted into the
         * cache entry asks ext3_ext_walk_space() to continue scanning
         * after that block.
         */
        cex->ec_len = nex.ee_len;
        cex->ec_start = nex.ee_start;
        BUG_ON(nex.ee_len == 0);
        BUG_ON(nex.ee_block != cex->ec_block);

out:
        lock_24kernel();
        fsfilt_ext3_journal_stop(handle);
        unlock_24kernel();
map:
        if (err >= 0) {
                /* copy the mapped physical blocks into the caller's arrays */
                if (bp->num == 0) {
                        /* walker handed us an extent outside the remaining
                         * request -- log diagnostics */
                        CERROR("hmm. why do we find this extent?\n");
                        CERROR("initial space: %lu:%u\n",
                                bp->start, bp->init_num);
                        CERROR("current extent: %u/%u/%u %d\n",
                                cex->ec_block, cex->ec_len,
                                cex->ec_start, cex->ec_type);
                }
                i = 0;
                if (cex->ec_block < bp->start)
                        i = bp->start - cex->ec_block;
                if (i >= cex->ec_len)
                        CERROR("nothing to do?! i = %d, e_num = %u\n",
                                        i, cex->ec_len);
                for (; i < cex->ec_len && bp->num; i++) {
                        *(bp->blocks) = cex->ec_start + i;
                        if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
                                *(bp->created) = 0;
                        } else {
                                *(bp->created) = 1;
                                /* unmap any possible underlying metadata from
                                 * the block device mapping.  bug 6998. */
                                ll_unmap_underlying_metadata(inode->i_sb,
                                                             *(bp->blocks));
                        }
                        bp->created++;
                        bp->blocks++;
                        bp->num--;
                        bp->start++;

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?