📄 ext3-extents-2.6.18-vanilla.patch
+    struct ext3_extent_header *eh;
+    struct buffer_head *bh;
+    short int depth, i, ppos = 0, alloc = 0;
+
+    eh = ext_inode_hdr(inode);
+    i = depth = ext_depth(inode);
+    if (ext3_ext_check_header(inode, eh, depth))
+        return ERR_PTR(-EIO);
+
+    /* account possible depth increase */
+    if (!path) {
+        path = kmalloc(sizeof(struct ext3_ext_path) * (depth + 2),
+                       GFP_NOFS);
+        if (!path)
+            return ERR_PTR(-ENOMEM);
+        alloc = 1;
+    }
+    memset(path, 0, sizeof(struct ext3_ext_path) * (depth + 1));
+    path[0].p_hdr = eh;
+
+    /* walk through the tree */
+    while (i) {
+        ext_debug(inode, "depth %d: num %d, max %d\n",
+                  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+
+        ext3_ext_binsearch_idx(inode, path + ppos, block);
+        path[ppos].p_block = le32_to_cpu(path[ppos].p_idx->ei_leaf);
+        path[ppos].p_depth = i;
+        path[ppos].p_ext = NULL;
+
+        bh = sb_bread(inode->i_sb, path[ppos].p_block);
+        if (!bh)
+            goto err;
+
+        eh = ext_block_hdr(bh);
+        ppos++;
+        BUG_ON(ppos > depth);
+        path[ppos].p_bh = bh;
+        path[ppos].p_hdr = eh;
+        i--;
+
+        if (ext3_ext_check_header(inode, eh, i))
+            goto err;
+    }
+
+    path[ppos].p_depth = i;
+    path[ppos].p_hdr = eh;
+    path[ppos].p_ext = NULL;
+    path[ppos].p_idx = NULL;
+
+    /* find extent */
+    ext3_ext_binsearch(inode, path + ppos, block);
+
+    ext3_ext_show_path(inode, path);
+
+    return path;
+
+err:
+    ext3_ext_drop_refs(path);
+    if (alloc)
+        kfree(path);
+    return ERR_PTR(-EIO);
+}
+
+/*
+ * insert new index [logical;ptr] into the block at curp;
+ * it checks where to insert: before curp or after curp
+ */
+static int ext3_ext_insert_index(handle_t *handle, struct inode *inode,
+                                 struct ext3_ext_path *curp,
+                                 int logical, int ptr)
+{
+    struct ext3_extent_idx *ix;
+    int len, err;
+
+    if ((err = ext3_ext_get_access(handle, inode, curp)))
+        return err;
+
+    BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
+    len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
+    if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
+        /* insert after */
+        if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
+            len = (len - 1) * sizeof(struct ext3_extent_idx);
+            len = len < 0 ? 0 : len;
+            ext_debug(inode, "insert new index %d after: %d. "
+                      "move %d from 0x%p to 0x%p\n",
+                      logical, ptr, len,
+                      (curp->p_idx + 1), (curp->p_idx + 2));
+            memmove(curp->p_idx + 2, curp->p_idx + 1, len);
+        }
+        ix = curp->p_idx + 1;
+    } else {
+        /* insert before */
+        len = len * sizeof(struct ext3_extent_idx);
+        len = len < 0 ? 0 : len;
+        ext_debug(inode, "insert new index %d before: %d. "
+                  "move %d from 0x%p to 0x%p\n",
+                  logical, ptr, len,
+                  curp->p_idx, (curp->p_idx + 1));
+        memmove(curp->p_idx + 1, curp->p_idx, len);
+        ix = curp->p_idx;
+    }
+
+    ix->ei_block = cpu_to_le32(logical);
+    ix->ei_leaf = cpu_to_le32(ptr);
+    ix->ei_leaf_hi = ix->ei_unused = 0;
+    curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
+
+    BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
+           > le16_to_cpu(curp->p_hdr->eh_max));
+    BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
+
+    err = ext3_ext_dirty(handle, inode, curp);
+    ext3_std_error(inode->i_sb, err);
+
+    return err;
+}
+
+/*
+ * routine inserts new subtree into the path, using free index entry
+ * at depth 'at':
+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
+ * - makes decision where to split
+ * - moves remaining extents and index entries (right to the split point)
+ *   into the newly allocated blocks
+ * - initializes subtree
+ */
+static int ext3_ext_split(handle_t *handle, struct inode *inode,
+                          struct ext3_ext_path *path,
+                          struct ext3_extent *newext, int at)
+{
+    struct buffer_head *bh = NULL;
+    int depth = ext_depth(inode);
+    struct ext3_extent_header *neh;
+    struct ext3_extent_idx *fidx;
+    struct ext3_extent *ex;
+    int i = at, k, m, a;
+    unsigned long newblock, oldblock;
+    __le32 border;
+    int *ablocks = NULL; /* array of allocated blocks */
+    int err = 0;
+
+    /* make decision: where to split? */
+    /* FIXME: now decision is simplest: at current extent */
+
+    /* if current leaf will be split, then we should use
+     * border from split point */
+    BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
+    if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
+        border = path[depth].p_ext[1].ee_block;
+        ext_debug(inode, "leaf will be splitted."
+                  " next leaf starts at %d\n",
+                  le32_to_cpu(border));
+    } else {
+        border = newext->ee_block;
+        ext_debug(inode, "leaf will be added."
+                  " next leaf starts at %d\n",
+                  le32_to_cpu(border));
+    }
+
+    /*
+     * if error occurs, then we break processing
+     * and turn filesystem read-only. so, index won't
+     * be inserted and tree will be in consistent
+     * state. next mount will repair buffers too
+     */
+
+    /*
+     * get array to track all allocated blocks
+     * we need this to handle errors and free blocks
+     * upon them
+     */
+    ablocks = kmalloc(sizeof(unsigned long) * depth, GFP_NOFS);
+    if (!ablocks)
+        return -ENOMEM;
+    memset(ablocks, 0, sizeof(unsigned long) * depth);
+
+    /* allocate all needed blocks */
+    ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
+    for (a = 0; a < depth - at; a++) {
+        newblock = ext3_ext_new_block(handle, inode, path, newext, &err);
+        if (newblock == 0)
+            goto cleanup;
+        ablocks[a] = newblock;
+    }
+
+    /* initialize new leaf */
+    newblock = ablocks[--a];
+    BUG_ON(newblock == 0);
+    bh = sb_getblk(inode->i_sb, newblock);
+    if (!bh) {
+        err = -EIO;
+        goto cleanup;
+    }
+    lock_buffer(bh);
+
+    if ((err = ext3_journal_get_create_access(handle, bh)))
+        goto cleanup;
+
+    neh = ext_block_hdr(bh);
+    neh->eh_entries = 0;
+    neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode));
+    neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
+    neh->eh_depth = 0;
+    ex = EXT_FIRST_EXTENT(neh);
+
+    /* move remainder of path[depth] to the new leaf */
+    BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
+    /* start copy from next extent */
+    /* TODO: we could do it by single memmove */
+    m = 0;
+    path[depth].p_ext++;
+    while (path[depth].p_ext <=
+           EXT_MAX_EXTENT(path[depth].p_hdr)) {
+        ext_debug(inode, "move %d:%d:%d in new leaf %lu\n",
+                  le32_to_cpu(path[depth].p_ext->ee_block),
+                  le32_to_cpu(path[depth].p_ext->ee_start),
+                  le16_to_cpu(path[depth].p_ext->ee_len),
+                  newblock);
+        /*memmove(ex++, path[depth].p_ext++,
+                sizeof(struct ext3_extent));
+        neh->eh_entries++;*/
+        path[depth].p_ext++;
+        m++;
+    }
+    if (m) {
+        memmove(ex, path[depth].p_ext-m, sizeof(struct ext3_extent)*m);
+        neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
+    }
+
+    set_buffer_uptodate(bh);
+    unlock_buffer(bh);
+
+    if ((err = ext3_journal_dirty_metadata(handle, bh)))
+        goto cleanup;
+    brelse(bh);
+    bh = NULL;
+
+    /* correct old leaf */
+    if (m) {
+        if ((err = ext3_ext_get_access(handle, inode, path + depth)))
+            goto cleanup;
+        path[depth].p_hdr->eh_entries =
+            cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
+        if ((err = ext3_ext_dirty(handle, inode, path + depth)))
+            goto cleanup;
+
+    }
+
+    /* create intermediate indexes */
+    k = depth - at - 1;
+    BUG_ON(k < 0);
+    if (k)
+        ext_debug(inode, "create %d intermediate indices\n", k);
+    /* insert new index into current index block */
+    /* current depth stored in i var */
+    i = depth - 1;
+    while (k--) {
+        oldblock = newblock;
+        newblock = ablocks[--a];
+        bh = sb_getblk(inode->i_sb, newblock);
+        if (!bh) {
+            err = -EIO;
+            goto cleanup;
+        }
+        lock_buffer(bh);
+
+        if ((err = ext3_journal_get_create_access(handle, bh)))
+            goto cleanup;
+
+        neh = ext_block_hdr(bh);
+        neh->eh_entries = cpu_to_le16(1);
+        neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
+        neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode));
+        neh->eh_depth = cpu_to_le16(depth - i);
+        fidx = EXT_FIRST_INDEX(neh);
+        fidx->ei_block = border;
+        fidx->ei_leaf = cpu_to_le32(oldblock);
+        fidx->ei_leaf_hi = fidx->ei_unused = 0;
+
+        ext_debug(inode, "int.index at %d (block %lu): %lu -> %lu\n", i,
+                  newblock, (unsigned long) le32_to_cpu(border),
+                  oldblock);
+        /* copy indexes */
+        m = 0;
+        path[i].p_idx++;
+
+        ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
+                  EXT_MAX_INDEX(path[i].p_hdr));
+        BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
+               EXT_LAST_INDEX(path[i].p_hdr));
+        while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
+            ext_debug(inode, "%d: move %d:%d in new index %lu\n", i,
+                      le32_to_cpu(path[i].p_idx->ei_block),
+                      le32_to_cpu(path[i].p_idx->ei_leaf),
+                      newblock);
+            /*memmove(++fidx, path[i].p_idx++,
+                    sizeof(struct ext3_extent_idx));
+            neh->eh_entries++;
+            BUG_ON(neh->eh_entries > neh->eh_max);*/
+            path[i].p_idx++;
+            m++;
+        }
+        if (m) {
+            memmove(++fidx, path[i].p_idx - m,
+                    sizeof(struct ext3_extent_idx) * m);
+            neh->eh_entries =
+                cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
+        }
+        set_buffer_uptodate(bh);
+        unlock_buffer(bh);
+
+        if ((err = ext3_journal_dirty_metadata(handle, bh)))
+            goto cleanup;
+        brelse(bh);
+        bh = NULL;
+
+        /* correct old index */
+        if (m) {
+            err = ext3_ext_get_access(handle, inode, path + i);
+            if (err)
+                goto cleanup;
+            path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
+            err = ext3_ext_dirty(handle, inode, path + i);
+            if (err)
+                goto cleanup;
+        }
+
+        i--;
+    }
+
+    /* insert new index */
+    if (err)
+        goto cleanup;
+
+    err = ext3_ext_insert_index(handle, inode, path + at,
+                                le32_to_cpu(border), newblock);
+
+cleanup:
+    if (bh) {
+        if (buffer_locked(bh))
+            unlock_buffer(bh);
+        brelse(bh);
+    }
+
+    if (err) {
+        /* free all allocated blocks in error case */
+        for (i = 0; i < depth; i++) {
+            if (!ablocks[i])
+                continue;
+            ext3_free_blocks(handle, inode, ablocks[i], 1);
+        }
+    }
+    kfree(ablocks);
+
+    return err;
+}
+
+/*
+ * routine implements tree growing procedure:
+ * - allocates new block
+ * - moves top-level data (index block or leaf) into the new block
+ * - initializes new top-level, creating index that points to the
+ *   just created block
+ */
+static int ext3_ext_grow_indepth(handle_t *handle, struct inode *inode,
+                                 struct ext3_ext_path *path,
+                                 struct ext3_extent *newext)
+{
+    struct ext3_ext_path *curp = path;
+    struct ext3_extent_header *neh;
+    struct ext3_extent_idx *fidx;
+    struct buffer_head *bh;
+    unsigned long newblock;
+    int err = 0;
+
+    newblock = ext3_ext_new_block(handle, inode, path, newext, &err);
+    if (newblock == 0)
+        return err;
+
+    bh = sb_getblk(inode->i_sb, newblock);
+    if (!bh) {
+        err = -EIO;
+        ext3_std_error(inode->i_sb, err);
+        return err;
+    }
+    lock_buffer(bh);
+
+    if ((err = ext3_journal_get_create_access(handle, bh))) {
+        unlock_buffer(bh);
+        goto out;
+    }
+
+    /* move top-level index/leaf into new block */
+    memmove(bh->b_data, curp->p_hdr, sizeof(EXT3_I(inode)->i_data));
+
+    /* set size of new block */
+    neh = ext_block_hdr(bh);
+    /* old root could have indexes or leaves
+     * so calculate eh_max the right way */
+    if (ext_depth(inode))
+        neh->eh_max = cpu_to_le16(ext3_ext_space_block_idx(inode));
+    else
+        neh->eh_max = cpu_to_le16(ext3_ext_space_block(inode));
+    neh->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
+    set_buffer_uptodate(bh);
+    unlock_buffer(bh);
+
+    if ((err = ext3_journal_dirty_metadata(handle, bh)))
+        goto out;
+
+    /* create index in new top-level index: num,max,pointer */
+    if ((err = ext3_ext_get_access(handle, inode, curp)))
+        goto out;
+
+    curp->p_hdr->eh_magic = cpu_to_le16(EXT3_EXT_MAGIC);
+    curp->p_hdr->eh_max = cpu_to_le16(ext3_ext_space_root_idx(inode));
+    curp->p_hdr->eh_entries = cpu_to_le16(1);
+    curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
+    /* FIXME: it works, but actually path[0] can be index */
+    curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
+    curp->p_idx->ei_leaf = cpu_to_le32(newblock);
+    curp->p_idx->ei_leaf_hi = curp->p_idx->ei_unused = 0;
+
+    neh = ext_inode_hdr(inode);
+    fidx = EXT_FIRST_INDEX(neh);
+    ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %d\n",
+              le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
+              le32_to_cpu(fidx->ei_block), le32_to_cpu(fidx->ei_leaf));
+
+    neh->eh_depth = cpu_to_le16(path->p_depth + 1);
+    err = ext3_ext_dirty(handle, inode, curp);
+out:
+    brelse(bh);
+
+    return err;
+}
+
+/*
+ * routine finds empty index and adds new leaf. if no free index found
+ * then it requests in-depth growing
+ */
+static int ext3_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+                                    struct ext3_ext_path *path,
+                                    struct ext3_extent *newext)
+{
+    struct ext3_ext_path *curp;
+    int depth, i, err = 0;
+
+repeat:
+    i = depth = ext_depth(inode);
+
+    /* walk up the tree and look for free index entry */
+    curp = path + depth;
+    while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
+        i--;
+        curp--;
+    }
+
+    /* we use already allocated block for index block
+     * so, subsequent data blocks should be contiguous */
+    if (EXT_HAS_FREE_INDEX(curp)) {
+        /* if we found index with free entry, then use that
+         * entry: create all needed subtree and add new leaf */
+        err = ext3_ext_split(handle, inode, path, newext, i);
+        if (err)
+            goto out;
+
+        /* refill path */
+        ext3_ext_drop_refs(path);
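The routines in this hunk all manipulate three little-endian on-disk records (the per-block header, the index entries of interior nodes, and the extents stored in leaves) whose definitions live elsewhere in the patch. As a reading aid only, here is a sketch of those layouts inferred from the field accesses above; field order, and the eh_generation and ee_start_hi fields, are assumptions rather than something shown in this excerpt.

/* Sketch for orientation only; the authoritative definitions are in the
 * ext3_extents.h part of the full patch. Field widths follow the
 * le16/le32 conversions used in the code above. */
struct ext3_extent_header {
    __le16  eh_magic;      /* EXT3_EXT_MAGIC, verified by ext3_ext_check_header() */
    __le16  eh_entries;    /* number of valid entries in this block */
    __le16  eh_max;        /* capacity in entries, see ext3_ext_space_*() */
    __le16  eh_depth;      /* 0 for a leaf, > 0 for an index block */
    __le32  eh_generation; /* assumed; not referenced in this excerpt */
};

struct ext3_extent_idx {   /* one entry of an index (interior) block */
    __le32  ei_block;      /* first logical block covered by the subtree */
    __le32  ei_leaf;       /* physical block of the child node (low 32 bits) */
    __le16  ei_leaf_hi;    /* high bits, zeroed by the code above */
    __u16   ei_unused;
};

struct ext3_extent {       /* one entry of a leaf block */
    __le32  ee_block;      /* first logical block the extent covers */
    __le16  ee_len;        /* number of blocks covered */
    __le16  ee_start_hi;   /* assumed high bits of the physical start */
    __le32  ee_start;      /* physical start block (low 32 bits) */
};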
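The tree walk at the top of this hunk descends one level per iteration by calling ext3_ext_binsearch_idx(), which is not part of this excerpt. A minimal sketch of the lookup step that helper has to perform, assuming index entries are kept sorted by ei_block (the hypothetical name and the exact loop shape are illustrative, not the patch's implementation):

/* Sketch only: choose the last index entry whose ei_block is <= the
 * logical block being looked up, i.e. the subtree that can contain it.
 * The first entry is treated as covering everything below the second. */
static void sketch_binsearch_idx(struct ext3_ext_path *path, int block)
{
    struct ext3_extent_header *eh = path->p_hdr;
    struct ext3_extent_idx *l = EXT_FIRST_INDEX(eh) + 1;
    struct ext3_extent_idx *r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;

    while (l <= r) {
        struct ext3_extent_idx *m = l + (r - l) / 2;

        if (block < le32_to_cpu(m->ei_block))
            r = m - 1;
        else
            l = m + 1;
    }
    path->p_idx = l - 1;    /* last entry with ei_block <= block */
}

ext3_ext_binsearch(), called once the walk reaches a leaf, would perform the analogous search over the ee_block keys of the extent entries.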