📄 inode.c
字号:
	/*
	 * NOTE(review): this chunk begins mid-way through ext2_alloc_branch();
	 * the function signature and the head of its allocation loop are above
	 * this view.  Locals n, num, i, parent, err, blocksize, branch[] and
	 * offsets[] are declared in the unseen portion -- confirm against the
	 * full file.
	 */
		struct buffer_head *bh;

		/* Allocate the next block */
		int nr = ext2_alloc_block(inode, parent, &err);
		if (!nr)
			break;
		branch[n].key = cpu_to_le32(nr);
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to new one, then send parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		/* Buffer is fully zeroed and linked, so it is valid data now. */
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * But we now rely upon generic_osync_inode()
		 * and b_inode_buffers. But not for directories.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
		/* Newly allocated block becomes the parent of the next level. */
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		ext2_free_blocks(inode, le32_to_cpu(branch[i].key), 1);
	return err;
}

/**
 * ext2_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext2_alloc_branch)
 * @where: location of missing link
 * @num: number of blocks we are adding
 *
 * This function verifies that chain (up to the missing link) had not
 * changed, fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0. Otherwise (== chain had been changed)
 * we free the new blocks (forgetting their buffer_heads, indeed) and
 * return -EAGAIN.
 */
static inline int ext2_splice_branch(struct inode *inode,
			long block,
			Indirect chain[4],
			Indirect *where,
			int num)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	int i;

	/* Verify that place we are splicing to is still there and vacant */
	write_lock(&ei->i_meta_lock);
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	/* That's it: publish the key, making the branch reachable. */
	*where->p = where->key;
	/* Remember this allocation to seed the next allocation's goal. */
	ei->i_next_alloc_block = block;
	ei->i_next_alloc_goal = le32_to_cpu(where[num-1].key);
	write_unlock(&ei->i_meta_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */
	inode->i_ctime = CURRENT_TIME;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	/* Chain moved under us: drop the new branch without writing it out. */
	write_unlock(&ei->i_meta_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		ext2_free_blocks(inode, le32_to_cpu(where[i].key), 1);
	return -EAGAIN;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 */
static int ext2_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	unsigned long goal;
	int left;
	int boundary = 0;
	/* Translate the logical block into a path of per-level offsets. */
	int depth = ext2_block_to_path(inode, iblock, offsets, &boundary);

	if (depth == 0)
		goto out;

reread:
	partial = ext2_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
		if (boundary)
			set_buffer_boundary(bh_result);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	goal = 0;
	if (ext2_find_goal(inode, iblock, chain, partial, &goal) < 0)
		goto changed;

	/* Number of missing levels we still have to allocate. */
	left = (chain + depth) - partial;
	err = ext2_alloc_branch(inode, left, goal,
					offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	/* Splice fails (-EAGAIN) only if truncate raced us; retry lookup. */
	if (ext2_splice_branch(inode, iblock, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh_result);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

/* Thin address_space wrappers: each feeds ext2_get_block to a generic helper. */

static int ext2_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ext2_get_block, wbc);
}

static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}

static int
ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}

static int
ext2_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return block_prepare_write(page,from,to,ext2_get_block);
}

static int
ext2_nobh_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return nobh_prepare_write(page,from,to,ext2_get_block);
}

static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ext2_get_block);
}

/*
 * get_blocks_t adapter for direct I/O.  Always maps a single block
 * (b_size is set to one filesystem block); max_blocks is accepted only
 * to satisfy the blockdev_direct_IO callback signature.
 */
static int
ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
			struct buffer_head *bh_result, int create)
{
	int ret;

	ret = ext2_get_block(inode, iblock, bh_result, create);
	if (ret == 0)
		bh_result->b_size = (1 << inode->i_blkbits);
	return ret;
}

static ssize_t
ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				offset, nr_segs, ext2_get_blocks, NULL);
}

static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}

/* Default (buffer_head based) address space operations. */
struct address_space_operations ext2_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.sync_page		= block_sync_page,
	.prepare_write		= ext2_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
};

/* nobh variant: avoids attaching buffer_heads to data pages on write. */
struct address_space_operations ext2_nobh_aops = {
	.readpage		= ext2_readpage,
	.readpages		= ext2_readpages,
	.writepage		= ext2_writepage,
	.sync_page		= block_sync_page,
	.prepare_write		= ext2_nobh_prepare_write,
	.commit_write		= nobh_commit_write,
	.bmap			= ext2_bmap,
	.direct_IO		= ext2_direct_IO,
	.writepages		= ext2_writepages,
};

/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext2_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext2_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to the (detached) top of branch
 *
 * This is a helper function used by ext2_truncate().
 *
 * When we do truncate() we may have to clean the ends of several indirect
 * blocks but leave the blocks themselves alive. Block is partially
 * truncated if some data below the new i_size is referred to from it (and
 * it is on the path to the first completely truncated data block, indeed).
 * We have to free the top of that path along with everything to the right
 * of the path. Since no allocation past the truncation point is possible
 * until ext2_truncate() finishes, we may safely do the latter, but top
 * of branch may require special attention - pageout below the truncation
 * point might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the block
 * number of its root in *@top, pointers to buffer_heads of partially
 * truncated blocks - in @chain[].bh and pointers to their last elements
 * that should not be removed - in @chain[].p. Return value is the pointer
 * to last filled element of @chain.
 *
 * The work left to caller to do the actual freeing of subtrees:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].p
 *			(no partially truncated stuff there).
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Skip trailing all-zero offsets: those levels vanish entirely. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext2_get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);
		goto no_top;
	}
	/* Walk back over levels whose surviving prefix is entirely zero. */
	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		/* Detach: record the root in *top and clear the link. */
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&EXT2_I(inode)->i_meta_lock);

	/* Release buffers of the levels we walked back over. */
	while(partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/**
 * ext2_free_data - free a list of data blocks
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: points immediately past the end of array
 *
 * We are freeing all blocks referred to from that array (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
{
	unsigned long block_to_free = 0, count = 0;
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			*p = 0;
			/* accumulate blocks to free if they're contiguous */
			if (count == 0)
				goto free_this;
			else if (block_to_free == nr - count)
				count++;
			else {
				/* Run broke: flush it, then start a new one. */
				mark_inode_dirty(inode);
				ext2_free_blocks (inode, block_to_free, count);
			free_this:
				block_to_free = nr;
				count = 1;
			}
		}
	}
	/* Flush the final accumulated run, if any. */
	if (count > 0) {
		mark_inode_dirty(inode);
		ext2_free_blocks (inode, block_to_free, count);
	}
}

/**
 * ext2_free_branches - free an array of branches
 * @inode: inode we are dealing with
 * @p: array of block numbers
 * @q: pointer immediately past the end of array
 * @depth: depth of the branches to free
 *
 * We are freeing all blocks referred to from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q ; p++) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;
			/* NOTE(review): chunk ends here; the remainder of
			 * ext2_free_branches continues beyond this view. */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -