📄 inode.c
    /* do not read the direct item */
    _get_block_create_0(inode, block, bh_result, 0);
    reiserfs_write_unlock(inode->i_sb);
    return 0;
}

/* special version of get_block that is only used by grab_tail_page right
** now.  It is sent to block_prepare_write, and when you try to get a
** block past the end of the file (or a block from a hole) it returns
** -ENOENT instead of a valid buffer.  block_prepare_write expects to
** be able to do i/o on the buffers returned, unless an error value
** is also returned.
**
** So, this allows block_prepare_write to be used for reading a single block
** in a page, where it does not produce a valid page for holes or past the
** end of the file.  This turns out to be exactly what we need for reading
** tails for conversion.
**
** The point of the wrapper is forcing a certain value for create, even
** though the VFS layer is calling this function with create==1.  If you
** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
** don't use this function.
*/
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
                                       struct buffer_head *bh_result,
                                       int create)
{
    return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}

/* This is a special helper for reiserfs_get_block in case we are executing
   a direct_IO request. */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
                                         sector_t iblock,
                                         struct buffer_head *bh_result,
                                         int create)
{
    int ret;

    bh_result->b_page = NULL;

    /* We set the b_size before the reiserfs_get_block call since it is
       referenced in convert_tail_for_hole() that may be called from
       reiserfs_get_block() */
    bh_result->b_size = (1 << inode->i_blkbits);

    ret = reiserfs_get_block(inode, iblock, bh_result,
                             create | GET_BLOCK_NO_DANGLE);
    if (ret)
        goto out;

    /* don't allow direct io onto tail pages */
    if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
        /* make sure future calls to the direct io funcs for this offset
        ** in the file fail by unmapping the buffer */
        clear_buffer_mapped(bh_result);
        ret = -EINVAL;
    }

    /* Possible unpacked tail.  Flush the data before pages have
       disappeared */
    if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
        int err;

        lock_kernel();
        err = reiserfs_commit_for_inode(inode);
        REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
        unlock_kernel();

        if (err < 0)
            ret = err;
    }
out:
    return ret;
}
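/*
 * Illustrative sketch (not part of the original inode.c listing): how a
 * get_blocks helper such as reiserfs_get_blocks_direct_io is typically
 * handed to the generic direct I/O machinery in kernels of this era.
 * reiserfs_direct_IO itself is outside this excerpt, so the exact
 * signature below is an assumption.
 */
#if 0    /* illustration only */
static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
                                  const struct iovec *iov, loff_t offset,
                                  unsigned long nr_segs)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file->f_mapping->host;

    /* blockdev_direct_IO() maps each block range via the get_blocks
     * callback before building and submitting the bios. */
    return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                              offset, nr_segs,
                              reiserfs_get_blocks_direct_io, NULL);
}
#endif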
/*
** helper function for when reiserfs_get_block is called for a hole
** but the file tail is still in a direct item
** bh_result is the buffer head for the hole
** tail_offset is the offset of the start of the tail in the file
**
** This calls prepare_write, which will start a new transaction;
** you should not be in a transaction, or have any paths held, when you
** call this.
*/
static int convert_tail_for_hole(struct inode *inode,
                                 struct buffer_head *bh_result,
                                 loff_t tail_offset)
{
    unsigned long index;
    unsigned long tail_end;
    unsigned long tail_start;
    struct page *tail_page;
    struct page *hole_page = bh_result->b_page;
    int retval = 0;

    if ((tail_offset & (bh_result->b_size - 1)) != 1)
        return -EIO;

    /* always try to read until the end of the block */
    tail_start = tail_offset & (PAGE_CACHE_SIZE - 1);
    tail_end = (tail_start | (bh_result->b_size - 1)) + 1;

    index = tail_offset >> PAGE_CACHE_SHIFT;
    /* hole_page can be zero in case of direct_io; we are sure that we
       cannot get here if we write with O_DIRECT into the tail page */
    if (!hole_page || index != hole_page->index) {
        tail_page = grab_cache_page(inode->i_mapping, index);
        retval = -ENOMEM;
        if (!tail_page) {
            goto out;
        }
    } else {
        tail_page = hole_page;
    }

    /* we don't have to make sure the conversion did not happen while
    ** we were locking the page because anyone that could convert
    ** must first take i_mutex.
    **
    ** We must fix the tail page for writing because it might have buffers
    ** that are mapped, but have a block number of 0.  This indicates tail
    ** data that has been read directly into the page, and
    ** block_prepare_write won't trigger a get_block in this case.
    */
    fix_tail_page_for_writing(tail_page);
    retval = reiserfs_prepare_write(NULL, tail_page, tail_start, tail_end);
    if (retval)
        goto unlock;

    /* tail conversion might change the data in the page */
    flush_dcache_page(tail_page);

    retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);

unlock:
    if (tail_page != hole_page) {
        unlock_page(tail_page);
        page_cache_release(tail_page);
    }
out:
    return retval;
}

static inline int _allocate_block(struct reiserfs_transaction_handle *th,
                                  sector_t block,
                                  struct inode *inode,
                                  b_blocknr_t *allocated_block_nr,
                                  struct treepath *path, int flags)
{
    BUG_ON(!th->t_trans_id);

#ifdef REISERFS_PREALLOCATE
    if (!(flags & GET_BLOCK_NO_IMUX)) {
        return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
                                          path, block);
    }
#endif
    return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
                                     block);
}
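/*
 * For reference while reading reiserfs_get_block() below: the GET_BLOCK_*
 * bits tested in the 'create' argument are defined in
 * include/linux/reiserfs_fs.h, not in this file.  The values reproduced
 * here are an editorial aid from the same kernel era and should be checked
 * against that header rather than taken as authoritative.
 */
#if 0    /* reference copy only -- not compiled in this file */
#define GET_BLOCK_NO_CREATE   0  /* don't create new blocks or convert tails */
#define GET_BLOCK_CREATE      1  /* add anything you need to find block */
#define GET_BLOCK_NO_HOLE     2  /* return -ENOENT for file holes */
#define GET_BLOCK_READ_DIRECT 4  /* read the tail if indirect item not found */
#define GET_BLOCK_NO_IMUX     8  /* i_mutex is not held, don't preallocate */
#define GET_BLOCK_NO_DANGLE  16  /* don't leave any transactions running */
#endif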
int reiserfs_get_block(struct inode *inode, sector_t block,
                       struct buffer_head *bh_result, int create)
{
    int repeat, retval = 0;
    b_blocknr_t allocated_block_nr = 0;  // b_blocknr_t is (unsigned) 32 bit int
    INITIALIZE_PATH(path);
    int pos_in_item;
    struct cpu_key key;
    struct buffer_head *bh, *unbh = NULL;
    struct item_head *ih, tmp_ih;
    __le32 *item;
    int done;
    int fs_gen;
    struct reiserfs_transaction_handle *th = NULL;
    /* space reserved in transaction batch:
       . 3 balancings in direct->indirect conversion
       . 1 block involved into reiserfs_update_sd()
       XXX in practically impossible worst case direct2indirect()
       can incur (much) more than 3 balancings.
       quota update for user, group */
    int jbegin_count =
        JOURNAL_PER_BALANCE_CNT * 3 + 1 +
        2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
    int version;
    int dangle = 1;
    loff_t new_offset =
        (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;

    /* bad.... */
    reiserfs_write_lock(inode->i_sb);
    version = get_inode_item_key_version(inode);

    if (!file_capable(inode, block)) {
        reiserfs_write_unlock(inode->i_sb);
        return -EFBIG;
    }

    /* if !create, we aren't changing the FS, so we don't need to
    ** log anything, so we don't need to start a transaction
    */
    if (!(create & GET_BLOCK_CREATE)) {
        int ret;
        /* find number of block-th logical block of the file */
        ret = _get_block_create_0(inode, block, bh_result,
                                  create | GET_BLOCK_READ_DIRECT);
        reiserfs_write_unlock(inode->i_sb);
        return ret;
    }
    /*
     * if we're already in a transaction, make sure to close
     * any new transactions we start in this func
     */
    if ((create & GET_BLOCK_NO_DANGLE) ||
        reiserfs_transaction_running(inode->i_sb))
        dangle = 0;

    /* If the file is of such a size that it might have a tail and tails
    ** are enabled, we should mark it as possibly needing tail packing on
    ** close
    */
    if ((have_large_tails(inode->i_sb) &&
         inode->i_size < i_block_size(inode) * 4) ||
        (have_small_tails(inode->i_sb) &&
         inode->i_size < i_block_size(inode)))
        REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;

    /* set the key of the first byte in the 'block'-th block of file */
    make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /* key length */);
    if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
      start_trans:
        th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
        if (!th) {
            retval = -ENOMEM;
            goto failure;
        }
        reiserfs_update_inode_transaction(inode);
    }
  research:

    retval = search_for_position_by_key(inode->i_sb, &key, &path);
    if (retval == IO_ERROR) {
        retval = -EIO;
        goto failure;
    }

    bh = get_last_bh(&path);
    ih = get_ih(&path);
    item = get_item(&path);
    pos_in_item = path.pos_in_item;

    fs_gen = get_generation(inode->i_sb);
    copy_item_head(&tmp_ih, ih);

    if (allocation_needed
        (retval, allocated_block_nr, ih, item, pos_in_item)) {
        /* we have to allocate a block for the unformatted node */
        if (!th) {
            pathrelse(&path);
            goto start_trans;
        }

        repeat =
            _allocate_block(th, block, inode, &allocated_block_nr, &path,
                            create);

        if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
            /* restart the transaction to give the journal a chance to free
            ** some blocks.  Releases the path, so we have to go back to
            ** research if we succeed on the second try
            */
            SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
            retval = restart_transaction(th, inode, &path);
            if (retval)
                goto failure;
            repeat =
                _allocate_block(th, block, inode, &allocated_block_nr,
                                NULL, create);

            if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
                goto research;
            }
            if (repeat == QUOTA_EXCEEDED)
                retval = -EDQUOT;
            else
                retval = -ENOSPC;
            goto failure;
        }

        if (fs_changed(fs_gen, inode->i_sb)
            && item_moved(&tmp_ih, &path)) {
            goto research;
        }
    }
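    /*
     * Reader's note (added; not in the original source): from here the
     * search result falls into one of three cases handled below:
     *   1. an indirect item already covers 'block' -- map bh_result to
     *      the existing unformatted node, or plug a hole with the block
     *      allocated above;
     *   2. only a stat data item exists -- insert a new indirect item;
     *   3. a direct item (file tail) is in the way -- convert the tail
     *      to an unformatted node before the block can be mapped.
     */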
    if (indirect_item_found(retval, ih)) {
        b_blocknr_t unfm_ptr;
        /* 'block'-th block is in the file already (there is a
           corresponding cell in some indirect item).  But it may be a
           zero unformatted node pointer (hole) */
        unfm_ptr = get_block_num(item, pos_in_item);
        if (unfm_ptr == 0) {
            /* use allocated block to plug the hole */
            reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
            if (fs_changed(fs_gen, inode->i_sb)
                && item_moved(&tmp_ih, &path)) {
                reiserfs_restore_prepared_buffer(inode->i_sb, bh);
                goto research;
            }
            set_buffer_new(bh_result);
            if (buffer_dirty(bh_result)
                && reiserfs_data_ordered(inode->i_sb))
                reiserfs_add_ordered_list(inode, bh_result);
            put_block_num(item, pos_in_item, allocated_block_nr);
            unfm_ptr = allocated_block_nr;
            journal_mark_dirty(th, inode->i_sb, bh);
            reiserfs_update_sd(th, inode);
        }
        set_block_dev_mapped(bh_result, unfm_ptr, inode);
        pathrelse(&path);
        retval = 0;
        if (!dangle && th)
            retval = reiserfs_end_persistent_transaction(th);

        reiserfs_write_unlock(inode->i_sb);

        /* the item was found, so new blocks were not added to the file;
        ** there is no need to make sure the inode is updated with this
        ** transaction
        */
        return retval;
    }

    if (!th) {
        pathrelse(&path);
        goto start_trans;
    }

    /* desired position is not found or is in the direct item.  We have
       to append the file with holes up to 'block'-th block, converting
       direct items to indirect ones if necessary */
    done = 0;
    do {
        if (is_statdata_le_ih(ih)) {
            __le32 unp = 0;
            struct cpu_key tmp_key;

            /* indirect item has to be inserted */
            make_le_item_head(&tmp_ih, &key, version, 1, TYPE_INDIRECT,
                              UNFM_P_SIZE, 0 /* free_space */);

            if (cpu_key_k_offset(&key) == 1) {
                /* we are going to add 'block'-th block to the file.
                   Use allocated block for that */
                unp = cpu_to_le32(allocated_block_nr);
                set_block_dev_mapped(bh_result, allocated_block_nr,
                                     inode);
                set_buffer_new(bh_result);
                done = 1;
            }
            tmp_key = key;  // ;)
            set_cpu_key_k_offset(&tmp_key, 1);
            PATH_LAST_POSITION(&path)++;

            retval =
                reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih, inode,
                                     (char *)&unp);
            if (retval) {
                reiserfs_free_block(th, inode, allocated_block_nr, 1);
                goto failure;  // retval == -ENOSPC, -EDQUOT, -EIO or -EEXIST
            }
            //mark_tail_converted (inode);
        } else if (is_direct_le_ih(ih)) {
            /* direct item has to be converted */
            loff_t tail_offset;

            tail_offset =
                ((le_ih_k_offset(ih) - 1) &
                 ~(inode->i_sb->s_blocksize - 1)) + 1;
            if (tail_offset == cpu_key_k_offset(&key)) {
                /* the direct item we just found fits into the block we
                   have to map.  Convert it into an unformatted node:
                   use bh_result for the conversion */
                set_block_dev_mapped(bh_result, allocated_block_nr,
                                     inode);
                unbh = bh_result;
                done = 1;
            } else {
                /* we have to pad the file tail stored in direct item(s)
                   up to block size and convert it to an unformatted
                   node.  FIXME: this should also get into page cache */

                pathrelse(&path);
                /*
                 * ugly, but we can only end the transaction if
                 * we aren't nested
                 */
                BUG_ON(!th->t_refcount);
                if (th->t_refcount == 1) {
                    retval =
                        reiserfs_end_persistent_transaction(th);
                    th = NULL;
                    if (retval)
                        goto failure;
                }

                retval =
                    convert_tail_for_hole(inode, bh_result,
                                          tail_offset);
                if (retval) {
                    if (retval != -ENOSPC)
                        reiserfs_warning(inode->i_sb,
                                         "clm-6004: convert tail failed inode %lu, error %d",
                                         inode->i_ino, retval);
                    if (allocated_block_nr) {
                        /* the bitmap, the super, and the stat data == 3 */
                        if (!th)
                            th = reiserfs_persistent_transaction(inode->i_sb, 3);
                        if (th)
                            reiserfs_free_block(th, inode,