/*
 * file.c — Linux kernel 2.6.9 (OMAP1710 tree), fs/reiserfs.
 * Capture note: page 1 of 4 of a 1,331-line file; content below is truncated.
 */
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */#include <linux/time.h>#include <linux/reiserfs_fs.h>#include <linux/reiserfs_acl.h>#include <linux/reiserfs_xattr.h>#include <linux/smp_lock.h>#include <asm/uaccess.h>#include <linux/pagemap.h>#include <linux/swap.h>#include <linux/writeback.h>#include <linux/blkdev.h>#include <linux/buffer_head.h>#include <linux/quotaops.h>/*** We pack the tails of files on file close, not at the time they are written.** This implies an unnecessary copy of the tail and an unnecessary indirect item** insertion/balancing, for files that are written in one write.** It avoids unnecessary tail packings (balances) for files that are written in** multiple writes and are small enough to have tails.** ** file_release is called by the VFS layer when the file is closed. If** this is the last open file descriptor, and the file** small enough to have a tail, and the tail is currently in an** unformatted node, the tail is converted back into a direct item.** ** We use reiserfs_truncate_file to pack the tail, since it already has** all the conditions coded. 
*/static int reiserfs_file_release (struct inode * inode, struct file * filp){ struct reiserfs_transaction_handle th ; if (!S_ISREG (inode->i_mode)) BUG (); /* fast out for when nothing needs to be done */ if ((atomic_read(&inode->i_count) > 1 || !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) || !tail_has_to_be_packed(inode)) && REISERFS_I(inode)->i_prealloc_count <= 0) { return 0; } reiserfs_write_lock(inode->i_sb); down (&inode->i_sem); journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ; reiserfs_update_inode_transaction(inode) ;#ifdef REISERFS_PREALLOCATE reiserfs_discard_prealloc (&th, inode);#endif journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ; if (atomic_read(&inode->i_count) <= 1 && (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) && tail_has_to_be_packed (inode)) { /* if regular file is released by last holder and it has been appended (we append by unformatted node only) or its direct item(s) had to be converted, then it may have to be indirect2direct converted */ reiserfs_truncate_file(inode, 0) ; } up (&inode->i_sem); reiserfs_write_unlock(inode->i_sb); return 0;}static void reiserfs_vfs_truncate_file(struct inode *inode) { reiserfs_truncate_file(inode, 1) ;}/* Sync a reiserfs file. *//* * FIXME: sync_mapping_buffers() never has anything to sync. Can * be removed... */static int reiserfs_sync_file( struct file * p_s_filp, struct dentry * p_s_dentry, int datasync ) { struct inode * p_s_inode = p_s_dentry->d_inode; int n_err; int barrier_done; if (!S_ISREG(p_s_inode->i_mode)) BUG (); n_err = sync_mapping_buffers(p_s_inode->i_mapping) ; reiserfs_write_lock(p_s_inode->i_sb); barrier_done = reiserfs_commit_for_inode(p_s_inode); reiserfs_write_unlock(p_s_inode->i_sb); if (barrier_done != 1) blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL); return ( n_err < 0 ) ? -EIO : 0;}/* I really do not want to play with memory shortage right now, so to simplify the code, we are not going to write more than this much pages at a time. 
This still should considerably improve performance compared to 4k at a time case. This is 32 pages of 4k size. */#define REISERFS_WRITE_PAGES_AT_A_TIME (128 * 1024) / PAGE_CACHE_SIZE/* Allocates blocks for a file to fulfil write request. Maps all unmapped but prepared pages from the list. Updates metadata with newly allocated blocknumbers as needed */int reiserfs_allocate_blocks_for_region( struct reiserfs_transaction_handle *th, struct inode *inode, /* Inode we work with */ loff_t pos, /* Writing position */ int num_pages, /* number of pages write going to touch */ int write_bytes, /* amount of bytes to write */ struct page **prepared_pages, /* array of prepared pages */ int blocks_to_allocate /* Amount of blocks we need to allocate to fit the data into file */ ){ struct cpu_key key; // cpu key of item that we are going to deal with struct item_head *ih; // pointer to item head that we are going to deal with struct buffer_head *bh; // Buffer head that contains items that we are going to deal with __u32 * item; // pointer to item we are going to deal with INITIALIZE_PATH(path); // path to item, that we are going to deal with. b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored. reiserfs_blocknr_hint_t hint; // hint structure for block allocator. size_t res; // return value of various functions that we call. int curr_block; // current block used to keep track of unmapped blocks. int i; // loop counter int itempos; // position in item unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in // first page unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */ __u64 hole_size ; // amount of blocks for a file hole, if it needed to be created. 
int modifying_this_item = 0; // Flag for items traversal code to keep track // of the fact that we already prepared // current block for journal int will_prealloc = 0; RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?"); /* only preallocate if this is a small write */ if (REISERFS_I(inode)->i_prealloc_count || (!(write_bytes & (inode->i_sb->s_blocksize -1)) && blocks_to_allocate < REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize)) will_prealloc = REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize; allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) * sizeof(b_blocknr_t), GFP_NOFS); /* First we compose a key to point at the writing position, we want to do that outside of any locking region. */ make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/); /* If we came here, it means we absolutely need to open a transaction, since we need to allocate some blocks */ reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that. journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1); // Wish I know if this number enough reiserfs_update_inode_transaction(inode) ; /* Look for the in-tree position of our write, need path for block allocator */ res = search_for_position_by_key(inode->i_sb, &key, &path); if ( res == IO_ERROR ) { res = -EIO; goto error_exit; } /* Allocate blocks */ /* First fill in "hint" structure for block allocator */ hint.th = th; // transaction handle. hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine. hint.inode = inode; // Inode is needed by block allocator too. hint.search_start = 0; // We have no hint on where to search free blocks for block allocator. hint.key = key.on_disk_key; // on disk key of file. hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already. hint.formatted_node = 0; // We are allocating blocks for unformatted node. 
hint.preallocate = will_prealloc; /* Call block allocator to allocate blocks */ res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate); if ( res != CARRY_ON ) { if ( res == NO_DISK_SPACE ) { /* We flush the transaction in case of no space. This way some blocks might become free */ SB_JOURNAL(inode->i_sb)->j_must_wait = 1; restart_transaction(th, inode, &path); /* We might have scheduled, so search again */ res = search_for_position_by_key(inode->i_sb, &key, &path); if ( res == IO_ERROR ) { res = -EIO; goto error_exit; } /* update changed info for hint structure. */ res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate); if ( res != CARRY_ON ) { res = -ENOSPC; pathrelse(&path); goto error_exit; } } else { res = -ENOSPC; pathrelse(&path); goto error_exit; } }#ifdef __BIG_ENDIAN // Too bad, I have not found any way to convert a given region from // cpu format to little endian format { int i; for ( i = 0; i < blocks_to_allocate ; i++) allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]); }#endif /* Blocks allocating well might have scheduled and tree might have changed, let's search the tree again */ /* find where in the tree our write should go */ res = search_for_position_by_key(inode->i_sb, &key, &path); if ( res == IO_ERROR ) { res = -EIO; goto error_exit_free_blocks; } bh = get_last_bh( &path ); // Get a bufferhead for last element in path. ih = get_ih( &path ); // Get a pointer to last item head in path. 
item = get_item( &path ); // Get a pointer to last item in path /* Let's see what we have found */ if ( res != POSITION_FOUND ) { /* position not found, this means that we might need to append file with holes first */ // Since we are writing past the file's end, we need to find out if // there is a hole that needs to be inserted before our writing // position, and how many blocks it is going to cover (we need to // populate pointers to file blocks representing the hole with zeros) { int item_offset = 1; /* * if ih is stat data, its offset is 0 and we don't want to * add 1 to pos in the hole_size calculation */ if (is_statdata_le_ih(ih)) item_offset = 0; hole_size = (pos + item_offset - (le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize))) >> inode->i_sb->s_blocksize_bits; } if ( hole_size > 0 ) { int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE ); // How much data to insert first time. /* area filled with zeroes, to supply as list of zero blocknumbers We allocate it outside of loop just in case loop would spin for several iterations. */ char *zeros = kmalloc(to_paste*UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway. if ( !zeros ) { res = -ENOMEM; goto error_exit_free_blocks; } memset ( zeros, 0, to_paste*UNFM_P_SIZE); do { to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE ); if ( is_indirect_le_ih(ih) ) { /* Ok, there is existing indirect item already. 
Need to append it */ /* Calculate position past inserted item */ make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3); res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)zeros, UNFM_P_SIZE*to_paste); if ( res ) { kfree(zeros); goto error_exit_free_blocks; } } else if ( is_statdata_le_ih(ih) ) { /* No existing item, create it */ /* item head for new item */ struct item_head ins_ih; /* create a key for our new item */ make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3); /* Create new item head for our new item */ make_le_item_head (&ins_ih, &key, key.version, 1, TYPE_INDIRECT, to_paste*UNFM_P_SIZE, 0 /* free space */); /* Find where such item should live in the tree */ res = search_item (inode->i_sb, &key, &path); if ( res != ITEM_NOT_FOUND ) { /* item should not exist, otherwise we have error */ if ( res != -ENOSPC ) { reiserfs_warning (inode->i_sb, "green-9008: search_by_key (%K) returned %d", &key, res); } res = -EIO; kfree(zeros); goto error_exit_free_blocks; } res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)zeros); } else { reiserfs_panic(inode->i_sb, "green-9011: Unexpected key type %K\n", &key); } if ( res ) { kfree(zeros); goto error_exit_free_blocks; } /* Now we want to check if transaction is too full, and if it is we restart it. This will also free the path. */ if (journal_transaction_should_end(th, th->t_blocks_allocated)) restart_transaction(th, inode, &path); /* Well, need to recalculate path and stuff */ set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + (to_paste << inode->i_blkbits)); res = search_for_position_by_key(inode->i_sb, &key, &path); if ( res == IO_ERROR ) { res = -EIO; kfree(zeros);
/* [capture truncated here: the remainder of reiserfs_allocate_blocks_for_region
   and the rest of the file continue on subsequent pages of the capture;
   page-viewer keyboard-shortcut chrome removed] */