
📄 inode.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <linux/exportfs.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/swap.h>

int reiserfs_commit_write(struct file *f, struct page *page,
			  unsigned from, unsigned to);
int reiserfs_prepare_write(struct file *f, struct page *page,
			   unsigned from, unsigned to);

void reiserfs_delete_inode(struct inode *inode)
{
	/* We need blocks for transaction + (user+group) quota update (possibly delete) */
	int jbegin_count =
	    JOURNAL_PER_BALANCE_CNT * 2 +
	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
	struct reiserfs_transaction_handle th;
	int err;

	truncate_inode_pages(&inode->i_data, 0);

	reiserfs_write_lock(inode->i_sb);

	/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {	/* also handles bad_inode case */
		reiserfs_delete_xattrs(inode);

		if (journal_begin(&th, inode->i_sb, jbegin_count))
			goto out;
		reiserfs_update_inode_transaction(inode);

		err = reiserfs_delete_object(&th, inode);

		/* Do quota update inside a transaction for journaled quotas. We must do that
		 * after delete_object so that quota updates go into the same transaction as
		 * stat data deletion */
		if (!err)
			DQUOT_FREE_INODE(inode);

		if (journal_end(&th, inode->i_sb, jbegin_count))
			goto out;

		/* check return value from reiserfs_delete_object after
		 * ending the transaction
		 */
		if (err)
			goto out;

		/* all items of file are deleted, so we can remove "save" link */
		remove_save_link(inode, 0 /* not truncate */ );	/* we can't do anything
								 * about an error here */
	} else {
		/* no object items are in the tree */
		;
	}
      out:
	clear_inode(inode);	/* note this must go after the journal_end to prevent deadlock */
	inode->i_blocks = 0;
	reiserfs_write_unlock(inode->i_sb);
}

static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
			  __u32 objectid, loff_t offset, int type, int length)
{
	key->version = version;

	key->on_disk_key.k_dir_id = dirid;
	key->on_disk_key.k_objectid = objectid;
	set_cpu_key_k_offset(key, offset);
	set_cpu_key_k_type(key, type);
	key->key_length = length;
}

/* take base of inode_key (it comes from inode always) (dirid, objectid) and version from an inode, set
   offset and type of key */
void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
		  int type, int length)
{
	_make_cpu_key(key, get_inode_item_key_version(inode),
		      le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
		      le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
		      length);
}

//
// when key is 0, do not set version and short key
//
inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
			      int version,
			      loff_t offset, int type, int length,
			      int entry_count /*or ih_free_space */ )
{
	if (key) {
		ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
		ih->ih_key.k_objectid =
		    cpu_to_le32(key->on_disk_key.k_objectid);
	}
	put_ih_version(ih, version);
	set_le_ih_k_offset(ih, offset);
	set_le_ih_k_type(ih, type);
	put_ih_item_len(ih, length);
	/*    set_ih_free_space (ih, 0); */
	// for directory items it is entry count, for directs and stat
	// datas - 0xffff, for indirects - 0
	put_ih_entry_count(ih, entry_count);
}

//
// FIXME: we might cache recently accessed indirect item
// Ugh.  Not too eager for that....
//  I cut the code until such time as I see a convincing argument (benchmark).
// I don't want a bloated inode struct..., and I don't like code complexity....

/* cutting the code is fine, since it really isn't in use yet and is easy
** to add back in.  But, Vladimir has a really good idea here.  Think
** about what happens for reading a file.  For each page,
** The VFS layer calls reiserfs_readpage, who searches the tree to find
** an indirect item.  This indirect item has X number of pointers, where
** X is a big number if we've done the block allocation right.  But,
** we only use one or two of these pointers during each call to readpage,
** needlessly researching again later on.
**
** The size of the cache could be dynamic based on the size of the file.
**
** I'd also like to see us cache the location the stat data item, since
** we are needlessly researching for that frequently.
**
** --chris
*/

/* If this page has a file tail in it, and
** it was read in by get_block_create_0, the page data is valid,
** but tail is still sitting in a direct item, and we can't write to
** it.  So, look through this page, and check all the mapped buffers
** to make sure they have valid block numbers.  Any that don't need
** to be unmapped, so that block_prepare_write will correctly call
** reiserfs_get_block to convert the tail into an unformatted node
*/
static inline void fix_tail_page_for_writing(struct page *page)
{
	struct buffer_head *head, *next, *bh;

	if (page && page_has_buffers(page)) {
		head = page_buffers(page);
		bh = head;
		do {
			next = bh->b_this_page;
			if (buffer_mapped(bh) && bh->b_blocknr == 0) {
				reiserfs_unmap_buffer(bh);
			}
			bh = next;
		} while (bh != head);
	}
}

/* reiserfs_get_block does not need to allocate a block only if it has been
   done already or non-hole position has been found in the indirect item */
static inline int allocation_needed(int retval, b_blocknr_t allocated,
				    struct item_head *ih,
				    __le32 * item, int pos_in_item)
{
	if (allocated)
		return 0;
	if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
	    get_block_num(item, pos_in_item))
		return 0;
	return 1;
}

static inline int indirect_item_found(int retval, struct item_head *ih)
{
	return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
}

static inline void set_block_dev_mapped(struct buffer_head *bh,
					b_blocknr_t block, struct inode *inode)
{
	map_bh(bh, inode->i_sb, block);
}

//
// files which were created in the earlier version can not be longer,
// than 2 gb
//
static int file_capable(struct inode *inode, sector_t block)
{
	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||	// it is new file.
	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))	// old file, but 'block' is inside of 2gb
		return 1;

	return 0;
}

static int restart_transaction(struct reiserfs_transaction_handle *th,
			       struct inode *inode, struct treepath *path)
{
	struct super_block *s = th->t_super;
	int len = th->t_blocks_allocated;
	int err;

	BUG_ON(!th->t_trans_id);
	BUG_ON(!th->t_refcount);

	pathrelse(path);

	/* we cannot restart while nested */
	if (th->t_refcount > 1) {
		return 0;
	}
	reiserfs_update_sd(th, inode);
	err = journal_end(th, s, len);
	if (!err) {
		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
		if (!err)
			reiserfs_update_inode_transaction(inode);
	}
	return err;
}

// it is called by get_block when create == 0. Returns block number
// for 'block'-th logical block of file. When it hits direct item it
// returns 0 (being called from bmap) or read direct item into piece
// of page (bh_result)
// Please improve the english/clarity in the comment above, as it is
// hard to understand.
static int _get_block_create_0(struct inode *inode, sector_t block,
			       struct buffer_head *bh_result, int args)
{
	INITIALIZE_PATH(path);
	struct cpu_key key;
	struct buffer_head *bh;
	struct item_head *ih, tmp_ih;
	int fs_gen;
	b_blocknr_t blocknr;
	char *p = NULL;
	int chars;
	int ret;
	int result;
	int done = 0;
	unsigned long offset;

	// prepare the key to look for the 'block'-th block of file
	make_cpu_key(&key, inode,
		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
		     3);

      research:
	result = search_for_position_by_key(inode->i_sb, &key, &path);
	if (result != POSITION_FOUND) {
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		if (result == IO_ERROR)
			return -EIO;
		// We do not return -ENOENT if there is a hole but page is uptodate, because it means
		// That there is some MMAPED data associated with it that is yet to be written to disk.
		if ((args & GET_BLOCK_NO_HOLE)
		    && !PageUptodate(bh_result->b_page)) {
			return -ENOENT;
		}
		return 0;
	}
	//
	bh = get_last_bh(&path);
	ih = get_ih(&path);
	if (is_indirect_le_ih(ih)) {
		__le32 *ind_item = (__le32 *) B_I_PITEM(bh, ih);

		/* FIXME: here we could cache indirect item or part of it in
		   the inode to avoid search_by_key in case of subsequent
		   access to file */
		blocknr = get_block_num(ind_item, path.pos_in_item);
		ret = 0;
		if (blocknr) {
			map_bh(bh_result, inode->i_sb, blocknr);
			if (path.pos_in_item ==
			    ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
				set_buffer_boundary(bh_result);
			}
		} else
			// We do not return -ENOENT if there is a hole but page is uptodate, because it means
			// That there is some MMAPED data associated with it that is yet to  be written to disk.
		if ((args & GET_BLOCK_NO_HOLE)
			    && !PageUptodate(bh_result->b_page)) {
			ret = -ENOENT;
		}

		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return ret;
	}
	// requested data are in direct item(s)
	if (!(args & GET_BLOCK_READ_DIRECT)) {
		// we are called by bmap. FIXME: we can not map block of file
		// when it is stored in direct item(s)
		pathrelse(&path);
		if (p)
			kunmap(bh_result->b_page);
		return -ENOENT;
	}

	/* if we've got a direct item, and the buffer or page was uptodate,
	 ** we don't want to pull data off disk again.  skip to the
	 ** end, where we map the buffer and return
	 */
	if (buffer_uptodate(bh_result)) {
		goto finished;
	} else
		/*
		 ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
		 ** pages without any buffers.  If the page is up to date, we don't want
		 ** read old data off disk.  Set the up to date bit on the buffer instead
		 ** and jump to the end
		 */
	if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
		set_buffer_uptodate(bh_result);
		goto finished;
	}
	// read file tail into part of page
	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
	fs_gen = get_generation(inode->i_sb);
	copy_item_head(&tmp_ih, ih);

	/* we only want to kmap if we are reading the tail into the page.
	 ** this is not the common case, so we don't kmap until we are
	 ** sure we need to.  But, this means the item might move if
	 ** kmap schedules
	 */
	if (!p) {
		p = (char *)kmap(bh_result->b_page);
		if (fs_changed(fs_gen, inode->i_sb)
		    && item_moved(&tmp_ih, &path)) {
			goto research;
		}
	}
	p += offset;
	memset(p, 0, inode->i_sb->s_blocksize);
	do {
		if (!is_direct_le_ih(ih)) {
			BUG();
		}
		/* make sure we don't read more bytes than actually exist in
		 ** the file.  This can happen in odd cases where i_size isn't
		 ** correct, and when direct item padding results in a few
		 ** extra bytes at the end of the direct item
		 */
		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
			break;
		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
			chars =
			    inode->i_size - (le_ih_k_offset(ih) - 1) -
			    path.pos_in_item;
			done = 1;
		} else {
			chars = ih_item_len(ih) - path.pos_in_item;
		}
		memcpy(p, B_I_PITEM(bh, ih) + path.pos_in_item, chars);

		if (done)
			break;

		p += chars;

		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
			// we done, if read direct item is not the last item of
			// node FIXME: we could try to check right delimiting key
			// to see whether direct item continues in the right
			// neighbor or rely on i_size
			break;

		// update key to look for the next piece
		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
		result = search_for_position_by_key(inode->i_sb, &key, &path);
		if (result != POSITION_FOUND)
			// i/o error most likely
			break;
		bh = get_last_bh(&path);
		ih = get_ih(&path);
	} while (1);

	flush_dcache_page(bh_result->b_page);
	kunmap(bh_result->b_page);

      finished:
	pathrelse(&path);

	if (result == IO_ERROR)
		return -EIO;

	/* this buffer has valid data, but isn't valid for io.  mapping it to
	 * block #0 tells the rest of reiserfs it just has a tail in it
	 */
	map_bh(bh_result, inode->i_sb, 0);
	set_buffer_uptodate(bh_result);
	return 0;
}

// this is called to create file map. So, _get_block_create_0 will not
// read direct item
static int reiserfs_bmap(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	if (!file_capable(inode, block))
		return -EFBIG;

	reiserfs_write_lock(inode->i_sb);
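The key construction in make_cpu_key() and the tail-read path in _get_block_create_0() above rest on a little arithmetic: the key offset for the 'block'-th logical block is block * blocksize + 1 (ReiserFS key byte offsets are 1-based), the tail's position inside a page is recovered with (key_offset - 1) & (PAGE_CACHE_SIZE - 1), and file_capable() caps old 3.5-format files at block numbers below 1 << (31 - s_blocksize_bits), i.e. 2 GB. The following is a minimal user-space sketch of that arithmetic only, not kernel code; the 4096-byte block and page sizes are assumptions chosen for illustration.

/* Standalone sketch: reproduces the offset arithmetic used by
 * make_cpu_key(), the tail-read path in _get_block_create_0(), and the
 * 2 GB check in file_capable(). Block/page size of 4096 is an assumption. */
#include <stdio.h>

int main(void)
{
	const unsigned long blocksize = 4096;		/* assumed s_blocksize */
	const unsigned long page_cache_size = 4096;	/* assumed PAGE_CACHE_SIZE */
	unsigned long block;

	for (block = 0; block < 3; block++) {
		/* key offset of the 'block'-th logical block: byte offsets
		 * in reiserfs keys are 1-based, hence the "+ 1" */
		unsigned long long key_offset =
		    (unsigned long long)block * blocksize + 1;

		/* where a tail at this key offset lands inside the page:
		 * same math as (cpu_key_k_offset(&key) - 1) &
		 * (PAGE_CACHE_SIZE - 1) */
		unsigned long offset_in_page =
		    (key_offset - 1) & (page_cache_size - 1);

		printf("block %lu -> key offset %llu, offset in page %lu\n",
		       block, key_offset, offset_in_page);
	}

	/* limit enforced by file_capable() for 3.5-format files:
	 * block must be below 1 << (31 - s_blocksize_bits), i.e. 2 GB */
	{
		unsigned long max_35_block = 1UL << (31 - 12);	/* 12 = log2(4096) */
		printf("3.5-format limit: %lu blocks (%llu bytes)\n",
		       max_35_block,
		       (unsigned long long)max_35_block * blocksize);
	}
	return 0;
}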
