
📄 inode.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
                                allocated_block_nr, 1);
                    }
                    goto failure;
                }
                goto research;
            }

            retval = direct2indirect(th, inode, &path, unbh, tail_offset);
            if (retval) {
                reiserfs_unmap_buffer(unbh);
                reiserfs_free_block(th, inode, allocated_block_nr, 1);
                goto failure;
            }
            /* it is important that set_buffer_uptodate is done after
             ** the direct2indirect.  The buffer might contain valid
             ** data newer than the data on disk (read by readpage, changed,
             ** and then sent here by writepage).  direct2indirect needs
             ** to know if unbh was already up to date, so it can decide
             ** if the data in unbh needs to be replaced with data from
             ** the disk
             */
            set_buffer_uptodate(unbh);

            /* unbh->b_page == NULL in case of DIRECT_IO request, this means
               buffer will disappear shortly, so it should not be added to
             */
            if (unbh->b_page) {
                /* we've converted the tail, so we must
                 ** flush unbh before the transaction commits
                 */
                reiserfs_add_tail_list(inode, unbh);

                /* mark it dirty now to prevent commit_write from adding
                 ** this buffer to the inode's dirty buffer list
                 */
                /*
                 * AKPM: changed __mark_buffer_dirty to mark_buffer_dirty().
                 * It's still atomic, but it sets the page dirty too,
                 * which makes it eligible for writeback at any time by the
                 * VM (which was also the case with __mark_buffer_dirty())
                 */
                mark_buffer_dirty(unbh);
            }
        } else {
            /* append indirect item with holes if needed, when appending
               pointer to 'block'-th block use block, which is already
               allocated */
            struct cpu_key tmp_key;
            unp_t unf_single = 0;   // We use this in case we need to allocate
                                    // only one block, which is a fastpath
            unp_t *un;
            __u64 max_to_insert =
                MAX_ITEM_LEN(inode->i_sb->s_blocksize) / UNFM_P_SIZE;
            __u64 blocks_needed;

            RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
                   "vs-804: invalid position for append");
            /* indirect item has to be appended, set up key of that position */
            make_cpu_key(&tmp_key, inode,
                         le_key_k_offset(version, &(ih->ih_key)) +
                         op_bytes_number(ih, inode->i_sb->s_blocksize),
                         //pos_in_item * inode->i_sb->s_blocksize,
                         TYPE_INDIRECT, 3); // key type is unimportant

            RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
                   "green-805: invalid offset");
            blocks_needed =
                1 + ((cpu_key_k_offset(&key) - cpu_key_k_offset(&tmp_key))
                     >> inode->i_sb->s_blocksize_bits);

            if (blocks_needed == 1) {
                un = &unf_single;
            } else {
                un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE,
                             GFP_ATOMIC);   // We need to avoid scheduling.
                if (!un) {
                    un = &unf_single;
                    blocks_needed = 1;
                    max_to_insert = 0;
                }
            }
            if (blocks_needed <= max_to_insert) {
                /* we are going to add target block to the file. Use allocated
                   block for that */
                un[blocks_needed - 1] = cpu_to_le32(allocated_block_nr);
                set_block_dev_mapped(bh_result, allocated_block_nr, inode);
                set_buffer_new(bh_result);
                done = 1;
            } else {
                /* paste hole to the indirect item */
                /* If kmalloc failed, max_to_insert becomes zero and it means we
                   only have space for one block */
                blocks_needed = max_to_insert ? max_to_insert : 1;
            }
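            /*
             * Sizing note (illustrative figures, not from this file):
             * with a 4 KiB block size and 4-byte unformatted node
             * pointers (UNFM_P_SIZE), max_to_insert is
             * MAX_ITEM_LEN(4096) / 4 -- roughly a thousand pointers per
             * item.  So a write far past EOF, say 10240 blocks beyond
             * the last indirect pointer, pastes the hole in several
             * passes of at most max_to_insert zeroed pointers, and only
             * the final pass (blocks_needed <= max_to_insert) maps the
             * newly allocated target block.
             */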
            retval = reiserfs_paste_into_item(th, &path, &tmp_key, inode,
                                              (char *)un,
                                              UNFM_P_SIZE * blocks_needed);
            if (blocks_needed != 1)
                kfree(un);

            if (retval) {
                reiserfs_free_block(th, inode, allocated_block_nr, 1);
                goto failure;
            }
            if (!done) {
                /* We need to mark new file size in case this function will be
                   interrupted/aborted later on. And we may do this only for
                   holes. */
                inode->i_size += inode->i_sb->s_blocksize * blocks_needed;
            }
        }

        if (done == 1)
            break;

        /* this loop could log more blocks than we had originally asked
         ** for.  So, we have to allow the transaction to end if it is
         ** too big or too full.  Update the inode so things are
         ** consistent if we crash before the function returns
         **
         ** release the path so that anybody waiting on the path before
         ** ending their transaction will be able to continue.
         */
        if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
            retval = restart_transaction(th, inode, &path);
            if (retval)
                goto failure;
        }
        /* inserting indirect pointers for a hole can take a
         ** long time.  reschedule if needed
         */
        cond_resched();

        retval = search_for_position_by_key(inode->i_sb, &key, &path);
        if (retval == IO_ERROR) {
            retval = -EIO;
            goto failure;
        }
        if (retval == POSITION_FOUND) {
            reiserfs_warning(inode->i_sb,
                             "vs-825: reiserfs_get_block: "
                             "%K should not be found", &key);
            retval = -EEXIST;
            if (allocated_block_nr)
                reiserfs_free_block(th, inode, allocated_block_nr, 1);
            pathrelse(&path);
            goto failure;
        }
        bh = get_last_bh(&path);
        ih = get_ih(&path);
        item = get_item(&path);
        pos_in_item = path.pos_in_item;
    } while (1);

    retval = 0;

failure:
    if (th && (!dangle || (retval && !th->t_trans_id))) {
        int err;
        if (th->t_trans_id)
            reiserfs_update_sd(th, inode);
        err = reiserfs_end_persistent_transaction(th);
        if (err)
            retval = err;
    }

    reiserfs_write_unlock(inode->i_sb);
    reiserfs_check_path(&path);
    return retval;
}

static int
reiserfs_readpages(struct file *file, struct address_space *mapping,
                   struct list_head *pages, unsigned nr_pages)
{
    return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
}

/* Compute real number of used bytes by file
 * Following three functions can go away when we'll have enough space in stat item
 */
static int real_space_diff(struct inode *inode, int sd_size)
{
    int bytes;
    loff_t blocksize = inode->i_sb->s_blocksize;

    if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
        return sd_size;

    /* End of file is also in full block with indirect reference, so round
     ** up to the next block.
     **
     ** there is just no way to know if the tail is actually packed
     ** on the file, so we have to assume it isn't.  When we pack the
     ** tail, we add 4 bytes to pretend there really is an unformatted
     ** node pointer
     */
    bytes =
        ((inode->i_size + (blocksize - 1)) >> inode->i_sb->s_blocksize_bits)
        * UNFM_P_SIZE + sd_size;
    return bytes;
}
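/* Worked example of the rounding above (illustrative, assuming a 4 KiB
 * block size and UNFM_P_SIZE == 4): a regular file with i_size == 5000
 * occupies two blocks, so real_space_diff() returns 2 * 4 + sd_size --
 * the bytes consumed by two unformatted node pointers plus the stat
 * data, on top of what i_blocks accounts for in 512-byte units.
 */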
static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
                                        int sd_size)
{
    if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
        return inode->i_size +
            (loff_t) (real_space_diff(inode, sd_size));
    }
    return ((loff_t) real_space_diff(inode, sd_size)) +
        (((loff_t) blocks) << 9);
}

/* Compute number of blocks used by file in ReiserFS counting */
static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
{
    loff_t bytes = inode_get_bytes(inode);
    loff_t real_space = real_space_diff(inode, sd_size);

    /* keeps fsck and non-quota versions of reiserfs happy */
    if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
        bytes += (loff_t) 511;
    }

    /* files from before the quota patch might have i_blocks such that
     ** bytes < real_space.  Deal with that here to prevent it from
     ** going negative.
     */
    if (bytes < real_space)
        return 0;
    return (bytes - real_space) >> 9;
}
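/* The two helpers above are inverses by construction (illustrative
 * round trip): for a regular file where inode_get_bytes() returns
 * 8192 + diff, with diff = real_space_diff(inode, sd_size),
 * to_fake_used_blocks() yields 8192 >> 9 == 16, and
 * to_real_used_space(inode, 16, sd_size) gives diff + (16 << 9),
 * recovering the original byte figure.
 */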
//
// BAD: new directories have stat data of new type and all other items
// of old type. Version stored in the inode says about body items, so
// in update_stat_data we can not rely on inode, but have to check
// item version directly
//

// called by read_locked_inode
static void init_inode(struct inode *inode, struct treepath *path)
{
    struct buffer_head *bh;
    struct item_head *ih;
    __u32 rdev;
    //int version = ITEM_VERSION_1;

    bh = PATH_PLAST_BUFFER(path);
    ih = PATH_PITEM_HEAD(path);

    copy_key(INODE_PKEY(inode), &(ih->ih_key));

    INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
    REISERFS_I(inode)->i_flags = 0;
    REISERFS_I(inode)->i_prealloc_block = 0;
    REISERFS_I(inode)->i_prealloc_count = 0;
    REISERFS_I(inode)->i_trans_id = 0;
    REISERFS_I(inode)->i_jl = NULL;
    mutex_init(&(REISERFS_I(inode)->i_mmap));
    reiserfs_init_acl_access(inode);
    reiserfs_init_acl_default(inode);
    reiserfs_init_xattr_rwsem(inode);

    if (stat_data_v1(ih)) {
        struct stat_data_v1 *sd =
            (struct stat_data_v1 *)B_I_PITEM(bh, ih);
        unsigned long blocks;

        set_inode_item_key_version(inode, KEY_FORMAT_3_5);
        set_inode_sd_version(inode, STAT_DATA_V1);
        inode->i_mode = sd_v1_mode(sd);
        inode->i_nlink = sd_v1_nlink(sd);
        inode->i_uid = sd_v1_uid(sd);
        inode->i_gid = sd_v1_gid(sd);
        inode->i_size = sd_v1_size(sd);
        inode->i_atime.tv_sec = sd_v1_atime(sd);
        inode->i_mtime.tv_sec = sd_v1_mtime(sd);
        inode->i_ctime.tv_sec = sd_v1_ctime(sd);
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_mtime.tv_nsec = 0;

        inode->i_blocks = sd_v1_blocks(sd);
        inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
        blocks = (inode->i_size + 511) >> 9;
        blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
        if (inode->i_blocks > blocks) {
            // there was a bug in <=3.5.23 when i_blocks could take negative
            // values. Starting from 3.5.17 this value could even be stored in
            // stat data. For such files we set i_blocks based on file size.
            // Just 2 notes: this can be wrong for sparse files, and the
            // on-disk value will only be updated if the file's inode ever changes
            inode->i_blocks = blocks;
        }

        rdev = sd_v1_rdev(sd);
        REISERFS_I(inode)->i_first_direct_byte =
            sd_v1_first_direct_byte(sd);
        /* an early bug in the quota code can give us an odd number for the
         ** block count.  This is incorrect, fix it here.
         */
        if (inode->i_blocks & 1) {
            inode->i_blocks++;
        }
        inode_set_bytes(inode,
                        to_real_used_space(inode, inode->i_blocks,
                                           SD_V1_SIZE));
        /* nopack is initially zero for v1 objects. For v2 objects,
           nopack is initialised from sd_attrs */
        REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
    } else {
        // new stat data found, but object may have old items
        // (directories and symlinks)
        struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);

        inode->i_mode = sd_v2_mode(sd);
        inode->i_nlink = sd_v2_nlink(sd);
        inode->i_uid = sd_v2_uid(sd);
        inode->i_size = sd_v2_size(sd);
        inode->i_gid = sd_v2_gid(sd);
        inode->i_mtime.tv_sec = sd_v2_mtime(sd);
        inode->i_atime.tv_sec = sd_v2_atime(sd);
        inode->i_ctime.tv_sec = sd_v2_ctime(sd);
        inode->i_ctime.tv_nsec = 0;
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_blocks = sd_v2_blocks(sd);
        rdev = sd_v2_rdev(sd);
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
            inode->i_generation =
                le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
        else
            inode->i_generation = sd_v2_generation(sd);

        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
            set_inode_item_key_version(inode, KEY_FORMAT_3_5);
        else
            set_inode_item_key_version(inode, KEY_FORMAT_3_6);
        REISERFS_I(inode)->i_first_direct_byte = 0;
        set_inode_sd_version(inode, STAT_DATA_V2);
        inode_set_bytes(inode,
                        to_real_used_space(inode, inode->i_blocks,
                                           SD_V2_SIZE));
        /* read persistent inode attributes from sd and initialise
           generic inode flags from them */
        REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
        sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
    }

    pathrelse(path);
    if (S_ISREG(inode->i_mode)) {
        inode->i_op = &reiserfs_file_inode_operations;
        inode->i_fop = &reiserfs_file_operations;
        inode->i_mapping->a_ops = &reiserfs_address_space_operations;
    } else if (S_ISDIR(inode->i_mode)) {
        inode->i_op = &reiserfs_dir_inode_operations;
        inode->i_fop = &reiserfs_dir_operations;
    } else if (S_ISLNK(inode->i_mode)) {
        inode->i_op = &reiserfs_symlink_inode_operations;
        inode->i_mapping->a_ops = &reiserfs_address_space_operations;
    } else {
        inode->i_blocks = 0;
        inode->i_op = &reiserfs_special_inode_operations;
        init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
    }
}

// update new stat data with inode fields
static void inode2sd(void *sd, struct inode *inode, loff_t size)
{
    struct stat_data *sd_v2 = (struct stat_data *)sd;
    __u16 flags;

    set_sd_v2_mode(sd_v2, inode->i_mode);
    set_sd_v2_nlink(sd_v2, inode->i_nlink);
    set_sd_v2_uid(sd_v2, inode->i_uid);
    set_sd_v2_size(sd_v2, size);
    set_sd_v2_gid(sd_v2, inode->i_gid);
    set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
    set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
    set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
    set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
        set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
    else
        set_sd_v2_generation(sd_v2, inode->i_generation);
    flags = REISERFS_I(inode)->i_attrs;
    i_attrs_to_sd_attrs(inode, &flags);
    set_sd_v2_attrs(sd_v2, flags);
}
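/* Note: which of inode2sd()/inode2sd_v1() gets used for a given inode
 * depends on its stat data version (STAT_DATA_V2 vs STAT_DATA_V1), as
 * recorded by set_inode_sd_version() in init_inode() above; the
 * dispatching caller is later in this file, beyond this excerpt.
 */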
// used to copy inode's fields to old stat data
static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
{
    struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;

    set_sd_v1_mode(sd_v1, inode->i_mode);
    set_sd_v1_uid(sd_v1, inode->i_uid);
    set_sd_v1_gid(sd_v1, inode->i_gid);
    set_sd_v1_nlink(sd_v1, inode->i_nlink);
    set_sd_v1_size(sd_v1, size);
    set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
    set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
    set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
        set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
