⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 inode.c

📁 嵌入式系统设计与实例开发实验教材二源码 多线程应用程序设计 串行端口程序设计 AD接口实验 CAN总线通信实验 GPS通信实验 Linux内核移植与编译实验 IC卡读写实验 SD驱动使
💻 C
📖 第 1 页 / 共 5 页
字号:
	/* bget() all the buffers */
	if (order_data) {
		if (!page->buffers)
			create_empty_buffers(page,
				inode->i_dev, inode->i_sb->s_blocksize);
		page_buffers = page->buffers;
		/* Take a reference on each buffer (bget_one) so that
		 * page_buffers stays valid once the page is unlocked */
		walk_page_buffers(handle, page_buffers, 0,
				PAGE_CACHE_SIZE, NULL, bget_one);
	}

	ret = block_write_full_page(page, ext3_get_block);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_buffers is
	 * safe due to elevated refcount.
	 */
	handle = ext3_journal_current_handle();
	lock_kernel();

	/* And attach them to the current transaction */
	if (order_data) {
		err = walk_page_buffers(handle, page_buffers,
			0, PAGE_CACHE_SIZE, NULL, journal_dirty_async_data);
		/* first failure wins; preserve the earlier error code */
		if (!ret)
			ret = err;
	}

	err = ext3_journal_stop(handle, inode);
	if (!ret)
		ret = err;
	unlock_kernel();
	return ret;

out_fail:
	/* Could not proceed: re-dirty the page so it is retried later */
	unlock_kernel();
	SetPageDirty(page);
	UnlockPage(page);
	return ret;
}

/* Read a single page via the generic buffer layer; blocks are mapped
 * with ext3_get_block(). */
static int ext3_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ext3_get_block);
}

/* Invalidate the page's buffers at truncate time, going through the
 * journal so journaled copies are dealt with first. */
static int ext3_flushpage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
	return journal_flushpage(journal, page, offset);
}

/* Try to free the page's buffers; the journal decides whether they can
 * actually be released yet. */
static int ext3_releasepage(struct page *page, int wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
	return journal_try_to_free_buffers(journal, page, wait);
}

struct address_space_operations ext3_aops = {
	readpage:	ext3_readpage,		/* BKL not held.  Don't need */
	writepage:	ext3_writepage,		/* BKL not held.  We take it */
	sync_page:	block_sync_page,
	prepare_write:	ext3_prepare_write,	/* BKL not held.  We take it */
	commit_write:	ext3_commit_write,	/* BKL not held.  We take it */
	bmap:		ext3_bmap,		/* BKL held */
	flushpage:	ext3_flushpage,		/* BKL not held.  Don't need */
	releasepage:	ext3_releasepage,	/* BKL not held.  
Don't need */
};

/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate. We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 *
 * Returns 0 on success (including the nothing-to-do cases), or a negative
 * errno (-ENOMEM, -EIO, or a journalling error).
 */
static int ext3_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = inode->i_sb->s_blocksize;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	/* Bytes to zero: from `offset' up to the end of its block */
	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);

	/* Find the buffer that contains "offset" */
	bh = page->buffers;
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		/* Hole? Nothing to do */
		if (buffer_uptodate(bh))
			goto unlock;
		ext3_get_block(inode, iblock, bh, 0);
		/* Still unmapped? Nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (Page_Uptodate(page))
		set_bit(BH_Uptodate, &bh->b_state);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. 
*/
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	/* Zero the tail of the block through a temporary kernel mapping */
	memset(kmap(page) + offset, 0, length);
	flush_dcache_page(page);
	kunmap(page);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh, 0);
		__mark_buffer_dirty(bh);
	}

unlock:
	UnlockPage(page);
	page_cache_release(page);
out:
	return err;
}

/*
 * Return 1 if every 32-bit word in [p, q) is zero, 0 otherwise.
 *
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(u32 *p, u32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 *	ext3_find_shared - find the indirect blocks for partial truncation.
 *	@inode:	  inode in question
 *	@depth:	  depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
 *	@chain:	  place to store the pointers to partial indirect blocks
 *	@top:	  place to the (detached) top of branch
 *
 *	This is a helper function used by ext3_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several
 *	indirect blocks but leave the blocks themselves alive. Block is
 *	partially truncated if some data below the new i_size is referred
 *	from it (and it is on the path to the first completely truncated
 *	data block, indeed).  We have to free the top of that path along
 *	with everything to the right of the path. Since no allocation
 *	past the truncation point is possible until ext3_truncate()
 *	finishes, we may safely do the latter, but top of branch may
 *	require special attention - pageout below the truncation point
 *	might try to populate it. 
*
 *	We atomically detach the top of branch from the tree, store the
 *	block number of its root in *@top, pointers to buffer_heads of
 *	partially truncated blocks - in @chain[].bh and pointers to
 *	their last elements that should not be removed - in
 *	@chain[].p. Return value is the pointer to last filled element
 *	of @chain.
 *
 *	The work left to caller to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].
 *			(no partially truncated stuff there).  */

static Indirect *ext3_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				u32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	/* Walk back towards the inode past indirect blocks whose remaining
	 * slots are all zero - those need not survive */
	for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	/* Drop the buffer references for the levels below the detach point */
	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

/*
 * Zero a number of block pointers in either an inode or an indirect block. 
* If we restart the transaction we must again get write access to the * indirect block for further modification. * * We release `count' blocks on disk, but (last - first) may be greater * than `count' because there can be holes in there. */static voidext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,		unsigned long block_to_free, unsigned long count,		u32 *first, u32 *last){	u32 *p;	if (try_to_extend_transaction(handle, inode)) {		if (bh) {			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");			ext3_journal_dirty_metadata(handle, bh);		}		ext3_mark_inode_dirty(handle, inode);		ext3_journal_test_restart(handle, inode);		BUFFER_TRACE(bh, "get_write_access");		ext3_journal_get_write_access(handle, bh);	}	/*	 * Any buffers which are on the journal will be in memory. We find	 * them on the hash table so journal_revoke() will run journal_forget()	 * on them.  We've already detached each block from the file, so	 * bforget() in journal_forget() should be safe.	 *	 * AKPM: turn on bforget in journal_forget()!!!	 */	for (p = first; p < last; p++) {		u32 nr = le32_to_cpu(*p);		if (nr) {			struct buffer_head *bh;			*p = 0;			bh = sb_get_hash_table(inode->i_sb, nr);			ext3_forget(handle, 0, inode, bh, nr);		}	}	ext3_free_blocks(handle, inode, block_to_free, count);}/** * ext3_free_data - free a list of data blocks * @handle:	handle for this transaction * @inode:	inode we are dealing with * @this_bh:	indirect buffer_head which contains *@first and *@last * @first:	array of block numbers * @last:	points immediately past the end of array * * We are freeing all blocks refered from that array (numbers are stored as * little-endian 32-bit) and updating @inode->i_blocks appropriately. * * We accumulate contiguous runs of blocks to free.  Conveniently, if these * blocks are contiguous then releasing them at one time will only affect one * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't * actually use a lot of journal space. 
* * @this_bh will be %NULL if @first and @last point into the inode's direct * block pointers. */static void ext3_free_data(handle_t *handle, struct inode *inode,			   struct buffer_head *this_bh, u32 *first, u32 *last){	unsigned long block_to_free = 0;    /* Starting block # of a run */	unsigned long count = 0;	    /* Number of blocks in the run */ 	u32 *block_to_free_p = NULL;	    /* Pointer into inode/ind					       corresponding to					       block_to_free */	unsigned long nr;		    /* Current block # */	u32 *p;				    /* Pointer into inode/ind					       for current block */	int err;	if (this_bh) {				/* For indirect block */		BUFFER_TRACE(this_bh, "get_write_access");		err = ext3_journal_get_write_access(handle, this_bh);		/* Important: if we can't update the indirect pointers		 * to the blocks, we can't free them. */		if (err)			return;	}	for (p = first; p < last; p++) {		nr = le32_to_cpu(*p);		if (nr) {			/* accumulate blocks to free if they're contiguous */			if (count == 0) {				block_to_free = nr;				block_to_free_p = p;				count = 1;			} else if (nr == block_to_free + count) {				count++;			} else {				ext3_clear_blocks(handle, inode, this_bh, 						  block_to_free,						  count, block_to_free_p, p);				block_to_free = nr;				block_to_free_p = p;				count = 1;			}		}	}	if (count > 0)		ext3_clear_blocks(handle, inode, this_bh, block_to_free,				  count, block_to_free_p, p);	if (this_bh) {		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");		ext3_journal_dirty_metadata(handle, this_bh);	}}/** *	ext3_free_branches - free an array of branches *	@handle: JBD handle for this transaction *	@inode:	inode we are dealing with *	@parent_bh: the buffer_head which contains *@first and *@last *	@first:	array of block numbers *	@last:	pointer immediately past the end of array *	@depth:	depth of the branches to free * *	We are freeing all blocks refered from these branches (numbers are *	stored as little-endian 32-bit) and updating @inode->i_blocks *	
appropriately. */
static void ext3_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       u32 *first, u32 *last, int depth)
{
	unsigned long nr;
	u32 *p;

	/* Nothing more can be journalled on an aborted handle */
	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
		/* Scan the pointer array backwards (deepest-last entries
		 * first) */
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -