
journal.c
Linux kernel source code (C)
Page 1 of 5
	return (struct reiserfs_journal_cnode *)0;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set
** search_all, a block can only be allocated if it is not in the current
** transaction, was not freed by the current transaction, and has no chance
** of ever being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not
** in the current transaction.  Since deleting a block removes it from the
** current transaction, this case should never happen.  If you don't set
** search_all, make sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(p_s_sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will
	 ** be logged before use.  if we crash before the transaction that
	 ** freed it commits, this transaction won't have committed either,
	 ** and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       p_s_sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}
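/*
 * Illustrative sketch, not part of the original file: how a
 * find_forward-style allocator scan might consume reiserfs_in_journal()
 * and its next_zero_bit hint.  The function name and the start/end/bmap
 * parameters below are hypothetical caller state, and as the comment
 * above warns, a hinted bit may still be rejected on the next call.
 */
#if 0
static int find_reusable_bit(struct super_block *sb, unsigned int bmap,
			     int start, int end)
{
	b_blocknr_t next_zero_bit;
	int bit = start;

	while (bit < end) {
		if (!reiserfs_in_journal(sb, bmap, bit, 1, &next_zero_bit))
			return bit;	/* safe to reallocate */
		/* jump ahead to the hint when one was produced */
		bit = (next_zero_bit > (b_blocknr_t) bit) ?
		    (int)next_zero_bit : bit + 1;
	}
	return -1;
}
#endif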
/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
	PROC_INFO_INC(p_s_sb, journal.lock_journal);
	down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
	up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "trans id %lu, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
** this used to be much more involved, and I'm keeping it just in case
** things get ugly again.  it gets called by flush_commit_list, and cleans
** up any data stored about blocks freed during a transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
					   struct reiserfs_journal_list *jl)
{
	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(p_s_sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

static int journal_list_still_alive(struct super_block *s,
				    unsigned long trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}
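/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of get_journal_list()/put_journal_list().  A caller pins the
 * journal list before dropping the journal lock so the list cannot be
 * kfree()d underneath it.  'sb' and 'jl' are assumed caller state.
 */
#if 0
	get_journal_list(jl);		/* pin jl across the blocking section */
	unlock_journal(sb);
	/* ... blocking work that may race with list teardown ... */
	lock_journal(sb);
	put_journal_list(sb, jl);	/* kfree()s jl when refcount hits 0 */
#endif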
/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && !TestSetPageLocked(page)) {
		page_cache_get(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	} else {
		put_bh(bh);
	}
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL,
				 "clm-2084: pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	release_buffer_page(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}

#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}
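/*
 * Illustrative sketch, not part of the original file: the fill-then-flush
 * pattern the buffer_chunk helpers implement.  add_to_chunk() drops 'lock'
 * around the flush callback because submit_bh() can block; any partial
 * chunk must be flushed by the caller, as write_ordered_buffers() does
 * below.  next_dirty_buffer() is a hypothetical source of buffers.
 */
#if 0
	struct buffer_chunk chunk;
	struct buffer_head *bh;

	chunk.nr = 0;
	spin_lock(lock);
	while ((bh = next_dirty_buffer()) != NULL)
		add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
	if (chunk.nr) {		/* flush whatever is left over */
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	spin_unlock(lock);
#endif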
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should not be able
		 * to have two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (test_set_buffer_locked(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a
		 * valid journal head from an older transaction.  If someone
		 * else sets our buffer dirty after we write it in the first
		 * loop, and then someone truncates the page away, nobody
		 * will ever write the buffer. We're safe if we write the
		 * page one last time after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}
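/*
 * Illustrative sketch, not part of the original file: how the ordered-data
 * pieces above fit together.  Buffers are queued on the current journal
 * list with reiserfs_add_ordered_list(); at commit time the journal is
 * expected to drain that list with write_ordered_buffers() before the
 * commit block reaches disk.  The call below assumes 'journal' and the
 * committing list 'jl' are in scope at such a flush site.
 */
#if 0
	ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
				    journal, jl, &jl->j_bh_list);
#endif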
