
journal.c
Linux kernel source code (C), page 1 of 5
}

static int flush_older_commits(struct super_block *s,
                               struct reiserfs_journal_list *jl)
{
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        struct reiserfs_journal_list *other_jl;
        struct reiserfs_journal_list *first_jl;
        struct list_head *entry;
        unsigned long trans_id = jl->j_trans_id;
        unsigned long other_trans_id;
        unsigned long first_trans_id;

      find_first:
        /*
         * first we walk backwards to find the oldest uncommitted transaction
         */
        first_jl = jl;
        entry = jl->j_list.prev;
        while (1) {
                other_jl = JOURNAL_LIST_ENTRY(entry);
                if (entry == &journal->j_journal_list ||
                    atomic_read(&other_jl->j_older_commits_done))
                        break;
                first_jl = other_jl;
                entry = other_jl->j_list.prev;
        }

        /* if we didn't find any older uncommitted transactions, return now */
        if (first_jl == jl) {
                return 0;
        }

        first_trans_id = first_jl->j_trans_id;

        entry = &first_jl->j_list;
        while (1) {
                other_jl = JOURNAL_LIST_ENTRY(entry);
                other_trans_id = other_jl->j_trans_id;

                if (other_trans_id < trans_id) {
                        if (atomic_read(&other_jl->j_commit_left) != 0) {
                                flush_commit_list(s, other_jl, 0);

                                /* list we were called with is gone, return */
                                if (!journal_list_still_alive(s, trans_id))
                                        return 1;

                                /* the one we just flushed is gone, this means
                                 * all older lists are also gone, so first_jl
                                 * is no longer valid either.  Go back to the
                                 * beginning.
                                 */
                                if (!journal_list_still_alive(s, other_trans_id)) {
                                        goto find_first;
                                }
                        }
                        entry = entry->next;
                        if (entry == &journal->j_journal_list)
                                return 0;
                } else {
                        return 0;
                }
        }
        return 0;
}

static int reiserfs_async_progress_wait(struct super_block *s)
{
        DEFINE_WAIT(wait);
        struct reiserfs_journal *j = SB_JOURNAL(s);

        if (atomic_read(&j->j_async_throttle))
                congestion_wait(WRITE, HZ / 10);
        return 0;
}
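/*
 * Illustrative note (not part of the original journal.c): the
 * j_async_throttle counter checked above is raised and dropped around
 * the bulk log-block submission loop in flush_commit_list() below,
 * roughly:
 *
 *     atomic_inc(&journal->j_async_throttle);
 *     ...submit up to write_len log blocks with ll_rw_block()...
 *     atomic_dec(&journal->j_async_throttle);
 *
 * so reiserfs_async_progress_wait() simply lets an unrelated writer
 * back off for HZ/10 jiffies while such a batch is in flight.
 */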
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before
** transaction 1).  Before the commit block can be written, every other
** log block must be safely on disk.
*/
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall)
{
        int i;
        b_blocknr_t bn;
        struct buffer_head *tbh = NULL;
        unsigned long trans_id = jl->j_trans_id;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        int barrier = 0;
        int retval = 0;
        int write_len;

        reiserfs_check_lock_depth(s, "flush_commit_list");

        if (atomic_read(&jl->j_older_commits_done)) {
                return 0;
        }

        get_fs_excl();

        /* before we can put our commit blocks on disk, we have to make sure
         ** everyone older than us is on disk too
         */
        BUG_ON(jl->j_len <= 0);
        BUG_ON(trans_id == journal->j_trans_id);

        get_journal_list(jl);
        if (flushall) {
                if (flush_older_commits(s, jl) == 1) {
                        /* list disappeared during flush_older_commits.
                         * return */
                        goto put_jl;
                }
        }

        /* make sure nobody is trying to flush this one at the same time */
        down(&jl->j_commit_lock);
        if (!journal_list_still_alive(s, trans_id)) {
                up(&jl->j_commit_lock);
                goto put_jl;
        }
        BUG_ON(jl->j_trans_id == 0);

        /* this commit is done, exit */
        if (atomic_read(&(jl->j_commit_left)) <= 0) {
                if (flushall) {
                        atomic_set(&(jl->j_older_commits_done), 1);
                }
                up(&jl->j_commit_lock);
                goto put_jl;
        }

        if (!list_empty(&jl->j_bh_list)) {
                int ret;
                unlock_kernel();
                ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
                                            journal, jl, &jl->j_bh_list);
                if (ret < 0 && retval == 0)
                        retval = ret;
                lock_kernel();
        }
        BUG_ON(!list_empty(&jl->j_bh_list));

        /*
         * for the description block and all the log blocks, submit any
         * buffers that haven't already reached the disk.  Try to write at
         * least 256 log blocks.  later on, we will only wait on blocks that
         * correspond to this transaction, but while we're unplugging we
         * might as well get a chunk of data on there.
         */
        atomic_inc(&journal->j_async_throttle);
        write_len = jl->j_len + 1;
        if (write_len < 256)
                write_len = 256;
        for (i = 0; i < write_len; i++) {
                bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
                    SB_ONDISK_JOURNAL_SIZE(s);
                tbh = journal_find_get_block(s, bn);
                if (tbh) {
                        if (buffer_dirty(tbh))
                                ll_rw_block(WRITE, 1, &tbh);
                        put_bh(tbh);
                }
        }
        atomic_dec(&journal->j_async_throttle);

        /* We're skipping the commit if there's an error */
        if (retval || reiserfs_is_journal_aborted(journal))
                barrier = 0;

        /* wait on everything written so far before writing the commit.
         * if we are in barrier mode, send the commit down now
         */
        barrier = reiserfs_barrier_flush(s);
        if (barrier) {
                int ret;
                lock_buffer(jl->j_commit_bh);
                ret = submit_barrier_buffer(jl->j_commit_bh);
                if (ret == -EOPNOTSUPP) {
                        set_buffer_uptodate(jl->j_commit_bh);
                        disable_barrier(s);
                        barrier = 0;
                }
        }
        for (i = 0; i < (jl->j_len + 1); i++) {
                bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
                    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
                tbh = journal_find_get_block(s, bn);
                wait_on_buffer(tbh);
                /* since we're using ll_rw_block() above, it might have
                 * skipped over a locked buffer.  Double check here.
                 */
                if (buffer_dirty(tbh))  /* redundant, sync_dirty_buffer() checks */
                        sync_dirty_buffer(tbh);
                if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
                        reiserfs_warning(s, "journal-601, buffer write failed");
#endif
                        retval = -EIO;
                }
                put_bh(tbh);    /* once for journal_find_get_block */
                put_bh(tbh);    /* once due to original getblk in do_journal_end */
                atomic_dec(&(jl->j_commit_left));
        }

        BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

        if (!barrier) {
                /* If there was a write error in the journal - we can't commit
                 * this transaction - it will be invalid and, if successful,
                 * will just end up propagating the write error out to
                 * the file system. */
                if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
                        if (buffer_dirty(jl->j_commit_bh))
                                BUG();
                        mark_buffer_dirty(jl->j_commit_bh);
                        sync_dirty_buffer(jl->j_commit_bh);
                }
        } else
                wait_on_buffer(jl->j_commit_bh);

        check_barrier_completion(s, jl->j_commit_bh);

        /* If there was a write error in the journal - we can't commit this
         * transaction - it will be invalid and, if successful, will just end
         * up propagating the write error out to the filesystem.
         */
        if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
                reiserfs_warning(s, "journal-615: buffer write failed");
#endif
                retval = -EIO;
        }
        bforget(jl->j_commit_bh);
        if (journal->j_last_commit_id != 0 &&
            (jl->j_trans_id - journal->j_last_commit_id) != 1) {
                reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                                 journal->j_last_commit_id, jl->j_trans_id);
        }
        journal->j_last_commit_id = jl->j_trans_id;

        /* now, every commit block is on the disk.  It is safe to allow
         * blocks freed during this transaction to be reallocated */
        cleanup_freed_for_journal_list(s, jl);

        retval = retval ? retval : journal->j_errno;

        /* mark the metadata dirty */
        if (!retval)
                dirty_one_transaction(s, jl);
        atomic_dec(&(jl->j_commit_left));

        if (flushall) {
                atomic_set(&(jl->j_older_commits_done), 1);
        }
        up(&jl->j_commit_lock);
      put_jl:
        put_journal_list(s, jl);

        if (retval)
                reiserfs_abort(s, retval, "Journal write error in %s",
                               __FUNCTION__);
        put_fs_excl();
        return retval;
}
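/*
 * Illustrative note (not part of the original journal.c): the journal
 * is a circular region of SB_ONDISK_JOURNAL_SIZE(s) blocks, so the
 * i-th log block of a transaction starting at j_start is located with
 * the wrap-around arithmetic used twice above.  Assuming a journal of
 * 8192 blocks whose first on-disk block is 18, a transaction with
 * j_start == 8190 maps its blocks as:
 *
 *     i = 0:  18 + (8190 + 0) % 8192  ->  block 8208
 *     i = 1:  18 + (8190 + 1) % 8192  ->  block 8209
 *     i = 2:  18 + (8190 + 2) % 8192  ->  block 18    (wrapped)
 */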
/*
** flush_journal_list frequently needs to find a newer transaction for a
** given block.  This does that, or returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(
        struct reiserfs_journal_cnode *cn)
{
        struct super_block *sb = cn->sb;
        b_blocknr_t blocknr = cn->blocknr;

        cn = cn->hprev;
        while (cn) {
                if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
                        return cn->jlist;
                }
                cn = cn->hprev;
        }
        return NULL;
}

static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
        struct super_block *sb = cn->sb;
        b_blocknr_t blocknr = cn->blocknr;

        cn = cn->hprev;
        while (cn) {
                if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
                    atomic_read(&cn->jlist->j_commit_left) != 0)
                        return 0;
                cn = cn->hprev;
        }
        return 1;
}

static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
                                int);
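/*
 * Illustrative note (not part of the original journal.c): both helpers
 * above walk the same hash chain.  Cnodes logging the same
 * (sb, blocknr) pair are linked through ->hprev, which is taken here
 * to lead to entries from newer transactions that also logged this
 * block: find_newer_jl_for_cn() returns the first such journal list,
 * while newer_jl_done() reports whether every one of them has finished
 * committing (j_commit_left == 0).
 */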
/*
** once all the real blocks have been flushed, it is safe to remove them
** from the journal list for this transaction.  Aside from freeing the
** cnode, this also allows the block to be reallocated for data blocks
** if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
                                         struct reiserfs_journal_list *jl,
                                         int debug)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        struct reiserfs_journal_cnode *cn, *last;

        cn = jl->j_realblock;

        /* which is better, to lock once around the whole loop, or
         ** to lock for each call to remove_journal_hash?
         */
        while (cn) {
                if (cn->blocknr != 0) {
                        if (debug) {
                                reiserfs_warning(p_s_sb,
                                                 "block %u, bh is %d, state %ld",
                                                 cn->blocknr, cn->bh ? 1 : 0,
                                                 cn->state);
                        }
                        cn->state = 0;
                        remove_journal_hash(p_s_sb, journal->j_list_hash_table,
                                            jl, cn->blocknr, 1);
                }
                last = cn;
                cn = cn->next;
                free_cnode(p_s_sb, last);
        }
        jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the
** header block, write it to the header block.  once this is done, I can
** safely say the log area for this transaction won't ever be replayed,
** and I can start releasing blocks in this transaction for reuse as
** data blocks.  called by flush_journal_list, before it calls
** remove_all_from_journal_list
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
                                        unsigned long offset,
                                        unsigned long trans_id)
{
        struct reiserfs_journal_header *jh;
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

        if (reiserfs_is_journal_aborted(journal))
                return -EIO;

        if (trans_id >= journal->j_last_flush_trans_id) {
                if (buffer_locked((journal->j_header_bh))) {
                        wait_on_buffer((journal->j_header_bh));
                        if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
                                reiserfs_warning(p_s_sb,
                                                 "journal-699: buffer write failed");
#endif
                                return -EIO;
                        }
                }
                journal->j_last_flush_trans_id = trans_id;
                journal->j_first_unflushed_offset = offset;
                jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
                                                        b_data);
                jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
                jh->j_first_unflushed_offset = cpu_to_le32(offset);
                jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
                if (reiserfs_barrier_flush(p_s_sb)) {
                        int ret;
                        lock_buffer(journal->j_header_bh);
                        ret = submit_barrier_buffer(journal->j_header_bh);
                        if (ret == -EOPNOTSUPP) {
                                set_buffer_uptodate(journal->j_header_bh);
                                disable_barrier(p_s_sb);
                                goto sync;
                        }
                        wait_on_buffer(journal->j_header_bh);
                        check_barrier_completion(p_s_sb, journal->j_header_bh);
                } else {
                      sync:
                        set_buffer_dirty(journal->j_header_bh);
                        sync_dirty_buffer(journal->j_header_bh);
                }
                if (!buffer_uptodate(journal->j_header_bh)) {
                        reiserfs_warning(p_s_sb,
                                         "journal-837: IO error during journal replay");
                        return -EIO;
                }
        }
        return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id)
{
        return _update_journal_header_block(p_s_sb, offset, trans_id);
}
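/*
 * Illustrative note (not part of the original journal.c): both
 * flush_commit_list() above and _update_journal_header_block() use the
 * same fallback shape when the device rejects barrier writes: submit
 * the buffer as a barrier, and on -EOPNOTSUPP disable barriers for the
 * filesystem and fall back to an ordinary synchronous write, roughly:
 *
 *     lock_buffer(bh);
 *     if (submit_barrier_buffer(bh) == -EOPNOTSUPP) {
 *             set_buffer_uptodate(bh);
 *             disable_barrier(sb);
 *             set_buffer_dirty(bh);
 *             sync_dirty_buffer(bh);    // plain write instead
 *     } else {
 *             wait_on_buffer(bh);
 *             check_barrier_completion(sb, bh);
 *     }
 */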
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
                                     struct reiserfs_journal_list *jl)
{
        struct list_head *entry;
        struct reiserfs_journal_list *other_jl;
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        unsigned long trans_id = jl->j_trans_id;

        /* we know we are the only ones flushing things, no extra race
         * protection is required.
         */
      restart:
        entry = journal->j_journal_list.next;
        /* Did we wrap? */
        if (entry == &journal->j_journal_list)
                return 0;
        other_jl = JOURNAL_LIST_ENTRY(entry);
        if (other_jl->j_trans_id < trans_id) {
                BUG_ON(other_jl->j_refcount <= 0);
                /* do not flush all */
                flush_journal_list(p_s_sb, other_jl, 0);

                /* other_jl is now deleted from the list */
                goto restart;
        }
        return 0;
}

static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl)
{
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        if (!list_empty(&jl->j_working_list)) {
                list_del_init(&jl->j_working_list);
                journal->j_num_work_lists--;
        }
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall)
{
