
transaction.c

			__journal_remove_journal_head(bh);
			__brelse(bh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal_datalist_lock);
				unlock_journal(journal);
				__bforget(bh);
				return;
			}
		}

	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */
		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;
		}
	}

not_jbd:
	spin_unlock(&journal_datalist_lock);
	unlock_journal(journal);
	__brelse(bh);
	return;
}

#if 0	/* Unused */
/*
 * journal_sync_buffer: flush a potentially-journaled buffer to disk.
 *
 * Used for O_SYNC filesystem operations.  If the buffer is journaled,
 * we need to complete the O_SYNC by waiting for the transaction to
 * complete.  It is an error to call journal_sync_buffer before
 * journal_stop!
 */
void journal_sync_buffer(struct buffer_head *bh)
{
	transaction_t *transaction;
	journal_t *journal;
	long sequence;
	struct journal_head *jh;

	/* If the buffer isn't journaled, this is easy: just sync it to
	 * disk.  */
	BUFFER_TRACE(bh, "entry");

	spin_lock(&journal_datalist_lock);
	if (!buffer_jbd(bh)) {
		spin_unlock(&journal_datalist_lock);
		return;
	}
	jh = bh2jh(bh);
	if (jh->b_transaction == NULL) {
		/* If the buffer has already been journaled, then this
		 * is a noop. */
		if (jh->b_cp_transaction == NULL) {
			spin_unlock(&journal_datalist_lock);
			return;
		}
		atomic_inc(&bh->b_count);
		spin_unlock(&journal_datalist_lock);
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer(bh);
		__brelse(bh);
		goto out;
	}

	/* Otherwise, just wait until the transaction is synced to disk. */
	transaction = jh->b_transaction;
	journal = transaction->t_journal;
	sequence = transaction->t_tid;
	spin_unlock(&journal_datalist_lock);

	jbd_debug(2, "requesting commit for jh %p\n", jh);
	log_start_commit (journal, transaction);

	while (tid_gt(sequence, journal->j_commit_sequence)) {
		wake_up(&journal->j_wait_done_commit);
		sleep_on(&journal->j_wait_done_commit);
	}
	JBUFFER_TRACE(jh, "exit");
out:
	return;
}
#endif

/*
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a journal_abort has been executed since the
 * transaction began.
 */
int journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int old_handle_count, err;

	if (!handle)
		return 0;

	J_ASSERT (transaction->t_updates > 0);
	J_ASSERT (journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else
		err = 0;

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 *
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 */
	if (handle->h_sync) {
		do {
			old_handle_count = transaction->t_handle_count;
			set_current_state(TASK_RUNNING);
			current->policy |= SCHED_YIELD;
			schedule();
		} while (old_handle_count != transaction->t_handle_count);
	}

	current->journal_info = NULL;
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
			transaction->t_outstanding_credits >
				journal->j_max_transaction_buffers ||
			time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */
		tid_t tid = transaction->t_tid;

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		log_start_commit(journal, transaction);

		/*
		 * Special case: JFS_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			log_wait_commit(journal, tid);
	}
	kfree(handle);
	return err;
}

/*
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret = 0;

	lock_kernel();
	handle = journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	handle->h_sync = 1;
	journal_stop(handle);
out:
	unlock_kernel();
	return ret;
}
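/*
 * Illustrative sketch only (my_fs_modify_block() is hypothetical, not
 * part of JBD): journal_force_commit() above reuses the normal handle
 * lifecycle, and an ordinary filesystem operation spends its buffer
 * credits the same way, via journal_get_write_access() and
 * journal_dirty_metadata() from elsewhere in this file.
 */
static int my_fs_modify_block(journal_t *journal, struct buffer_head *bh,
			      int sync)
{
	handle_t *handle;
	int err, err2;

	/* Reserve one buffer credit; journal_stop() hands any unused
	 * credits back to the running transaction. */
	handle = journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Declare write intent before touching the buffer */
	err = journal_get_write_access(handle, bh);
	if (!err) {
		memset(bh->b_data, 0, bh->b_size);	/* the modification */
		/* File the buffer on the transaction's metadata list */
		err = journal_dirty_metadata(handle, bh);
	}

	/* As in journal_force_commit(): h_sync makes journal_stop()
	 * start a commit and wait for it to complete. */
	if (sync)
		handle->h_sync = 1;

	err2 = journal_stop(handle);	/* -EIO if the journal was aborted */
	return err ? err : err2;
}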
/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 * journal_datalist_lock is held.
 */
static inline void __blist_add_buffer(struct journal_head **list,
				      struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with journal_datalist_lock held, and the journal may not
 * be locked.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = 0;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
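/*
 * Illustrative trace (hypothetical buffers A and B) of the two list
 * helpers above.  b_tnext/b_tprev form a circular doubly-linked list
 * and *list points at the head:
 *
 *   empty list:                  *list == NULL
 *   __blist_add_buffer(list, A): *list == A, A->b_tnext == A->b_tprev == A
 *   __blist_add_buffer(list, B): *list == A, A->b_tnext == B, B->b_tnext == A
 *   __blist_del_buffer(list, A): *list == B, B linked to itself again
 *   __blist_del_buffer(list, B): *list == NULL
 *
 * Buffers are inserted at the tail (head->b_tprev), so walking b_tnext
 * from *list visits them in the order they were filed.
 */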
/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_sync_datalist, t_async_datalist, t_buffers, t_forget,
 * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
 * is holding onto a copy of one of these pointers, it could go bad.
 * Generally the caller needs to re-read the pointer from the transaction_t.
 *
 * If bh->b_jlist is BJ_SyncData or BJ_AsyncData then we may have been called
 * via journal_try_to_free_buffer() or journal_clean_data_list().  In that
 * case, journal_datalist_lock will be held, and the journal may not be
 * locked.
 */
void __journal_unfile_buffer(struct journal_head *jh)
{
	struct journal_head **list = 0;
	transaction_t * transaction;

	assert_spin_locked(&journal_datalist_lock);
	transaction = jh->b_transaction;

#ifdef __SMP__
	J_ASSERT (current->lock_depth >= 0);
#endif
	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);

	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != 0);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_AsyncData:
		list = &transaction->t_async_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_and_clear_bit(BH_JBDDirty, &jh2bh(jh)->b_state)) {
		set_bit(BH_Dirty, &jh2bh(jh)->b_state);
	}
}

void journal_unfile_buffer(struct journal_head *jh)
{
	spin_lock(&journal_datalist_lock);
	__journal_unfile_buffer(jh);
	spin_unlock(&journal_datalist_lock);
}

/*
 * Called from journal_try_to_free_buffers().  The journal is not
 * locked. lru_list_lock is not held.
 *
 * Here we see why journal_datalist_lock is global and not per-journal.
 * We cannot get back to this buffer's journal pointer without locking
 * out journal_clean_data_list() in some manner.
 *
 * One could use journal_datalist_lock to get unracy access to a
 * per-journal lock.
 *
 * Called with journal_datalist_lock held.
 *
 * Returns non-zero iff we were able to free the journal_head.
 */
static int __journal_try_to_free_buffer(struct buffer_head *bh,
					int *locked_or_dirty)
{
	struct journal_head *jh;

	assert_spin_locked(&journal_datalist_lock);

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh)) {
		*locked_or_dirty = 1;
		goto out;
	}

	if (!buffer_uptodate(bh))
		goto out;

	if (jh->b_next_transaction != 0)
		goto out;

	if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) {
		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_AsyncData) {
			/* A written-back ordered data buffer */
			JBUFFER_TRACE(jh, "release data");
			__journal_unfile_buffer(jh);
			jh->b_transaction = 0;
			__journal_remove_journal_head(bh);
			__brelse(bh);
		}
	} else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
		/* written-back checkpointed metadata buffer */
		if (jh->b_jlist == BJ_None) {
			JBUFFER_TRACE(jh, "remove from checkpoint list");
			__journal_remove_checkpoint(jh);
			__journal_remove_journal_head(bh);
			__brelse(bh);
		}
	}
	return !buffer_jbd(bh);

out:
	return 0;
}

/*
 * journal_try_to_free_buffers().  For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.  Called with lru_list_lock
 * not held.  Does its own locking.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a journal_dirty_data on this
 * buffer.  So we need to lock against that.  journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
