
transaction.c
Linux kernel source code (C)
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */
		if (jh->b_cp_transaction) {
			__journal_temp_unlink_buffer(jh);
			__journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__journal_unfile_buffer(jh);
			journal_remove_journal_head(bh);
			__brelse(bh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */
		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;
			drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}

/**
 * int journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a journal_abort has been executed since the
 * transaction began.
 */
int journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int old_handle_count, err;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(transaction->t_updates > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 *
	 * But don't do this if this process was the most recent one to
	 * perform a synchronous write.  We do this to detect the case where a
	 * single process is doing a stream of sync writes.  No point in waiting
	 * for joiners in that case.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		journal->j_last_sync_writer = pid;
		do {
			old_handle_count = transaction->t_handle_count;
			schedule_timeout_uninterruptible(1);
		} while (old_handle_count != transaction->t_handle_count);
	}

	current->journal_info = NULL;
	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
			transaction->t_outstanding_credits >
				journal->j_max_transaction_buffers ||
			time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */
		tid_t tid = transaction->t_tid;

		spin_unlock(&transaction->t_handle_lock);
		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		__log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);

		/*
		 * Special case: JFS_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			err = log_wait_commit(journal, tid);
	} else {
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
	}

	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);

	jbd_free_handle(handle);
	return err;
}
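/*
 * Illustration only (not part of transaction.c): a minimal sketch of the
 * handle lifecycle that journal_stop() above completes, as a filesystem
 * client would drive it.  example_update_metadata() and its single-credit
 * reservation are hypothetical; journal_start(), journal_get_write_access(),
 * journal_dirty_metadata() and journal_stop() are the real JBD entry points.
 */
static int example_update_metadata(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err, stop_err;

	/* Reserve log space for one modified buffer; may sleep. */
	handle = journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Declare our intent to modify bh under this transaction. */
	err = journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data here ... */
		err = journal_dirty_metadata(handle, bh);
	}

	/* Return unused credits; may request (or wait for) a commit. */
	stop_err = journal_stop(handle);
	return err ? err : stop_err;
}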
/**
 * int journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret;

	handle = journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	} else {
		handle->h_sync = 1;
		ret = journal_stop(handle);
	}
	return ret;
}

/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
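/*
 * Illustration only: a user-space mock of the circular list discipline used
 * by __blist_add_buffer()/__blist_del_buffer() above.  "struct node" stands
 * in for struct journal_head with just the b_tnext/b_tprev links; everything
 * here is hypothetical scaffolding showing that tail insertion preserves
 * arrival order and that deleting the last element NULLs the head.
 */
#include <assert.h>
#include <stddef.h>

struct node { struct node *tnext, *tprev; int id; };

static void blist_add(struct node **list, struct node *n)
{
	if (!*list) {
		n->tnext = n->tprev = n;	/* first element links to itself */
		*list = n;
	} else {				/* insert at the tail */
		struct node *first = *list, *last = first->tprev;
		n->tprev = last;
		n->tnext = first;
		last->tnext = first->tprev = n;
	}
}

static void blist_del(struct node **list, struct node *n)
{
	if (*list == n) {
		*list = n->tnext;
		if (*list == n)			/* n was the only element */
			*list = NULL;
	}
	n->tprev->tnext = n->tnext;
	n->tnext->tprev = n->tprev;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 }, *head = NULL;

	blist_add(&head, &a);
	blist_add(&head, &b);
	assert(head == &a && head->tnext == &b);	/* order preserved */
	blist_del(&head, &a);
	blist_del(&head, &b);
	assert(head == NULL);				/* empty again */
	return 0;
}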
/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
 * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
 * is holding onto a copy of one of these pointers, it could go bad.
 * Generally the caller needs to re-read the pointer from the transaction_t.
 *
 * Called under j_list_lock.  The journal may not be locked.
 */
static void __journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list = &transaction->t_locked_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

void __journal_unfile_buffer(struct journal_head *jh)
{
	__journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
}

void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&journal->j_list_lock);
	__journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}

/*
 * Called from journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
			/* A written-back ordered data buffer */
			JBUFFER_TRACE(jh, "release data");
			__journal_unfile_buffer(jh);
			journal_remove_journal_head(bh);
			__brelse(bh);
		}
	} else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
		/* written-back checkpointed metadata buffer */
		if (jh->b_jlist == BJ_None) {
			JBUFFER_TRACE(jh, "remove from checkpoint list");
			__journal_remove_checkpoint(jh);
			journal_remove_journal_head(bh);
			__brelse(bh);
		}
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}
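/*
 * Illustration only: the re-read pattern that the comment above
 * __journal_temp_unlink_buffer() calls for.  Unlinking can rewrite the
 * transaction's list head (here t_sync_datalist), so a drain loop must
 * reload the head from the transaction_t on every pass instead of caching
 * it.  example_drain_sync_data() is hypothetical and elides the j_list_lock
 * and bh-state locking that real callers hold.
 */
static void example_drain_sync_data(transaction_t *transaction)
{
	struct journal_head *jh;

	/* Re-read t_sync_datalist each iteration; never hold a stale copy. */
	while ((jh = transaction->t_sync_datalist) != NULL)
		__journal_unfile_buffer(jh);	/* may change the list head */
}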
/**
 * int journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @unused_gfp_mask: unused
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a journal_dirty_data on this
 * buffer.  So we need to lock against that.  journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 */
int journal_try_to_free_buffers(journal_t *journal,
				struct page *page, gfp_t unused_gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * journal_remove_journal_head() and journal_put_journal_head().
		 */
		jh = journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);
busy:
	return ret;
}
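/*
 * Illustration only: how an address_space ->releasepage hook would drive
 * journal_try_to_free_buffers(), modeled on ext3's releasepage of the same
 * era.  EXAMPLE_JOURNAL() is a hypothetical accessor standing in for the
 * filesystem's way of finding its journal_t from the inode.
 */
static int example_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXAMPLE_JOURNAL(page->mapping->host);

	if (!page_has_buffers(page))
		return 0;	/* nothing to free */
	return journal_try_to_free_buffers(journal, page, wait);
}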
