
transaction.c

/*
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 */
int journal_try_to_free_buffers(journal_t *journal,
				struct page *page, int gfp_mask)
{
	struct buffer_head *bh;
	struct buffer_head *tmp;
	int locked_or_dirty = 0;
	int call_ttfb = 1;

	J_ASSERT(PageLocked(page));

	bh = page->buffers;
	tmp = bh;
	spin_lock(&journal_datalist_lock);
	do {
		struct buffer_head *p = tmp;

		tmp = tmp->b_this_page;
		if (buffer_jbd(p))
			if (!__journal_try_to_free_buffer(p, &locked_or_dirty))
				call_ttfb = 0;
	} while (tmp != bh);
	spin_unlock(&journal_datalist_lock);

	if (!(gfp_mask & (__GFP_IO|__GFP_WAIT)))
		goto out;
	if (!locked_or_dirty)
		goto out;

	/*
	 * The VM wants us to do writeout, or to block on IO, or both.
	 * So we allow try_to_free_buffers to be called even if the page
	 * still has journalled buffers.
	 */
	call_ttfb = 1;
out:
	return call_ttfb;
}
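/*
 * A minimal sketch of a caller for the above, assuming a 2.4-style
 * releasepage address_space hook and an EXT3_JOURNAL()-style accessor
 * from inode to journal_t (hook name and accessor are stand-ins, not
 * taken from this file): the VM asks the filesystem whether the page's
 * buffers may be freed, and the filesystem defers the decision to JBD.
 */
static int ext3_releasepage(struct page *page, int gfp_mask)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	/* Non-zero here tells the VM it may go on to call
	 * try_to_free_buffers() on this page. */
	return journal_try_to_free_buffers(journal, page, gfp_mask);
}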
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 */
static int dispose_buffer(struct journal_head *jh,
		transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	spin_lock(&journal_datalist_lock);
	__journal_unfile_buffer(jh);
	jh->b_transaction = 0;

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__journal_file_buffer(jh, transaction, BJ_Forget);
		clear_bit(BH_JBDDirty, &bh->b_state);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__journal_remove_journal_head(bh);
		__brelse(bh);
	}
	spin_unlock(&journal_datalist_lock);
	return may_free;
}

/*
 * journal_flushpage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling flushpage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;

	BUFFER_TRACE(bh, "entry");

	if (!buffer_mapped(bh))
		return 1;

	/* It is safe to proceed here without the
	 * journal_datalist_spinlock because the buffers cannot be
	 * stolen by try_to_free_buffers as long as we are holding the
	 * page lock. --sct */

	if (!buffer_jbd(bh))
		goto zap_buffer;

	jh = bh2jh(bh);
	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			return dispose_buffer(jh,
					journal->j_running_transaction);
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				return dispose_buffer(jh,
					journal->j_committing_transaction);
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_bit(BH_JBDDirty, &bh->b_state);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		/* If it is committing, we simply cannot touch it.  We
		 * can remove its next_transaction pointer from the
		 * running transaction if that is set, but nothing
		 * else. */
		JBUFFER_TRACE(jh, "on committing transaction");
		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction ==
					journal->j_running_transaction);
			jh->b_next_transaction = NULL;
		}
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		may_free = dispose_buffer(jh, transaction);
	}

zap_buffer:
	if (buffer_dirty(bh))
		mark_buffer_clean(bh);
	J_ASSERT_BH(bh, !buffer_jdirty(bh));
	clear_bit(BH_Uptodate, &bh->b_state);
	clear_bit(BH_Mapped, &bh->b_state);
	clear_bit(BH_Req, &bh->b_state);
	clear_bit(BH_New, &bh->b_state);
	return may_free;
}
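/*
 * Restating the branches above as a table (no additional logic):
 *
 *   b_transaction     other state                    disposition
 *   -------------     -----------                    -----------
 *   NULL              no checkpoint link             zap
 *   NULL              clean (bdflush wrote it)       zap
 *   NULL              dirty, running txn exists      dispose_buffer() -> running txn
 *   NULL              dirty, committing txn only     dispose_buffer() -> committing txn
 *   NULL              dirty, no transactions         clear BH_JBDDirty, zap
 *   committing txn    (any)                          clear b_next_transaction, stay pinned (0)
 *   running txn       (any)                          dispose_buffer(), safe to discard
 */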
/*
 * Return non-zero if the page's buffers were successfully reaped
 */
int journal_flushpage(journal_t *journal,
		      struct page *page,
		      unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	int may_free = 1;

	if (!PageLocked(page))
		BUG();
	if (!page->buffers)
		return 1;

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */
	lock_journal(journal);

	head = bh = page->buffers;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/* AKPM: doing lock_buffer here may be overly paranoid */
		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			may_free &= journal_unmap_buffer(journal, bh);
			unlock_buffer(bh);
		}
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	unlock_journal(journal);

	if (!offset) {
		if (!may_free || !try_to_free_buffers(page, 0))
			return 0;
		J_ASSERT(page->buffers == NULL);
	}
	return 1;
}
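/*
 * A minimal sketch of the wiring for journal_flushpage(), assuming a
 * 2.4-style flushpage address_space hook on the filesystem side (the
 * hook name and the EXT3_JOURNAL() accessor are stand-ins): truncate
 * ends up here instead of calling block_flushpage() directly.
 */
static int ext3_flushpage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	/* offset is the start of the truncated range within the page;
	 * buffers wholly beyond it are unmapped by the loop above. */
	return journal_flushpage(journal, page, offset);
}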
/*
 * File a buffer on the given transaction list.
 */
void __journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = 0;

	assert_spin_locked(&journal_datalist_lock);

#ifdef __SMP__
	J_ASSERT(current->lock_depth >= 0);
#endif
	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == 0);

	if (jh->b_transaction) {
		if (jh->b_jlist == jlist)
			return;
		__journal_unfile_buffer(jh);
	} else {
		jh->b_transaction = transaction;
	}

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_AsyncData:
		list = &transaction->t_async_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		if (atomic_set_buffer_clean(jh2bh(jh))) {
			set_bit(BH_JBDDirty, &jh2bh(jh)->b_state);
		}
	}
}

void journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	spin_lock(&journal_datalist_lock);
	__journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&journal_datalist_lock);
}

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 */
void __journal_refile_buffer(struct journal_head *jh)
{
	assert_spin_locked(&journal_datalist_lock);
#ifdef __SMP__
	J_ASSERT_JH(jh, current->lock_depth >= 0);
#endif
	__journal_unfile_buffer(jh);

	/* If the buffer is now unused, just drop it.  If it has been
	   modified by a later transaction, add it to the new
	   transaction's metadata list. */

	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;

	if (jh->b_transaction != NULL) {
		__journal_file_buffer(jh, jh->b_transaction, BJ_Metadata);
		J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
	} else {
		/* Onto BUF_DIRTY for writeback */
		refile_buffer(jh2bh(jh));
	}
}

/*
 * For the unlocked version of this call, also make sure that any
 * hanging journal_head is cleaned up if necessary.
 *
 * __journal_refile_buffer is usually called as part of a single locked
 * operation on a buffer_head, in which the caller is probably going to
 * be hooking the journal_head onto other lists.  In that case it is up
 * to the caller to remove the journal_head if necessary.  For the
 * unlocked journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
void journal_refile_buffer(struct journal_head *jh)
{
	struct buffer_head *bh;

	spin_lock(&journal_datalist_lock);
	bh = jh2bh(jh);

	__journal_refile_buffer(jh);
	__journal_remove_journal_head(bh);

	spin_unlock(&journal_datalist_lock);
	__brelse(bh);
}
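/*
 * A usage sketch for the refile path, following the caller convention
 * documented above (the helper name is hypothetical): because
 * journal_refile_buffer() may free the journal_head, any buffer_head
 * reference the caller needs must be taken first, and jh must not be
 * touched afterwards.
 */
static void finish_with_buffer(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	get_bh(bh);			/* pin bh across the refile */
	journal_refile_buffer(jh);	/* jh may be freed here */
	/* ... jh is off-limits now; operate on bh only ... */
	__brelse(bh);
}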
