/* journal.c — reiserfs journal code (excerpt) */
reiserfs_panic(s, "journal-615: buffer write failed\n") ; } atomic_dec(&(jl->j_commit_left)) ; bforget(jl->j_commit_bh) ; /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */ cleanup_freed_for_journal_list(s, jl) ; if (flushall) { atomic_set(&(jl->j_older_commits_done), 1) ; } atomic_set(&(jl->j_commit_flushing), 0) ; wake_up(&(jl->j_commit_wait)) ; s->s_dirt = 1 ; return 0 ;}/*** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or ** returns NULL if it can't find anything */static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) { kdev_t dev = cn->dev; unsigned long blocknr = cn->blocknr ; cn = cn->hprev ; while(cn) { if (cn->dev == dev && cn->blocknr == blocknr && cn->jlist) { return cn->jlist ; } cn = cn->hprev ; } return NULL ;}/*** once all the real blocks have been flushed, it is safe to remove them from the** journal list for this transaction. Aside from freeing the cnode, this also allows the** block to be reallocated for data blocks if it had been deleted.*/static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) { struct buffer_head fake_bh ; struct reiserfs_journal_cnode *cn, *last ; cn = jl->j_realblock ; /* which is better, to lock once around the whole loop, or ** to lock for each call to remove_from_journal_list? */ while(cn) { if (cn->blocknr != 0) { if (debug) { printk("block %lu, bh is %d, state %ld\n", cn->blocknr, cn->bh ? 
1: 0, cn->state) ; } fake_bh.b_blocknr = cn->blocknr ; fake_bh.b_dev = cn->dev ; cn->state = 0 ; remove_from_journal_list(p_s_sb, jl, &fake_bh, 1) ; } last = cn ; cn = cn->next ; free_cnode(p_s_sb, last) ; } jl->j_realblock = NULL ;}/*** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start** releasing blocks in this transaction for reuse as data blocks.** called by flush_journal_list, before it calls remove_all_from_journal_list***/static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) { struct reiserfs_journal_header *jh ; if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) { if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh))) { wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ; if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) { reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n") ; } } SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ; SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset ; jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ; jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ; jh->j_first_unflushed_offset = cpu_to_le32(offset) ; jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ; set_bit(BH_Dirty, &(SB_JOURNAL(p_s_sb)->j_header_bh->b_state)) ; ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ; wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ; if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) { printk( "reiserfs: journal-837: IO error during journal replay\n" ); return -EIO ; } } return 0 ;}static int update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) { if (_update_journal_header_block(p_s_sb, offset, trans_id)) { reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n") ; } return 0 
;}/* ** flush any and all journal lists older than you are ** can only be called from flush_journal_list*/static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, unsigned long trans_id) { int i, index ; struct reiserfs_journal_list *other_jl ; index = jl - SB_JOURNAL_LIST(p_s_sb) ; for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) { other_jl = SB_JOURNAL_LIST(p_s_sb) + ((index + i) % JOURNAL_LIST_COUNT) ; if (other_jl && other_jl->j_len > 0 && other_jl->j_trans_id > 0 && other_jl->j_trans_id < trans_id && other_jl != jl) { /* do not flush all */ flush_journal_list(p_s_sb, other_jl, 0) ; } } return 0 ;}static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { if (buffer_journaled(bh)) { reiserfs_warning("clm-2084: pinned buffer %lu:%s sent to disk\n", bh->b_blocknr, kdevname(bh->b_dev)) ; } mark_buffer_uptodate(bh, uptodate) ; unlock_buffer(bh) ; put_bh(bh) ;}static void submit_logged_buffer(struct buffer_head *bh) { lock_buffer(bh) ; get_bh(bh) ; bh->b_end_io = reiserfs_end_buffer_io_sync ; mark_buffer_notjournal_new(bh) ; clear_bit(BH_Dirty, &bh->b_state) ; submit_bh(WRITE, bh) ;}/* flush a journal list, both commit and real blocks**** always set flushall to 1, unless you are calling from inside** flush_journal_list**** IMPORTANT. This can only be called while there are no journal writers, ** and the journal is locked. 
That means it can only be called from ** do_journal_end, or by journal_release*/static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) { struct reiserfs_journal_list *pjl ; struct reiserfs_journal_cnode *cn, *last ; int count ; int was_jwait = 0 ; int was_dirty = 0 ; struct buffer_head *saved_bh ; unsigned long j_len_saved = jl->j_len ; if (j_len_saved <= 0) { return 0 ; } if (atomic_read(&SB_JOURNAL(s)->j_wcount) != 0) { reiserfs_warning("clm-2048: flush_journal_list called with wcount %d\n", atomic_read(&SB_JOURNAL(s)->j_wcount)) ; } /* if someone is getting the commit list, we must wait for them */ while (atomic_read(&(jl->j_commit_flushing))) { sleep_on(&(jl->j_commit_wait)) ; } /* if someone is flushing this list, we must wait for them */ while (atomic_read(&(jl->j_flushing))) { sleep_on(&(jl->j_flush_wait)) ; } /* this list is now ours, we can change anything we want */ atomic_set(&(jl->j_flushing), 1) ; count = 0 ; if (j_len_saved > JOURNAL_TRANS_MAX) { reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, list number %d\n", j_len_saved, jl - SB_JOURNAL_LIST(s)) ; atomic_dec(&(jl->j_flushing)) ; return 0 ; } /* if all the work is already done, get out of here */ if (atomic_read(&(jl->j_nonzerolen)) <= 0 && atomic_read(&(jl->j_commit_left)) <= 0) { goto flush_older_and_return ; } /* start by putting the commit list on disk. This will also flush ** the commit lists of any olders transactions */ flush_commit_list(s, jl, 1) ; /* are we done now? 
*/ if (atomic_read(&(jl->j_nonzerolen)) <= 0 && atomic_read(&(jl->j_commit_left)) <= 0) { goto flush_older_and_return ; } /* loop through each cnode, see if we need to write it, ** or wait on a more recent transaction, or just ignore it */ if (atomic_read(&(SB_JOURNAL(s)->j_wcount)) != 0) { reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ; } cn = jl->j_realblock ; while(cn) { was_jwait = 0 ; was_dirty = 0 ; saved_bh = NULL ; /* blocknr of 0 is no longer in the hash, ignore it */ if (cn->blocknr == 0) { goto free_cnode ; } pjl = find_newer_jl_for_cn(cn) ; /* the order is important here. We check pjl to make sure we ** don't clear BH_JDirty_wait if we aren't the one writing this ** block to disk */ if (!pjl && cn->bh) { saved_bh = cn->bh ; /* we do this to make sure nobody releases the buffer while ** we are working with it */ get_bh(saved_bh) ; if (buffer_journal_dirty(saved_bh)) { was_jwait = 1 ; mark_buffer_notjournal_dirty(saved_bh) ; /* undo the inc from journal_mark_dirty */ put_bh(saved_bh) ; } if (can_dirty(cn)) { was_dirty = 1 ; } } /* if someone has this block in a newer transaction, just make ** sure they are commited, and don't try writing it to disk */ if (pjl) { flush_commit_list(s, pjl, 1) ; goto free_cnode ; } /* bh == NULL when the block got to disk on its own, OR, ** the block got freed in a future transaction */ if (saved_bh == NULL) { goto free_cnode ; } /* this should never happen. kupdate_one_transaction has this list ** locked while it works, so we should never see a buffer here that ** is not marked JDirty_wait */ if ((!was_jwait) && !buffer_locked(saved_bh)) {printk("journal-813: BAD! buffer %lu %cdirty %cjwait, not in a newer tranasction\n", saved_bh->b_blocknr, was_dirty ? ' ' : '!', was_jwait ? 
' ' : '!') ; } /* kupdate_one_transaction waits on the buffers it is writing, so we ** should never see locked buffers here */ if (buffer_locked(saved_bh)) { printk("clm-2083: locked buffer %lu in flush_journal_list\n", saved_bh->b_blocknr) ; wait_on_buffer(saved_bh) ; if (!buffer_uptodate(saved_bh)) { reiserfs_panic(s, "journal-923: buffer write failed\n") ; } } if (was_dirty) { /* we inc again because saved_bh gets decremented at free_cnode */ get_bh(saved_bh) ; set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ; submit_logged_buffer(saved_bh) ; count++ ; } else { printk("clm-2082: Unable to flush buffer %lu in flush_journal_list\n", saved_bh->b_blocknr) ; }free_cnode: last = cn ; cn = cn->next ; if (saved_bh) { /* we incremented this to keep others from taking the buffer head away */ put_bh(saved_bh) ; if (atomic_read(&(saved_bh->b_count)) < 0) { printk("journal-945: saved_bh->b_count < 0") ; } } } if (count > 0) { cn = jl->j_realblock ; while(cn) { if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) { if (!cn->bh) { reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ; } wait_on_buffer(cn->bh) ; if (!cn->bh) { reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ; } if (!buffer_uptodate(cn->bh)) { reiserfs_panic(s, "journal-949: buffer write failed\n") ; } refile_buffer(cn->bh) ; brelse(cn->bh) ; } cn = cn->next ; } }flush_older_and_return: /* before we can update the journal header block, we _must_ flush all ** real blocks from all older transactions to disk. This is because ** once the header block is updated, this transaction will not be ** replayed after a crash */ if (flushall) { flush_older_journal_lists(s, jl, jl->j_trans_id) ; } /* before we can remove everything from the hash tables for this ** transaction, we must make sure it can never be replayed ** ** since we are only called from do_journal_end, we know for sure there ** are no allocations going on while we are flushing journal lists. 
So, ** we only need to update the journal header block for the last list ** being flushed */ if (flushall) { update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % JOURNAL_BLOCK_COUNT, jl->j_trans_id) ; } remove_all_from_journal_list(s, jl, 0) ; jl->j_len = 0 ; atomic_set(&(jl->j_nonzerolen), 0) ; jl->j_start = 0 ; jl->j_realblock = NULL ; jl->j_commit_bh = NULL ; jl->j_trans_id = 0 ; atomic_dec(&(jl->j_flushing)) ; wake_up(&(jl->j_flush_wait)) ; return 0 ;} static int kupdate_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal_list *pjl ; /* previous list for this cn */ struct reiserfs_journal_cnode *cn, *walk_cn ; unsigned long blocknr ; int run = 0 ; int orig_trans_id = jl->j_trans_id ; struct buffer_head *saved_bh ; int ret = 0 ;
/* (end of excerpt — code-viewer UI text removed) */