
journal.c

Project: Linux Kernel 2.6.9 for OMAP1710
Language: C
Page 1 of 5
	    spin_lock(lock);
	    goto loop_next;
	}
	if (buffer_dirty(bh)) {
	    list_del_init(&jh->list);
	    list_add(&jh->list, &tmp);
	    add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
	} else {
	    reiserfs_free_jh(bh);
	    unlock_buffer(bh);
	}
loop_next:
	put_bh(bh);
	cond_resched_lock(lock);
    }
    if (chunk.nr) {
	spin_unlock(lock);
	write_ordered_chunk(&chunk);
	spin_lock(lock);
    }
    while (!list_empty(&tmp)) {
	jh = JH_ENTRY(tmp.prev);
	bh = jh->bh;
	get_bh(bh);
	reiserfs_free_jh(bh);
	if (buffer_locked(bh)) {
	    spin_unlock(lock);
	    wait_on_buffer(bh);
	    spin_lock(lock);
	}
	if (!buffer_uptodate(bh))
	    ret = -EIO;
	put_bh(bh);
	cond_resched_lock(lock);
    }
    spin_unlock(lock);
    return ret;
}

static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl)
{
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal_list *first_jl;
    struct list_head *entry;
    unsigned long trans_id = jl->j_trans_id;
    unsigned long other_trans_id;
    unsigned long first_trans_id;

find_first:
    /*
     * first we walk backwards to find the oldest uncommitted transaction
     */
    first_jl = jl;
    entry = jl->j_list.prev;
    while (1) {
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (entry == &SB_JOURNAL(s)->j_journal_list ||
	    atomic_read(&other_jl->j_older_commits_done))
	    break;
	first_jl = other_jl;
	entry = other_jl->j_list.prev;
    }

    /* if we didn't find any older uncommitted transactions, return now */
    if (first_jl == jl) {
	return 0;
    }

    first_trans_id = first_jl->j_trans_id;

    entry = &first_jl->j_list;
    while (1) {
	other_jl = JOURNAL_LIST_ENTRY(entry);
	other_trans_id = other_jl->j_trans_id;

	if (other_trans_id < trans_id) {
	    if (atomic_read(&other_jl->j_commit_left) != 0) {
		flush_commit_list(s, other_jl, 0);

		/* list we were called with is gone, return */
		if (!journal_list_still_alive(s, trans_id))
		    return 1;

		/* the one we just flushed is gone, this means all
		 * older lists are also gone, so first_jl is no longer
		 * valid either.  Go back to the beginning.
		 */
		if (!journal_list_still_alive(s, other_trans_id)) {
		    goto find_first;
		}
	    }
	    entry = entry->next;
	    if (entry == &SB_JOURNAL(s)->j_journal_list)
		return 0;
	} else {
	    return 0;
	}
    }
    return 0;
}

int reiserfs_async_progress_wait(struct super_block *s)
{
    DEFINE_WAIT(wait);
    struct reiserfs_journal *j = SB_JOURNAL(s);
    if (atomic_read(&j->j_async_throttle))
	blk_congestion_wait(WRITE, HZ / 10);
    return 0;
}
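/* Note: reiserfs_async_progress_wait() pairs with the j_async_throttle
 * increment in flush_commit_list() below: while one transaction's log
 * blocks are being submitted, other writers can call it to back off until
 * the write congestion clears (or HZ/10 elapses).
 */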
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall)
{
  int i;
  int bn;
  struct buffer_head *tbh = NULL;
  unsigned long trans_id = jl->j_trans_id;
  int barrier = 0;

  reiserfs_check_lock_depth(s, "flush_commit_list");

  if (atomic_read(&jl->j_older_commits_done)) {
    return 0;
  }

  /* before we can put our commit blocks on disk, we have to make sure everyone older than
  ** us is on disk too
  */
  if (jl->j_len <= 0)
    BUG();
  if (trans_id == SB_JOURNAL(s)->j_trans_id)
    BUG();

  get_journal_list(jl);
  if (flushall) {
    if (flush_older_commits(s, jl) == 1) {
      /* list disappeared during flush_older_commits.  return */
      goto put_jl;
    }
  }

  /* make sure nobody is trying to flush this one at the same time */
  down(&jl->j_commit_lock);
  if (!journal_list_still_alive(s, trans_id)) {
    up(&jl->j_commit_lock);
    goto put_jl;
  }
  if (jl->j_trans_id == 0)
    BUG();

  /* this commit is done, exit */
  if (atomic_read(&(jl->j_commit_left)) <= 0) {
    if (flushall) {
      atomic_set(&(jl->j_older_commits_done), 1);
    }
    up(&jl->j_commit_lock);
    goto put_jl;
  }

  if (!list_empty(&jl->j_bh_list)) {
    unlock_kernel();
    write_ordered_buffers(&SB_JOURNAL(s)->j_dirty_buffers_lock,
                          SB_JOURNAL(s), jl, &jl->j_bh_list);
    lock_kernel();
  }
  if (!list_empty(&jl->j_bh_list))
    BUG();

  /*
   * for the description block and all the log blocks, submit any buffers
   * that haven't already reached the disk
   */
  atomic_inc(&SB_JOURNAL(s)->j_async_throttle);
  for (i = 0; i < (jl->j_len + 1); i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
         SB_ONDISK_JOURNAL_SIZE(s);
    tbh = journal_find_get_block(s, bn);
    if (buffer_dirty(tbh))
      ll_rw_block(WRITE, 1, &tbh);
    put_bh(tbh);
  }
  atomic_dec(&SB_JOURNAL(s)->j_async_throttle);

  /* wait on everything written so far before writing the commit
   * if we are in barrier mode, send the commit down now
   */
  barrier = reiserfs_barrier_flush(s);
  if (barrier) {
    int ret;
    lock_buffer(jl->j_commit_bh);
    ret = submit_barrier_buffer(jl->j_commit_bh);
    if (ret == -EOPNOTSUPP) {
      set_buffer_uptodate(jl->j_commit_bh);
      disable_barrier(s);
      barrier = 0;
    }
  }
  for (i = 0; i < (jl->j_len + 1); i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
         (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
    tbh = journal_find_get_block(s, bn);
    wait_on_buffer(tbh);
    /* since we're using ll_rw_blk above, it might have skipped over
     * a locked buffer.  Double check here
     */
    if (buffer_dirty(tbh))
      sync_dirty_buffer(tbh);
    if (!buffer_uptodate(tbh)) {
      reiserfs_panic(s, "journal-601, buffer write failed\n");
    }
    put_bh(tbh);    /* once for journal_find_get_block */
    put_bh(tbh);    /* once due to original getblk in do_journal_end */
    atomic_dec(&(jl->j_commit_left));
  }

  if (atomic_read(&(jl->j_commit_left)) != 1)
    BUG();

  if (!barrier) {
    if (buffer_dirty(jl->j_commit_bh))
      BUG();
    mark_buffer_dirty(jl->j_commit_bh);
    sync_dirty_buffer(jl->j_commit_bh);
  } else
    wait_on_buffer(jl->j_commit_bh);

  check_barrier_completion(s, jl->j_commit_bh);

  if (!buffer_uptodate(jl->j_commit_bh)) {
    reiserfs_panic(s, "journal-615: buffer write failed\n");
  }
  bforget(jl->j_commit_bh);
  if (SB_JOURNAL(s)->j_last_commit_id != 0 &&
      (jl->j_trans_id - SB_JOURNAL(s)->j_last_commit_id) != 1) {
    reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                     SB_JOURNAL(s)->j_last_commit_id,
                     jl->j_trans_id);
  }
  SB_JOURNAL(s)->j_last_commit_id = jl->j_trans_id;

  /* now, every commit block is on the disk.  It is safe to allow
  ** blocks freed during this transaction to be reallocated
  */
  cleanup_freed_for_journal_list(s, jl);

  /* mark the metadata dirty */
  dirty_one_transaction(s, jl);
  atomic_dec(&(jl->j_commit_left));

  if (flushall) {
    atomic_set(&(jl->j_older_commits_done), 1);
  }
  up(&jl->j_commit_lock);
put_jl:
  put_journal_list(s, jl);
  return 0;
}
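/* Ordering enforced above: older transactions are committed first
 * (flush_older_commits), then the description and log blocks are written
 * and waited on, and only then does the commit block itself go out --
 * through a write barrier when available, or a plain synchronous write
 * once -EOPNOTSUPP has disabled barriers for this mount.
 */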
/*
** flush_journal_list frequently needs to find a newer transaction for a
** given block.  This does that, or returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
  struct super_block *sb = cn->sb;
  b_blocknr_t blocknr = cn->blocknr;

  cn = cn->hprev;
  while (cn) {
    if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
      return cn->jlist;
    }
    cn = cn->hprev;
  }
  return NULL;
}

void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
                         struct reiserfs_journal_list *, unsigned long, int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
  struct reiserfs_journal_cnode *cn, *last;
  cn = jl->j_realblock;

  /* which is better, to lock once around the whole loop, or
  ** to lock for each call to remove_journal_hash?
  */
  while (cn) {
    if (cn->blocknr != 0) {
      if (debug) {
        reiserfs_warning(p_s_sb, "block %u, bh is %d, state %ld", cn->blocknr,
                         cn->bh ? 1 : 0, cn->state);
      }
      cn->state = 0;
      remove_journal_hash(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, jl, cn->blocknr, 1);
    }
    last = cn;
    cn = cn->next;
    free_cnode(p_s_sb, last);
  }
  jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
  struct reiserfs_journal_header *jh;
  if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) {
    if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh))) {
      wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh));
      if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
        reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n");
      }
    }
    SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id;
    SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset;
    jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data);
    jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
    jh->j_first_unflushed_offset = cpu_to_le32(offset);
    jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id);

    if (reiserfs_barrier_flush(p_s_sb)) {
      int ret;
      lock_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
      ret = submit_barrier_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
      if (ret == -EOPNOTSUPP) {
        set_buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh);
        disable_barrier(p_s_sb);
        goto sync;
      }
      wait_on_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
      check_barrier_completion(p_s_sb, SB_JOURNAL(p_s_sb)->j_header_bh);
    } else {
sync:
      set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh);
      sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
    }
    if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
      reiserfs_warning(p_s_sb, "journal-837: IO error during journal replay");
      return -EIO;
    }
  }
  return 0;
}
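/* The wrapper below treats a header-write failure as fatal: once the header
 * has been updated, blocks logged by flushed transactions start being
 * reused, so a header that silently failed to reach disk could let a stale
 * log area be replayed after a crash.
 */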
static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id) {
    if (_update_journal_header_block(p_s_sb, offset, trans_id)) {
        reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n");
    }
    return 0;
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
                                     struct reiserfs_journal_list *jl)
{
    struct list_head *entry;
    struct reiserfs_journal_list *other_jl;
    unsigned long trans_id = jl->j_trans_id;

    /* we know we are the only ones flushing things, no extra race
     * protection is required.
     */
restart:
    entry = SB_JOURNAL(p_s_sb)->j_journal_list.next;
    other_jl = JOURNAL_LIST_ENTRY(entry);
    if (other_jl->j_trans_id < trans_id) {
        /* do not flush all */
        flush_journal_list(p_s_sb, other_jl, 0);

        /* other_jl is now deleted from the list */
        goto restart;
    }
    return 0;
}

static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl) {
    if (!list_empty(&jl->j_working_list)) {
        list_del_init(&jl->j_working_list);
        SB_JOURNAL(s)->j_num_work_lists--;
    }
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) {
