
journal.c
Linux Kernel 2.6.9 for OMAP1710
Language: C
Page 1 of 5
  if (bh)
    return test_bit(BH_JNew, &bh->b_state) ;
  else
    return 0 ;
}

inline int mark_buffer_journal_new(struct buffer_head *bh) {
  if (bh) {
    set_bit(BH_JNew, &bh->b_state) ;
  }
  return 0 ;
}

inline int mark_buffer_not_journaled(struct buffer_head *bh) {
  if (bh)
    clear_bit(BH_JDirty, &bh->b_state) ;
  return 0 ;
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller) {
#ifdef CONFIG_SMP
  if (current->lock_depth < 0) {
    reiserfs_panic (sb, "%s called without kernel lock held", caller) ;
  }
#else
  ;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
                     struct reiserfs_journal_cnode **table,
                     long bl)
{
  struct reiserfs_journal_cnode *cn ;
  cn = journal_hash(table, sb, bl) ;
  while(cn) {
    if (cn->blocknr == bl && cn->sb == sb)
      return cn ;
    cn = cn->hnext ;
  }
  return (struct reiserfs_journal_cnode *)0 ;
}

/* returns a cnode with same size, block number and dev as bh in the
** current transaction hash.  NULL if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
  struct reiserfs_journal_cnode *cn ;
  if (bh) {
    cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, bh->b_blocknr);
  }
  else {
    return (struct reiserfs_journal_cnode *)0 ;
  }
  return cn ;
}
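The two lookup helpers above walk a chained hash table keyed by block number, and insert_journal_hash() later in this file pushes new cnodes onto the head of the same chains. Below is a minimal userspace sketch of that chained lookup and head insertion; the struct layout, the bucket() helper, and the modulo hash are simplified stand-ins for illustration, not the kernel's definitions.

/* Minimal userspace sketch of the chained-hash lookup that
 * get_journal_hash_dev() performs.  Compiles standalone. */
#include <stdio.h>

#define TABLE_SIZE 8192

struct cnode {
    unsigned long blocknr;   /* key: block number */
    struct cnode *hnext;     /* forward link in the hash chain */
};

/* stand-in for the kernel's journal_hash() bucket computation */
static struct cnode **bucket(struct cnode **table, unsigned long bl)
{
    return &table[bl % TABLE_SIZE];
}

/* walk the chain, as get_journal_hash_dev() does with cn->hnext */
static struct cnode *lookup(struct cnode **table, unsigned long bl)
{
    struct cnode *cn = *bucket(table, bl);
    while (cn) {
        if (cn->blocknr == bl)
            return cn;
        cn = cn->hnext;
    }
    return NULL;
}

int main(void)
{
    static struct cnode *table[TABLE_SIZE];   /* zero-initialized */
    struct cnode a = { .blocknr = 42, .hnext = NULL };

    /* insert at the head of the chain, like insert_journal_hash() */
    a.hnext = *bucket(table, a.blocknr);
    *bucket(table, a.blocknr) = &a;

    printf("found 42: %s\n", lookup(table, 42) ? "yes" : "no");
    printf("found 43: %s\n", lookup(table, 43) ? "yes" : "no");
    return 0;
}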
/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit) {
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_list_bitmap *jb ;
  int i ;
  unsigned long bl;

  *next_zero_bit = 0 ; /* always start this at zero. */

  PROC_INFO_INC( p_s_sb, journal.in_journal );
  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
  ** if we crash before the transaction that freed it commits,  this transaction won't
  ** have committed either, and the block will never be written
  */
  if (search_all) {
    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
      PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
      jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
          test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
        *next_zero_bit = find_next_zero_bit((unsigned long *)
                                     (jb->bitmaps[bmap_nr]->data),
                                     p_s_sb->s_blocksize << 3, bit_nr+1) ;
        return 1 ;
      }
    }
  }

  bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
  /* is it in any old transactions? */
  if (search_all && (cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, bl))) {
    return 1;
  }

  /* is it in the current transaction.  This should never happen */
  if ((cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, bl))) {
    BUG();
    return 1;
  }

  PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
  /* safe for reuse */
  return 0 ;
}

/* insert cn into table
*/
inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal_cnode *cn_orig ;

  cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
  cn->hnext = cn_orig ;
  cn->hprev = NULL ;
  if (cn_orig) {
    cn_orig->hprev = cn ;
  }
  journal_hash(table, cn->sb, cn->blocknr) = cn ;
}

/* lock the current transaction */
inline static void lock_journal(struct super_block *p_s_sb) {
    PROC_INFO_INC( p_s_sb, journal.lock_journal );
    down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
inline static void unlock_journal(struct super_block *p_s_sb) {
    up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
    jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
{
    if (jl->j_refcount < 1) {
        reiserfs_panic (s, "trans id %lu, refcount at %d", jl->j_trans_id,
                                                 jl->j_refcount);
    }
    if (--jl->j_refcount == 0)
        reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
  if (jb) {
    cleanup_bitmap_list(p_s_sb, jb) ;
  }
  jl->j_list_bitmap->journal_list = NULL ;
  jl->j_list_bitmap = NULL ;
}

static int journal_list_still_alive(struct super_block *s,
                                    unsigned long trans_id)
{
    struct list_head *entry = &SB_JOURNAL(s)->j_journal_list;
    struct reiserfs_journal_list *jl;

    if (!list_empty(entry)) {
        jl = JOURNAL_LIST_ENTRY(entry->next);
        if (jl->j_trans_id <= trans_id) {
            return 1;
        }
    }
    return 0;
}
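reiserfs_in_journal() above rejects a candidate block when its bit is set in any journal-list bitmap, and hands the allocator a retry hint through next_zero_bit. The following userspace sketch shows only that hint logic; test_bit_() and next_zero_bit() here are naive stand-ins for the kernel's test_bit()/find_next_zero_bit(), written out purely for illustration.

/* Userspace sketch of the next_zero_bit hint in reiserfs_in_journal():
 * when a candidate bit is busy, scan forward for the next clear bit
 * so the allocator can retry there.  Compiles standalone. */
#include <stdio.h>

#define BITS 64

static int test_bit_(int nr, const unsigned long *map)
{
    return (map[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1;
}

/* naive stand-in for find_next_zero_bit(map, size, offset) */
static int next_zero_bit(const unsigned long *map, int size, int offset)
{
    for (int i = offset; i < size; i++)
        if (!test_bit_(i, map))
            return i;
    return size;
}

int main(void)
{
    unsigned long map[2] = { 0x0FUL, 0 };   /* bits 0-3 busy in the journal */
    int bit = 2;

    if (test_bit_(bit, map)) {
        int hint = next_zero_bit(map, BITS, bit + 1);
        printf("bit %d is journaled; retry at bit %d\n", bit, hint);
    }
    return 0;
}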
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
    char b[BDEVNAME_SIZE];

    if (buffer_journaled(bh)) {
        reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk",
                         bh->b_blocknr, bdevname(bh->b_bdev, b)) ;
    }
    if (uptodate)
        set_buffer_uptodate(bh) ;
    else
        clear_buffer_uptodate(bh) ;
    unlock_buffer(bh) ;
    put_bh(bh) ;
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) {
    if (uptodate)
        set_buffer_uptodate(bh) ;
    else
        clear_buffer_uptodate(bh) ;
    unlock_buffer(bh) ;
    put_bh(bh) ;
}

static void submit_logged_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_buffer_io_sync ;
    mark_buffer_notjournal_new(bh) ;
    clear_buffer_dirty(bh) ;
    if (!test_and_clear_bit(BH_JTest, &bh->b_state))
        BUG();
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh) ;
}

static void submit_ordered_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh) ;
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh) ;
}

static int submit_barrier_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh) ;
    if (!buffer_uptodate(bh))
        BUG();
    return submit_bh(WRITE_BARRIER, bh) ;
}

static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh) {
    if (buffer_eopnotsupp(bh)) {
        clear_buffer_eopnotsupp(bh);
        disable_barrier(s);
        set_buffer_uptodate(bh);
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
    }
}

#define CHUNK_SIZE 32
struct buffer_chunk {
    struct buffer_head *bh[CHUNK_SIZE];
    int nr;
};

static void write_chunk(struct buffer_chunk *chunk) {
    int i;
    for (i = 0; i < chunk->nr ; i++) {
        submit_logged_buffer(chunk->bh[i]) ;
    }
    chunk->nr = 0;
}

static void write_ordered_chunk(struct buffer_chunk *chunk) {
    int i;
    for (i = 0; i < chunk->nr ; i++) {
        submit_ordered_buffer(chunk->bh[i]) ;
    }
    chunk->nr = 0;
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock,
                        void (fn)(struct buffer_chunk *))
{
    int ret = 0;
    if (chunk->nr >= CHUNK_SIZE)
        BUG();
    chunk->bh[chunk->nr++] = bh;
    if (chunk->nr >= CHUNK_SIZE) {
        ret = 1;
        if (lock)
            spin_unlock(lock);
        fn(chunk);
        if (lock)
            spin_lock(lock);
    }
    return ret;
}

atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void) {
    struct reiserfs_jh *jh;
    while(1) {
        jh = kmalloc(sizeof(*jh), GFP_NOFS);
        if (jh) {
            atomic_inc(&nr_reiserfs_jh);
            return jh;
        }
        yield();
    }
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh) {
    struct reiserfs_jh *jh;

    jh = bh->b_private;
    if (jh) {
        bh->b_private = NULL;
        jh->bh = NULL;
        list_del_init(&jh->list);
        kfree(jh);
        if (atomic_read(&nr_reiserfs_jh) <= 0)
            BUG();
        atomic_dec(&nr_reiserfs_jh);
        put_bh(bh);
    }
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
{
    struct reiserfs_jh *jh;

    if (bh->b_private) {
        spin_lock(&j->j_dirty_buffers_lock);
        if (!bh->b_private) {
            spin_unlock(&j->j_dirty_buffers_lock);
            goto no_jh;
        }
        jh = bh->b_private;
        list_del_init(&jh->list);
    } else {
no_jh:
        get_bh(bh);
        jh = alloc_jh();
        spin_lock(&j->j_dirty_buffers_lock);
        /* buffer must be locked for __add_jh, so it should not be
         * possible to have two adds at the same time
         */
        if (bh->b_private)
            BUG();
        jh->bh = bh;
        bh->b_private = jh;
    }
    jh->jl = j->j_current_jl;
    if (tail)
        list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
    else {
        list_add_tail(&jh->list, &jh->jl->j_bh_list);
    }
    spin_unlock(&j->j_dirty_buffers_lock);
    return 0;
}
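The buffer_chunk code above batches up to CHUNK_SIZE buffers and submits them through a callback, with add_to_chunk() dropping the spinlock around the submission. Here is a compilable userspace sketch of that batching pattern, with ints standing in for buffer_heads and printf for submit_bh(); the names chunk, add(), and flush() are illustrative only.

/* Userspace sketch of the buffer_chunk batching: callers queue items
 * into a fixed array and the chunk is flushed through a callback once
 * CHUNK_SIZE entries accumulate. */
#include <stdio.h>

#define CHUNK_SIZE 32

struct chunk {
    int items[CHUNK_SIZE];
    int nr;
};

static void flush(struct chunk *c)
{
    for (int i = 0; i < c->nr; i++)
        printf("submit item %d\n", c->items[i]);
    c->nr = 0;
}

/* returns 1 if the add triggered a flush, like add_to_chunk();
 * in the kernel code the spinlock is dropped around fn() */
static int add(struct chunk *c, int item, void (*fn)(struct chunk *))
{
    c->items[c->nr++] = item;
    if (c->nr >= CHUNK_SIZE) {
        fn(c);
        return 1;
    }
    return 0;
}

int main(void)
{
    struct chunk c = { .nr = 0 };
    for (int i = 0; i < 40; i++)
        add(&c, i, flush);
    if (c.nr)          /* flush the partial tail, as write_chunk() would */
        flush(&c);
    return 0;
}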
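__add_jh() attaches a small reiserfs_jh tracking struct to a buffer via bh->b_private and links it onto the current transaction's list, so tail or ordered buffers can be found and written later. The sketch below mirrors only the first-time attach step; the struct names, the singly linked list, and add_tail() are simplified assumptions, and the kernel's locking and re-add path are omitted.

/* Userspace sketch of the reiserfs_jh attach pattern in __add_jh(). */
#include <stdio.h>
#include <stdlib.h>

struct buf {
    void *private_data;   /* stand-in for bh->b_private */
    int blocknr;
};

struct jh {
    struct buf *bh;
    struct jh *next;      /* stand-in for the list_head linkage */
};

static struct jh *tail_list;   /* stand-in for jl->j_tail_bh_list */

static void add_tail(struct buf *bh)
{
    struct jh *jh = bh->private_data;

    if (!jh) {                       /* first time: allocate and attach */
        jh = malloc(sizeof(*jh));
        jh->bh = bh;
        bh->private_data = jh;
    }
    jh->next = tail_list;            /* link onto the current list */
    tail_list = jh;
}

int main(void)
{
    struct buf b = { .private_data = NULL, .blocknr = 7 };
    add_tail(&b);
    printf("tracked block %d\n", ((struct jh *)b.private_data)->bh->blocknr);
    free(b.private_data);
    return 0;
}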
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
{
    struct buffer_head *bh;
    struct reiserfs_jh *jh;
    int ret = 0;
    struct buffer_chunk chunk;
    struct list_head tmp;
    INIT_LIST_HEAD(&tmp);

    chunk.nr = 0;
    spin_lock(lock);
    while(!list_empty(list)) {
        jh = JH_ENTRY(list->next);
        bh = jh->bh;
        get_bh(bh);
        if (test_set_buffer_locked(bh)) {
            if (!buffer_dirty(bh)) {
                list_del_init(&jh->list);
                list_add(&jh->list, &tmp);
                goto loop_next;
            }
            spin_unlock(lock);
            if (chunk.nr)
                write_ordered_chunk(&chunk);
            wait_on_buffer(bh);
            cond_resched();
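write_ordered_buffers() (the listing continues on the following pages) tries to lock each buffer without blocking; when a buffer is already locked, it first submits any batched chunk and only then waits on it, so pending work is not held across the sleep. A minimal single-threaded sketch of that ordering follows; the int flag and the names try_set_locked() and flush_chunk() are illustrative stand-ins, not kernel API.

/* Userspace sketch of the trylock pattern at the start of
 * write_ordered_buffers(). */
#include <stdio.h>

static int try_set_locked(int *lockbit)
{
    int old = *lockbit;   /* test-and-set, non-atomic: sketch only */
    *lockbit = 1;
    return old;
}

static void flush_chunk(int *pending)
{
    if (*pending)
        printf("submitting %d batched buffers before waiting\n", *pending);
    *pending = 0;
}

int main(void)
{
    int lockbit = 1;      /* buffer already locked by writeback */
    int pending = 3;      /* buffers batched but not yet submitted */

    if (try_set_locked(&lockbit)) {
        /* busy: push out batched work so it isn't held across the wait,
         * mirroring write_ordered_chunk() + wait_on_buffer() */
        flush_chunk(&pending);
        printf("waiting for buffer to unlock\n");
    }
    return 0;
}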
