
journal.c

Linux kernel source code | C
Page 1 of 5
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/time.h>
#include <asm/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
                                   structs at 4k */
#define BUFNR 64                /* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2           /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */
#define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4  /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1           /* flush commit and real blocks */
#define COMMIT_NOW  2           /* end and commit this transaction */
#define WAIT        4           /* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
                          struct super_block *, unsigned long nblocks,
                          int flags);

static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
        JBEGIN_REG = 0,         /* regular journal begin */
        JBEGIN_JOIN = 1,        /* join the running transaction if at all possible */
        JBEGIN_ABORT = 2,       /* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb,
                              unsigned long nblocks, int join);

static void init_journal_hash(struct super_block *p_s_sb)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        memset(journal->j_hash_table, 0,
               JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
        if (bh) {
                clear_buffer_dirty(bh);
                clear_buffer_journal_test(bh);
        }
        return 0;
}

static void disable_barrier(struct super_block *s)
{
        REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
        printk("reiserfs: disabling flush barriers on %s\n",
               reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
                                                         *p_s_sb)
{
        struct reiserfs_bitmap_node *bn;
        static int id;

        bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
        if (!bn) {
                return NULL;
        }
        bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
        if (!bn->data) {
                kfree(bn);
                return NULL;
        }
        bn->id = id++;
        INIT_LIST_HEAD(&bn->list);
        return bn;
}

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        struct reiserfs_bitmap_node *bn = NULL;
        struct list_head *entry = journal->j_bitmap_nodes.next;

        journal->j_used_bitmap_nodes++;
      repeat:

        if (entry != &journal->j_bitmap_nodes) {
                bn = list_entry(entry, struct reiserfs_bitmap_node, list);
                list_del(entry);
                memset(bn->data, 0, p_s_sb->s_blocksize);
                journal->j_free_bitmap_nodes--;
                return bn;
        }
        bn = allocate_bitmap_node(p_s_sb);
        if (!bn) {
                yield();
                goto repeat;
        }
        return bn;
}

static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        journal->j_used_bitmap_nodes--;
        if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
                kfree(bn->data);
                kfree(bn);
        } else {
                list_add(&bn->list, &journal->j_bitmap_nodes);
                journal->j_free_bitmap_nodes++;
        }
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
        int i;
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        struct reiserfs_bitmap_node *bn = NULL;
        for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
                bn = allocate_bitmap_node(p_s_sb);
                if (bn) {
                        list_add(&bn->list, &journal->j_bitmap_nodes);
                        journal->j_free_bitmap_nodes++;
                } else {
                        break;  /* this is ok, we'll try again when more are needed */
                }
        }
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb,
                                  b_blocknr_t block,
                                  struct reiserfs_list_bitmap *jb)
{
        unsigned int bmap_nr = block / (p_s_sb->s_blocksize << 3);
        unsigned int bit_nr = block % (p_s_sb->s_blocksize << 3);

        if (!jb->bitmaps[bmap_nr]) {
                jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
        }
        set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
        return 0;
}

static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb)
{
        int i;
        if (jb->bitmaps == NULL)
                return;

        for (i = 0; i < reiserfs_bmap_count(p_s_sb); i++) {
                if (jb->bitmaps[i]) {
                        free_bitmap_node(p_s_sb, jb->bitmaps[i]);
                        jb->bitmaps[i] = NULL;
                }
        }
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array)
{
        int i;
        struct reiserfs_list_bitmap *jb;
        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
                jb = jb_array + i;
                jb->journal_list = NULL;
                cleanup_bitmap_list(p_s_sb, jb);
                vfree(jb->bitmaps);
                jb->bitmaps = NULL;
        }
        return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        struct list_head *next = journal->j_bitmap_nodes.next;
        struct reiserfs_bitmap_node *bn;

        while (next != &journal->j_bitmap_nodes) {
                bn = list_entry(next, struct reiserfs_bitmap_node, list);
                list_del(next);
                kfree(bn->data);
                kfree(bn);
                next = journal->j_bitmap_nodes.next;
                journal->j_free_bitmap_nodes--;
        }

        return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   unsigned int bmap_nr)
{
        int i;
        int failed = 0;
        struct reiserfs_list_bitmap *jb;
        int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
                jb = jb_array + i;
                jb->journal_list = NULL;
                jb->bitmaps = vmalloc(mem);
                if (!jb->bitmaps) {
                        reiserfs_warning(p_s_sb,
                                         "clm-2000, unable to allocate bitmaps for journal lists");
                        failed = 1;
                        break;
                }
                memset(jb->bitmaps, 0, mem);
        }
        if (failed) {
                free_list_bitmaps(p_s_sb, jb_array);
                return -1;
        }
        return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
                                                    struct reiserfs_journal_list
                                                    *jl)
{
        int i, j;
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
        struct reiserfs_list_bitmap *jb = NULL;

        for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
                i = journal->j_list_bitmap_index;
                journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
                jb = journal->j_list_bitmap + i;
                if (journal->j_list_bitmap[i].journal_list) {
                        flush_commit_list(p_s_sb,
                                          journal->j_list_bitmap[i].
                                          journal_list, 1);
                        if (!journal->j_list_bitmap[i].journal_list) {
                                break;
                        }
                } else {
                        break;
                }
        }
        if (jb->journal_list) { /* double check to make sure it flushed correctly */
                return NULL;
        }
        jb->journal_list = jl;
        return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
        struct reiserfs_journal_cnode *head;
        int i;
        if (num_cnodes <= 0) {
                return NULL;
        }
        head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
        if (!head) {
                return NULL;
        }
        memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
        head[0].prev = NULL;
        head[0].next = head + 1;
        for (i = 1; i < num_cnodes; i++) {
                head[i].prev = head + (i - 1);
                head[i].next = head + (i + 1);  /* if last one, overwrite it after the if */
        }
        head[num_cnodes - 1].next = NULL;
        return head;
}

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
        struct reiserfs_journal_cnode *cn;
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

        reiserfs_check_lock_depth(p_s_sb, "get_cnode");

        if (journal->j_cnode_free <= 0) {
                return NULL;
        }
        journal->j_cnode_used++;
        journal->j_cnode_free--;
        cn = journal->j_cnode_free_list;
        if (!cn) {
                return cn;
        }
        if (cn->next) {
                cn->next->prev = NULL;
        }
        journal->j_cnode_free_list = cn->next;
        memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
        return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
                       struct reiserfs_journal_cnode *cn)
{
        struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

        reiserfs_check_lock_depth(p_s_sb, "free_cnode");

        journal->j_cnode_used--;
        journal->j_cnode_free++;
        /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
        cn->next = journal->j_cnode_free_list;
        if (journal->j_cnode_free_list) {
                journal->j_cnode_free_list->prev = cn;
        }
        cn->prev = NULL;        /* not needed with the memset, but I might kill the memset, and forget to do this */
        journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
        clear_buffer_journal_prepared(bh);
        clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
        if (current->lock_depth < 0) {
                reiserfs_panic(sb, "%s called without kernel lock held",
                               caller);
        }
#else
        ;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
                                                                  super_block
                                                                  *sb,
                                                                  struct
                                                                  reiserfs_journal_cnode
                                                                  **table,
                                                                  long bl)
{
        struct reiserfs_journal_cnode *cn;
        cn = journal_hash(table, sb, bl);
        while (cn) {
                if (cn->blocknr == bl && cn->sb == sb)
                        return cn;
                cn = cn->hnext;
        }
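The listing above is page 1 of 5 and cuts off inside get_journal_hash_dev. The header comment at the top describes the transaction interface this file implements (journal_begin, journal_mark_dirty, journal_end). As a rough illustration of how a reiserfs caller drives that interface, here is a minimal sketch; the wrapper function name, the single-block reservation, and the simplified error handling are illustrative assumptions, not code from this file.

/* Illustrative sketch only: log one already-read buffer through the journal.
 * Assumes the caller holds the appropriate locks; the function name and the
 * block count of 1 are placeholders, not taken from journal.c itself. */
static int example_log_one_buffer(struct super_block *sb, struct buffer_head *bh)
{
        struct reiserfs_transaction_handle th;
        int err;

        /* reserve room for one logged block; this may join an existing
         * transaction instead of starting a new one */
        err = journal_begin(&th, sb, 1);
        if (err)
                return err;

        /* clear conflicting flags, mark the buffer BH_JDirty, and hash it
         * into the running transaction */
        journal_mark_dirty(&th, sb, bh);

        /* drop our reservation; the actual commit may be batched and
         * happen later */
        return journal_end(&th, sb, 1);
}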
