
buffer.c

elinux jffs initial version. Take a detailed look at the JFFS filesystem!
Language: C
Page 1 of 4
/* $Id: buffer.c,v 1.6 1999/07/16 23:52:00 bjornw Exp $
 *
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it.
 */

/* Some bdflush() changes for the dynamic ramdisk - Paul Gortmaker, 12/94 */

/* Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95 */

/* Removed a lot of unnecessary code and simplified things now that
   the buffer cache isn't our primary cache - Andrew Tridgell 12/96 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/errno.h>
#include <linux/malloc.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/bitops.h>

#undef DEBUG

#define NR_SIZES 5
static char buffersize_index[17] =
{-1,  0,  1, -1,  2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

#define MAX_UNUSED_BUFFERS 30 /* don't ever have more than this number of
                                 unused buffer heads */
#define HASH_PAGES         4  /* number of pages to use for the hash table */
#define NR_HASH (HASH_PAGES*PAGE_SIZE/sizeof(struct buffer_head *))
#define HASH_MASK (NR_HASH-1)

static int grow_buffers(int pri, int size);

static struct buffer_head ** hash_table;
static struct buffer_head * lru_list[NR_LIST] = {NULL, };
static struct buffer_head * free_list[NR_SIZES] = {NULL, };
static struct buffer_head * unused_list = NULL;
static struct buffer_head * reuse_list = NULL;
static struct wait_queue * buffer_wait = NULL;

static int nr_buffers = 0;
static int nr_buffers_type[NR_LIST] = {0,};
static int nr_buffer_heads = 0;
static int nr_unused_buffer_heads = 0;
static int refilled = 0;       /* Set NZ when a buffer freelist is refilled
                                  this is used by the loop device */

/* this is used by some architectures to estimate available memory */
int buffermem = 0;

/* Here is the parameter block for the bdflush process. If you add or
 * remove any of the parameters, make sure to update kernel/sysctl.c.
 */

static void wakeup_bdflush(int);

#define N_PARAM 9

/* the dummy values in this structure are left in there for compatibility
   with old programs that play with the /proc entries */

union bdflush_param {
        struct {
                int nfract;     /* Percentage of buffer cache dirty to
                                   activate bdflush */
                int ndirty;     /* Maximum number of dirty blocks to write out
                                   per wake-cycle */
                int nrefill;    /* Number of clean buffers to try to obtain
                                   each time we call refill */
                int nref_dirt;  /* Dirty buffer threshold for activating bdflush
                                   when trying to refill buffers. */
                int dummy1;     /* unused */
                int age_buffer; /* Time for normal buffer to age before
                                   we flush it */
                int age_super;  /* Time for superblock to age before we
                                   flush it */
                int dummy2;     /* unused */
                int dummy3;     /* unused */
        } b_un;
        unsigned int data[N_PARAM];
} bdf_prm = {{40, 500, 64, 64, 15, 30*HZ, 5*HZ, 1884, 2}};

/* These are the min and max parameter values that we will allow to be assigned */
int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   100,   100, 1, 1};
int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 60000, 60000, 2047, 5};

/*
 * Rewrote the wait-routines to use the "new" wait-queue functionality,
 * and getting rid of the cli-sti pairs. The wait-queue routines still
 * need cli-sti, but now it's just a couple of 386 instructions or so.
 *
 * Note that the real wait_on_buffer() is an inline function that checks
 * if 'b_wait' is set before calling this, so that the queues aren't set
 * up unnecessarily.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        struct wait_queue wait = { current, NULL };

        bh->b_count++;
        add_wait_queue(&bh->b_wait, &wait);
repeat:
        run_task_queue(&tq_disk);
        current->state = TASK_UNINTERRUPTIBLE;
        if (buffer_locked(bh)) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&bh->b_wait, &wait);
        bh->b_count--;
        current->state = TASK_RUNNING;
}

/* Call sync_buffers with wait!=0 to ensure that the call does not
   return until all buffer writes have completed.  Sync() may return
   before the writes have finished; fsync() may not. */

/* Godamity-damn.  Some buffers (bitmaps for filesystems)
   spontaneously dirty themselves without ever brelse being called.
   We will ultimately want to put these in a separate list, but for
   now we search all of the lists for dirty buffers */

static int sync_buffers(kdev_t dev, int wait)
{
        int i, retry, pass = 0, err = 0;
        struct buffer_head * bh, *next;

        /* One pass for no-wait, three for wait:
           0) write out all dirty, unlocked buffers;
           1) write out all dirty buffers, waiting if locked;
           2) wait for completion by waiting for all buffers to unlock. */
        do {
                retry = 0;
repeat:
        /* We search all lists as a failsafe mechanism, not because we expect
           there to be dirty buffers on any of the other lists. */
                bh = lru_list[BUF_DIRTY];
                if (!bh)
                        goto repeat2;
                for (i = nr_buffers_type[BUF_DIRTY]*2 ; i-- > 0 ; bh = next) {
                        if (bh->b_list != BUF_DIRTY)
                                goto repeat;
                        next = bh->b_next_free;
                        if (!lru_list[BUF_DIRTY])
                                break;
                        if (dev && bh->b_dev != dev)
                                continue;
                        if (buffer_locked(bh)) {
                                /* Buffer is locked; skip it unless wait is
                                   requested AND pass > 0. */
                                if (!wait || !pass) {
                                        retry = 1;
                                        continue;
                                }
                                wait_on_buffer (bh);
                                goto repeat;
                        }

                        /* If an unlocked buffer is not uptodate, there has
                           been an IO error. Skip it. */
                        if (wait && buffer_req(bh) && !buffer_locked(bh) &&
                            !buffer_dirty(bh) && !buffer_uptodate(bh)) {
                                err = 1;
                                continue;
                        }

                        /* Don't write clean buffers.  Don't write ANY buffers
                           on the third pass. */
                        if (!buffer_dirty(bh) || pass >= 2)
                                continue;

                        /* don't bother about locked buffers */
                        if (buffer_locked(bh))
                                continue;

                        bh->b_count++;
                        next->b_count++;
                        bh->b_flushtime = 0;
                        ll_rw_block(WRITE, 1, &bh);
                        bh->b_count--;
                        next->b_count--;
                        retry = 1;
                }

repeat2:
                bh = lru_list[BUF_LOCKED];
                if (!bh)
                        break;
                for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) {
                        if (bh->b_list != BUF_LOCKED)
                                goto repeat2;
                        next = bh->b_next_free;
                        if (!lru_list[BUF_LOCKED])
                                break;
                        if (dev && bh->b_dev != dev)
                                continue;
                        if (buffer_locked(bh)) {
                                /* Buffer is locked; skip it unless wait is
                                   requested AND pass > 0. */
                                if (!wait || !pass) {
                                        retry = 1;
                                        continue;
                                }
                                wait_on_buffer (bh);
                                goto repeat2;
                        }
                }

        /* If we are waiting for the sync to succeed, and if any dirty
           blocks were written, then repeat; on the second pass, only
           wait for buffers being written (do not pass to write any
           more buffers on the second pass). */
        } while (wait && retry && ++pass<=2);
        return err;
}

void sync_dev(kdev_t dev)
{
        sync_buffers(dev, 0);
        sync_supers(dev);
        sync_inodes(dev);
        sync_buffers(dev, 0);
        sync_dquots(dev, -1);
}

int fsync_dev(kdev_t dev)
{
        sync_buffers(dev, 0);
        sync_supers(dev);
        sync_inodes(dev);
        sync_dquots(dev, -1);
        return sync_buffers(dev, 1);
}

asmlinkage int sys_sync(void)
{
        fsync_dev(0);
        return 0;
}

int file_fsync (struct inode *inode, struct file *filp)
{
        return fsync_dev(inode->i_dev);
}

asmlinkage int sys_fsync(unsigned int fd)
{
        struct file * file;
        struct inode * inode;

        if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
                return -EBADF;
        if (!file->f_op || !file->f_op->fsync)
                return -EINVAL;
        if (file->f_op->fsync(inode,file))
                return -EIO;
        return 0;
}

asmlinkage int sys_fdatasync(unsigned int fd)
{
        struct file * file;
        struct inode * inode;

        if (fd>=NR_OPEN || !(file=current->files->fd[fd]) || !(inode=file->f_inode))
                return -EBADF;
        if (!file->f_op || !file->f_op->fsync)
                return -EINVAL;
        /* this needs further work, at the moment it is identical to fsync() */
        if (file->f_op->fsync(inode,file))
                return -EIO;
        return 0;
}

void invalidate_buffers(kdev_t dev)
{
        int i;
        int nlist;
        struct buffer_head * bh;

        for(nlist = 0; nlist < NR_LIST; nlist++) {
                bh = lru_list[nlist];
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) {
                        if (bh->b_dev != dev)
                                continue;
                        wait_on_buffer(bh);
                        if (bh->b_dev != dev)
                                continue;
                        if (bh->b_count)
                                continue;
                        bh->b_flushtime = 0;
                        clear_bit(BH_Protected, &bh->b_state);
                        clear_bit(BH_Uptodate, &bh->b_state);
                        clear_bit(BH_Dirty, &bh->b_state);
                        clear_bit(BH_Req, &bh->b_state);
                }
        }
}

#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block))&HASH_MASK)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

static inline void remove_from_hash_queue(struct buffer_head * bh)
{
        if (bh->b_next)
                bh->b_next->b_prev = bh->b_prev;
        if (bh->b_prev)
                bh->b_prev->b_next = bh->b_next;
        if (hash(bh->b_dev,bh->b_blocknr) == bh)
                hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
        bh->b_next = bh->b_prev = NULL;
}

static inline void remove_from_lru_list(struct buffer_head * bh)
{
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: LRU block list corrupted");
        if (bh->b_dev == B_FREE)
                panic("LRU list corrupted");
        bh->b_prev_free->b_next_free = bh->b_next_free;
        bh->b_next_free->b_prev_free = bh->b_prev_free;

        if (lru_list[bh->b_list] == bh)
                lru_list[bh->b_list] = bh->b_next_free;
        if (lru_list[bh->b_list] == bh)
                lru_list[bh->b_list] = NULL;
        bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_free_list(struct buffer_head * bh)
{
        int isize = BUFSIZE_INDEX(bh->b_size);

        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("VFS: Free block list corrupted");
        if(bh->b_dev != B_FREE)
                panic("Free list corrupted");
        if(!free_list[isize])
                panic("Free list empty");
        if(bh->b_next_free == bh)
                free_list[isize] = NULL;
        else {
                bh->b_prev_free->b_next_free = bh->b_next_free;
                bh->b_next_free->b_prev_free = bh->b_prev_free;
                if (free_list[isize] == bh)
                        free_list[isize] = bh->b_next_free;
        }
        bh->b_next_free = bh->b_prev_free = NULL;
}

static inline void remove_from_queues(struct buffer_head * bh)
{
        if(bh->b_dev == B_FREE) {
                remove_from_free_list(bh); /* Free list entries should not be
                                              in the hash queue */
                return;
        }
        nr_buffers_type[bh->b_list]--;
        remove_from_hash_queue(bh);
        remove_from_lru_list(bh);
}

static inline void put_last_lru(struct buffer_head * bh)
{
        if (!bh)
                return;
        if (bh == lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh->b_next_free;
                return;
        }
        if(bh->b_dev == B_FREE)
                panic("Wrong block for lru list");
        remove_from_lru_list(bh);

        /* add to back of free list */
        if(!lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh;
                lru_list[bh->b_list]->b_prev_free = bh;
        }

        bh->b_next_free = lru_list[bh->b_list];
        bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
        lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
        lru_list[bh->b_list]->b_prev_free = bh;
}

static inline void put_last_free(struct buffer_head * bh)
{
        int isize;

        if (!bh)
                return;

        isize = BUFSIZE_INDEX(bh->b_size);
        bh->b_dev = B_FREE;  /* So it is obvious we are on the free list */

        /* add to back of free list */
        if(!free_list[isize]) {
                free_list[isize] = bh;
                bh->b_prev_free = bh;
        }

        bh->b_next_free = free_list[isize];
        bh->b_prev_free = free_list[isize]->b_prev_free;
        free_list[isize]->b_prev_free->b_next_free = bh;
        free_list[isize]->b_prev_free = bh;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
        /* put at end of free list */
        if(bh->b_dev == B_FREE) {
                put_last_free(bh);
                return;
        }

        if(!lru_list[bh->b_list]) {
                lru_list[bh->b_list] = bh;
                bh->b_prev_free = bh;
        }

        if (bh->b_next_free) panic("VFS: buffer LRU pointers corrupted");

        bh->b_next_free = lru_list[bh->b_list];
        bh->b_prev_free = lru_list[bh->b_list]->b_prev_free;
        lru_list[bh->b_list]->b_prev_free->b_next_free = bh;
        lru_list[bh->b_list]->b_prev_free = bh;
        nr_buffers_type[bh->b_list]++;

        /* put the buffer in new hash-queue if it has a device */
        bh->b_prev = NULL;
        bh->b_next = NULL;
        if (!(bh->b_dev))
                return;
        bh->b_next = hash(bh->b_dev,bh->b_blocknr);
        hash(bh->b_dev,bh->b_blocknr) = bh;
        if (bh->b_next)
                bh->b_next->b_prev = bh;
}
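
The buffersize_index table above encodes which free_list slot serves each buffer size: BUFSIZE_INDEX divides the size by 512 (the >>9 shift) and uses the quotient as an index, so only the five power-of-two sizes from 512 to 8192 bytes map to valid slots and everything else yields -1. A minimal user-space sketch of the same lookup (standalone, not kernel code):

/* Standalone sketch: shows how BUFSIZE_INDEX maps the five supported
 * buffer sizes onto free_list slots 0..4 via (size >> 9), i.e. size/512. */
#include <stdio.h>

static char buffersize_index[17] =
{-1,  0,  1, -1,  2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])

int main(void)
{
        int sizes[] = {512, 1024, 2048, 4096, 8192};
        int i;

        for (i = 0; i < 5; i++)
                printf("%5d bytes -> free_list[%d]\n",
                       sizes[i], BUFSIZE_INDEX(sizes[i]));
        return 0;
}

This prints slots 0 through 4 for the five sizes, which is why NR_SIZES is 5.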
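The hash(dev,block) macro pair above picks a hash bucket by XORing the device and block numbers and masking with HASH_MASK, which only works as a mask because NR_HASH comes out as a power of two. A standalone sketch of that bucket computation, assuming a 4 KB PAGE_SIZE, 32-bit pointers, and HASHDEV(dev) being the raw device number (assumptions here, since those definitions live in other kernel headers):

/* Standalone sketch of the buffer-hash bucket computation. With the
 * assumed sizes, NR_HASH = 4*4096/4 = 4096 buckets and HASH_MASK = 4095. */
#include <stdio.h>

#define HASH_PAGES 4
#define PAGE_SIZE  4096u
#define NR_HASH    (HASH_PAGES * PAGE_SIZE / sizeof(void *))
#define HASH_MASK  (NR_HASH - 1)

static unsigned hashfn(unsigned dev, unsigned block)
{
        /* XOR device and block, then mask down to a bucket index;
           masking replaces a modulo because NR_HASH is a power of 2. */
        return (unsigned) ((dev ^ block) & HASH_MASK);
}

int main(void)
{
        printf("buckets = %u\n", (unsigned) NR_HASH);
        printf("dev=0x0301 block=42 -> bucket %u\n", hashfn(0x0301, 42));
        return 0;
}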
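free_list and lru_list above are circular doubly-linked lists threaded through b_next_free/b_prev_free: the head pointer marks the front, and "adding to the back", as put_last_free does, means linking the node in just before the head. A self-contained sketch of that list discipline, with illustrative (non-kernel) names:

/* Standalone sketch of the circular doubly-linked list used for the
 * buffer free/LRU lists. put_last() appends by splicing before the head. */
#include <stdio.h>

struct node {
        int id;
        struct node *next, *prev;
};

static void put_last(struct node **head, struct node *n)
{
        if (!*head) {                 /* empty list: node points at itself */
                *head = n;
                n->next = n->prev = n;
                return;
        }
        n->next = *head;              /* splice in just before the head */
        n->prev = (*head)->prev;
        (*head)->prev->next = n;
        (*head)->prev = n;
}

int main(void)
{
        struct node a = {1}, b = {2}, c = {3}, *head = NULL, *p;

        put_last(&head, &a);
        put_last(&head, &b);
        put_last(&head, &c);
        p = head;
        do {                          /* walk once around: prints 1 2 3 */
                printf("%d ", p->id);
                p = p->next;
        } while (p != head);
        printf("\n");
        return 0;
}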
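As a usage note, sys_sync() and sys_fsync() above are the kernel side of the sync(2) and fsync(2) system calls: per the comment before sync_buffers(), sync() only schedules the flush and may return before the writes finish, while fsync() waits and reports failure (EBADF, EINVAL, or EIO). A minimal user-space sketch of that contract (the file name is illustrative):

/* User-space view of the syscalls implemented above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("example.dat", O_WRONLY | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "data\n", 5) != 5)
                perror("write");
        if (fsync(fd) != 0)   /* blocks until this file's buffers are written */
                perror("fsync");
        close(fd);
        sync();               /* kicks off a global flush; may return early */
        return 0;
}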
