
📄 buffer.c

📁 elinux jffs initial version. Learn about the JFFS file system in detail!
💻 C
📖 Page 1 of 4
/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	if (!(bh = getblk(dev, block, size))) {
		printk("VFS: bread: impossible error\n");
		return NULL;
	}
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
 * Ok, breada can be used as bread, but additionally starts read-ahead
 * on the blocks that follow, bounded by the remaining file size and by
 * the device's read_ahead window.
 */
#define NBUF 16

struct buffer_head * breada(kdev_t dev, int block, int bufsize,
	unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0 || !(bh = getblk(dev,block,bufsize)))
		return NULL;

	index = BUFSIZE_INDEX(bh->b_size);

	if (buffer_uptodate(bh))
		return(bh);
	else ll_rw_block(READ, 1, &bh);

	blocks = (filesize - pos) >> (9+index);

	if (blocks < (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

/*	if (blocks) printk("breada (new) %d blocks\n",blocks); */

	bhlist[0] = bh;
	j = 1;
	for(i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		}
		else bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them */
	if (j>1)
		ll_rw_block(READA, (j-1), bhlist+1);
	for(i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

static void get_more_buffer_heads(void)
{
	struct wait_queue wait = { current, NULL };
	struct buffer_head * bh;

	while (!unused_list) {
		/*
		 * This is critical.  We can't swap out pages to get
		 * more buffer heads, because the swap-out may need
		 * more buffer-heads itself.  Thus GFP_ATOMIC.
		 *
		 * This is no longer true, it is GFP_BUFFER again, the
		 * swapping code now knows not to perform I/O when that
		 * GFP level is specified... -DaveM
		 */
		/* we now use kmalloc() here instead of gfp as we want
		   to be able to easily release buffer heads - they
		   took up quite a bit of memory (tridge) */
		bh = (struct buffer_head *) kmalloc(sizeof(*bh),GFP_BUFFER);
		if (bh) {
			put_unused_buffer_head(bh);
			nr_buffer_heads++;
			return;
		}

		/*
		 * Uhhuh. We're _really_ low on memory. Now we just
		 * wait for old buffer heads to become free due to
		 * finishing IO..
		 */
		run_task_queue(&tq_disk);

		/*
		 * Set our state for sleeping, then check again for buffer heads.
		 * This ensures we won't miss a wake_up from an interrupt.
		 */
		add_wait_queue(&buffer_wait, &wait);
		current->state = TASK_UNINTERRUPTIBLE;
		if (!unused_list && !reuse_list)
			schedule();
		recover_reusable_buffer_heads();
		remove_wait_queue(&buffer_wait, &wait);
		current->state = TASK_RUNNING;
	}
}

static struct buffer_head * get_unused_buffer_head(void)
{
	struct buffer_head * bh;

	recover_reusable_buffer_heads();
	get_more_buffer_heads();
	if (!unused_list)
		return NULL;
	bh = unused_list;
	unused_list = bh->b_next_free;
	nr_unused_buffer_heads--;
	return bh;
}
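/*
 * Illustrative sketch (not in the original source): the usual pattern
 * for the bread()/brelse() contract defined above. bread() returns a
 * buffer holding a reference (b_count raised by getblk()), or NULL if
 * the block was unreadable, so every successful call must be paired
 * with brelse(). The helper name below is hypothetical.
 */
static int example_copy_block(kdev_t dev, int block, int size, char * out)
{
	struct buffer_head * bh;

	bh = bread(dev, block, size);
	if (!bh)
		return -EIO;			/* unreadable block */
	memcpy(out, bh->b_data, size);		/* use the data while holding the ref */
	brelse(bh);				/* release; the buffer stays cached */
	return 0;
}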
/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
	struct buffer_head *bh, *head;
	long offset;

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = get_unused_buffer_head();
		if (!bh)
			goto no_grow;
		bh->b_dev = B_FREE;  /* Flag as unused */
		bh->b_this_page = head;
		head = bh;
		bh->b_state = 0;
		bh->b_next_free = NULL;
		bh->b_count = 0;
		bh->b_size = size;
		bh->b_data = (char *) (page+offset);
		bh->b_list = 0;
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	bh = head;
	while (bh) {
		head = bh;
		bh = bh->b_this_page;
		put_unused_buffer_head(head);
	}
	return NULL;
}

/* Run the hooks that have to be done when a page I/O has completed. */
static inline void after_unlock_page (struct page * page)
{
	if (clear_bit(PG_decr_after, &page->flags))
		atomic_dec(&nr_async_pages);
	if (clear_bit(PG_free_after, &page->flags))
		__free_page(page);
#ifndef NO_MM
	if (clear_bit(PG_swap_unlock_after, &page->flags))
		swap_after_unlock_page(page->swap_unlock_entry);
#endif
}

/*
 * Free all temporary buffers belonging to a page.
 * This needs to be called with interrupts disabled.
 */
static inline void free_async_buffers (struct buffer_head * bh)
{
	struct buffer_head * tmp;

	tmp = bh;
	do {
		if (!test_bit(BH_FreeOnIO, &tmp->b_state)) {
			printk ("Whoops: unlock_buffer: "
				"async IO mismatch on page.\n");
			return;
		}
		tmp->b_next_free = reuse_list;
		reuse_list = tmp;
		clear_bit(BH_FreeOnIO, &tmp->b_state);
		tmp = tmp->b_this_page;
	} while (tmp != bh);
}
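/*
 * Illustrative sketch (not in the original source): create_buffers()
 * above returns a NULL-terminated chain linked through b_this_page,
 * whose head covers page offset 0; brw_page() below closes that chain
 * into a ring with prev->b_this_page = bh. The hypothetical helper
 * here just walks the open chain.
 */
static int example_count_page_buffers(struct buffer_head * head)
{
	struct buffer_head * bh;
	int n = 0;

	for (bh = head; bh != NULL; bh = bh->b_this_page)
		n++;		/* one head per size-byte slice of the page */
	return n;		/* PAGE_SIZE/size for a fully populated page */
}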
/*
 * Start I/O on a page.
 * This function expects the page to be locked and may return before I/O is complete.
 * You then have to check page->locked, page->uptodate, and maybe wait on page->wait.
 */
int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
{
	struct buffer_head *bh, *prev, *next, *arr[MAX_BUF_PER_PAGE];
	int block, nr;

	if (!PageLocked(page))
		panic("brw_page: page not locked for I/O");
	clear_bit(PG_uptodate, &page->flags);
	clear_bit(PG_error, &page->flags);
	/*
	 * Allocate buffer heads pointing to this page, just for I/O.
	 * They do _not_ show up in the buffer hash table!
	 * They are _not_ registered in page->buffers either!
	 */
	bh = create_buffers(page_address(page), size);
	if (!bh) {
		clear_bit(PG_locked, &page->flags);
		wake_up(&page->wait);
		return -ENOMEM;
	}
	nr = 0;
	next = bh;
	do {
		struct buffer_head * tmp;
		block = *(b++);

		set_bit(BH_FreeOnIO, &next->b_state);
		next->b_list = BUF_CLEAN;
		next->b_dev = dev;
		next->b_blocknr = block;
		next->b_count = 1;
		next->b_flushtime = 0;
		set_bit(BH_Uptodate, &next->b_state);

		/*
		 * When we use bmap, we define block zero to represent
		 * a hole.  ll_rw_page, however, may legitimately
		 * access block zero, and we need to distinguish the
		 * two cases.
		 */
		if (bmap && !block) {
			memset(next->b_data, 0, size);
			next->b_count--;
			continue;
		}
		tmp = get_hash_table(dev, block, size);
		if (tmp) {
			if (!buffer_uptodate(tmp)) {
				if (rw == READ)
					ll_rw_block(READ, 1, &tmp);
				wait_on_buffer(tmp);
			}
			if (rw == READ)
				memcpy(next->b_data, tmp->b_data, size);
			else {
				memcpy(tmp->b_data, next->b_data, size);
				mark_buffer_dirty(tmp, 0);
			}
			brelse(tmp);
			next->b_count--;
			continue;
		}
		if (rw == READ)
			clear_bit(BH_Uptodate, &next->b_state);
		else
			set_bit(BH_Dirty, &next->b_state);
		arr[nr++] = next;
	} while (prev = next, (next = next->b_this_page) != NULL);
	prev->b_this_page = bh;

	if (nr) {
		ll_rw_block(rw, nr, arr);
		/* The rest of the work is done in mark_buffer_uptodate()
		 * and unlock_buffer(). */
	} else {
		unsigned long flags;
		save_flags(flags);
		cli();
		free_async_buffers(bh);
		restore_flags(flags);
		clear_bit(PG_locked, &page->flags);
		set_bit(PG_uptodate, &page->flags);
		wake_up(&page->wait);
		after_unlock_page(page);
		if (waitqueue_active(&buffer_wait))
			wake_up(&buffer_wait);
	}
	++current->maj_flt;
	return 0;
}

/*
 * This is called by end_request() when I/O has completed.
 */
void mark_buffer_uptodate(struct buffer_head * bh, int on)
{
	if (on) {
		struct buffer_head *tmp = bh;
		set_bit(BH_Uptodate, &bh->b_state);
		/* If a page has buffers and all these buffers are uptodate,
		 * then the page is uptodate. */
		do {
			if (!test_bit(BH_Uptodate, &tmp->b_state))
				return;
			tmp=tmp->b_this_page;
		} while (tmp && tmp != bh);
		set_bit(PG_uptodate, &mem_map[MAP_NR(bh->b_data)].flags);
		return;
	}
	clear_bit(BH_Uptodate, &bh->b_state);
}

/*
 * This is called by end_request() when I/O has completed.
 */
void unlock_buffer(struct buffer_head * bh)
{
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;

	clear_bit(BH_Lock, &bh->b_state);
	wake_up(&bh->b_wait);

	if (waitqueue_active(&buffer_wait))
		wake_up(&buffer_wait);

	if (!test_bit(BH_FreeOnIO, &bh->b_state))
		return;
	/* This is a temporary buffer used for page I/O. */
	page = mem_map + MAP_NR(bh->b_data);
	if (!PageLocked(page))
		goto not_locked;
	if (bh->b_count != 1)
		goto bad_count;

	if (!test_bit(BH_Uptodate, &bh->b_state))
		set_bit(PG_error, &page->flags);

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 *
	 * Async buffer_heads are here only as labels for IO, and get
	 * thrown away once the IO for this page is complete.  IO is
	 * deemed complete once all buffers have been visited
	 * (b_count==0) and are now unlocked. We must make sure that
	 * only the _last_ buffer that decrements its count is the one
	 * that free's the page..
	 */
	save_flags(flags);
	cli();
	bh->b_count--;
	tmp = bh;
	do {
		if (tmp->b_count)
			goto still_busy;
		tmp = tmp->b_this_page;
	} while (tmp != bh);

	/* OK, the async IO on this page is complete. */
	free_async_buffers(bh);
	restore_flags(flags);
	clear_bit(PG_locked, &page->flags);
	wake_up(&page->wait);
	after_unlock_page(page);
	wake_up(&buffer_wait);
	return;

still_busy:
	restore_flags(flags);
	return;

not_locked:
	printk ("Whoops: unlock_buffer: async io complete on unlocked page\n");
	return;

bad_count:
	printk ("Whoops: unlock_buffer: b_count != 1 on async io.\n");
	return;
}
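/*
 * Illustrative sketch (not in the original source): brw_page() may
 * return before the I/O has finished; the last unlock_buffer() clears
 * PG_locked and wakes page->wait. A synchronous caller can wait using
 * the same set-state-then-recheck pattern as get_more_buffer_heads()
 * above. The helper below is hypothetical.
 */
static int example_wait_on_page(struct page * page)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&page->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (PageLocked(page)) {
		schedule();
		/* re-arm before re-checking so no wake_up() is missed */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&page->wait, &wait);
	return test_bit(PG_error, &page->flags) ? -EIO : 0;
}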
/*
 * Generic "readpage" function for block devices that have the normal
 * bmap functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * mark_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int generic_readpage(struct inode * inode, struct page * page)
{
	unsigned long block;
	int *p, nr[PAGE_SIZE/512];
	int i;

	page->count++;
	set_bit(PG_locked, &page->flags);
	set_bit(PG_free_after, &page->flags);

	i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
	block = page->offset >> inode->i_sb->s_blocksize_bits;
	p = nr;
	do {
		*p = inode->i_op->bmap(inode, block);
		i--;
		block++;
		p++;
	} while (i > 0);

	/* IO start */
	brw_page(READ, page, inode->i_dev, nr, inode->i_sb->s_blocksize, 1);
	return 0;
}

/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
static int grow_buffers(int pri, int size)
{
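/*
 * Illustrative sketch (not in the original source): generic_readpage()
 * above is intended as a filesystem's readpage operation whenever the
 * filesystem provides a bmap() method; the caller is expected to have
 * set page->offset. generic_readpage() takes its own page reference and
 * sets PG_free_after, so that reference is dropped in
 * after_unlock_page() when the I/O completes. Hypothetical wrapper:
 */
static int example_readpage(struct inode * inode, struct page * page)
{
	if (!inode->i_op || !inode->i_op->bmap)
		return -EINVAL;	/* generic_readpage() needs bmap() */
	return generic_readpage(inode, page);
}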
