
📄 buffer.c

📁 OSKIT source code for component-based operating system development
💻 C
📖 Page 1 of 4
 *
 * Note that we don't do a wakeup here, but return a flag indicating
 * whether we got any buffer heads. A task ready to sleep can check
 * the returned value, and any tasks already sleeping will have been
 * awakened when the buffer heads were added to the reuse list.
 */
static inline int recover_reusable_buffer_heads(void)
{
	struct buffer_head *head = xchg(&reuse_list, NULL);
	int found = 0;
	
	if (head) {
		do {
			struct buffer_head *bh = head;
			head = head->b_next_free;
			put_unused_buffer_head(bh);
		} while (head);
		found = 1;
	}
	return found;
}

/*
 * Reserve NR_RESERVED buffer heads for async IO requests to avoid
 * no-buffer-head deadlock.  Return NULL on failure; waiting for
 * buffer heads is now handled in create_buffers().
 */ 
static struct buffer_head * get_unused_buffer_head(int async)
{
	struct buffer_head * bh;

	recover_reusable_buffer_heads();
	if (nr_unused_buffer_heads > NR_RESERVED) {
		bh = unused_list;
		unused_list = bh->b_next_free;
		nr_unused_buffer_heads--;
		return bh;
	}

	/* This is critical.  We can't swap out pages to get
	 * more buffer heads, because the swap-out may need
	 * more buffer-heads itself.  Thus SLAB_BUFFER.
	 */
	if((bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER)) != NULL) {
		memset(bh, 0, sizeof(*bh));
		nr_buffer_heads++;
		return bh;
	}

	/*
	 * If we need an async buffer, use the reserved buffer heads.
	 */
	if (async && unused_list) {
		bh = unused_list;
		unused_list = bh->b_next_free;
		nr_unused_buffer_heads--;
		return bh;
	}

#if 0
	/*
	 * (Pending further analysis ...)
	 * Ordinary (non-async) requests can use a different memory priority
	 * to free up pages. Any swapping thus generated will use async
	 * buffer heads.
	 */
	if(!async &&
	   (bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
		memset(bh, 0, sizeof(*bh));
		nr_buffer_heads++;
		return bh;
	}
#endif

	return NULL;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 * The async flag is used to differentiate async IO (paging, swapping)
 * from ordinary buffer allocations, and only async requests are allowed
 * to sleep waiting for buffer heads.
 */
static struct buffer_head * create_buffers(unsigned long page,
						unsigned long size, int async)
{
	struct wait_queue wait = { current, NULL };
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = get_unused_buffer_head(async);
		if (!bh)
			goto no_grow;

		bh->b_dev = B_FREE;  /* Flag as unused */
		bh->b_this_page = head;
		head = bh;

		bh->b_state = 0;
		bh->b_next_free = NULL;
		bh->b_count = 0;
		bh->b_size = size;

		bh->b_data = (char *) (page+offset);
		bh->b_list = 0;
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			put_unused_buffer_head(bh);
		} while (head);

		/* Wake up any waiters ... */
		wake_up(&buffer_wait);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!async)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	run_task_queue(&tq_disk);

	/*
	 * Set our state for sleeping, then check again for buffer heads.
	 * This ensures we won't miss a wake_up from an interrupt.
	 */
	add_wait_queue(&buffer_wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	if (!recover_reusable_buffer_heads())
		schedule();
	remove_wait_queue(&buffer_wait, &wait);
	current->state = TASK_RUNNING;
	goto try_again;
}

#ifndef OSKIT
/* Run the hooks that have to be done when a page I/O has completed. */
static inline void after_unlock_page (struct page * page)
{
	if (test_and_clear_bit(PG_decr_after, &page->flags)) {
		atomic_dec(&nr_async_pages);
#ifdef DEBUG_SWAP
		printk ("DebugVM: Finished IO on page %p, nr_async_pages %d\n",
			(char *) page_address(page),
			atomic_read(&nr_async_pages));
#endif
	}
	if (test_and_clear_bit(PG_swap_unlock_after, &page->flags))
		swap_after_unlock_page(page->offset);
	if (test_and_clear_bit(PG_free_after, &page->flags))
		__free_page(page);
}
#endif /* OSKIT */

/*
 * Free all temporary buffers belonging to a page.
 * This needs to be called with interrupts disabled.
 */
static inline void free_async_buffers (struct buffer_head * bh)
{
	struct buffer_head *tmp, *tail;

	/*
	 * Link all the buffers into the b_next_free list,
	 * so we only have to do one xchg() operation ...
	 */
	tail = bh;
	while ((tmp = tail->b_this_page) != bh) {
		tail->b_next_free = tmp;
		tail = tmp;
	};

	/* Update the reuse list */
	tail->b_next_free = xchg(&reuse_list, NULL);
	reuse_list = bh;

	/* Wake up any waiters ... */
	wake_up(&buffer_wait);
}

#ifndef OSKIT
static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *tmp;
	struct page *page;

	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);

	/* This is a temporary buffer used for page I/O. */
	page = mem_map + MAP_NR(bh->b_data);
	if (!PageLocked(page))
		goto not_locked;
	if (bh->b_count != 1)
		goto bad_count;

	if (!test_bit(BH_Uptodate, &bh->b_state))
		set_bit(PG_error, &page->flags);

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 *
	 * Async buffer_heads are here only as labels for IO, and get
	 * thrown away once the IO for this page is complete.  IO is
	 * deemed complete once all buffers have been visited
	 * (b_count==0) and are now unlocked. We must make sure that
	 * only the _last_ buffer that decrements its count is the one
	 * that free's the page..
	 */
	save_flags(flags);
	cli();
	bh->b_count--;
	tmp = bh;
	do {
		if (tmp->b_count)
			goto still_busy;
		tmp = tmp->b_this_page;
	} while (tmp != bh);

	/* OK, the async IO on this page is complete. */
	free_async_buffers(bh);
	restore_flags(flags);
	clear_bit(PG_locked, &page->flags);
	wake_up(&page->wait);
	after_unlock_page(page);
	return;

still_busy:
	restore_flags(flags);
	return;

not_locked:
	printk ("Whoops: end_buffer_io_async: async io complete on unlocked page\n");
	return;

bad_count:
	printk ("Whoops: end_buffer_io_async: b_count != 1 on async io.\n");
	return;
}

/*
 * Start I/O on a page.
 * This function expects the page to be locked and may return before I/O is complete.
 * You then have to check page->locked, page->uptodate, and maybe wait on page->wait.
 */
int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
{
	struct buffer_head *bh, *prev, *next, *arr[MAX_BUF_PER_PAGE];
	int block, nr;

	if (!PageLocked(page))
		panic("brw_page: page not locked for I/O");
	clear_bit(PG_uptodate, &page->flags);
	clear_bit(PG_error, &page->flags);

	/*
	 * Allocate async buffer heads pointing to this page, just for I/O.
	 * They do _not_ show up in the buffer hash table!
	 * They are _not_ registered in page->buffers either!
	 */
	bh = create_buffers(page_address(page), size, 1);
	if (!bh) {
		/* WSH: exit here leaves page->count incremented */
		clear_bit(PG_locked, &page->flags);
		wake_up(&page->wait);
		return -ENOMEM;
	}
	nr = 0;
	next = bh;
	do {
		struct buffer_head * tmp;
		block = *(b++);

		init_buffer(next, dev, block, end_buffer_io_async, NULL);
		set_bit(BH_Uptodate, &next->b_state);

		/*
		 * When we use bmap, we define block zero to represent
		 * a hole.  ll_rw_page, however, may legitimately
		 * access block zero, and we need to distinguish the
		 * two cases.
		 */
		if (bmap && !block) {
			memset(next->b_data, 0, size);
			next->b_count--;
			continue;
		}
		tmp = get_hash_table(dev, block, size);
		if (tmp) {
			if (!buffer_uptodate(tmp)) {
				if (rw == READ)
					ll_rw_block(READ, 1, &tmp);
				wait_on_buffer(tmp);
			}
			if (rw == READ)
				memcpy(next->b_data, tmp->b_data, size);
			else {
				memcpy(tmp->b_data, next->b_data, size);
				mark_buffer_dirty(tmp, 0);
			}
			brelse(tmp);
			next->b_count--;
			continue;
		}
		if (rw == READ)
			clear_bit(BH_Uptodate, &next->b_state);
		else
			set_bit(BH_Dirty, &next->b_state);
		arr[nr++] = next;
	} while (prev = next, (next = next->b_this_page) != NULL);
	prev->b_this_page = bh;
	
	if (nr) {
		ll_rw_block(rw, nr, arr);
		/* The rest of the work is done in mark_buffer_uptodate()
		 * and unlock_buffer(). */
	} else {
		unsigned long flags;
		clear_bit(PG_locked, &page->flags);
		set_bit(PG_uptodate, &page->flags);
		wake_up(&page->wait);
		save_flags(flags);
		cli();
		free_async_buffers(bh);
		restore_flags(flags);
		after_unlock_page(page);
	}
	++current->maj_flt;
	return 0;
}
#endif /* OSKIT */

/*
 * This is called by end_request() when I/O has completed.
 */
void mark_buffer_uptodate(struct buffer_head * bh, int on)
{
	if (on) {
		struct buffer_head *tmp = bh;
		set_bit(BH_Uptodate, &bh->b_state);
		/* If a page has buffers and all these buffers are uptodate,
		 * then the page is uptodate. */
		do {
			if (!test_bit(BH_Uptodate, &tmp->b_state))
				return;
			tmp=tmp->b_this_page;
		} while (tmp && tmp != bh);
		set_bit(PG_uptodate, &mem_map[MAP_NR(bh->b_data)].flags);
		return;
	}
	clear_bit(BH_Uptodate, &bh->b_state);
}

/*
 * Generic "readpage" function for block devices that have the normal
 * bmap functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * mark_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int generic_readpage(struct file * file, struct page * page)
{
#ifdef OSKIT
	panic("generic_readpage: should never be called");
	return 0;
#else
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	unsigned long block;
	int *p, nr[PAGE_SIZE/512];
	int i;

	atomic_inc(&page->count);
	set_bit(PG_locked, &page->flags);
	set_bit(PG_free_after, &page->flags);
	
	i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
	block = page->offset >> inode->i_sb->s_blocksize_bits;
	p = nr;
	do {
		*p = inode->i_op->bmap(inode, block);
		i--;
		block++;
		p++;
	} while (i > 0);

	/* IO start */
	brw_page(READ, page, inode->i_dev, nr, inode->i_sb->s_blocksize, 1);
	return 0;
#endif /* OSKIT */
}

/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
static int grow_buffers(int size)
{
	unsigned long page;
	struct buffer_head *bh, *tmp;
	struct buffer_head * insert_point;
	int isize;

	if ((size & 511) || (size > PAGE_SIZE)) {
		printk("VFS: grow_buffers: size = %d\n",size);
		return 0;
	}

	if (!(page = __get_free_page(GFP_BUFFER)))
		return 0;
	bh = create_buffers(page, size, 0);
	if (!bh) {
		free_page(page);
		return 0;
	}

	isize = BUFSIZE_INDEX(size);
	insert_point = free_list[isize];

	tmp = bh;
	while (1) {
		if (insert_point) {
			tmp->b_next_free = insert_point->b_next_free;
			tmp->b_prev_free = insert_point;
			insert_point->b_next_free->b_prev_free = tmp;
			insert_point->b_next_free = tmp;
		} else {
			tmp->b_prev_free = tmp;
			tmp->b_next_free = tmp;
		}
		insert_point = tmp;
		++nr_buffers;
		if (tmp->b_this_page)
			tmp = tmp->b_this_page;
		else
			break;
	}
	tmp->b_this_page = bh;
	free_list[isize] = bh;
	mem_map[MAP_NR(page)].buffers = bh;
	buffermem += PAGE_SIZE;
	return 1;
}

/*
 * Can the buffer be thrown out?
 */
#define BUFFER_BUSY_BITS	((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
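
The listing breaks off right after the BUFFER_BUSY_BITS definition (this is page 1 of 4). For orientation only: a mask like this is normally consumed by a "can this buffer be reclaimed?" test that also takes the reference count into account. The helper below is a minimal illustrative sketch of that check, not the actual continuation of the file; the name buffer_is_busy is assumed for illustration.

/*
 * Illustrative sketch only -- not part of this listing.  A buffer head
 * can be thrown out when nothing holds a reference to it (b_count == 0)
 * and none of the busy bits (dirty, locked, protected) are set in b_state.
 */
static inline int buffer_is_busy(struct buffer_head * bh)
{
	return bh->b_count || (bh->b_state & BUFFER_BUSY_BITS);
}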
