
📄 buffer.c

📁 Source code for the lab textbook "Embedded System Design and Example Development", Experiment Set 2: multithreaded application programming, serial port programming, A/D interface lab, CAN bus communication lab, GPS communication lab, Linux kernel porting and compilation lab, IC card read/write lab, SD driver…
💻 C
📖 Page 1 of 5
        size_buffers_type[blist] -= bh->b_size;
    }
}

/* must be called with both the hash_table_lock and the lru_list_lock
   held */
static void __remove_from_queues(struct buffer_head *bh)
{
    __hash_unlink(bh);
    __remove_from_lru_list(bh);
}

static void remove_from_queues(struct buffer_head *bh)
{
    spin_lock(&lru_list_lock);
    write_lock(&hash_table_lock);
    __remove_from_queues(bh);
    write_unlock(&hash_table_lock);
    spin_unlock(&lru_list_lock);
}

struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
    struct buffer_head *bh, **p = &hash(dev, block);

    read_lock(&hash_table_lock);

    for (;;) {
        bh = *p;
        if (!bh)
            break;
        p = &bh->b_next;
        if (bh->b_blocknr != block)
            continue;
        if (bh->b_size != size)
            continue;
        if (bh->b_dev != dev)
            continue;
        get_bh(bh);
        break;
    }

    read_unlock(&hash_table_lock);
    return bh;
}

void buffer_insert_inode_queue(struct buffer_head *bh, struct inode *inode)
{
    spin_lock(&lru_list_lock);
    if (bh->b_inode)
        list_del(&bh->b_inode_buffers);
    bh->b_inode = inode;
    list_add(&bh->b_inode_buffers, &inode->i_dirty_buffers);
    spin_unlock(&lru_list_lock);
}

void buffer_insert_inode_data_queue(struct buffer_head *bh, struct inode *inode)
{
    spin_lock(&lru_list_lock);
    if (bh->b_inode)
        list_del(&bh->b_inode_buffers);
    bh->b_inode = inode;
    list_add(&bh->b_inode_buffers, &inode->i_dirty_data_buffers);
    spin_unlock(&lru_list_lock);
}

/* The caller must have the lru_list lock before calling the
   remove_inode_queue functions.  */
static void __remove_inode_queue(struct buffer_head *bh)
{
    bh->b_inode = NULL;
    list_del(&bh->b_inode_buffers);
}

static inline void remove_inode_queue(struct buffer_head *bh)
{
    if (bh->b_inode)
        __remove_inode_queue(bh);
}

int inode_has_buffers(struct inode *inode)
{
    int ret;

    spin_lock(&lru_list_lock);
    ret = !list_empty(&inode->i_dirty_buffers) || !list_empty(&inode->i_dirty_data_buffers);
    spin_unlock(&lru_list_lock);

    return ret;
}
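/*
 * Illustrative sketch (not from the original fs/buffer.c): how a caller
 * might use get_hash_table() above.  The returned buffer_head already has
 * its b_count raised by get_bh(), so the caller owns a reference and must
 * drop it again, e.g. with brelse() as the functions later in this file do.
 * The helper name below is hypothetical.
 */
static int block_is_cached(kdev_t dev, int block, int size)
{
    struct buffer_head *bh = get_hash_table(dev, block, size);

    if (!bh)
        return 0;       /* not present in the buffer cache */

    /* ... inspect bh->b_state, bh->b_data, etc. here if needed ... */

    brelse(bh);         /* drop the reference taken by get_hash_table() */
    return 1;
}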
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk, even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted;
   thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to destroy
   dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned buffer.
   NOTE: re-reading from disk is destructive, so we can do it only when we
   assume nobody is changing the buffercache under our I/O and when we think
   the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
    int i, nlist, slept;
    struct buffer_head * bh, * bh_next;
    kdev_t dev = to_kdev_t(bdev->bd_dev);  /* will become bdev */

 retry:
    slept = 0;
    spin_lock(&lru_list_lock);
    for (nlist = 0; nlist < NR_LIST; nlist++) {
        bh = lru_list[nlist];
        if (!bh)
            continue;
        for (i = nr_buffers_type[nlist]; i > 0; bh = bh_next, i--) {
            bh_next = bh->b_next_free;

            /* Another device? */
            if (bh->b_dev != dev)
                continue;
            /* Not hashed? */
            if (!bh->b_pprev)
                continue;
            if (buffer_locked(bh)) {
                get_bh(bh);
                spin_unlock(&lru_list_lock);
                wait_on_buffer(bh);
                slept = 1;
                spin_lock(&lru_list_lock);
                put_bh(bh);
            }

            write_lock(&hash_table_lock);
            /* All buffers in the lru lists are mapped */
            if (!buffer_mapped(bh))
                BUG();
            if (buffer_dirty(bh))
                printk("invalidate: dirty buffer\n");
            if (!atomic_read(&bh->b_count)) {
                if (destroy_dirty_buffers || !buffer_dirty(bh)) {
                    remove_inode_queue(bh);
                }
            } else
                printk("invalidate: busy buffer\n");

            write_unlock(&hash_table_lock);
            if (slept)
                goto out;
        }
    }
out:
    spin_unlock(&lru_list_lock);
    if (slept)
        goto retry;

    /* Get rid of the page cache */
    invalidate_inode_pages(bdev->bd_inode);
}

void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
{
    struct block_device *bdev = bdget(dev);
    if (bdev) {
        invalidate_bdev(bdev, destroy_dirty_buffers);
        bdput(bdev);
    }
}
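/*
 * Illustrative sketch (not from the original fs/buffer.c): the "normal
 * usage" described in the comment above -- first start writeback for the
 * device, then drop the clean, unused buffers while preserving dirty ones.
 * fsync_dev() is assumed to be the usual 2.4 helper that syncs a device;
 * the helper name drop_clean_buffers() is hypothetical.
 */
static void drop_clean_buffers(kdev_t dev)
{
    fsync_dev(dev);                 /* queue the dirty data for I/O     */
    __invalidate_buffers(dev, 0);   /* 0: dirty buffers are preserved   */
}

/*
 * For the two special cases mentioned in the comment (releasing a ramdisk,
 * or a removable medium pulled while stale dirty data was pending), the
 * dirty buffers really must be destroyed instead:
 *
 *     __invalidate_buffers(dev, 1);
 */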
static void free_more_memory(void)
{
    zone_t * zone = contig_page_data.node_zonelists[GFP_NOFS & GFP_ZONEMASK].zones[0];

    balance_dirty();
    wakeup_bdflush();
    try_to_free_pages(zone, GFP_NOFS, 0);
    run_task_queue(&tq_disk);
    current->policy |= SCHED_YIELD;
    __set_current_state(TASK_RUNNING);
    schedule();
}

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
    bh->b_list = BUF_CLEAN;
    bh->b_end_io = handler;
    bh->b_private = private;
}

static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
{
    static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
    unsigned long flags;
    struct buffer_head *tmp;
    struct page *page;

    mark_buffer_uptodate(bh, uptodate);

    /* This is a temporary buffer used for page I/O. */
    page = bh->b_page;

    if (!uptodate)
        SetPageError(page);

    /*
     * Be _very_ careful from here on. Bad things can happen if
     * two buffer heads end IO at almost the same time and both
     * decide that the page is now completely done.
     *
     * Async buffer_heads are here only as labels for IO, and get
     * thrown away once the IO for this page is complete.  IO is
     * deemed complete once all buffers have been visited
     * (b_count==0) and are now unlocked. We must make sure that
     * only the _last_ buffer that decrements its count is the one
     * that unlocks the page.
     */
    spin_lock_irqsave(&page_uptodate_lock, flags);
    mark_buffer_async(bh, 0);
    unlock_buffer(bh);
    tmp = bh->b_this_page;
    while (tmp != bh) {
        if (buffer_async(tmp) && buffer_locked(tmp))
            goto still_busy;
        tmp = tmp->b_this_page;
    }

    /* OK, the async IO on this page is complete. */
    spin_unlock_irqrestore(&page_uptodate_lock, flags);

    /*
     * if none of the buffers had errors then we can set the
     * page uptodate:
     */
    if (!PageError(page))
        SetPageUptodate(page);
    UnlockPage(page);
    return;

still_busy:
    spin_unlock_irqrestore(&page_uptodate_lock, flags);
    return;
}

inline void set_buffer_async_io(struct buffer_head *bh)
{
    bh->b_end_io = end_buffer_io_async;
    mark_buffer_async(bh, 1);
}

/*
 * Synchronise all the inode's dirty buffers to the disk.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
int fsync_inode_buffers(struct inode *inode)
{
    struct buffer_head *bh;
    struct inode tmp;
    int err = 0, err2;

    INIT_LIST_HEAD(&tmp.i_dirty_buffers);

    spin_lock(&lru_list_lock);

    while (!list_empty(&inode->i_dirty_buffers)) {
        bh = BH_ENTRY(inode->i_dirty_buffers.next);
        list_del(&bh->b_inode_buffers);
        if (!buffer_dirty(bh) && !buffer_locked(bh))
            bh->b_inode = NULL;
        else {
            bh->b_inode = &tmp;
            list_add(&bh->b_inode_buffers, &tmp.i_dirty_buffers);
            if (buffer_dirty(bh)) {
                get_bh(bh);
                spin_unlock(&lru_list_lock);
                ll_rw_block(WRITE, 1, &bh);
                brelse(bh);
                spin_lock(&lru_list_lock);
            }
        }
    }

    while (!list_empty(&tmp.i_dirty_buffers)) {
        bh = BH_ENTRY(tmp.i_dirty_buffers.prev);
        remove_inode_queue(bh);
        get_bh(bh);
        spin_unlock(&lru_list_lock);
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh))
            err = -EIO;
        brelse(bh);
        spin_lock(&lru_list_lock);
    }

    spin_unlock(&lru_list_lock);
    err2 = osync_inode_buffers(inode);

    if (err)
        return err;
    else
        return err2;
}

int fsync_inode_data_buffers(struct inode *inode)
{
    struct buffer_head *bh;
    struct inode tmp;
    int err = 0, err2;

    INIT_LIST_HEAD(&tmp.i_dirty_data_buffers);

    spin_lock(&lru_list_lock);

    while (!list_empty(&inode->i_dirty_data_buffers)) {
        bh = BH_ENTRY(inode->i_dirty_data_buffers.next);
        list_del(&bh->b_inode_buffers);
        if (!buffer_dirty(bh) && !buffer_locked(bh))
            bh->b_inode = NULL;
        else {
            bh->b_inode = &tmp;
            list_add(&bh->b_inode_buffers, &tmp.i_dirty_data_buffers);
            if (buffer_dirty(bh)) {
                get_bh(bh);
                spin_unlock(&lru_list_lock);
                ll_rw_block(WRITE, 1, &bh);
                brelse(bh);
                spin_lock(&lru_list_lock);
            }
        }
    }

    while (!list_empty(&tmp.i_dirty_data_buffers)) {
        bh = BH_ENTRY(tmp.i_dirty_data_buffers.prev);
        remove_inode_queue(bh);
        get_bh(bh);
        spin_unlock(&lru_list_lock);
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh))
            err = -EIO;
        brelse(bh);
        spin_lock(&lru_list_lock);
    }

    spin_unlock(&lru_list_lock);
    err2 = osync_inode_data_buffers(inode);

    if (err)
        return err;
    else
        return err2;
}
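/*
 * Illustrative sketch (not from the original fs/buffer.c): how a
 * filesystem's fsync method might drive the two helpers above.  Metadata
 * buffers live on i_dirty_buffers and data buffers on i_dirty_data_buffers;
 * each helper runs the two-stage "queue writes, then wait" algorithm
 * described in the comment before fsync_inode_buffers().  The function name
 * is hypothetical and the prototype is assumed to match the 2.4
 * file_operations fsync signature.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
    struct inode *inode = dentry->d_inode;
    int err, err2;

    err  = fsync_inode_buffers(inode);        /* metadata buffers */
    err2 = fsync_inode_data_buffers(inode);   /* data buffers     */

    return err ? err : err2;
}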
/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
int osync_inode_buffers(struct inode *inode)
{
    struct buffer_head *bh;
    struct list_head *list;
    int err = 0;

    spin_lock(&lru_list_lock);

 repeat:
    for (list = inode->i_dirty_buffers.prev;
         bh = BH_ENTRY(list), list != &inode->i_dirty_buffers;
         list = bh->b_inode_buffers.prev) {
        if (buffer_locked(bh)) {
            get_bh(bh);
            spin_unlock(&lru_list_lock);
            wait_on_buffer(bh);
            if (!buffer_uptodate(bh))
                err = -EIO;
            brelse(bh);
            spin_lock(&lru_list_lock);
            goto repeat;
        }
    }

    spin_unlock(&lru_list_lock);
    return err;
}

int osync_inode_data_buffers(struct inode *inode)
{
    struct buffer_head *bh;
    struct list_head *list;
    int err = 0;

    spin_lock(&lru_list_lock);

 repeat:
    for (list = inode->i_dirty_data_buffers.prev;
         bh = BH_ENTRY(list), list != &inode->i_dirty_data_buffers;
         list = bh->b_inode_buffers.prev) {
        if (buffer_locked(bh)) {
            get_bh(bh);
            spin_unlock(&lru_list_lock);
            wait_on_buffer(bh);
            if (!buffer_uptodate(bh))
                err = -EIO;
            brelse(bh);
            spin_lock(&lru_list_lock);
            goto repeat;
        }
    }

    spin_unlock(&lru_list_lock);
    return err;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 */
void invalidate_inode_buffers(struct inode *inode)
{
    struct list_head * entry;

    spin_lock(&lru_list_lock);
    while ((entry = inode->i_dirty_buffers.next) != &inode->i_dirty_buffers)
        remove_inode_queue(BH_ENTRY(entry));
    while ((entry = inode->i_dirty_data_buffers.next) != &inode->i_dirty_data_buffers)
        remove_inode_queue(BH_ENTRY(entry));
    spin_unlock(&lru_list_lock);
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 *
 * 14.02.92: changed it to sync dirty buffers a bit: better performance
 * when the filesystem starts to get full of dirty blocks (I hope).
 */
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
    for (;;) {
        struct buffer_head * bh;

        bh = get_hash_table(dev, block, size);
        if (bh)
            return bh;

        if (!grow_buffers(dev, block, size))
            free_more_memory();
    }
}

/* -1 -> no need to flush
    0 -> async flush
    1 -> sync flush (wait for I/O completion) */
static int balance_dirty_state(void)
{
    unsigned long dirty, tot, hard_dirty_limit, soft_dirty_limit;

    dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT;
    dirty += size_buffers_type[BUF_LOCKED] >> PAGE_SHIFT;
    tot = nr_free_buffer_pages();

    dirty *= 100;
    soft_dirty_limit = tot * bdf_prm.b_un.nfract;
    hard_dirty_limit = tot * bdf_prm.b_un.nfract_sync;

    /* First, check for the "real" dirty limit. */
    if (dirty > soft_dirty_limit) {
        if (dirty > hard_dirty_limit && !(current->flags & PF_NOIO))
            return 1;
        return 0;
    }

    return -1;
}

/*
 * if a new dirty buffer is created we need to balance bdflush.
 *
 * in the future we might want to make bdflush aware of different
 * pressures on different devices - thus the (currently unused)
 * 'dev' parameter.
 */
void balance_dirty(void)
{
    int state = balance_dirty_state();

    if (state < 0)
        return;
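/*
 * Illustrative sketch (not from the original fs/buffer.c): the O_SYNC
 * pattern described in the comment before osync_inode_buffers().  The write
 * is queued with ll_rw_block() as the buffer is dirtied, then
 * osync_inode_buffers() waits for the already-submitted I/O; buffers that
 * are dirtied but never queued are not flushed by the osync call.  The
 * helper name and the use of mark_buffer_dirty() are assumptions of this
 * sketch; buffer_insert_inode_queue(), ll_rw_block() and
 * osync_inode_buffers() are taken from the listing above.
 */
static int write_block_osync(struct inode *inode, struct buffer_head *bh)
{
    /* ... modify bh->b_data here ... */
    mark_buffer_dirty(bh);
    buffer_insert_inode_queue(bh, inode);   /* keep it on the inode's dirty list */

    ll_rw_block(WRITE, 1, &bh);             /* queue the write immediately       */

    return osync_inode_buffers(inode);      /* wait for the submitted I/O only   */
}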
