
📄 buffer.c

📁 Source code for the lab textbook "Embedded System Design and Example Development", volume 2: multithreaded application programming, serial port programming, A/D interface experiment, CAN bus communication experiment, GPS communication experiment, Linux kernel porting and build experiment, IC card read/write experiment, SD driver…
💻 C
📖 Page 1 of 5
	/* If we're getting into imbalance, start write-out */
	spin_lock(&lru_list_lock);
	write_some_buffers(NODEV);

	/*
	 * And if we're _really_ out of balance, wait for
	 * some of the dirty/locked buffers ourselves and
	 * start bdflush.
	 * This will throttle heavy writers.
	 */
	if (state > 0) {
		wait_for_some_buffers(NODEV);
		wakeup_bdflush();
	}
}

inline void __mark_dirty(struct buffer_head *bh)
{
	bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
	refile_buffer(bh);
}

/* atomic version, the user must call balance_dirty() by hand
   as soon as it become possible to block */
void __mark_buffer_dirty(struct buffer_head *bh)
{
	if (!atomic_set_buffer_dirty(bh))
		__mark_dirty(bh);
}

void mark_buffer_dirty(struct buffer_head *bh)
{
	if (!atomic_set_buffer_dirty(bh)) {
		__mark_dirty(bh);
		balance_dirty();
	}
}

void set_buffer_flushtime(struct buffer_head *bh)
{
	bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
}
EXPORT_SYMBOL(set_buffer_flushtime);

/*
 * A buffer may need to be moved from one buffer list to another
 * (e.g. in case it is not shared any more). Handle this.
 */
static void __refile_buffer(struct buffer_head *bh)
{
	int dispose = BUF_CLEAN;
	if (buffer_locked(bh))
		dispose = BUF_LOCKED;
	if (buffer_dirty(bh))
		dispose = BUF_DIRTY;
	if (dispose != bh->b_list) {
		__remove_from_lru_list(bh);
		bh->b_list = dispose;
		if (dispose == BUF_CLEAN)
			remove_inode_queue(bh);
		__insert_into_lru_list(bh, dispose);
	}
}

void refile_buffer(struct buffer_head *bh)
{
	spin_lock(&lru_list_lock);
	__refile_buffer(bh);
	spin_unlock(&lru_list_lock);
}

/*
 * Release a buffer head
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head * buf)
{
	mark_buffer_clean(buf);
	__brelse(buf);
}

/**
 *	bread() - reads a specified block and returns the bh
 *	@block: number of block
 *	@size: size (in bytes) to read
 *
 *	Reads a specified block, and returns buffer head that
 *	contains it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	bh = getblk(dev, block, size);
	touch_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
 * Note: the caller should wake up the buffer_wait list if needed.
 */
static void __put_unused_buffer_head(struct buffer_head * bh)
{
	if (bh->b_inode)
		BUG();
	if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
		kmem_cache_free(bh_cachep, bh);
	} else {
		bh->b_dev = B_FREE;
		bh->b_blocknr = -1;
		bh->b_this_page = NULL;

		nr_unused_buffer_heads++;
		bh->b_next_free = unused_list;
		unused_list = bh;
	}
}

void put_unused_buffer_head(struct buffer_head *bh)
{
	spin_lock(&unused_list_lock);
	__put_unused_buffer_head(bh);
	spin_unlock(&unused_list_lock);
}
EXPORT_SYMBOL(put_unused_buffer_head);

/*
 * Reserve NR_RESERVED buffer heads for async IO requests to avoid
 * no-buffer-head deadlock.  Return NULL on failure; waiting for
 * buffer heads is now handled in create_buffers().
 */
struct buffer_head * get_unused_buffer_head(int async)
{
	struct buffer_head * bh;

	spin_lock(&unused_list_lock);
	if (nr_unused_buffer_heads > NR_RESERVED) {
		bh = unused_list;
		unused_list = bh->b_next_free;
		nr_unused_buffer_heads--;
		spin_unlock(&unused_list_lock);
		return bh;
	}
	spin_unlock(&unused_list_lock);

	/* This is critical.  We can't call out to the FS
	 * to get more buffer heads, because the FS may need
	 * more buffer-heads itself.  Thus SLAB_NOFS.
	 */
	if((bh = kmem_cache_alloc(bh_cachep, SLAB_NOFS)) != NULL) {
		bh->b_blocknr = -1;
		bh->b_this_page = NULL;
		return bh;
	}

	/*
	 * If we need an async buffer, use the reserved buffer heads.
	 */
	if (async) {
		spin_lock(&unused_list_lock);
		if (unused_list) {
			bh = unused_list;
			unused_list = bh->b_next_free;
			nr_unused_buffer_heads--;
			spin_unlock(&unused_list_lock);
			return bh;
		}
		spin_unlock(&unused_list_lock);
	}
	return NULL;
}
EXPORT_SYMBOL(get_unused_buffer_head);

void set_bh_page (struct buffer_head *bh, struct page *page, unsigned long offset)
{
	bh->b_page = page;
	if (offset >= PAGE_SIZE)
		BUG();
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 * The async flag is used to differentiate async IO (paging, swapping)
 * from ordinary buffer allocations, and only async requests are allowed
 * to sleep waiting for buffer heads.
 */
static struct buffer_head * create_buffers(struct page * page, unsigned long size, int async)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = get_unused_buffer_head(async);
		if (!bh)
			goto no_grow;

		bh->b_dev = NODEV;
		bh->b_this_page = head;
		head = bh;

		bh->b_state = 0;
		bh->b_next_free = NULL;
		bh->b_pprev = NULL;
		atomic_set(&bh->b_count, 0);
		bh->b_size = size;

		set_bh_page(bh, page, offset);

		bh->b_list = BUF_CLEAN;
		bh->b_end_io = NULL;
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		spin_lock(&unused_list_lock);
		do {
			bh = head;
			head = head->b_this_page;
			__put_unused_buffer_head(bh);
		} while (head);
		spin_unlock(&unused_list_lock);

		/* Wake up any waiters ... */
		wake_up(&buffer_wait);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!async)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	run_task_queue(&tq_disk);
	free_more_memory();
	goto try_again;
}

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	if (buffer_mapped(bh)) {
		mark_buffer_clean(bh);
		lock_buffer(bh);
		clear_bit(BH_Uptodate, &bh->b_state);
		clear_bit(BH_Mapped, &bh->b_state);
		clear_bit(BH_Req, &bh->b_state);
		clear_bit(BH_New, &bh->b_state);
		remove_from_queues(bh);
		unlock_buffer(bh);
	}
}

/**
 * try_to_release_page - release old fs-specific metadata on a page
 *
 */
int try_to_release_page(struct page * page, int gfp_mask)
{
	if (!PageLocked(page))
		BUG();

	if (!page->mapping)
		goto try_to_free;
	if (!page->mapping->a_ops->releasepage)
		goto try_to_free;
	if (page->mapping->a_ops->releasepage(page, gfp_mask))
		goto try_to_free;
	/*
	 * We couldn't release buffer metadata; don't even bother trying
	 * to release buffers.
	 */
	return 0;
try_to_free:
	return try_to_free_buffers(page, gfp_mask);
}

/*
 * We don't have to release all buffers here, but
 * we have to be sure that no dirty buffer is left
 * and no IO is going on (no buffer is locked), because
 * we have truncated the file and are going to free the
 * blocks on-disk..
 */
int discard_bh_page(struct page *page, unsigned long offset, int drop_pagecache)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	if (!PageLocked(page))
		BUG();
	if (!page->buffers)
		return 1;

	head = page->buffers;
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully flushed?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * subtle. We release buffer-heads only if this is
	 * the 'final' flushpage. We have invalidated the get_block
	 * cached value unconditionally, so real IO is not
	 * possible anymore.
	 *
	 * If the free doesn't work out, the buffers can be
	 * left around - they just turn into anonymous buffers
	 * instead.
	 */
	if (!offset) {
		if (!try_to_release_page(page, 0))
			return 0;
	}

	return 1;
}

void create_empty_buffers(struct page *page, kdev_t dev, unsigned long blocksize)
{
	struct buffer_head *bh, *head, *tail;

	/* FIXME: create_buffers should fail if there's no enough memory */
	head = create_buffers(page, blocksize, 1);
	if (page->buffers)
		BUG();

	bh = head;
	do {
		bh->b_dev = dev;
		bh->b_blocknr = 0;
		bh->b_end_io = NULL;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	page->buffers = head;
	page_cache_get(page);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we will free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to used
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 */
static void unmap_underlying_metadata(struct buffer_head * bh)
{
	struct buffer_head *old_bh;

	old_bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size);
	if (old_bh) {
		mark_buffer_clean(old_bh);
		wait_on_buffer(old_bh);
		clear_bit(BH_Req, &old_bh->b_state);
		__brelse(old_bh);
	}
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * block_write_full_page() is SMP threaded - the kernel lock is not held.
 */
static int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block)
{
	int err, i;
	unsigned long block;
	struct buffer_head *bh, *head;
	int need_unlock;

	if (!PageLocked(page))
		BUG();

	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, 1 << inode->i_blkbits);
	head = page->buffers;

	block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	bh = head;
	i = 0;

	/* Stage 1: make sure we have all the buffers mapped! */
	do {
		/*
		 * If the buffer isn't up-to-date, we can't be sure
		 * that the buffer has been initialized with the proper
		 * block number information etc..
		 *
		 * Leave it to the low-level FS to make all those
		 * decisions (block #0 may actually be a valid block)
		 */
		if (!buffer_mapped(bh)) {
			err = get_block(inode, block, bh, 1);
			if (err)
				goto out;
			if (buffer_new(bh))
				unmap_underlying_metadata(bh);
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	/* Stage 2: lock the buffers, mark them clean */
	do {
		lock_buffer(bh);
		set_buffer_async_io(bh);
		set_bit(BH_Uptodate, &bh->b_state);
		clear_bit(BH_Dirty, &bh->b_state);
		bh = bh->b_this_page;
	} while (bh != head);

	/* Stage 3: submit the IO */
	do {
		struct buffer_head *next = bh->b_this_page;
		submit_bh(WRITE, bh);
		bh = next;
	} while (bh != head);

	/* Done - end_buffer_io_async will unlock */
	SetPageUptodate(page);
	return 0;

out:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 */
	ClearPageUptodate(page);
	bh = head;
	need_unlock = 1;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh)) {
			lock_buffer(bh);
			set_buffer_async_io(bh);
			need_unlock = 0;
		}
		bh = bh->b_this_page;
	} while (bh != head);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_mapped(bh)) {
			set_bit(BH_Uptodate, &bh->b_state);
			clear_bit(BH_Dirty, &bh->b_state);
			submit_bh(WRITE, bh);
		}
		bh = next;
	} while (bh != head);
	if (need_unlock)
		UnlockPage(page);
	return err;
}

static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	unsigned long block;
	int err = 0;
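For orientation, the sketch below shows how a filesystem might typically consume the buffer-cache API defined in this listing (bread(), mark_buffer_dirty(), brelse()). It is an illustrative fragment only, not part of buffer.c: the function name example_patch_block, the device, the block number, and the 1 KiB block size are made up for the example, under the assumption of a 2.4-era kernel build environment.

#include <linux/fs.h>       /* struct buffer_head, bread(), brelse(), mark_buffer_dirty() */
#include <linux/string.h>   /* memset() */
#include <linux/errno.h>    /* -EIO */

/* Hypothetical helper: read one 1 KiB block, patch its first bytes,
 * and let the buffer cache write it back later. */
static int example_patch_block(kdev_t dev, int blocknr)
{
	struct buffer_head *bh;

	/* bread() returns an up-to-date buffer, or NULL if the block
	 * could not be read. */
	bh = bread(dev, blocknr, 1024);
	if (!bh)
		return -EIO;

	/* Modify the cached block data ... */
	memset(bh->b_data, 0, 16);

	/* ... then mark the buffer dirty.  mark_buffer_dirty() also calls
	 * balance_dirty(), which may start write-out and throttle us. */
	mark_buffer_dirty(bh);

	/* Drop our reference; bdflush/kupdate will write the block out. */
	brelse(bh);
	return 0;
}

If the caller needed the data on stable storage before returning, it could instead submit the buffer itself with ll_rw_block(WRITE, 1, &bh) and then wait_on_buffer(bh), mirroring what bread() above does for reads.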
