
buffer.c

Page 1 of 5
        length = iobuf->length;
        nr_blocks = length / blocksize;
        /* build the blocklist */
        for (i = 0; i < nr_blocks; i++, blocknr++) {
                struct buffer_head bh;

                bh.b_state = 0;
                bh.b_dev = inode->i_dev;
                bh.b_size = blocksize;

                retval = get_block(inode, blocknr, &bh, rw == READ ? 0 : 1);
                if (retval) {
                        if (!i)
                                /* report error to userspace */
                                goto out;
                        else
                                /* do short I/O until 'i' */
                                break;
                }

                if (rw == READ) {
                        if (buffer_new(&bh))
                                BUG();
                        if (!buffer_mapped(&bh)) {
                                /* there was a hole in the filesystem */
                                blocks[i] = -1UL;
                                continue;
                        }
                } else {
                        if (buffer_new(&bh))
                                unmap_underlying_metadata(&bh);
                        if (!buffer_mapped(&bh))
                                BUG();
                }
                blocks[i] = bh.b_blocknr;
        }

        /* patch length to handle short I/O */
        iobuf->length = i * blocksize;
        retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, iobuf->blocks, blocksize);

        /* restore orig length */
        iobuf->length = length;
 out:
        return retval;
}

/*
 * IO completion routine for a buffer_head being used for kiobuf IO: we
 * can't dispatch the kiobuf callback until io_count reaches 0.
 */
static void end_buffer_io_kiobuf(struct buffer_head *bh, int uptodate)
{
        struct kiobuf *kiobuf;

        mark_buffer_uptodate(bh, uptodate);

        kiobuf = bh->b_private;
        unlock_buffer(bh);
        end_kio_request(kiobuf, uptodate);
}

/*
 * For brw_kiovec: submit a set of buffer_head temporary IOs and wait
 * for them to complete.  Clean up the buffer_heads afterwards.
 */
static int wait_kio(int rw, int nr, struct buffer_head *bh[], int size)
{
        int iosize, err;
        int i;
        struct buffer_head *tmp;

        iosize = 0;
        err = 0;

        for (i = nr; --i >= 0; ) {
                iosize += size;
                tmp = bh[i];
                if (buffer_locked(tmp)) {
                        wait_on_buffer(tmp);
                }

                if (!buffer_uptodate(tmp)) {
                        /* We are traversing bh'es in reverse order so
                         * clearing iosize on error calculates the
                         * amount of IO before the first error. */
                        iosize = 0;
                        err = -EIO;
                }
        }

        if (iosize)
                return iosize;
        return err;
}
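
The reverse loop in wait_kio() is easy to misread: it adds size for every submitted buffer, but clears the running total whenever a buffer is not up to date, so whatever survives the walk is exactly the amount of contiguous I/O completed before the first error. Below is a minimal user-space sketch of that accounting only; the function name and the done[] array are invented for the illustration and are not kernel APIs.

#include <stdio.h>

/* Mirror of wait_kio()'s loop: walk the blocks backwards, add `size`
 * for each one, and reset the running total whenever a block failed.
 * After the loop the total is the number of bytes completed before
 * the first failure. */
static int bytes_before_first_error(const int done[], int nr, int size)
{
        int iosize = 0;
        int i;

        for (i = nr; --i >= 0; ) {
                iosize += size;
                if (!done[i])
                        iosize = 0;     /* error: discard this block and everything after it */
        }
        return iosize;
}

int main(void)
{
        /* blocks 0 and 1 completed, block 2 failed, block 3 completed */
        int done[] = { 1, 1, 0, 1 };

        /* prints 1024: only the two blocks before the failure count */
        printf("%d\n", bytes_before_first_error(done, 4, 512));
        return 0;
}
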
/*
 * Start I/O on a physical range of kernel memory, defined by a vector
 * of kiobuf structs (much like a user-space iovec list).
 *
 * The kiobuf must already be locked for IO.  IO is submitted
 * asynchronously: you need to check page->locked, page->uptodate, and
 * maybe wait on page->wait.
 *
 * It is up to the caller to make sure that there are enough blocks
 * passed in to completely map the iobufs to disk.
 */
int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
               kdev_t dev, unsigned long b[], int size)
{
        int             err;
        int             length;
        int             transferred;
        int             i;
        int             bufind;
        int             pageind;
        int             bhind;
        int             offset;
        unsigned long   blocknr;
        struct kiobuf * iobuf = NULL;
        struct page *   map;
        struct buffer_head *tmp, **bhs = NULL;

        if (!nr)
                return 0;

        /*
         * First, do some alignment and validity checks
         */
        for (i = 0; i < nr; i++) {
                iobuf = iovec[i];
                if ((iobuf->offset & (size-1)) ||
                    (iobuf->length & (size-1)))
                        return -EINVAL;
                if (!iobuf->nr_pages)
                        panic("brw_kiovec: iobuf not initialised");
        }

        /*
         * OK to walk down the iovec doing page IO on each page we find.
         */
        bufind = bhind = transferred = err = 0;
        for (i = 0; i < nr; i++) {
                iobuf = iovec[i];
                offset = iobuf->offset;
                length = iobuf->length;
                iobuf->errno = 0;
                if (!bhs)
                        bhs = iobuf->bh;

                for (pageind = 0; pageind < iobuf->nr_pages; pageind++) {
                        map  = iobuf->maplist[pageind];
                        if (!map) {
                                err = -EFAULT;
                                goto finished;
                        }

                        while (length > 0) {
                                blocknr = b[bufind++];
                                if (blocknr == -1UL) {
                                        if (rw == READ) {
                                                /* there was a hole in the filesystem */
                                                memset(kmap(map) + offset, 0, size);
                                                flush_dcache_page(map);
                                                kunmap(map);

                                                transferred += size;
                                                goto skip_block;
                                        } else
                                                BUG();
                                }
                                tmp = bhs[bhind++];

                                tmp->b_size = size;
                                set_bh_page(tmp, map, offset);
                                tmp->b_this_page = tmp;

                                init_buffer(tmp, end_buffer_io_kiobuf, iobuf);
                                tmp->b_dev = dev;
                                tmp->b_blocknr = blocknr;
                                tmp->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);

                                if (rw == WRITE) {
                                        set_bit(BH_Uptodate, &tmp->b_state);
                                        clear_bit(BH_Dirty, &tmp->b_state);
                                } else
                                        set_bit(BH_Uptodate, &tmp->b_state);

                                atomic_inc(&iobuf->io_count);
                                submit_bh(rw, tmp);
                                /*
                                 * Wait for IO if we have got too much
                                 */
                                if (bhind >= KIO_MAX_SECTORS) {
                                        kiobuf_wait_for_io(iobuf); /* wake-one */
                                        err = wait_kio(rw, bhind, bhs, size);
                                        if (err >= 0)
                                                transferred += err;
                                        else
                                                goto finished;
                                        bhind = 0;
                                }

                        skip_block:
                                length -= size;
                                offset += size;

                                if (offset >= PAGE_SIZE) {
                                        offset = 0;
                                        break;
                                }

                        } /* End of block loop */
                } /* End of page loop */
        } /* End of iovec loop */

        /* Is there any IO still left to submit? */
        if (bhind) {
                kiobuf_wait_for_io(iobuf); /* wake-one */
                err = wait_kio(rw, bhind, bhs, size);
                if (err >= 0)
                        transferred += err;
                else
                        goto finished;
        }

 finished:
        if (transferred)
                return transferred;
        return err;
}
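
The nested loops in brw_kiovec() do the real work: for every page mapped in the kiobuf they carve out block-sized chunks, submit one temporary buffer_head per chunk, and wrap offset back to zero when it reaches the end of the page. The user-space sketch below reproduces only that chunking arithmetic so the page/offset bookkeeping can be followed in isolation; PAGE_SIZE and the sample numbers are assumptions chosen for the example, not values taken from the kernel.

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
        int offset = 1024;      /* like kiobuf->offset: start within the first page */
        int length = 10240;     /* like kiobuf->length: total bytes to transfer     */
        int size = 1024;        /* block size; must divide both offset and length   */
        int pageind = 0;
        int bufind = 0;

        /* Same structure as the page/block loops above: one buffer_head
         * per block, offset wrapping to 0 at each page boundary. */
        while (length > 0) {
                printf("block %2d -> page %d, offset %4d\n", bufind++, pageind, offset);
                length -= size;
                offset += size;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        pageind++;
                }
        }
        return 0;
}
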
/*
 * Start I/O on a page.
 * This function expects the page to be locked and may return
 * before I/O is complete. You then have to check page->locked,
 * page->uptodate, and maybe wait on page->wait.
 *
 * brw_page() is SMP-safe, although it's being called with the
 * kernel lock held - but the code is ready.
 *
 * FIXME: we need a swapper_inode->get_block function to remove
 *        some of the bmap kludges and interface ugliness here.
 */
int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size)
{
        struct buffer_head *head, *bh;

        if (!PageLocked(page))
                panic("brw_page: page not locked for I/O");

        if (!page->buffers)
                create_empty_buffers(page, dev, size);
        head = bh = page->buffers;

        /* Stage 1: lock all the buffers */
        do {
                lock_buffer(bh);
                bh->b_blocknr = *(b++);
                set_bit(BH_Mapped, &bh->b_state);
                set_buffer_async_io(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        /* Stage 2: start the IO */
        do {
                struct buffer_head *next = bh->b_this_page;
                submit_bh(rw, bh);
                bh = next;
        } while (bh != head);
        return 0;
}

int block_symlink(struct inode *inode, const char *symname, int len)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page = grab_cache_page(mapping, 0);
        int err = -ENOMEM;
        char *kaddr;

        if (!page)
                goto fail;
        err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
        if (err)
                goto fail_map;
        kaddr = page_address(page);
        memcpy(kaddr, symname, len-1);
        mapping->a_ops->commit_write(NULL, page, 0, len-1);
        /*
         * Notice that we are _not_ going to block here - end of page is
         * unmapped, so this will only try to map the rest of page, see
         * that it is unmapped (typically even will not look into inode -
         * ->i_size will be enough for everything) and zero it out.
         * OTOH it's obviously correct and should make the page up-to-date.
         */
        err = mapping->a_ops->readpage(NULL, page);
        wait_on_page(page);
        page_cache_release(page);
        if (err < 0)
                goto fail;
        mark_inode_dirty(inode);
        return 0;
fail_map:
        UnlockPage(page);
        page_cache_release(page);
fail:
        return err;
}

static inline void link_dev_buffers(struct page * page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        page->buffers = head;
        page_cache_get(page);
}

/*
 * Create the page-cache page that contains the requested block
 */
static struct page * grow_dev_page(struct block_device *bdev, unsigned long index, int size)
{
        struct page * page;
        struct buffer_head *bh;

        page = find_or_create_page(bdev->bd_inode->i_mapping, index, GFP_NOFS);
        if (!page)
                return NULL;

        if (!PageLocked(page))
                BUG();

        bh = page->buffers;
        if (bh) {
                if (bh->b_size == size)
                        return page;
                if (!try_to_free_buffers(page, GFP_NOFS))
                        goto failed;
        }

        bh = create_buffers(page, size, 0);
        if (!bh)
                goto failed;

        link_dev_buffers(page, bh);
        return page;

failed:
        UnlockPage(page);
        page_cache_release(page);
        return NULL;
}

static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size)
{
        struct buffer_head *head = page->buffers;
        struct buffer_head *bh = head;
        unsigned int uptodate;

        uptodate = 1 << BH_Mapped;
        if (Page_Uptodate(page))
                uptodate |= 1 << BH_Uptodate;

        write_lock(&hash_table_lock);
        do {
                if (!(bh->b_state & (1 << BH_Mapped))) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_dev = dev;
                        bh->b_blocknr = block;
                        bh->b_state = uptodate;
                }

                /* Insert the buffer into the hash lists if necessary */
                if (!bh->b_pprev)
                        __insert_into_hash_list(bh);

                block++;
                bh = bh->b_this_page;
        } while (bh != head);
        write_unlock(&hash_table_lock);
}
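
brw_page(), link_dev_buffers() and hash_page_buffers() all depend on the same structure: the buffer heads belonging to a page form a circular singly linked list threaded through b_this_page, and every traversal uses the do { ... } while (bh != head) idiom. The self-contained sketch below builds such a ring the way link_dev_buffers() does (turning a NULL-terminated chain into a circle) and walks it once; struct fake_bh is an invented stand-in for struct buffer_head, not a kernel type.

#include <stdio.h>

struct fake_bh {
        int blocknr;
        struct fake_bh *b_this_page;    /* next buffer on the same page */
};

int main(void)
{
        struct fake_bh bhs[4];
        struct fake_bh *head, *bh, *tail;
        int i;

        /* a create_buffers()-style NULL-terminated chain */
        for (i = 0; i < 4; i++) {
                bhs[i].blocknr = 100 + i;
                bhs[i].b_this_page = (i < 3) ? &bhs[i + 1] : NULL;
        }
        head = &bhs[0];

        /* link_dev_buffers(): find the tail, then close the chain into a ring */
        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;

        /* the do/while (bh != head) traversal used throughout buffer.c */
        bh = head;
        do {
                printf("buffer for block %d\n", bh->blocknr);
                bh = bh->b_this_page;
        } while (bh != head);

        return 0;
}
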
/*
 * Try to increase the number of buffers available: the size argument
 * is used to determine what kind of buffers we want.
 */
static int grow_buffers(kdev_t dev, unsigned long block, int size)
{
        struct page * page;
        struct block_device *bdev;
        unsigned long index;
        int sizebits;

        /* Size must be multiple of hard sectorsize */
        if (size & (get_hardsect_size(dev)-1))
                BUG();
        /* Size must be within 512 bytes and PAGE_SIZE */
        if (size < 512 || size > PAGE_SIZE)
                BUG();

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);

        index = block >> sizebits;
        block = index << sizebits;

        bdev = bdget(kdev_t_to_nr(dev));
        if (!bdev) {
                printk("No block device for %s\n", kdevname(dev));
                BUG();
        }

        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, index, size);

        /* This is "wrong" - talk to Al Viro */
        atomic_dec(&bdev->bd_count);
        if (!page)
                return 0;

        /* Hash in the buffers on the hash list */
        hash_page_buffers(page, dev, block, size);
        UnlockPage(page);
        page_cache_release(page);

        /* We hashed up this page, so increment buffermem */
        atomic_inc(&buffermem_pages);
        return 1;
}

static int sync_page_buffers(struct buffer_head *head)
{
        struct buffer_head * bh = head;
        int tryagain = 0;

        do {
                if (!buffer_dirty(bh) && !buffer_locked(bh))
                        continue;
                /* Don't start IO first time around.. */
                if (!test_and_set_bit(BH_Wait_IO, &bh->b_state))
                        continue;

                /* Second time through we start actively writing out.. */
                if (test_and_set_bit(BH_Lock, &bh->b_state)) {
                        if (!test_bit(BH_launder, &bh->b_state))
                                continue;
                        wait_on_buffer(bh);
                        tryagain = 1;
                        continue;
                }

                if (!atomic_set_buffer_clean(bh)) {
                        unlock_buffer(bh);
                        continue;
                }

                __mark_buffer_clean(bh);
                get_bh(bh);
                set_bit(BH_launder, &bh->b_state);
                bh->b_end_io = end_buffer_io_sync;
                submit_bh(WRITE, bh);
                tryagain = 0;
        } while ((bh = bh->b_this_page) != head);

        return tryagain;
}

/*
 * Can the buffer be thrown out?
 */
#define BUFFER_BUSY_BITS        ((1<<BH_Dirty) | (1<<BH_Lock))
#define buffer_busy(bh)         (atomic_read(&(bh)->b_count) | ((bh)->b_state & BUFFER_BUSY_BITS))

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and frees the page if so.
 *
 * Wake up bdflush() if this fails - if we're running low on memory due
 * to dirty buffers, we need to flush them out as quickly as possible.
 *
 * NOTE: There are quite a number of ways that threads of control can
 *       obtain a reference to a buffer head within a page.  So we must
 *       lock out all of these paths to cleanly toss the page.
 */
int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
{
        struct buffer_head * tmp, * bh = page->buffers;

cleaned_buffers_try_again:
        spin_lock(&lru_list_lock);
        write_lock(&hash_table_lock);
        tmp = bh;
        do {
                if (buffer_busy(tmp))
                        goto busy_buffer_page;
                tmp = tmp->b_this_page;
        } while (tmp != bh);

        spin_lock(&unused_list_lock);
        tmp = bh;

        /* if this buffer was hashed, this page counts as buffermem */
        if (bh->b_pprev)
                atomic_dec(&buffermem_pages);

        do {
                struct buffer_head * p = tmp;
                tmp = tmp->b_this_page;
                if (p->b_dev == B_FREE) BUG();
                remove_inode_queue(p);
                __remove_from_queues(p);
                __put_unused_buffer_head(p);
        } while (tmp != bh);
        spin_unlock(&unused_list_lock);

        /* Wake up anyone waiting for buffer heads */
        wake_up(&buffer_wait);

        /* And free the page */
        page->buffers = NULL;
        page_cache_release(page);
        write_unlock(&hash_table_lock);
        spin_unlock(&lru_list_lock);
        return 1;

busy_buffer_page:
        /* Uhhuh, start writeback so t
