
buffer.c

Category: Embedded System Design and Example Development Lab Textbook II source code. Experiments include multithreaded application programming, serial port programming, A/D interface, CAN bus communication, GPS communication, Linux kernel porting and compilation, IC card read/write, SD driver…
Language: C
Page 1 of 5
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	unsigned long block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
	char *kaddr = kmap(page);

	blocksize = 1 << inode->i_blkbits;
	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);
	head = page->buffers;

	bbits = inode->i_blkbits;
	block = page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		if (!bh)
			BUG();
		block_end = block_start+blocksize;
		if (block_end <= from)
			continue;
		if (block_start >= to)
			break;
		clear_bit(BH_New, &bh->b_state);
		if (!buffer_mapped(bh)) {
			err = get_block(inode, block, bh, 1);
			if (err)
				goto out;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh);
				if (Page_Uptodate(page)) {
					set_bit(BH_Uptodate, &bh->b_state);
					continue;
				}
				if (block_end > to)
					memset(kaddr+to, 0, block_end-to);
				if (block_start < from)
					memset(kaddr+block_start, 0, from-block_start);
				if (block_end > to || block_start < from)
					flush_dcache_page(page);
				continue;
			}
		}
		if (Page_Uptodate(page)) {
			set_bit(BH_Uptodate, &bh->b_state);
			continue;
		}
		if (!buffer_uptodate(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			return -EIO;
	}
	return 0;
out:
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale
	 * data.  If BH_New is set, we know that the block was newly
	 * allocated in the above loop.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start+blocksize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;
		if (buffer_new(bh)) {
			if (buffer_uptodate(bh))
				printk(KERN_ERR "%s: zeroing uptodate buffer!\n", __FUNCTION__);
			memset(kaddr+block_start, 0, bh->b_size);
			set_bit(BH_Uptodate, &bh->b_state);
			mark_buffer_dirty(bh);
		}
next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
	return err;
}

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0, need_balance_dirty = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page->buffers, block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_bit(BH_Uptodate, &bh->b_state);
			if (!atomic_set_buffer_dirty(bh)) {
				__mark_dirty(bh);
				buffer_insert_inode_data_queue(bh, inode);
				need_balance_dirty = 1;
			}
		}
	}

	if (need_balance_dirty)
		balance_dirty();
	/*
	 * If this is a partial write that happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}
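/*
 * Illustrative sketch (not from buffer.c): how the two static helpers
 * above pair up for a write into one page.  Prepare maps/allocates the
 * blocks and reads in any partial edges; commit marks the written range
 * uptodate and dirty.  "my_get_block" is a hypothetical callback, and
 * the error path mirrors block_prepare_write() further down this file.
 */
#if 0
static int example_write(struct inode *inode, struct page *page,
			 unsigned from, unsigned to, const char *src)
{
	int err = __block_prepare_write(inode, page, from, to, my_get_block);
	if (err) {
		ClearPageUptodate(page);
		kunmap(page);	/* drop the kmap() taken by prepare */
		return err;
	}
	/* The page is still kmapped from the prepare step: copy new bytes. */
	memcpy(page_address(page) + from, src, to - from);
	flush_dcache_page(page);
	__block_commit_write(inode, page, from, to);
	kunmap(page);
	return 0;
}
#endif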
/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * mark_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	unsigned long iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize, blocks;
	int nr, i;

	if (!PageLocked(page))
		PAGE_BUG(page);
	blocksize = 1 << inode->i_blkbits;
	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);
	head = page->buffers;

	blocks = PAGE_CACHE_SIZE >> inode->i_blkbits;
	iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			if (iblock < lblock) {
				if (get_block(inode, iblock, bh, 0))
					continue;
			}
			if (!buffer_mapped(bh)) {
				memset(kmap(page) + i*blocksize, 0, blocksize);
				flush_dcache_page(page);
				kunmap(page);
				set_bit(BH_Uptodate, &bh->b_state);
				continue;
			}
			/* get_block() might have updated the buffer synchronously */
			if (buffer_uptodate(bh))
				continue;
		}

		arr[nr] = bh;
		nr++;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (!nr) {
		/*
		 * all buffers are uptodate - we can set the page
		 * uptodate as well.
		 */
		SetPageUptodate(page);
		UnlockPage(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		struct buffer_head * bh = arr[i];
		lock_buffer(bh);
		set_buffer_async_io(bh);
	}

	/* Stage 3: start the IO */
	for (i = 0; i < nr; i++)
		submit_bh(READ, arr[i]);

	return 0;
}

/*
 * utility function for filesystems that need to do work on expanding
 * truncates.  Uses prepare/commit_write to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index, offset, limit;
	int err;

	err = -EFBIG;
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	if (size > inode->i_sb->s_maxbytes)
		goto out;

	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */

	/* ugh.  in prepare/commit_write, if from==to==start of block, we
	** skip the prepare.  make sure we never send an offset for the start
	** of a block
	*/
	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
		offset++;
	}
	index = size >> PAGE_CACHE_SHIFT;
	err = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;
	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
	if (!err) {
		err = mapping->a_ops->commit_write(NULL, page, offset, offset);
	}
	UnlockPage(page);
	page_cache_release(page);
	if (err > 0)
		err = 0;
out:
	return err;
}
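/*
 * Illustrative sketch (not from buffer.c): a filesystem's readpage() is
 * typically a one-line wrapper handing its own block-mapping routine to
 * block_read_full_page() above, exactly as ext2 does in 2.4.
 * "myfs_get_block" is a hypothetical callback.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif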
/*
 * For moronic filesystems that do not allow holes in file.
 * We may have to extend the file.
 */
int cont_prepare_write(struct page *page, unsigned offset, unsigned to, get_block_t *get_block, unsigned long *bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct page *new_page;
	unsigned long pgpos;
	long status;
	unsigned zerofrom;
	unsigned blocksize = 1 << inode->i_blkbits;
	char *kaddr;

	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
		status = -ENOMEM;
		new_page = grab_cache_page(mapping, pgpos);
		if (!new_page)
			goto out;
		/* we might sleep */
		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
			UnlockPage(new_page);
			page_cache_release(new_page);
			continue;
		}
		zerofrom = *bytes & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		status = __block_prepare_write(inode, new_page, zerofrom,
						PAGE_CACHE_SIZE, get_block);
		if (status)
			goto out_unmap;
		kaddr = page_address(new_page);
		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
		flush_dcache_page(new_page);
		__block_commit_write(inode, new_page, zerofrom, PAGE_CACHE_SIZE);
		kunmap(new_page);
		UnlockPage(new_page);
		page_cache_release(new_page);
	}

	if (page->index < pgpos) {
		/* completely inside the area */
		zerofrom = offset;
	} else {
		/* page covers the boundary, find the boundary offset */
		zerofrom = *bytes & ~PAGE_CACHE_MASK;

		/* if we will expand the thing last block will be filled */
		if (to > zerofrom && (zerofrom & (blocksize-1))) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}

		/* starting below the boundary? Nothing to zero out */
		if (offset <= zerofrom)
			zerofrom = offset;
	}
	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
	if (status)
		goto out1;
	kaddr = page_address(page);
	if (zerofrom < offset) {
		memset(kaddr+zerofrom, 0, offset-zerofrom);
		flush_dcache_page(page);
		__block_commit_write(inode, page, zerofrom, offset);
	}
	return 0;
out1:
	ClearPageUptodate(page);
	kunmap(page);
	return status;

out_unmap:
	ClearPageUptodate(new_page);
	kunmap(new_page);
	UnlockPage(new_page);
	page_cache_release(new_page);
out:
	return status;
}

int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err) {
		ClearPageUptodate(page);
		kunmap(page);
	}
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	kunmap(page);
	return 0;
}

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode,page,from,to);
	kunmap(page);
	if (pos > inode->i_size) {
		inode->i_size = pos;
		mark_inode_dirty(inode);
	}
	return 0;
}
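/*
 * Illustrative sketch (not from buffer.c): the exported wrappers above are
 * designed to be plugged straight into struct address_space_operations,
 * mirroring how ext2 wires them up in 2.4.  The "myfs_*" names are
 * hypothetical placeholders for a filesystem's own routines.
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	readpage:	myfs_readpage,		/* block_read_full_page wrapper */
	writepage:	myfs_writepage,		/* block_write_full_page wrapper */
	sync_page:	block_sync_page,
	prepare_write:	myfs_prepare_write,
	commit_write:	generic_commit_write,
};
#endif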
int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);

	/* Find the buffer that contains "offset" */
	bh = page->buffers;
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		/* Hole? Nothing to do */
		if (buffer_uptodate(bh))
			goto unlock;
		get_block(inode, iblock, bh, 0);
		/* Still unmapped? Nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (Page_Uptodate(page))
		set_bit(BH_Uptodate, &bh->b_state);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	memset(kmap(page) + offset, 0, length);
	flush_dcache_page(page);
	kunmap(page);

	__mark_buffer_dirty(bh);
	err = 0;

unlock:
	UnlockPage(page);
	page_cache_release(page);
out:
	return err;
}

int block_write_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int err;

	/* easy case */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block);

	/* things got complicated... */
	offset = inode->i_size & (PAGE_CACHE_SIZE-1);
	/* OK, are we completely out? */
	if (page->index >= end_index+1 || !offset) {
		UnlockPage(page);
		return -EIO;
	}

	/* Sigh... will have to work, then... */
	err = __block_prepare_write(inode, page, 0, offset, get_block);
	if (!err) {
		memset(page_address(page) + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		__block_commit_write(inode,page,0,offset);
done:
		kunmap(page);
		UnlockPage(page);
		return err;
	}
	ClearPageUptodate(page);
	goto done;
}

/*
 * Commence writeout of all the buffers against a page.  The
 * page must be locked.   Returns zero on success or a negative
 * errno.
 */
int writeout_one_page(struct page *page)
{
	struct buffer_head *bh, *head = page->buffers;

	if (!PageLocked(page))
		BUG();
	bh = head;
	do {
		if (buffer_locked(bh) || !buffer_dirty(bh) || !buffer_uptodate(bh))
			continue;

		bh->b_flushtime = jiffies;
		ll_rw_block(WRITE, 1, &bh);
	} while ((bh = bh->b_this_page) != head);
	return 0;
}
EXPORT_SYMBOL(writeout_one_page);

/*
 * Wait for completion of I/O of all buffers against a page.  The page
 * must be locked.  Returns zero on success or a negative errno.
 */
int waitfor_one_page(struct page *page)
{
	int error = 0;
	struct buffer_head *bh, *head = page->buffers;

	bh = head;
	do {
		wait_on_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
			error = -EIO;
	} while ((bh = bh->b_this_page) != head);
	return error;
}
EXPORT_SYMBOL(waitfor_one_page);

int generic_block_bmap(struct address_space *mapping, long block, get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}

int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize, get_block_t * get_block)
{
	int i, nr_blocks, retval;
	unsigned long * blocks = iobuf->blocks;
	int length;
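/*
 * Illustrative sketch (not from buffer.c): generic_block_bmap() above is
 * meant to back a filesystem's aops bmap hook, and block_truncate_page()
 * is called from the filesystem's truncate path to zero the tail of the
 * last block, as ext2 does in 2.4.  "myfs_get_block" and "myfs_truncate"
 * are hypothetical names.
 */
#if 0
static int myfs_bmap(struct address_space *mapping, long block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}

static void myfs_truncate(struct inode *inode)
{
	/* Zero the partial block past the new EOF before freeing blocks. */
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... filesystem-specific block freeing follows ... */
}
#endif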
