
write.c

Linux kernel source code
C
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}

		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= wb->last && count < 65536);

no_more:
	/* we now have a contiguous set of dirty pages, each with writeback set
	 * and the dirty mark cleared; the first page is locked and must remain
	 * so, all the rest are unlocked */
	first = primary_page->index;
	last = first + count - 1;

	offset = (first == wb->first) ? wb->offset_first : 0;
	to = (last == wb->last) ? wb->to_last : PAGE_SIZE;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_vnode_store_data(wb, first, last, offset, to);
	if (ret < 0) {
		switch (ret) {
		case -EDQUOT:
		case -ENOSPC:
			set_bit(AS_ENOSPC,
				&wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EROFS:
		case -EIO:
		case -EREMOTEIO:
		case -EFBIG:
		case -ENOENT:
		case -ENOMEDIUM:
		case -ENXIO:
			afs_kill_pages(wb->vnode, true, first, last);
			set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
			break;
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			afs_kill_pages(wb->vnode, false, first, last);
			break;
		default:
			break;
		}
	} else {
		ret = count;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = page->mapping->backing_dev_info;
	struct afs_writeback *wb;
	int ret;

	_enter("{%lx},", page->index);

	wb = (struct afs_writeback *) page_private(page);
	ASSERT(wb != NULL);

	ret = afs_write_back_from_locked_page(wb, page);
	unlock_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;
	if (wbc->nonblocking && bdi_write_congested(bdi))
		wbc->encountered_congestion = 1;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct afs_writeback *wb;
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				       1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		if (page->index > end) {
			*_next = index;
			page_cache_release(page);
			_leave(" = 0 [%lx]", *_next);
			return 0;
		}

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		lock_page(page);

		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) || !PageDirty(page)) {
			unlock_page(page);
			continue;
		}

		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);

		spin_lock(&wb->vnode->writeback_lock);
		wb->state = AFS_WBACK_WRITING;
		spin_unlock(&wb->vnode->writeback_lock);

		ret = afs_write_back_from_locked_page(wb, page);
		unlock_page(page);
		page_cache_release(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			break;
		}

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		_leave(" = 0 [congest]");
		return 0;
	}

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
		    !(wbc->nonblocking && wbc->encountered_congestion))
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write an inode back
 */
int afs_write_inode(struct inode *inode, int sync)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	int ret;

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	ret = 0;
	if (sync) {
		ret = filemap_fdatawait(inode->i_mapping);
		if (ret < 0)
			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct afs_writeback *wb = call->wb;
	struct pagevec pv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;
	bool free_wb;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	ASSERT(wb != NULL);

	pagevec_init(&pv, 0);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(call->mapping, first, count,
					      pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		spin_lock(&vnode->writeback_lock);
		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			end_page_writeback(page);
			if (page_private(page) == (unsigned long) wb) {
				set_page_private(page, 0);
				ClearPagePrivate(page);
				wb->usage--;
			}
		}
		free_wb = false;
		if (wb->usage == 0) {
			afs_unlink_writeback(wb);
			free_wb = true;
		}
		spin_unlock(&vnode->writeback_lock);

		first += count;
		if (free_wb) {
			afs_free_writeback(wb);
			wb = NULL;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, loff_t pos)
{
	struct dentry *dentry = iocb->ki_filp->f_path.dentry;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);
	int ret;

	_enter("{%x.%u},{%zu},%lu,",
	       vnode->fid.vid, vnode->fid.vnode, count, nr_segs);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (IS_ERR_VALUE(result)) {
		_leave(" = %zd", result);
		return result;
	}

	/* return error values for O_SYNC and IS_SYNC() */
	if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_SYNC) {
		ret = afs_fsync(iocb->ki_filp, dentry, 1);
		if (ret < 0)
			result = ret;
	}

	_leave(" = %zd", result);
	return result;
}

/*
 * flush the vnode to the fileserver
 */
int afs_writeback_all(struct afs_vnode *vnode)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct writeback_control wbc = {
		.bdi		= mapping->backing_dev_info,
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.for_writepages = 1,
		.range_cyclic	= 1,
	};
	int ret;

	_enter("");

	ret = mapping->a_ops->writepages(mapping, &wbc);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	_leave(" = %d", ret);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct afs_writeback *wb, *xwb;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	int ret;

	_enter("{%x:%u},{n=%s},%d",
	       vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
	       datasync);

	/* use a writeback record as a marker in the queue - when this reaches
	 * the front of the queue, all the outstanding writes are either
	 * completed or rejected */
	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	if (!wb)
		return -ENOMEM;
	wb->vnode = vnode;
	wb->first = 0;
	wb->last = -1;
	wb->offset_first = 0;
	wb->to_last = PAGE_SIZE;
	wb->usage = 1;
	wb->state = AFS_WBACK_SYNCING;
	init_waitqueue_head(&wb->waitq);

	spin_lock(&vnode->writeback_lock);
	list_for_each_entry(xwb, &vnode->writebacks, link) {
		if (xwb->state == AFS_WBACK_PENDING)
			xwb->state = AFS_WBACK_CONFLICTING;
	}
	list_add_tail(&wb->link, &vnode->writebacks);
	spin_unlock(&vnode->writeback_lock);

	/* push all the outstanding writebacks to the server */
	ret = afs_writeback_all(vnode);
	if (ret < 0) {
		afs_put_writeback(wb);
		_leave(" = %d [wb]", ret);
		return ret;
	}

	/* wait for the preceding writes to actually complete */
	ret = wait_event_interruptible(wb->waitq,
				       wb->state == AFS_WBACK_COMPLETE ||
				       vnode->writebacks.next == &wb->link);
	afs_put_writeback(wb);

	_leave(" = %d", ret);
	return ret;
}
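
The functions above are not called directly: the VFS reaches afs_writepage(), afs_writepages(), afs_file_write() and afs_fsync() through the AFS operation tables defined elsewhere in the filesystem (fs/afs/file.c and fs/afs/internal.h in mainline, which are not part of this listing). The sketch below shows roughly how that wiring would look for a kernel of this generation; it is an illustration under that assumption, not the exact upstream tables, and the afs_readpage/afs_releasepage/afs_invalidatepage helpers it names live in fs/afs/file.c.

	/* Illustrative wiring only: the authoritative tables are in fs/afs/file.c.
	 * Members not listed default to NULL and fall back to generic VFS behaviour. */
	const struct address_space_operations afs_fs_aops = {
		.readpage	= afs_readpage,			/* fs/afs/file.c */
		.set_page_dirty	= __set_page_dirty_nobuffers,	/* generic helper */
		.releasepage	= afs_releasepage,		/* fs/afs/file.c */
		.invalidatepage	= afs_invalidatepage,		/* fs/afs/file.c */
		.writepage	= afs_writepage,		/* this listing */
		.writepages	= afs_writepages,		/* this listing */
	};

	const struct file_operations afs_file_operations = {
		.read		= do_sync_read,
		.write		= do_sync_write,
		.aio_read	= generic_file_aio_read,
		.aio_write	= afs_file_write,		/* this listing */
		.fsync		= afs_fsync,			/* this listing */
	};

With tables like these in place, a plain write(2) on an AFS file goes through do_sync_write() into afs_file_write(), dirty pages are later pushed out via afs_writepages(), and fsync(2) lands in afs_fsync(), which queues a marker writeback record and waits for the preceding writes to complete.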
