
filemap.c

From: ARM Embedded System Design and Example Development, Lab Textbook II source code
Language: C
Page 1 of 5
 * writepage function - and this has to be
 * worked around in the VM layer..
 *
 * We
 *  - mark the page dirty again (but do NOT
 *    add it back to the inode dirty list, as
 *    that would livelock in fdatasync)
 *  - activate the page so that the page stealer
 *    doesn't try to write it out over and over
 *    again.
 */
int fail_writepage(struct page *page)
{
	/* Only activate on memory-pressure, not fsync.. */
	if (PageLaunder(page)) {
		activate_page(page);
		SetPageReferenced(page);
	}

	/* Set the page dirty again, unlock */
	SetPageDirty(page);
	UnlockPage(page);
	return 0;
}

EXPORT_SYMBOL(fail_writepage);

/**
 *      filemap_fdatasync - walk the list of dirty pages of the given address space
 *      and writepage() all of them.
 *
 *      @mapping: address space structure to write
 *
 */
int filemap_fdatasync(struct address_space * mapping)
{
	int ret = 0;
	int (*writepage)(struct page *) = mapping->a_ops->writepage;

	spin_lock(&pagecache_lock);

	while (!list_empty(&mapping->dirty_pages)) {
		struct page *page = list_entry(mapping->dirty_pages.next, struct page, list);

		list_del(&page->list);
		list_add(&page->list, &mapping->locked_pages);

		if (!PageDirty(page))
			continue;

		page_cache_get(page);
		spin_unlock(&pagecache_lock);

		lock_page(page);

		if (PageDirty(page)) {
			int err;
			ClearPageDirty(page);
			err = writepage(page);
			if (err && !ret)
				ret = err;
		} else
			UnlockPage(page);

		page_cache_release(page);
		spin_lock(&pagecache_lock);
	}
	spin_unlock(&pagecache_lock);
	return ret;
}

/**
 *      filemap_fdatawait - walk the list of locked pages of the given address space
 *      and wait for all of them.
 *
 *      @mapping: address space structure to wait for
 *
 */
int filemap_fdatawait(struct address_space * mapping)
{
	int ret = 0;

	spin_lock(&pagecache_lock);

	while (!list_empty(&mapping->locked_pages)) {
		struct page *page = list_entry(mapping->locked_pages.next, struct page, list);

		list_del(&page->list);
		list_add(&page->list, &mapping->clean_pages);

		if (!PageLocked(page))
			continue;

		page_cache_get(page);
		spin_unlock(&pagecache_lock);

		___wait_on_page(page);
		if (PageError(page))
			ret = -EIO;

		page_cache_release(page);
		spin_lock(&pagecache_lock);
	}
	spin_unlock(&pagecache_lock);
	return ret;
}

/*
 * Add a page to the inode page cache.
 *
 * The caller must have locked the page and
 * set all the page flags correctly..
 */
void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
{
	if (!PageLocked(page))
		BUG();

	page->index = index;
	page_cache_get(page);
	spin_lock(&pagecache_lock);
	add_page_to_inode_queue(mapping, page);
	add_page_to_hash_queue(page, page_hash(mapping, index));
	spin_unlock(&pagecache_lock);

	lru_cache_add(page);
}
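/*
 * Example (editorial sketch, not part of filemap.c): filemap_fdatasync()
 * and filemap_fdatawait() above are meant to be used as a pair - the
 * first starts writeback on every dirty page and moves it to the locked
 * list, the second blocks until the I/O on each of those pages has
 * finished.  A simplified fsync-style caller might look like this
 * (error handling abbreviated; assumes a valid inode):
 */
static int example_sync_mapping(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	int ret, err;

	down(&inode->i_sem);			/* keep writers out while flushing */
	ret = filemap_fdatasync(mapping);	/* queue all dirty pages */
	err = filemap_fdatawait(mapping);	/* wait for the queued I/O */
	if (err && !ret)
		ret = err;
	up(&inode->i_sem);
	return ret;
}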
/*
 * This adds a page to the page cache, starting out as locked,
 * owned by us, but unreferenced, not uptodate and with no errors.
 */
static inline void __add_to_page_cache(struct page * page,
	struct address_space *mapping, unsigned long offset,
	struct page **hash)
{
	unsigned long flags;

	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked);
	page->flags = flags | (1 << PG_locked);
	page_cache_get(page);
	page->index = offset;
	add_page_to_inode_queue(mapping, page);
	add_page_to_hash_queue(page, hash);
}

void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
{
	spin_lock(&pagecache_lock);
	__add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
	spin_unlock(&pagecache_lock);
	lru_cache_add(page);
}

int add_to_page_cache_unique(struct page * page,
	struct address_space *mapping, unsigned long offset,
	struct page **hash)
{
	int err;
	struct page *alias;

	spin_lock(&pagecache_lock);
	alias = __find_page_nolock(mapping, offset, *hash);

	err = 1;
	if (!alias) {
		__add_to_page_cache(page,mapping,offset,hash);
		err = 0;
	}

	spin_unlock(&pagecache_lock);
	if (!err)
		lru_cache_add(page);
	return err;
}

/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	struct page **hash = page_hash(mapping, offset);
	struct page *page;

	spin_lock(&pagecache_lock);
	page = __find_page_nolock(mapping, offset, *hash);
	spin_unlock(&pagecache_lock);
	if (page)
		return 0;

	page = page_cache_alloc(mapping);
	if (!page)
		return -ENOMEM;

	if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
		int error = mapping->a_ops->readpage(file, page);
		page_cache_release(page);
		return error;
	}
	/*
	 * We arrive here in the unlikely event that someone
	 * raced with us and added our page to the cache first.
	 */
	page_cache_release(page);
	return 0;
}

/*
 * Read in an entire cluster at once.  A cluster is usually a 64k-
 * aligned block that includes the page requested in "offset."
 */
static int FASTCALL(read_cluster_nonblocking(struct file * file, unsigned long offset,
					     unsigned long filesize));
static int read_cluster_nonblocking(struct file * file, unsigned long offset,
	unsigned long filesize)
{
	unsigned long pages = CLUSTER_PAGES;

	offset = CLUSTER_OFFSET(offset);
	while ((pages-- > 0) && (offset < filesize)) {
		int error = page_cache_read(file, offset);
		if (error < 0)
			return error;
		offset ++;
	}

	return 0;
}

/*
 * Wait for a page to get unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
void ___wait_on_page(struct page *page)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue(&page->wait, &wait);
	do {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!PageLocked(page))
			break;
		sync_page(page);
		schedule();
	} while (PageLocked(page));
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&page->wait, &wait);
}

void unlock_page(struct page *page)
{
	clear_bit(PG_launder, &(page)->flags);
	smp_mb__before_clear_bit();
	if (!test_and_clear_bit(PG_locked, &(page)->flags))
		BUG();
	smp_mb__after_clear_bit();
	if (waitqueue_active(&(page)->wait))
		wake_up(&(page)->wait);
}
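/*
 * Example (editorial sketch, not part of filemap.c): page_cache_read()
 * above only *starts* the read - the readpage() I/O completion is what
 * eventually unlocks the page.  A caller that needs the data must look
 * the page up again, take a reference, and sleep on the lock bit before
 * testing PG_uptodate.  find_get_page() is the <linux/pagemap.h> wrapper
 * around __find_get_page(); error paths abbreviated.
 */
static struct page *example_read_page_sync(struct file *file, unsigned long offset)
{
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	struct page *page;

	if (page_cache_read(file, offset) < 0)	/* queue the read if needed */
		return NULL;

	page = find_get_page(mapping, offset);	/* we hold a reference now */
	if (!page)
		return NULL;

	wait_on_page(page);		/* safe: reference held across sleep */
	if (!Page_Uptodate(page)) {	/* the I/O failed */
		page_cache_release(page);
		return NULL;
	}
	return page;
}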
/*
 * Get a lock on the page, assuming we need to sleep
 * to get it..
 */
static void __lock_page(struct page *page)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue_exclusive(&page->wait, &wait);
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (PageLocked(page)) {
			sync_page(page);
			schedule();
		}
		if (!TryLockPage(page))
			break;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&page->wait, &wait);
}

/*
 * Get an exclusive lock on the page, optimistically
 * assuming it's not locked..
 */
void lock_page(struct page *page)
{
	if (TryLockPage(page))
		__lock_page(page);
}

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * __find_get_page(struct address_space *mapping,
			      unsigned long offset, struct page **hash)
{
	struct page *page;

	/*
	 * We scan the hash list read-only. Addition to and removal from
	 * the hash-list needs a held write-lock.
	 */
	spin_lock(&pagecache_lock);
	page = __find_page_nolock(mapping, offset, *hash);
	if (page)
		page_cache_get(page);
	spin_unlock(&pagecache_lock);
	return page;
}

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;
	struct page **hash = page_hash(mapping, offset);

	spin_lock(&pagecache_lock);
	page = __find_page_nolock(mapping, offset, *hash);
	if (page) {
		if (TryLockPage(page))
			page = NULL;
	}
	spin_unlock(&pagecache_lock);
	return page;
}

/*
 * Must be called with the pagecache lock held,
 * will return with it held (but it may be dropped
 * during blocking operations..
 */
static struct page * FASTCALL(__find_lock_page_helper(struct address_space *, unsigned long, struct page *));
static struct page * __find_lock_page_helper(struct address_space *mapping,
					unsigned long offset, struct page *hash)
{
	struct page *page;

	/*
	 * We scan the hash list read-only. Addition to and removal from
	 * the hash-list needs a held write-lock.
	 */
repeat:
	page = __find_page_nolock(mapping, offset, hash);
	if (page) {
		page_cache_get(page);
		if (TryLockPage(page)) {
			spin_unlock(&pagecache_lock);
			lock_page(page);
			spin_lock(&pagecache_lock);

			/* Has the page been re-allocated while we slept? */
			if (page->mapping != mapping || page->index != offset) {
				UnlockPage(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	return page;
}

/*
 * Same as the above, but lock the page too, verifying that
 * it's still valid once we own it.
 */
struct page * __find_lock_page (struct address_space *mapping,
				unsigned long offset, struct page **hash)
{
	struct page *page;

	spin_lock(&pagecache_lock);
	page = __find_lock_page_helper(mapping, offset, *hash);
	spin_unlock(&pagecache_lock);
	return page;
}

/*
 * Same as above, but create the page if required..
 */
struct page * find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask)
{
	struct page *page;
	struct page **hash = page_hash(mapping, index);

	spin_lock(&pagecache_lock);
	page = __find_lock_page_helper(mapping, index, *hash);
	spin_unlock(&pagecache_lock);
	if (!page) {
		struct page *newpage = alloc_page(gfp_mask);
		if (newpage) {
			spin_lock(&pagecache_lock);
			page = __find_lock_page_helper(mapping, index, *hash);
			if (likely(!page)) {
				page = newpage;
				__add_to_page_cache(page, mapping, index, hash);
				newpage = NULL;
			}
			spin_unlock(&pagecache_lock);
			if (newpage == NULL)
				lru_cache_add(page);
			else
				page_cache_release(newpage);
		}
	}
	return page;
}
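/*
 * Example (editorial sketch, not part of filemap.c): every lookup-and-lock
 * helper above follows the same idiom - take a reference, try the cheap
 * TryLockPage() first, fall back to the sleeping lock_page(), and after
 * sleeping re-check page->mapping and page->index, because the page may
 * have been truncated and reused while we slept.  Through the
 * find_lock_page() wrapper from <linux/pagemap.h>, a caller gets back a
 * page that is locked, referenced and still attached to the mapping:
 */
static int example_touch_cached_page(struct address_space *mapping,
				     unsigned long index)
{
	struct page *page = find_lock_page(mapping, index);

	if (!page)
		return -ENOENT;		/* not (or no longer) in the cache */

	/* page is locked and referenced: safe to inspect or modify here */
	SetPageReferenced(page);

	UnlockPage(page);
	page_cache_release(page);
	return 0;
}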
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping->gfp_mask);
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 */
struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page, **hash;

	hash = page_hash(mapping, index);
	page = __find_get_page(mapping, index, hash);

	if ( page ) {
		if ( !TryLockPage(page) ) {
			/* Page found and locked */
			/* This test is overly paranoid, but what the heck... */
			if ( unlikely(page->mapping != mapping || page->index != index) ) {
				/* Someone reallocated this page under us. */
				UnlockPage(page);
				page_cache_release(page);
				return NULL;
			} else {
				return page;
			}
		} else {
			/* Page locked by someone else */
			page_cache_release(page);
			return NULL;
		}
	}

	page = page_cache_alloc(mapping);
	if ( unlikely(!page) )
		return NULL;	/* Failed to allocate a page */

	if ( unlikely(add_to_page_cache_unique(page, mapping, index, hash)) ) {
		/* Someone else grabbed the page already. */
		page_cache_release(page);
		return NULL;
	}

	return page;
}

#if 0
#define PROFILE_READAHEAD
#define DEBUG_READAHEAD
#endif

/*
 * Read-ahead profiling information
 * --------------------------------
 * Every PROFILE_MAXREADCOUNT, the following information is written
 * to the syslog:
 *   Percentage of asynchronous read-ahead.
 *   Average of read-ahead fields context value.
 * If DEBUG_READAHEAD is defined, a snapshot of these fields is written
 * to the syslog.
 */
#ifdef PROFILE_READAHEAD

#define PROFILE_MAXREADCOUNT 1000

static unsigned long total_reada;
static unsigned long total_async;
static unsigned long total_ramax;
static unsigned long total_ralen;
static unsigned long total_rawin;

static void profile_readahead(int async, struct file *filp)
{
	unsigned long flags;

	++total_reada;
	if (async)
		++total_async;

	total_ramax	+= filp->f_ramax;
	total_ralen	+= filp->f_ralen;
	total_rawin	+= filp->f_rawin;

	if (total_reada > PROFILE_MAXREADCOUNT) {
		save_flags(flags);
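/*
 * Example (editorial sketch, not part of filemap.c): because
 * grab_cache_page_nowait() never sleeps, a speculative producer can try
 * to fill a page and simply give up when it is contended, regenerating
 * the data later.  "generate_into" is a hypothetical caller-supplied
 * callback that fills the locked page:
 */
static int example_speculative_fill(struct address_space *mapping,
				    unsigned long index,
				    void (*generate_into)(struct page *))
{
	struct page *page = grab_cache_page_nowait(mapping, index);

	if (!page)
		return -EAGAIN;		/* busy or out of memory: retry later */

	generate_into(page);		/* produce the data into the page */
	SetPageUptodate(page);

	UnlockPage(page);
	page_cache_release(page);
	return 0;
}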
