
📄 vmscan.c

📁 ARM Embedded System Design and Example Development, lab textbook (II) source code
💻 C
📖 Page 1 / 2
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);
					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * This is the non-racy check for a busy page.
		 */
		if (!page->mapping || !is_page_cache_freeable(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped >= 0)
				continue;

			/*
			 * Alert! We've found too many mapped pages on the
			 * inactive list, so we start swapping out now!
			 */
			spin_unlock(&pagemap_lru_lock);
			swap_out(priority, gfp_mask, classzone);
			return nr_pages;
		}

		/*
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable, so not in use by anybody.
		 */
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		__lru_cache_del(page);
		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

	return nr_pages;
}

/*
 * This moves pages from the active list to
 * the inactive list.
 *
 * We move them the other way when we see the
 * reference bit on the page.
 */
static void refill_inactive(int nr_pages)
{
	struct list_head * entry;

	spin_lock(&pagemap_lru_lock);
	entry = active_list.prev;
	while (nr_pages && entry != &active_list) {
		struct page * page;

		page = list_entry(entry, struct page, lru);
		entry = entry->prev;
		if (PageTestandClearReferenced(page)) {
			list_del(&page->lru);
			list_add(&page->lru, &active_list);
			continue;
		}

		nr_pages--;

		del_page_from_active_list(page);
		add_page_to_inactive_list(page);
		SetPageReferenced(page);
	}
	spin_unlock(&pagemap_lru_lock);
}

static int FASTCALL(shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages));
static int shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages)
{
	int chunk_size = nr_pages;
	unsigned long ratio;

	nr_pages -= kmem_cache_reap(gfp_mask);
	if (nr_pages <= 0)
		return 0;

	nr_pages = chunk_size;
	/* try to keep the active list 2/3 of the size of the cache */
	ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
	refill_inactive(ratio);

	nr_pages = shrink_cache(nr_pages, classzone, gfp_mask, priority);
	if (nr_pages <= 0)
		return 0;

	shrink_dcache_memory(priority, gfp_mask);
	shrink_icache_memory(priority, gfp_mask);
#ifdef CONFIG_QUOTA
	shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
#endif

	return nr_pages;
}

int try_to_free_pages(zone_t *classzone, unsigned int gfp_mask, unsigned int order)
{
	int priority = DEF_PRIORITY;
	int nr_pages = SWAP_CLUSTER_MAX;

	gfp_mask = pf_gfp_mask(gfp_mask);
	do {
		nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages);
		if (nr_pages <= 0)
			return 1;
	} while (--priority);

	/*
	 * Hmm.. Cache shrink failed - time to kill something?
	 * Mhwahahhaha! This is the part I really like. Giggle.
	 */
#ifndef CONFIG_NO_OOM_KILLER
	out_of_memory();
#endif

	return 0;
}

DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);

static int check_classzone_need_balance(zone_t * classzone)
{
	zone_t * first_classzone;

	first_classzone = classzone->zone_pgdat->node_zones;
	while (classzone >= first_classzone) {
		if (classzone->free_pages > classzone->pages_high)
			return 0;
		classzone--;
	}
	return 1;
}

static int kswapd_balance_pgdat(pg_data_t * pgdat)
{
	int need_more_balance = 0, i;
	zone_t * zone;

	for (i = pgdat->nr_zones-1; i >= 0; i--) {
		zone = pgdat->node_zones + i;
		if (unlikely(current->need_resched))
			schedule();
		if (!zone->need_balance)
			continue;
		if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
			zone->need_balance = 0;
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			continue;
		}
		if (check_classzone_need_balance(zone))
			need_more_balance = 1;
		else
			zone->need_balance = 0;
	}

	return need_more_balance;
}

static void kswapd_balance(void)
{
	int need_more_balance;
	pg_data_t * pgdat;

	do {
		need_more_balance = 0;
		pgdat = pgdat_list;
		do
			need_more_balance |= kswapd_balance_pgdat(pgdat);
		while ((pgdat = pgdat->node_next));
	} while (need_more_balance);
}

static int kswapd_can_sleep_pgdat(pg_data_t * pgdat)
{
	zone_t * zone;
	int i;

	for (i = pgdat->nr_zones-1; i >= 0; i--) {
		zone = pgdat->node_zones + i;
		if (!zone->need_balance)
			continue;
		return 0;
	}

	return 1;
}

static int kswapd_can_sleep(void)
{
	pg_data_t * pgdat;

	pgdat = pgdat_list;
	do {
		if (kswapd_can_sleep_pgdat(pgdat))
			continue;
		return 0;
	} while ((pgdat = pgdat->node_next));

	return 1;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
int kswapd(void *unused)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	daemonize();
	strcpy(tsk->comm, "kswapd");
	sigfillset(&tsk->blocked);

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC;

	/*
	 * Kswapd main loop.
	 */
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&kswapd_wait, &wait);

		mb();
		if (kswapd_can_sleep())
			schedule();

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&kswapd_wait, &wait);

		/*
		 * If we actually get into a low-memory situation,
		 * the processes needing more memory will wake us
		 * up on a more timely basis.
		 */
		kswapd_balance();
		run_task_queue(&tq_disk);
	}
}

static int __init kswapd_init(void)
{
	printk("Starting kswapd\n");
	swap_setup();
	kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
	return 0;
}

module_init(kswapd_init)
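
The ratio that shrink_caches() passes to refill_inactive() implements the "keep the active list 2/3 of the size of the cache" comment: the larger the active list is relative to the inactive list, the more pages are deactivated per pass. Below is a minimal standalone sketch of that arithmetic, not part of vmscan.c; the counter values are invented purely for illustration, and only the formula is taken from the listing above.

/*
 * Standalone sketch of the refill heuristic used by shrink_caches().
 * The numbers are made up for illustration; in the kernel,
 * nr_active_pages and nr_inactive_pages are global page counters.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_active_pages = 6000;	/* hypothetical active-list size */
	unsigned long nr_inactive_pages = 2000;	/* hypothetical inactive-list size */
	int nr_pages = 32;			/* reclaim request size, e.g. SWAP_CLUSTER_MAX */

	/* Same formula as in shrink_caches(): the more the active list
	 * dominates the cache, the more pages get moved to the inactive
	 * list before shrink_cache() runs. */
	unsigned long ratio = (unsigned long) nr_pages * nr_active_pages /
			      ((nr_inactive_pages + 1) * 2);

	/* With the numbers above this prints refill_inactive(47). */
	printf("refill_inactive(%lu)\n", ratio);
	return 0;
}

At the intended balance point, where the active list is about twice the size of the inactive list, the ratio works out to roughly nr_pages, so the active list is trimmed at about the same rate as pages are reclaimed from the inactive list.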
