
page_alloc.c

Source code of the latest and most stable Linux memory-management module
Language: C
Page 1 of 5

		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	free_page_mlock(page);
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;

	for (i = 0; i < (1 << order); ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}
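
The fragment at the top of this page (page_idx = combined_idx; order++;) is the tail of __free_one_page()'s buddy-coalescing loop: the freed block's buddy index is page_idx ^ (1 << order), and the merged block starts at the lower of the two indices. A minimal userspace sketch of just that arithmetic, assuming a hypothetical buddy_is_free() check and the default MAX_ORDER of 11 (not the kernel's own symbols):

#include <stdio.h>
#include <stdbool.h>

#define MAX_ORDER 11            /* hypothetical, mirrors the kernel default */

/* Hypothetical predicate: is the buddy block currently free at this order? */
static bool buddy_is_free(unsigned long idx, unsigned int order)
{
	(void)idx;
	(void)order;
	return true;            /* pretend every buddy is free, to show the walk */
}

/*
 * Illustration only: coalesce a just-freed block upward, the way
 * __free_one_page() does.  page_idx ^ (1 << order) is the buddy's index,
 * and the combined block starts at the lower of the two indices.
 */
static void coalesce(unsigned long page_idx, unsigned int order)
{
	while (order < MAX_ORDER - 1) {
		unsigned long buddy_idx = page_idx ^ (1UL << order);
		unsigned long combined_idx = page_idx & buddy_idx;

		if (!buddy_is_free(buddy_idx, order))
			break;

		printf("order %u: block %lu + buddy %lu -> block %lu\n",
		       order, page_idx, buddy_idx, combined_idx);
		page_idx = combined_idx;
		order++;
	}
	printf("final: block %lu placed on free list of order %u\n",
	       page_idx, order);
}

int main(void)
{
	coalesce(6, 0);         /* free a single page at index 6 */
	return 0;
}
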
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}

/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
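
__rmqueue_smallest() above scans upward for the first non-empty order, and expand() hands the unused halves back one order at a time. A minimal userspace sketch of that search-and-split bookkeeping, with a hypothetical nr_free[] array standing in for the real free_area lists:

#include <stdio.h>

#define MAX_ORDER 11            /* hypothetical, mirrors the kernel default */

/* Hypothetical free-block counters per order, standing in for free_area[] */
static unsigned int nr_free[MAX_ORDER] = { [9] = 1 };  /* one order-9 block */

/*
 * Illustration only: find the smallest populated order >= the request,
 * as __rmqueue_smallest() does, then give back the upper halves the way
 * expand() does until only a block of the requested order is left.
 */
static int alloc_order(unsigned int order)
{
	unsigned int current_order;

	for (current_order = order; current_order < MAX_ORDER; current_order++) {
		if (!nr_free[current_order])
			continue;

		nr_free[current_order]--;
		while (current_order > order) {
			current_order--;
			nr_free[current_order]++;   /* unused half goes back */
			printf("returned one order-%u block to the free list\n",
			       current_order);
		}
		printf("satisfied order-%u request\n", order);
		return 0;
	}
	return -1;                  /* nothing large enough was free */
}

int main(void)
{
	return alloc_order(3);      /* ask for a 2^3-page block */
}
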
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area *area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;

				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}
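
__rmqueue_fallback() above consults the fallbacks[] table, preferring the largest available block and skipping MIGRATE_RESERVE until nothing else is left. A simplified sketch of that table walk, using hypothetical type names and free counts rather than real zone state:

#include <stdio.h>

/* Hypothetical migrate types, mirroring the fallbacks[] table in spirit */
enum { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE, NR_TYPES };

static const int fallback_order[NR_TYPES][NR_TYPES - 1] = {
	[UNMOVABLE]   = { RECLAIMABLE, MOVABLE,   RESERVE },
	[RECLAIMABLE] = { UNMOVABLE,   MOVABLE,   RESERVE },
	[MOVABLE]     = { RECLAIMABLE, UNMOVABLE, RESERVE },
	[RESERVE]     = { RESERVE,     RESERVE,   RESERVE },
};

/* Hypothetical per-type free counts standing in for the zone free lists */
static int free_blocks[NR_TYPES] = { 0, 0, 5, 1 };

/*
 * Illustration only: pick the first fallback type that still has free
 * blocks, skipping RESERVE, and fall back to RESERVE as the last resort,
 * roughly the way __rmqueue_fallback() walks fallbacks[].
 */
static int pick_type(int wanted)
{
	int i;

	if (free_blocks[wanted])
		return wanted;
	for (i = 0; i < NR_TYPES - 1; i++) {
		int candidate = fallback_order[wanted][i];

		if (candidate != RESERVE && free_blocks[candidate])
			return candidate;
	}
	return RESERVE;
}

int main(void)
{
	printf("UNMOVABLE request served from type %d\n", pick_type(UNMOVABLE));
	return 0;
}
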
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the caller's
		 * perspective, the linked list is ordered by page number in
		 * some conditions. This is useful for IO devices that can
		 * merge IO requests if the physical pages are ordered
		 * properly.
		 */
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);
		pcp = &pset->pcp;

		local_irq_save(flags);
		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;
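
The listing on this page breaks off inside mark_free_pages() and continues on the next page. The functions above it, rmqueue_bulk() and drain_pages(), implement a per-CPU cache that is refilled and spilled in batches so zone->lock is taken once per batch rather than once per page. A rough userspace sketch of that batching idea, with hypothetical counters in place of the real pcp lists:

#include <stdio.h>

#define PCP_BATCH 4             /* hypothetical batch size */

/* Hypothetical stand-ins for the buddy free lists and one CPU's pcp list */
static int buddy_free_pages = 16;
static int pcp_count;

/* Refill the per-CPU cache in one batch, like rmqueue_bulk() under zone->lock */
static void pcp_refill(void)
{
	int grabbed = 0;

	while (grabbed < PCP_BATCH && buddy_free_pages > 0) {
		buddy_free_pages--;
		grabbed++;
	}
	pcp_count += grabbed;
	printf("refilled %d pages, pcp=%d, buddy=%d\n",
	       grabbed, pcp_count, buddy_free_pages);
}

/* Spill the whole per-CPU cache back, like drain_pages() does per zone */
static void pcp_drain(void)
{
	buddy_free_pages += pcp_count;
	printf("drained %d pages, pcp=0, buddy=%d\n", pcp_count, buddy_free_pages);
	pcp_count = 0;
}

int main(void)
{
	pcp_refill();           /* one lock hold amortized over a whole batch */
	pcp_drain();            /* spill everything back, as drain_all_pages() triggers */
	return 0;
}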
