
page_alloc.c

Source code of the Linux kernel's memory-management module (mm/page_alloc.c), in C. This is page 1 of 5; the listing below ends partway through __free_one_page() and continues on the following pages.
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
         256,
#endif
#ifdef CONFIG_ZONE_DMA32
         256,
#endif
#ifdef CONFIG_HIGHMEM
         32,
#endif
         32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
         "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
         "DMA32",
#endif
         "Normal",
#ifdef CONFIG_HIGHMEM
         "HighMem",
#endif
         "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        printk(KERN_ALERT
                "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
                page, (void *)page->flags, page_count(page),
                page_mapcount(page), page->mapping, page->index);

        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        __ClearPageBuddy(page);
        add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                __SetPageTail(p);
                p->first_page = page;
        }
}

#ifdef CONFIG_HUGETLBFS
void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
                p->first_page = page;
        }
}
#endif

static int destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        int bad = 0;

        if (unlikely(compound_order(page) != order) ||
            unlikely(!PageHead(page))) {
                bad_page(page);
                bad++;
        }

        __ClearPageHead(page);

        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) | (p->first_page != page))) {
                        bad_page(page);
                        bad++;
                }
                __ClearPageTail(p);
        }

        return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
        unsigned long buddy_idx = page_idx ^ (1 << order);

        return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
        return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order)
{
        unsigned long page_idx;
        int order_size = 1 << order;
        int migratetype = get_pageblock_migratetype(page);

        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & (order_size - 1));
        VM_BUG_ON(bad_range(zone, page));

        __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;

                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
                        break;

                /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
                combined_idx = __find_combined_index(page_idx, order);
                page = page + (combined_idx - page_idx);
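The buddy arithmetic in __page_find_buddy() and __find_combined_index() above is pure index math and can be checked outside the kernel. Below is a minimal user-space sketch; the helper names buddy_index and combined_index are hypothetical, not kernel code, and it simply reproduces the #8 / order-1 example from the comment.

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as __page_find_buddy(): the order-O buddy of
 * block B is B with bit O flipped. */
static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1UL << order);
}

/* Same arithmetic as __find_combined_index(): the merged order-(O+1)
 * block starts at B with bit O cleared. */
static unsigned long combined_index(unsigned long page_idx, unsigned int order)
{
        return page_idx & ~(1UL << order);
}

int main(void)
{
        /* The example from the comment: block #8 at order 1 pairs with #10. */
        assert(buddy_index(8, 1) == 10);
        /* The relation is symmetric: #10's buddy is #8 again. */
        assert(buddy_index(10, 1) == 8);
        /* Either buddy collapses to the same order-2 parent, #8. */
        assert(combined_index(8, 1) == 8);
        assert(combined_index(10, 1) == 8);

        printf("buddy of 8 at order 1: %lu\n", buddy_index(8, 1));
        return 0;
}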

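Likewise, the merge loop at the end of __free_one_page() can be simulated in user space. The following toy model is a sketch, not the kernel's data structures: sim_free, free_head[] and free_order[] are hypothetical stand-ins for the per-order free lists, PG_buddy and page_private(). It shows three frees coalescing upward exactly as the loop above does.

#include <stdbool.h>
#include <stdio.h>

#define SIM_MAX_ORDER 4          /* toy pool of 1 << 4 = 16 pages */
#define NR_PAGES (1 << SIM_MAX_ORDER)

/* For each page index: is it the head of a free block, and of what order?
 * Stands in for PG_buddy and page_private(page) in the real code. */
static bool free_head[NR_PAGES];
static unsigned int free_order[NR_PAGES];

/* Mirrors the merge loop in __free_one_page(): while the buddy block is
 * free at the same order, absorb it and move up one order. */
static void sim_free(unsigned long idx, unsigned int order)
{
        while (order < SIM_MAX_ORDER - 1) {
                unsigned long buddy = idx ^ (1UL << order);

                if (!free_head[buddy] || free_order[buddy] != order)
                        break;
                free_head[buddy] = false;   /* like list_del + rmv_page_order */
                idx &= ~(1UL << order);     /* __find_combined_index */
                order++;
        }
        free_head[idx] = true;              /* like set_page_order */
        free_order[idx] = order;
        printf("freed: block at %lu, order %u\n", idx, order);
}

int main(void)
{
        sim_free(8, 1);   /* #8..#9 freed, no free buddy -> stays order 1 */
        sim_free(10, 1);  /* #10..#11 freed -> merges with #8, order 2 */
        sim_free(12, 2);  /* #12..#15 freed -> merges again, order 3 at #8 */
        return 0;
}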