
📄 sparse.c

📁 Source of the latest stable Linux memory-management module
💻 C
📖 Page 1 of 2
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
		     break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
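/*
 * Illustrative sketch (a standalone userspace program, not part of
 * mm/sparse.c): shows how the early-boot nid encoding above round-trips.
 * The DEMO_* shift and flag values below are made-up stand-ins for the
 * real definitions in <linux/mmzone.h>; the point is only that the nid
 * lives in the high bits while the low bits stay free for section flags.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_SECTION_NID_SHIFT		2		/* stand-in value */
#define DEMO_SECTION_MARKED_PRESENT	(1UL << 0)	/* stand-in value */

static unsigned long demo_encode_early_nid(int nid)
{
	return (unsigned long)nid << DEMO_SECTION_NID_SHIFT;
}

static int demo_early_nid(unsigned long section_mem_map)
{
	return (int)(section_mem_map >> DEMO_SECTION_NID_SHIFT);
}

int main(void)
{
	int nid = 3;
	/* memory_present() stores the nid and the PRESENT flag together */
	unsigned long section_mem_map =
		demo_encode_early_nid(nid) | DEMO_SECTION_MARKED_PRESENT;

	/* the flag in the low bits does not disturb the nid readback */
	assert(demo_early_nid(section_mem_map) == nid);
	printf("early nid round-trip ok: %d\n", demo_early_nid(section_mem_map));
	return 0;
}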
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	}

	if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
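/*
 * Illustrative sketch (a standalone userspace program, not part of
 * mm/sparse.c): demonstrates the "subtle" encoding above.  Storing
 * mem_map minus the section's start pfn yields a base pointer such that
 * base + pfn lands directly on the struct page for that pfn, so no
 * subtraction is needed at lookup time.  struct demo_page,
 * PAGES_PER_DEMO_SECTION and the section number are made-up stand-ins.
 */
#include <assert.h>
#include <stdio.h>

struct demo_page { unsigned long flags; };

#define PAGES_PER_DEMO_SECTION	8UL

int main(void)
{
	struct demo_page mem_map[PAGES_PER_DEMO_SECTION];
	unsigned long pnum = 5;	/* made-up section number */
	unsigned long start_pfn = pnum * PAGES_PER_DEMO_SECTION;
	unsigned long pfn = start_pfn + 3;

	/*
	 * Encode: the same pointer arithmetic as sparse_encode_mem_map().
	 * The intermediate pointer is deliberately out of range, mirroring
	 * the kernel's trick.
	 */
	struct demo_page *coded = mem_map - start_pfn;

	/* decode: adding the section's start pfn recovers mem_map ... */
	assert(coded + start_pfn == mem_map);

	/* ... and coded + pfn is the page for that pfn, with no offset math */
	assert(&coded[pfn] == &mem_map[pfn - start_pfn]);

	printf("mem_map encode/decode identity holds\n");
	return 0;
}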
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
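/*
 * Illustrative sketch (a standalone userspace program, not part of
 * mm/sparse.c): works through the two roundup() steps in usemap_size()
 * above.  DEMO_BLOCKFLAGS_BITS is a made-up stand-in for
 * SECTION_BLOCKFLAGS_BITS, and demo_roundup() stands in for the
 * kernel's roundup() macro for these operands.
 */
#include <stdio.h>

#define DEMO_BLOCKFLAGS_BITS	100UL	/* stand-in value */

/* round x up to the next multiple of y */
static unsigned long demo_roundup(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	/* step 1: bits rounded up to whole bytes: 100 -> 104 bits -> 13 bytes */
	unsigned long bytes = demo_roundup(DEMO_BLOCKFLAGS_BITS, 8) / 8;

	/* step 2: pad to whole unsigned longs: 13 -> 16 bytes on LP64 */
	bytes = demo_roundup(bytes, sizeof(unsigned long));

	printf("usemap for %lu bits: %lu bytes\n", DEMO_BLOCKFLAGS_BITS, bytes);
	return 0;
}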
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemap_alloc_pgdat_section(struct pglist_data *pgdat)
{
	unsigned long section_nr;

	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	return alloc_bootmem_section(usemap_size(), section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.