
📄 numa.c

📁 linux-2.6.15.6
💻 C
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */
	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int addr_cells, size_cells;
	int max_domain;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	if (min_common_depth < 0)
		return min_common_depth;

	max_domain = numa_setup_cpu(boot_cpuid);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu);
			of_node_put(cpu);

			if (numa_domain < MAX_NUMNODES &&
			    max_domain < numa_domain)
				max_domain = numa_domain;
		}
	}

	addr_cells = get_mem_addr_cells();
	size_cells = get_mem_size_cells();
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(addr_cells, &memcell_buf);
		size = read_n_cells(size_cells, &memcell_buf);

		numa_domain = of_node_numa_domain(memory);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_region(numa_domain, start >> PAGE_SHIFT,
			   size >> PAGE_SHIFT);

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	map_cpu_to_node(boot_cpuid, 0);
	for (i = 0; i < lmb.memory.cnt; ++i)
		add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
			   lmb_size_pages(&lmb.memory, i));
	node_set_online(0);
}

static void __init dump_numa_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}
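		/* A range still open here ran to the end of DRAM; close it. */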
		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
	return;
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);

		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		/* Add free regions on this node */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn << PAGE_SHIFT;
			end = init_node_data[i].end_pfn << PAGE_SHIFT;

			dbg("free_bootmem %lx %lx\n", start, end - start);
			free_bootmem_node(NODE_DATA(nid), start, end - start);
		}

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
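				/* Neither end of this reserved range is on this node; skip it. */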
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		/* Add regions into sparsemem */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn;
			end = init_node_data[i].end_pfn;

			memory_present(nid, start, end);
		}
	}
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
				    zholes_size);
	}
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
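The device-tree walk in parse_numa_properties() leans on read_n_cells(), whose definition sits earlier in numa.c and is not part of this excerpt. As a rough sketch of what such a helper does, the code below folds n big-endian 32-bit cells into a single value while advancing the buffer pointer, which is how the "reg" property of each memory node is consumed. The standalone harness and the sample "reg" values are hypothetical, and a 64-bit unsigned long (as on ppc64) is assumed:

#include <stdio.h>

/*
 * Sketch: combine n 32-bit device-tree cells into one value,
 * most-significant cell first, advancing the buffer pointer.
 * Assumes unsigned long is 64 bits wide.
 */
static unsigned long read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

int main(void)
{
	/*
	 * A hypothetical "reg" entry: 2 address cells followed by
	 * 2 size cells, describing 4GB of memory at physical
	 * address 0x100000000.
	 */
	unsigned int reg[] = { 0x1, 0x00000000, 0x1, 0x00000000 };
	unsigned int *p = reg;

	unsigned long start = read_n_cells(2, &p);
	unsigned long size = read_n_cells(2, &p);

	/* prints: start=0x100000000 size=0x100000000 */
	printf("start=0x%lx size=0x%lx\n", start, size);
	return 0;
}

With two address cells and two size cells, the sample entry decodes to a 4GB region starting at 0x100000000, exactly the (start, size) pair the new_range loop feeds into numa_enforce_memory_limit() and add_region().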
