
numa.c

Linux kernel source code (C). The listing below is an excerpt; it opens partway through parse_numa_properties().
		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	node_set_online(0);
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	nodemask_t nodes;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return default_nid;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		/* Domains not present at boot default to 0 */
		if (nid < 0 || !node_online(nid))
			nid = default_nid;

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			goto got_nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}

	BUG();	/* section address should be found above */
	return 0;

	/* Temporary code to ensure that returned node is not empty */
got_nid:
	nodes_setall(nodes);
	while (NODE_DATA(nid)->node_spanned_pages == 0) {
		node_clear(nid, nodes);
		nid = any_online_node(nodes);
	}
	return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
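
A note on the cell-walking pattern shared by parse_numa_properties() and hot_add_scn_to_nid() above: a memory node's "reg" property is a flat array of 32-bit cells, len is its length in bytes (so len >> 2 is the cell count), and each range occupies n_mem_addr_cells + n_mem_size_cells cells. read_n_cells() concatenates that many cells into one value and advances the buffer pointer, which is why the reads are order-sensitive. The standalone sketch below mirrors this pattern in userspace; its read_n_cells() is a simplified stand-in written for illustration, not the kernel's implementation.

#include <stdio.h>

/*
 * Simplified stand-in for the kernel's read_n_cells(): concatenate n
 * 32-bit cells into a single value and advance the cursor. (In a real
 * flattened device tree the cells are big-endian; host order is used
 * here to keep the sketch self-contained.)
 */
static unsigned long long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

int main(void)
{
	/* A made-up "reg" property: two (address, size) ranges encoded
	 * with #address-cells = 2 and #size-cells = 1. */
	const unsigned int reg[] = {
		0x0, 0x00000000, 0x10000000,	/* 256MB at 0x0 */
		0x0, 0x10000000, 0x08000000,	/* 128MB at 256MB */
	};
	const unsigned int *memcell_buf = reg;
	unsigned int len = sizeof(reg);	/* bytes, as from of_get_property() */
	int n_mem_addr_cells = 2, n_mem_size_cells = 1;
	/* Same arithmetic as the kernel: len >> 2 is the cell count. */
	int ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

	while (ranges--) {
		/* Order-sensitive: each call consumes cells and moves the cursor. */
		unsigned long long start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		unsigned long long size = read_n_cells(n_mem_size_cells, &memcell_buf);

		printf("range: start=0x%llx size=0x%llx\n", start, size);
	}
	return 0;
}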
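
The range printing in dump_numa_cpu_topology() iterates over raw CPU numbers rather than using a CPU iterator so that gaps in the map terminate a run: count tracks the length of the current run of present CPUs, the first member of a run is printed immediately, and "-last" is emitted only when the run ends (or at the final CPU). A minimal sketch of the same trick, with a hypothetical present[] array standing in for numa_cpumask_lookup_table[node]:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 16

/*
 * Print set entries as collapsed ranges, e.g. " 0-2 5 8-9", using the
 * same run-length counter as dump_numa_cpu_topology() above.
 */
static void print_cpu_ranges(const bool present[NR_CPUS])
{
	unsigned int cpu, count = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (present[cpu]) {
			if (count == 0)
				printf(" %u", cpu);	/* start of a run */
			++count;
		} else {
			if (count > 1)
				printf("-%u", cpu - 1);	/* close the run */
			count = 0;
		}
	}
	/* Close a run that extends to the last CPU. */
	if (count > 1)
		printf("-%u", NR_CPUS - 1);
	printf("\n");
}

int main(void)
{
	bool present[NR_CPUS] = { false };

	present[0] = present[1] = present[2] = true;
	present[5] = true;
	present[8] = present[9] = true;

	print_cpu_ranges(present);	/* prints " 0-2 5 8-9" */
	return 0;
}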
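
The reserved-region loop in do_init_bootmem() clips each LMB reserved range to the node's physical span before calling reserve_bootmem_node(): a range starting below the node is trimmed from the front, and its size is capped at the node's end. Below is a self-contained sketch of that clamp arithmetic, lifted into a standalone helper with the hypothetical name clip_to_node():

#include <stdio.h>

/*
 * Clip the reserved range [*physbase, *physbase + size) to the node
 * span [start, end), adjusting *physbase and returning the clipped
 * size (0 if there is no overlap), as the kernel loop above does
 * before calling reserve_bootmem_node().
 */
static unsigned long clip_to_node(unsigned long *physbase,
				  unsigned long size,
				  unsigned long start, unsigned long end)
{
	if (*physbase >= end || *physbase + size <= start)
		return 0;	/* no overlap with this node */

	if (*physbase < start) {
		/* trim the part below the node */
		size -= start - *physbase;
		*physbase = start;
	}
	if (size > end - *physbase)
		size = end - *physbase;	/* cap at the node's end */
	return size;
}

int main(void)
{
	unsigned long physbase = 0x0f000000;	/* reserved: 240MB.. */
	unsigned long size = 0x02000000;	/* ..272MB (32MB) */
	unsigned long start = 0x10000000;	/* node span: 256MB.. */
	unsigned long end = 0x20000000;		/* ..512MB */

	size = clip_to_node(&physbase, size, start, end);
	/* prints "reserve 0x10000000 0x1000000": the 16MB inside the node */
	printf("reserve 0x%lx 0x%lx\n", physbase, size);
	return 0;
}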
