
numa_64.c
Linux kernel source code (C)
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node.  "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;

		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM.  Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			while (end - *addr - e820_hole_size(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;

	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0;
	int coeff_flag;
	int coeff = -1;
	int num = 0;
	u64 size;
	int i;

	memset(&nodes, 0, sizeof(nodes));

	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
						simple_strtol(cmdline, NULL, 0));
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line. */
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line coefficients are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
	 * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
	 */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa = -1;
#endif
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	acpi_fake_nodes(nodes, num_nodes);
	numa_init_array();
	return 0;
}
#endif /* CONFIG_NUMA_EMU */
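/*
 * Illustrative numa=fake= strings accepted by the numa_emulation() parser
 * above.  These examples are inferred from the parsing loop, not quoted from
 * kernel documentation; sizes are in megabytes, rounded down to
 * FAKE_NODE_MIN_SIZE, and the exact layout also depends on e820 holes.
 *
 *   numa=fake=8            split all system RAM into 8 roughly equal nodes
 *   numa=fake=2*512,1024   two 512MB nodes, one 1024MB node, and one final
 *                          node holding whatever RAM remains
 *   numa=fake=2*512,1024,  same, but the trailing ',' leaves the remaining
 *                          RAM unallocated
 *   numa=fake=512,4*       one 512MB node, then the remainder split into
 *                          4 roughly equal nodes
 *   numa=fake=512,*128     one 512MB node, then the remainder split into
 *                          128MB chunks
 */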
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node(cpu) = node;
}

unsigned long __init numa_free_all_bootmem(void)
{
	int i;
	unsigned long pages = 0;

	for_each_online_node(i) {
		pages += free_all_bootmem_node(NODE_DATA(i));
	}
	return pages;
}

void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt + 7, NULL, 10);
#endif
	return 0;
}
early_param("numa", numa_setup);
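/*
 * Summary of the numa= boot options handled by numa_setup() above.  This is
 * inferred from the strncmp checks in that function; descriptions paraphrase
 * what the code does, not kernel documentation.
 *
 *   numa=off           set numa_off, so numa_initmem_init() skips ACPI/K8
 *                      scanning and falls back to a single dummy node
 *   numa=fake=<spec>   point cmdline at <spec> so numa_emulation() can build
 *                      fake nodes (CONFIG_NUMA_EMU only)
 *   numa=noacpi        set acpi_numa = -1, i.e. ignore the ACPI SRAT
 *   numa=hotadd=<n>    set hotadd_percent to <n> (CONFIG_ACPI_NUMA only)
 */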
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid_init[i];

		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per node page addresses.
 * These are out of line because they are quite big.
 * They could be all tuned by pre caching more state.
 * Should do that.
 */
int pfn_valid(unsigned long pfn)
{
	unsigned nid;

	if (pfn >= num_physpages)
		return 0;
	nid = pfn_to_nid(pfn);
	if (nid == 0xff)
		return 0;
	return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif
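The size arithmetic at the top of this listing (the tail of split_nodes_equally()) is easier to follow with concrete numbers. The sketch below is a minimal user-space approximation, not kernel code: it assumes a hole-free 4 GB machine, ignores e820_hole_size(), and hard-codes a 64 MB FAKE_NODE_MIN_SIZE (the real value comes from the kernel headers). It only reproduces the rounding, the "big node" distribution, and the final-node remainder rule.

#include <stdio.h>
#include <stdint.h>

#define FAKE_NODE_MIN_SIZE	(64ULL << 20)	/* assumed 64 MB granularity */
#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1ULL))

int main(void)
{
	uint64_t max_addr = 4096ULL << 20;	/* pretend: 4 GB of RAM, no holes */
	int num_nodes = 5;			/* as if booted with numa=fake=5 */
	uint64_t addr = 0;
	int i;

	uint64_t size = max_addr / num_nodes;
	/* How many "big" nodes absorb the rounding leftover. */
	uint64_t big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
		       FAKE_NODE_MIN_SIZE;
	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < num_nodes; i++) {
		uint64_t end = addr + size;

		if (i < (int)big)
			end += FAKE_NODE_MIN_SIZE;
		if (i == num_nodes - 1)
			end = max_addr;		/* final node takes the remainder */
		printf("node %d: %4llu MB\n", i,
		       (unsigned long long)((end - addr) >> 20));
		addr = end;
	}
	return 0;
}

With these numbers it prints 832 MB for nodes 0, 1, 2 and 4, and 768 MB for node 3: the kind of near-equal split the kernel code produces before e820 holes are taken into account.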
