acpi.c
#ifdef XEN
	struct acpi_srat_mem_affinity *ma =
	    (struct acpi_srat_mem_affinity *)ma__;
#endif
	unsigned long paddr, size;
	int pxm;
	struct node_memblk_s *p, *q, *pend;

	pxm = get_memory_proximity_domain(ma);

	/* fill node memory chunk structure */
	paddr = ma->base_address;
	size = ma->length;

	/* Ignore disabled entries */
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return;

	/* record this node in proximity bitmap */
	pxm_bit_set(pxm);

	/* Insertion sort based on base address */
	pend = &node_memblk[num_node_memblks];
	for (p = &node_memblk[0]; p < pend; p++) {
		if (paddr < p->start_paddr)
			break;
	}
	if (p < pend) {
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_paddr = paddr;
	p->size = size;
	p->nid = pxm;
	num_node_memblks++;
}

void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/*
	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table)
		return;
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
			printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}
#endif				/* CONFIG_ACPI_NUMA */
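/*
 * A minimal sketch of the SLIT lookup used in acpi_numa_arch_fixup()
 * above, kept under #if 0 because it is illustrative rather than part
 * of this file: the ACPI SLIT stores an N x N locality matrix as a
 * flat byte array, so the distance from locality i to locality j is
 * entry[i * N + j].  The helper name slit_entry_at() is hypothetical.
 */
#if 0
static u8 slit_entry_at(const u8 *entry, unsigned int locality_count,
			unsigned int from, unsigned int to)
{
	/* row-major lookup into the flattened N x N distance matrix */
	return entry[from * locality_count + to];
}
#endif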
#ifndef XEN
/*
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
		return gsi;

	if (has_8259 && gsi < 16)
		return isa_irq_to_vector(gsi);

	return iosapic_register_intr(gsi,
				     (polarity == ACPI_ACTIVE_HIGH) ?
				     IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
				     (triggering == ACPI_EDGE_SENSITIVE) ?
				     IOSAPIC_EDGE : IOSAPIC_LEVEL);
}

void acpi_unregister_gsi(u32 gsi)
{
	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
		return;

	if (has_8259 && gsi < 16)
		return;

	iosapic_unregister_intr(gsi);
}
#endif

static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
	struct acpi_table_header *fadt_header;
	struct acpi_table_fadt *fadt;

	if (!table)
		return -EINVAL;

	fadt_header = (struct acpi_table_header *)table;
	if (fadt_header->revision != 3)
		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */

	fadt = (struct acpi_table_fadt *)fadt_header;

#ifndef XEN
	acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
			  ACPI_ACTIVE_LOW);
#endif
	return 0;
}
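/*
 * Hypothetical usage sketch for acpi_register_gsi() above, kept under
 * #if 0 because it is not code from this file; the function, device,
 * and handler names are made up.  A driver that knows its GSI routes
 * it through acpi_register_gsi() and then requests the returned IRQ.
 */
#if 0
static int example_wire_up_sci(u32 gsi, irq_handler_t handler, void *dev)
{
	int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE,
				    ACPI_ACTIVE_LOW);

	if (irq < 0)
		return irq;	/* registration failed */
	return request_irq(irq, handler, IRQF_SHARED, "example-dev", dev);
}
#endif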
int __init acpi_boot_init(void)
{
	/*
	 * MADT
	 * ----
	 * Parse the Multiple APIC Description Table (MADT), if it exists.
	 * Note that this table provides platform SMP configuration
	 * information -- the successor to MPS tables.
	 */

	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
		printk(KERN_ERR PREFIX "Can't find MADT\n");
		goto skip_madt;
	}

	/* Local APIC */

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
	     acpi_parse_lapic_addr_ovr, 0) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
		printk(KERN_ERR PREFIX
		       "Error parsing MADT - no LAPIC entries\n");

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

	/* I/O APIC */

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
		if (!ia64_platform_is("sn2"))
			printk(KERN_ERR PREFIX
			       "Error parsing MADT - no IOSAPIC entries\n");
	}

	/* System-Level Interrupt Routing */

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing platform interrupt source entry\n");

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing interrupt source overrides entry\n");

	if (acpi_table_parse_madt
	    (ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
skip_madt:

	/*
	 * The FADT says whether a legacy keyboard controller is present.
	 * The FADT also contains an SCI_INT line, by which the system
	 * gets interrupts such as power and sleep buttons.  If it's not
	 * on a legacy interrupt, it needs to be set up.
	 */
	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
		printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_SMP
	if (available_cpus == 0) {
		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
		smp_boot_data.cpu_phys_id[available_cpus] =
		    hard_smp_processor_id();
		available_cpus = 1;	/* We've got at least one of these, no? */
	}
	smp_boot_data.cpu_count = available_cpus;

	smp_build_cpu_map();
# ifdef CONFIG_ACPI_NUMA
	if (srat_num_cpus == 0) {
		int cpu, i = 1;
		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
			if (smp_boot_data.cpu_phys_id[cpu] !=
			    hard_smp_processor_id())
				node_cpuid[i++].phys_id =
				    smp_boot_data.cpu_phys_id[cpu];
	}
# endif
#endif
#ifdef CONFIG_ACPI_NUMA
	build_cpu_to_node_map();
#endif
	/* Make boot-up look pretty */
	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
	       total_cpus);
	return 0;
}

int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
	int tmp;

	if (has_8259 && gsi < 16)
		*irq = isa_irq_to_vector(gsi);
	else {
		tmp = gsi_to_irq(gsi);
		if (tmp == -1)
			return -1;
		*irq = tmp;
	}
	return 0;
}

/*
 * ACPI based hotplug CPU support
 */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
static
int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
{
#ifdef CONFIG_ACPI_NUMA
	int pxm_id;
	int nid;

	pxm_id = acpi_get_pxm(handle);
	/*
	 * We don't have cpu-only-node hotadd.  But if the system has an
	 * SRAT table, the pxm is already found and the node is ready, so
	 * pxm_to_nid(pxm) alone is fine.  This code is for systems that
	 * don't have a full SRAT table covering all possible cpus.
	 */
	nid = acpi_map_pxm_to_node(pxm_id);
	node_cpuid[cpu].phys_id = physid;
	node_cpuid[cpu].nid = nid;
#endif
	return (0);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	if (s)
		additional_cpus = simple_strtol(s, NULL, 0);
	return 0;
}

early_param("additional_cpus", setup_additional_cpus);
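/*
 * A minimal sketch of how the early_param() hook above is exercised
 * (the command line is only an example): booting with
 *
 *	... additional_cpus=2
 *
 * runs setup_additional_cpus() during early parameter parsing, so
 * additional_cpus is already 2 by the time prefill_possible_map()
 * below sizes cpu_possible_map.
 */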
/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined.  The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible, disabled_cpus;

	disabled_cpus = total_cpus - available_cpus;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}

	possible = available_cpus + additional_cpus;

	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
	       possible, max((possible - available_cpus), 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}

#ifndef XEN
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_madt_local_sapic *lsapic;
	cpumask_t tmp_map;
	long physid;
	int cpu;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;

	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
	    (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	/* physical id packs the SAPIC id in the high byte, eid in the low */
	physid = ((lsapic->id << 8) | (lsapic->eid));

	kfree(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	acpi_map_cpu2node(handle, cpu, physid);

	cpu_set(cpu, cpu_present_map);
	ia64_cpu_to_sapicid[cpu] = physid;

	*pcpu = cpu;
	return (0);
}

EXPORT_SYMBOL(acpi_map_lsapic);

int acpi_unmap_lsapic(int cpu)
{
	ia64_cpu_to_sapicid[cpu] = -1;
	cpu_clear(cpu, cpu_present_map);

#ifdef CONFIG_ACPI_NUMA
	/* NUMA specific cleanups */
#endif

	return (0);
}

EXPORT_SYMBOL(acpi_unmap_lsapic);
#endif /* XEN */
#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
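/*
 * Worked example for prefill_possible_map() above (the numbers are
 * assumed, not from any real platform): with total_cpus = 8 and
 * available_cpus = 6, disabled_cpus = 2.  If additional_cpus was left
 * at -1, it defaults to those 2 disabled CPUs, so possible = 6 + 2 = 8
 * and CPUs 0..7 are set in cpu_possible_map, leaving room for 2
 * hotplug CPUs.
 */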
#ifndef XEN
#ifdef CONFIG_ACPI_NUMA
static acpi_status __devinit
acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_madt_io_sapic *iosapic;
	unsigned int gsi_base;
	int pxm, node;

	/* Only care about objects w/ a method that returns the MADT */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return AE_OK;

	if (!buffer.length || !buffer.pointer)
		return AE_OK;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*iosapic)) {
		kfree(buffer.pointer);
		return AE_OK;
	}

	iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;

	if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
		kfree(buffer.pointer);
		return AE_OK;
	}

	gsi_base = iosapic->global_irq_base;

	kfree(buffer.pointer);

	/*
	 * OK, it's an IOSAPIC MADT entry; look for a _PXM value to tell
	 * us which node to associate this with.
	 */
	pxm = acpi_get_pxm(handle);
	if (pxm < 0)
		return AE_OK;

	node = pxm_to_node(pxm);

	if (node >= MAX_NUMNODES || !node_online(node) ||
	    cpus_empty(node_to_cpumask(node)))
		return AE_OK;

	/* We know a gsi to node mapping! */
	map_iosapic_to_node(gsi_base, node);
	return AE_OK;
}

static int __init acpi_map_iosapics(void)
{
	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
	return 0;
}

fs_initcall(acpi_map_iosapics);
#endif				/* CONFIG_ACPI_NUMA */

int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
	int err;

	if ((err = iosapic_init(phys_addr, gsi_base)))
		return err;

#ifdef CONFIG_ACPI_NUMA
	acpi_map_iosapic(handle, 0, NULL, NULL);
#endif				/* CONFIG_ACPI_NUMA */

	return 0;
}

EXPORT_SYMBOL(acpi_register_ioapic);

int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
	return iosapic_remove(gsi_base);
}

EXPORT_SYMBOL(acpi_unregister_ioapic);
#endif /* XEN */

/*
 * acpi_save_state_mem() - save kernel state
 *
 * TBD when IA64 starts to support suspend...
 */
int acpi_save_state_mem(void)
{
	return 0;
}

/*
 * acpi_restore_state()
 */
void acpi_restore_state_mem(void)
{
}

/*
 * do_suspend_lowlevel()
 */
void do_suspend_lowlevel(void)
{
}

#endif				/* CONFIG_ACPI */
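/*
 * Hypothetical usage sketch for acpi_gsi_to_irq() defined earlier in
 * this file, kept under #if 0 because it is illustrative only; the
 * function name is made up.  Callers translate a Global System
 * Interrupt number to a kernel IRQ before requesting it.
 */
#if 0
static int example_gsi_lookup(u32 gsi)
{
	unsigned int irq;

	if (acpi_gsi_to_irq(gsi, &irq) < 0)
		return -ENODEV;	/* no route recorded for this GSI */
	return irq;		/* valid for request_irq() and friends */
}
#endif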