📄 smpboot.c
Font size:
#ifdef BOGOMIPS unsigned long bogosum = 0;#endif /* * Setup boot CPU information */ smp_store_cpu_info(0); /* Final full version of the data */ printk("CPU%d: ", 0); print_cpu_info(&cpu_data[0]); boot_cpu_physical_apicid = get_apic_id(); x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; stack_base[0] = stack_start.esp; /*current_thread_info()->cpu = 0;*/ /*smp_tune_scheduling();*/ set_cpu_sibling_map(0); /* * If we couldn't find an SMP configuration at boot time, * get out of here now! */ if (!smp_found_config && !acpi_lapic) { printk(KERN_NOTICE "SMP motherboard not detected.\n"); init_uniprocessor: phys_cpu_present_map = physid_mask_of_physid(0); if (APIC_init_uniprocessor()) printk(KERN_NOTICE "Local APIC not detected." " Using dummy APIC emulation.\n"); map_cpu_to_logical_apicid(); cpu_set(0, cpu_sibling_map[0]); cpu_set(0, cpu_core_map[0]); return; } /* * Should not be necessary because the MP table should list the boot * CPU too, but we do it for the sake of robustness anyway. * Makes no sense to do this check in clustered apic mode, so skip it */ if (!check_phys_apicid_present(boot_cpu_physical_apicid)) { printk("weird, boot CPU (#%d) not listed by the BIOS.\n", boot_cpu_physical_apicid); physid_set(hard_smp_processor_id(), phys_cpu_present_map); } /* * If we couldn't find a local APIC, then get out of here now! */ if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) { printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", boot_cpu_physical_apicid); goto init_uniprocessor; } verify_local_APIC(); /* * If SMP should be disabled, then really disable it! */ if (!max_cpus) goto init_uniprocessor; connect_bsp_APIC(); setup_local_APIC(); map_cpu_to_logical_apicid(); setup_portio_remap(); /* * Scan the CPU present map and fire up the other CPUs via do_boot_cpu * * In clustered apic mode, phys_cpu_present_map is a constructed thus: * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the * clustered apic ID. 
*/ Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map)); kicked = 1; for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) { apicid = cpu_present_to_apicid(bit); /* * Don't even attempt to start the boot CPU! */ if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID)) continue; if (!check_apicid_present(apicid)) continue; if (max_cpus <= cpucount+1) continue; if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu)) printk("CPU #%d not responding - cannot use it.\n", apicid); else ++kicked; } /* * Cleanup possible dangling ends... */ smpboot_restore_warm_reset_vector();#ifdef BOGOMIPS /* * Allow the user to impress friends. */ Dprintk("Before bogomips.\n"); for (cpu = 0; cpu < NR_CPUS; cpu++) if (cpu_isset(cpu, cpu_callout_map)) bogosum += cpu_data[cpu].loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", cpucount+1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);#else printk("Total of %d processors activated.\n", cpucount+1);#endif Dprintk("Before bogocount - setting activated=1.\n"); if (smp_b_stepping) printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); /* * Don't taint if we are running SMP kernel on a single non-MP * approved Athlon */ if (tainted & TAINT_UNSAFE_SMP) { if (cpucount) printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n"); else tainted &= ~TAINT_UNSAFE_SMP; } Dprintk("Boot done.\n"); /* * construct cpu_sibling_map[], so that we can tell sibling CPUs * efficiently. 
*/ for (cpu = 0; cpu < NR_CPUS; cpu++) { cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_core_map[cpu]); } cpu_set(0, cpu_sibling_map[0]); cpu_set(0, cpu_core_map[0]); if (nmi_watchdog == NMI_LOCAL_APIC) check_nmi_watchdog(); smpboot_setup_io_apic(); setup_boot_APIC_clock(); /* * Synchronize the TSC with the AP */ if (cpu_has_tsc && cpucount && cpu_khz) synchronize_tsc_bp(); calibrate_tsc_bp();}/* These are wrappers to interface to the new boot process. Someone who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */void __init smp_prepare_cpus(unsigned int max_cpus){ smp_commenced_mask = cpumask_of_cpu(0); cpu_callin_map = cpumask_of_cpu(0); mb(); smp_boot_cpus(max_cpus);}void __devinit smp_prepare_boot_cpu(void){ cpu_set(smp_processor_id(), cpu_online_map); cpu_set(smp_processor_id(), cpu_callout_map); cpu_set(smp_processor_id(), cpu_present_map); cpu_set(smp_processor_id(), cpu_possible_map); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;}#ifdef CONFIG_HOTPLUG_CPUstatic voidremove_siblinginfo(int cpu){ int sibling; struct cpuinfo_x86 *c = cpu_data; for_each_cpu_mask(sibling, cpu_core_map[cpu]) { cpu_clear(cpu, cpu_core_map[sibling]); /* * last thread sibling in this cpu core going down */ if (cpus_weight(cpu_sibling_map[cpu]) == 1) c[sibling].booted_cores--; } for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) cpu_clear(cpu, cpu_sibling_map[sibling]); cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_core_map[cpu]); phys_proc_id[cpu] = BAD_APICID; cpu_core_id[cpu] = BAD_APICID; cpu_clear(cpu, cpu_sibling_setup_map);}extern void fixup_irqs(cpumask_t map);int __cpu_disable(void){ cpumask_t map = cpu_online_map; int cpu = smp_processor_id(); /* * Perhaps use cpufreq to drop frequency, but that could go * into generic code. * * We won't take down the boot processor on i386 due to some * interrupts only being able to be serviced by the BSP. 
* Especially so if we're not using an IOAPIC -zwane */ if (cpu == 0) return -EBUSY; /* * Only S3 is using this path, and thus idle vcpus are running on all * APs when we are called. To support full cpu hotplug, other * notification mechanisms should be introduced (e.g., migrate vcpus * off this physical cpu before rendezvous point). */ if (!is_idle_vcpu(current)) return -EINVAL; local_irq_disable(); clear_local_APIC(); /* Allow any queued timer interrupts to get serviced */ local_irq_enable(); mdelay(1); local_irq_disable(); time_suspend(); remove_siblinginfo(cpu); cpu_clear(cpu, map); fixup_irqs(map); /* It's now safe to remove this processor from the online map */ cpu_clear(cpu, cpu_online_map); return 0;}void __cpu_die(unsigned int cpu){ /* We don't do anything here: idle task is faking death itself. */ unsigned int i; for (i = 0; i < 10; i++) { /* They ack this in play_dead by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { printk ("CPU %d is now offline\n", cpu); return; } mdelay(100); mb(); process_pending_timers(); } printk(KERN_ERR "CPU %u didn't die...\n", cpu);}static int take_cpu_down(void *unused){ return __cpu_disable();}/* * XXX: One important thing missed here is to migrate vcpus * from dead cpu to other online ones and then put whole * system into a stop state. It assures a safe environment * for a cpu hotplug/remove at normal running state. * * However for xen PM case, at this point: * -> All other domains should be notified with PM event, * and then in following states: * * Suspend state, or * * Paused state, which is a force step to all * domains if they do nothing to suspend * -> All vcpus of dom0 (except vcpu0) have already beem * hot removed * with the net effect that all other cpus only have idle vcpu * running. In this special case, we can avoid vcpu migration * then and system can be considered in a stop state. 
* * So current cpu hotplug is a special version for PM specific * usage, and need more effort later for full cpu hotplug. * (ktian1) */int cpu_down(unsigned int cpu){ int err = 0; spin_lock(&cpu_add_remove_lock); if (num_online_cpus() == 1) { err = -EBUSY; goto out; } if (!cpu_online(cpu)) { err = -EINVAL; goto out; } printk("Prepare to bring CPU%d down...\n", cpu); err = stop_machine_run(take_cpu_down, NULL, cpu); if ( err < 0 ) goto out; __cpu_die(cpu); if (cpu_online(cpu)) { printk("Bad state (DEAD, but in online map) on CPU%d\n", cpu); err = -EBUSY; }out: spin_unlock(&cpu_add_remove_lock); return err;}int cpu_up(unsigned int cpu){ int err = 0; spin_lock(&cpu_add_remove_lock); if (cpu_online(cpu)) { printk("Bring up a online cpu. Bogus!\n"); err = -EBUSY; goto out; } err = __cpu_up(cpu); if (err < 0) goto out;out: spin_unlock(&cpu_add_remove_lock); return err;}/* From kernel/power/main.c *//* This is protected by pm_sem semaphore */static cpumask_t frozen_cpus;void disable_nonboot_cpus(void){ int cpu, error; error = 0; cpus_clear(frozen_cpus); printk("Freezing cpus ...\n"); for_each_online_cpu(cpu) { if (cpu == 0) continue; error = cpu_down(cpu); if (!error) { cpu_set(cpu, frozen_cpus); printk("CPU%d is down\n", cpu); continue; } printk("Error taking cpu %d down: %d\n", cpu, error); } BUG_ON(raw_smp_processor_id() != 0); if (error) panic("cpus not sleeping");}void enable_nonboot_cpus(void){ int cpu, error; printk("Thawing cpus ...\n"); for_each_cpu_mask(cpu, frozen_cpus) { error = cpu_up(cpu); if (!error) { printk("CPU%d is up\n", cpu); continue; } printk("Error taking cpu %d up: %d\n", cpu, error); panic("Not enough cpus"); } cpus_clear(frozen_cpus); /* * Cleanup possible dangling ends after sleep... */ smpboot_restore_warm_reset_vector();}#else /* ... 
!CONFIG_HOTPLUG_CPU */int __cpu_disable(void){ return -ENOSYS;}void __cpu_die(unsigned int cpu){ /* We said "no" in __cpu_disable */ BUG();}#endif /* CONFIG_HOTPLUG_CPU */int __devinit __cpu_up(unsigned int cpu){#ifdef CONFIG_HOTPLUG_CPU int ret=0; /* * We do warm boot only on cpus that had booted earlier * Otherwise cold boot is all handled from smp_boot_cpus(). * cpu_callin_map is set during AP kickstart process. Its reset * when a cpu is taken offline from cpu_exit_clear(). */ if (!cpu_isset(cpu, cpu_callin_map)) ret = __smp_prepare_cpu(cpu); if (ret) return -EIO;#endif /* In case one didn't come up */ if (!cpu_isset(cpu, cpu_callin_map)) { printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu); local_irq_enable(); return -EIO; } local_irq_enable(); /*per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;*/ /* Unleash the CPU! */ cpu_set(cpu, smp_commenced_mask); while (!cpu_isset(cpu, cpu_online_map)) { mb(); process_pending_timers(); } return 0;}void __init smp_cpus_done(unsigned int max_cpus){#ifdef CONFIG_X86_IO_APIC setup_ioapic_dest();#endif#ifndef CONFIG_HOTPLUG_CPU /* * Disable executability of the SMP trampoline: */ set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);#endif}void __init smp_intr_init(void){ int irq, seridx; /* * IRQ0 must be given a fixed assignment and initialized, * because it's used before the IO-APIC is set up. */ irq_vector[0] = FIRST_HIPRIORITY_VECTOR; vector_irq[FIRST_HIPRIORITY_VECTOR] = 0; /* * Also ensure serial interrupts are high priority. We do not * want them to be blocked by unacknowledged guest-bound interrupts. */ for (seridx = 0; seridx < 2; seridx++) { if ((irq = serial_irq(seridx)) < 0) continue; irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1; vector_irq[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq; } /* IPI for event checking. 
*/ set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt); /* IPI for invalidation */ set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt); /* IPI for generic function call */ set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);}
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -