apic.c
static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Disabled by DMI scan or kernel option? */
	if (dont_enable_local_apic)
		return -1;

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && cpu_has_apic) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}

	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	printk("Found and enabled local APIC!\n");

	apic_pm_init1();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
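
/*
 * Illustrative arithmetic, assuming the conventional x86 values
 * APIC_DEFAULT_PHYS_BASE = 0xfee00000 and MSR_IA32_APICBASE_ENABLE =
 * (1 << 11), with MSR_IA32_APICBASE_BASE masking the base-address field
 * (these exact values are an assumption here, not taken from this file):
 * a BIOS-disabled MSR low word such as l = 0xfee00000 (enable bit clear)
 * becomes l = 0xfee00800 in the re-enable path above -- the base field is
 * forced back to the architectural default and the global enable bit is
 * set before the wrmsr().
 */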

void __init init_apic_mappings(void)
{
	unsigned long apic_phys;

	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	Dprintk("mapped APIC to %08lx (%08lx)\n", APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));

#ifdef CONFIG_X86_IO_APIC
	{
		unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
		int i;

		for (i = 0; i < nr_ioapics; i++) {
			if (smp_found_config) {
				ioapic_phys = mp_ioapics[i].mpc_apicaddr;
				if (!ioapic_phys) {
					printk(KERN_ERR "WARNING: bogus zero IO-APIC address found in MPTABLE, disabling IO/APIC support!\n");
					smp_found_config = 0;
					skip_ioapic_setup = 1;
					goto fake_ioapic_page;
				}
			} else {
fake_ioapic_page:
				ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
				ioapic_phys = __pa(ioapic_phys);
			}
			set_fixmap_nocache(idx, ioapic_phys);
			Dprintk("mapped IOAPIC to %08lx (%08lx)\n",
					__fix_to_virt(idx), ioapic_phys);
			idx++;
		}
	}
#endif
}

/*
 * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts
 * per second. We assume that the caller has already set up the local
 * APIC.
 *
 * The APIC timer is not exactly in sync with the external timer chip,
 * it closely follows bus clocks.
 */

/*
 * The timer chip is already set up at HZ interrupts per second here,
 * but we do not accept timer interrupts yet. We only allow the BP
 * to calibrate.
 */
static unsigned int __init get_8254_timer_count(void)
{
	extern spinlock_t i8253_lock;
	unsigned long flags;
	unsigned int count;

	spin_lock_irqsave(&i8253_lock, flags);

	/* Latch counter 0, then read the 16-bit count low byte first. */
	outb(0x00, 0x43);
	count = inb(0x40);
	count |= inb(0x40) << 8;

	spin_unlock_irqrestore(&i8253_lock, flags);

	return count;
}

void __init wait_8254_wraparound(void)
{
	unsigned int curr_count, prev_count=~0;
	int delta;

	curr_count = get_8254_timer_count();

	do {
		prev_count = curr_count;
		curr_count = get_8254_timer_count();
		delta = curr_count-prev_count;

	/*
	 * This limit for delta seems arbitrary, but it isn't, it's
	 * slightly above the level of error a buggy Mercury/Neptune
	 * chipset timer can cause.
	 */

	} while (delta < 300);
}

/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clocks. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, the second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */

#define APIC_DIVISOR 16

void __setup_APIC_LVTT(unsigned int clocks)
{
	unsigned int lvtt1_value, tmp_value;

	lvtt1_value = SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV) |
			APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
	apic_write_around(APIC_LVTT, lvtt1_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write_around(APIC_TDCR, (tmp_value
				& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
				| APIC_TDR_DIV_16);

	apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
}

void setup_APIC_timer(void * data)
{
	unsigned int clocks = (unsigned int) data, slice, t0, t1;
	unsigned long flags;
	int delta;

	__save_flags(flags);
	__sti();
	/*
	 * ok, Intel has some smart code in their APIC that knows
	 * if a CPU was in 'hlt' lowpower mode, and this increases
	 * its APIC arbitration priority. To avoid the external timer
	 * IRQ APIC event being in sync with the APIC clock we
	 * introduce an interrupt skew to spread out timer events.
	 *
	 * The number of slices within a 'big' timeslice is smp_num_cpus+1
	 */

	slice = clocks / (smp_num_cpus+1);
	printk("cpu: %d, clocks: %d, slice: %d\n",
		smp_processor_id(), clocks, slice);

	/*
	 * Wait for IRQ0's slice:
	 */
	wait_8254_wraparound();

	__setup_APIC_LVTT(clocks);

	t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;
	/* Wait till TMCCT gets reloaded from TMICT... */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta >= 0);
	/* Now wait for our slice for real. */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta < 0);

	__setup_APIC_LVTT(clocks);

	printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
			smp_processor_id(), t0, t1, delta, slice, clocks);

	__restore_flags(flags);
}
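
/*
 * Illustrative arithmetic (hypothetical numbers, not from this file):
 * with clocks = 1,000,000 bus cycles per tick and smp_num_cpus = 3,
 * slice = 1000000 / (3+1) = 250000.  IRQ0 conceptually owns the first
 * slice; CPU0 then re-arms its timer about 250000 bus cycles after the
 * 8254 wraparound, CPU1 about 500000 cycles after it, and so on, so the
 * per-CPU local timer interrupts are spread across the global tick
 * instead of all firing at the same moment.
 */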

/*
 * In this function we calibrate the APIC bus clock to the external
 * timer. Unfortunately we cannot use jiffies and the timer irq
 * to calibrate, since some later bootup code depends on getting
 * the first irq? Ugh.
 *
 * We want to do the calibration only once since we
 * want to have local timer irqs synchronous. CPUs connected
 * by the same APIC bus have the very same bus frequency.
 * And we want to have irqs off anyways, no accidental
 * APIC irq that way.
 */
int __init calibrate_APIC_clock(void)
{
	unsigned long long t1 = 0, t2 = 0;
	long tt1, tt2;
	long result;
	int i;
	const int LOOPS = HZ/10;

	printk("calibrating APIC timer ...\n");

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 */
	__setup_APIC_LVTT(1000000000);

	/*
	 * The timer chip counts down to zero. Let's wait
	 * for a wraparound to start exact measurement:
	 * (the current tick might have been already half done)
	 */

	wait_8254_wraparound();

	/*
	 * We wrapped around just now. Let's start:
	 */
	if (cpu_has_tsc)
		rdtscll(t1);
	tt1 = apic_read(APIC_TMCCT);

	/*
	 * Let's wait LOOPS wraparounds:
	 */
	for (i = 0; i < LOOPS; i++)
		wait_8254_wraparound();

	tt2 = apic_read(APIC_TMCCT);
	if (cpu_has_tsc)
		rdtscll(t2);

	/*
	 * The APIC bus clock counter is 32 bits only, it
	 * might have overflowed, but note that we use signed
	 * longs, thus no extra care needed.
	 *
	 * underflowed to be exact, as the timer counts down ;)
	 */

	result = (tt1-tt2)*APIC_DIVISOR/LOOPS;

	if (cpu_has_tsc)
		printk("..... CPU clock speed is %ld.%04ld MHz.\n",
			((long)(t2-t1)/LOOPS)/(1000000/HZ),
			((long)(t2-t1)/LOOPS)%(1000000/HZ));

	printk("..... host bus clock speed is %ld.%04ld MHz.\n",
		result/(1000000/HZ),
		result%(1000000/HZ));

	return result;
}
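
/*
 * Illustrative arithmetic (hypothetical numbers, not from this file):
 * (tt1 - tt2) is the number of divided-by-16 APIC timer ticks that
 * elapsed over LOOPS = HZ/10 periods of the 8254, so result comes out
 * as APIC bus cycles per system tick (1/HZ s).  On a 100 MHz bus with
 * HZ = 100 that is result = 100,000,000 / 100 = 1,000,000, and the
 * printk() above shows 1000000 / (1000000/HZ) = 100 with remainder 0,
 * i.e. "..... host bus clock speed is 100.0000 MHz."
 */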

static unsigned int calibration_result;

int dont_use_local_apic_timer __initdata = 0;

void __init setup_APIC_clocks (void)
{
	/* Disabled by DMI scan or kernel option? */
	if (dont_use_local_apic_timer)
		return;

	printk("Using local APIC timer interrupts.\n");
	using_apic_timer = 1;

	__cli();

	calibration_result = calibrate_APIC_clock();
	/*
	 * Now set up the timer for real.
	 */
	setup_APIC_timer((void *)calibration_result);

	__sti();

	/* and update all other cpus */
	smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
}

void __init disable_APIC_timer(void)
{
	if (using_apic_timer) {
		unsigned long v;

		v = apic_read(APIC_LVTT);
		apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
	}
}

void enable_APIC_timer(void)
{
	if (using_apic_timer) {
		unsigned long v;

		v = apic_read(APIC_LVTT);
		apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
	}
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	int i;

	/*
	 * Sanity check. [at least 500 APIC cycles should be
	 * between APIC interrupts as a rule of thumb, to avoid
	 * irqs flooding us]
	 */
	if ( (!multiplier) || (calibration_result/multiplier < 500))
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU. CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting. At that time they also adjust their APIC timers
	 * accordingly.
	 */
	for (i = 0; i < NR_CPUS; ++i)
		prof_multiplier[i] = multiplier;

	return 0;
}

#undef APIC_DIVISOR

/*
 * Local timer interrupt handler. It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks. The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
inline void smp_local_timer_interrupt(struct pt_regs * regs)
{
	int user = user_mode(regs);
	int cpu = smp_processor_id();

	/*
	 * The profiling function is SMP safe. (nothing can mess
	 * around with "current", and the profiling counters are
	 * updated with atomic operations). This is especially
	 * useful with a profiling multiplier != 1
	 */
	if (!user)
		x86_do_profile(regs->eip);

	if (--prof_counter[cpu] <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		prof_counter[cpu] = prof_multiplier[cpu];
		if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
			__setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
			prof_old_multiplier[cpu] = prof_counter[cpu];
		}

#ifdef CONFIG_SMP
		update_process_times(user);
#endif
	}

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */
}

/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
unsigned int apic_timer_irqs [NR_CPUS];

void smp_apic_timer_interrupt(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	/*
	 * the NMI deadlock-detector uses this.
	 */
	apic_timer_irqs[cpu]++;

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter(cpu, 0);
	smp_local_timer_interrupt(regs);
	irq_exit(cpu, 0);

	if (softirq_pending(cpu))
		do_softirq();
}

/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned long v;

	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
			smp_processor_id());
}

/*
 * This interrupt should never happen with our APIC/SMP architecture
 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned long v, v1;

	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk (KERN_ERR "APIC error on CPU%d: %02lx(%02lx)\n",
	        smp_processor_id(), v , v1);
}
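
/*
 * Illustrative decode (hypothetical value, not from this file): a report
 * of "APIC error on CPU0: 40(00)" means the first ESR read returned 0x40,
 * i.e. bit 6 was set ("Received illegal vector"), and the second read,
 * taken after writing 0 to APIC_ESR, came back clean.
 */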

/*
 * This initializes the IO-APIC and APIC hardware if this is
 * a UP kernel.
 */
int __init APIC_init_uniprocessor (void)
{
	if (!smp_found_config && !cpu_has_apic)
		return -1;

	/*
	 * Complain if the BIOS pretends there is one.
	 */
	if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		return -1;
	}

	verify_local_APIC();

	connect_bsp_APIC();

	phys_cpu_present_map = 1;
	apic_write_around(APIC_ID, boot_cpu_physical_apicid);

	apic_pm_init2();

	setup_local_APIC();

	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();
#ifdef CONFIG_X86_IO_APIC
	if (smp_found_config)
		if (!skip_ioapic_setup && nr_ioapics)
			setup_IO_APIC();
#endif
	setup_APIC_clocks();

	return 0;
}