time.c
    struct cpu_calibration *c = &this_cpu(cpu_calibration);
    /*
     * System timestamps, extrapolated from local and master oscillators,
     * taken during this calibration and the previous calibration.
     */
    s_time_t prev_local_stime, curr_local_stime;
    s_time_t prev_master_stime, curr_master_stime;

    /* TSC timestamps taken during this calibration and prev calibration. */
    u64 prev_tsc, curr_tsc;

    /*
     * System time and TSC ticks elapsed during the previous calibration
     * 'epoch'. These values are down-shifted to fit in 32 bits.
     */
    u64 stime_elapsed64, tsc_elapsed64;
    u32 stime_elapsed32, tsc_elapsed32;

    /* The accumulated error in the local estimate. */
    u64 local_stime_err;

    /* Error correction to slow down a fast local clock. */
    u32 error_factor = 0;

    /* Calculated TSC shift to ensure 32-bit scale multiplier. */
    int tsc_shift = 0;

    /* The overall calibration scale multiplier. */
    u32 calibration_mul_frac;

    prev_tsc          = t->local_tsc_stamp;
    prev_local_stime  = t->stime_local_stamp;
    prev_master_stime = t->stime_master_stamp;

    /* Disabling IRQs ensures we atomically read cpu_calibration struct. */
    local_irq_disable();
    curr_tsc          = c->local_tsc_stamp;
    curr_local_stime  = c->stime_local_stamp;
    curr_master_stime = c->stime_master_stamp;
    local_irq_enable();

#if 0
    printk("PRE%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64"\n",
           smp_processor_id(), prev_tsc, prev_local_stime, prev_master_stime);
    printk("CUR%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64
           " -> %"PRId64"\n",
           smp_processor_id(), curr_tsc, curr_local_stime, curr_master_stime,
           curr_master_stime - curr_local_stime);
#endif

    /* Local time warps forward if it lags behind master time. */
    if ( curr_local_stime < curr_master_stime )
        curr_local_stime = curr_master_stime;

    stime_elapsed64 = curr_master_stime - prev_master_stime;
    tsc_elapsed64   = curr_tsc - prev_tsc;

    /*
     * Weirdness can happen if we lose sync with the platform timer.
     * We could be smarter here: resync platform timer with local timer?
     */
    if ( ((s64)stime_elapsed64 < (EPOCH / 2)) )
        goto out;

    /*
     * Calculate error-correction factor. This only slows down a fast local
     * clock (slow clocks are warped forwards). The scale factor is clamped
     * to >= 0.5.
     */
    if ( curr_local_stime != curr_master_stime )
    {
        local_stime_err = curr_local_stime - curr_master_stime;
        if ( local_stime_err > EPOCH )
            local_stime_err = EPOCH;
        error_factor = div_frac(EPOCH, EPOCH + (u32)local_stime_err);
    }

    /*
     * We require 0 < stime_elapsed < 2^31.
     * This allows us to binary shift a 32-bit tsc_elapsed such that:
     * stime_elapsed < tsc_elapsed <= 2*stime_elapsed
     */
    while ( ((u32)stime_elapsed64 != stime_elapsed64) ||
            ((s32)stime_elapsed64 < 0) )
    {
        stime_elapsed64 >>= 1;
        tsc_elapsed64   >>= 1;
    }

    /* stime_elapsed now fits in a 32-bit word. */
    stime_elapsed32 = (u32)stime_elapsed64;

    /* tsc_elapsed <= 2*stime_elapsed */
    while ( tsc_elapsed64 > (stime_elapsed32 * 2) )
    {
        tsc_elapsed64 >>= 1;
        tsc_shift--;
    }

    /* Local difference must now fit in 32 bits. */
    ASSERT((u32)tsc_elapsed64 == tsc_elapsed64);
    tsc_elapsed32 = (u32)tsc_elapsed64;

    /* tsc_elapsed > stime_elapsed */
    ASSERT(tsc_elapsed32 != 0);
    while ( tsc_elapsed32 <= stime_elapsed32 )
    {
        tsc_elapsed32 <<= 1;
        tsc_shift++;
    }

    calibration_mul_frac = div_frac(stime_elapsed32, tsc_elapsed32);
    if ( error_factor != 0 )
        calibration_mul_frac = mul_frac(calibration_mul_frac, error_factor);

#if 0
    printk("---%d: %08x %08x %d\n", smp_processor_id(),
           error_factor, calibration_mul_frac, tsc_shift);
#endif

    /* Record new timestamp information, atomically w.r.t. interrupts. */
    local_irq_disable();
    t->tsc_scale.mul_frac = calibration_mul_frac;
    t->tsc_scale.shift    = tsc_shift;
    t->local_tsc_stamp    = curr_tsc;
    t->stime_local_stamp  = curr_local_stime;
    t->stime_master_stamp = curr_master_stime;
    local_irq_enable();

    update_vcpu_system_time(current);

 out:
    if ( smp_processor_id() == 0 )
    {
        set_timer(&calibration_timer, NOW() + EPOCH);
        platform_time_calibration();
    }
}
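
/*
 * Illustrative sketch, not part of the original file: the calibration above
 * encodes the TSC-to-system-time ratio as a binary pre-shift (tsc_shift) plus
 * a 0.32 fixed-point multiplier (calibration_mul_frac), so a TSC delta can be
 * converted to nanoseconds with shifts and one widening multiply.  For
 * example, if the TSC advanced by about 2.4e9 ticks while master system time
 * advanced by 1e9 ns, the loops above give tsc_shift = -1 and
 * mul_frac = div_frac(1e9, 1.2e9) ~= (5/6) * 2^32, i.e. delta_ns ~= tsc / 2.4.
 * div_frac() and mul_frac() are defined elsewhere in time.c; assuming the
 * usual 0.32 fixed-point semantics, portable equivalents look roughly like
 * the helpers below.  The sketch_* names are placeholders, not Xen's, and the
 * block is guarded by #if 0 so it does not affect the build.
 */
#if 0
static inline u32 sketch_div_frac(u32 dividend, u32 divisor)
{
    /* Requires dividend < divisor; returns dividend/divisor in 0.32 format. */
    return (u32)(((u64)dividend << 32) / divisor);
}

static inline u32 sketch_mul_frac(u32 multiplicand, u32 frac)
{
    /* Scale by a 0.32 fixed-point fraction, keeping the integer part. */
    return (u32)(((u64)multiplicand * frac) >> 32);
}

static inline u64 sketch_scale_delta(u64 tsc_delta, u32 mul_frac, int shift)
{
    /* Pre-shift so the scale multiplier can stay within 32 bits. */
    if ( shift < 0 )
        tsc_delta >>= -shift;
    else
        tsc_delta <<= shift;
    /* 64x32 -> 96-bit multiply, dropping the 32 fractional bits. */
    return ((tsc_delta >> 32) * mul_frac) +
           (((tsc_delta & 0xffffffffULL) * mul_frac) >> 32);
}
#endif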

/*
 * Rendezvous for all CPUs in IRQ context.
 * Master CPU snapshots the platform timer.
 * All CPUs snapshot their local TSC and extrapolation of system time.
 */
struct calibration_rendezvous {
    atomic_t nr_cpus;
    s_time_t master_stime;
};

static void time_calibration_rendezvous(void *_r)
{
    unsigned int total_cpus = num_online_cpus();
    struct cpu_calibration *c = &this_cpu(cpu_calibration);
    struct calibration_rendezvous *r = _r;

    local_irq_disable();

    if ( smp_processor_id() == 0 )
    {
        while ( atomic_read(&r->nr_cpus) != (total_cpus - 1) )
            cpu_relax();
        r->master_stime = read_platform_stime();
        atomic_inc(&r->nr_cpus);
    }
    else
    {
        atomic_inc(&r->nr_cpus);
        while ( atomic_read(&r->nr_cpus) != total_cpus )
            cpu_relax();
    }

    rdtscll(c->local_tsc_stamp);
    c->stime_local_stamp  = get_s_time();
    c->stime_master_stamp = r->master_stime;

    local_irq_enable();

    /* Callback in softirq context as soon as possible. */
    set_timer(&c->softirq_callback, c->stime_local_stamp);
}

static void time_calibration(void *unused)
{
    struct calibration_rendezvous r = {
        .nr_cpus = ATOMIC_INIT(0)
    };

    on_each_cpu(time_calibration_rendezvous, &r, 0, 1);
}
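
/*
 * Overview of the calibration cycle implemented above: every EPOCH, CPU0's
 * calibration_timer fires time_calibration(), which sends every online CPU
 * into time_calibration_rendezvous() in IRQ context.  CPU0 waits until all
 * other CPUs have arrived, snapshots the platform timer once, and then each
 * CPU records its local TSC and extrapolated system time against that single
 * master timestamp.  Each CPU finally schedules local_time_calibration()
 * through its softirq_callback timer, which recomputes the per-CPU tsc_scale
 * from the two most recent snapshots and refreshes the current vCPU's view of
 * system time via update_vcpu_system_time().
 */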

void init_percpu_time(void)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    unsigned long flags;
    s_time_t now;

    local_irq_save(flags);
    rdtscll(t->local_tsc_stamp);
    now = !plt_src.read_counter ? 0 : read_platform_stime();
    local_irq_restore(flags);

    t->stime_master_stamp = now;
    t->stime_local_stamp  = now;

    init_timer(&this_cpu(cpu_calibration).softirq_callback,
               local_time_calibration, NULL, smp_processor_id());

    if ( smp_processor_id() == 0 )
    {
        init_timer(&calibration_timer, time_calibration, NULL, 0);
        set_timer(&calibration_timer, NOW() + EPOCH);
    }
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
    local_irq_disable();

    /*
     * Check whether the TSC is invariant during deep C states.
     * This is a new feature introduced by Nehalem.
     */
    if ( cpuid_edx(0x80000007) & (1u << 8) )
        tsc_invariant = 1;

    init_percpu_time();

    stime_platform_stamp = 0;
    init_platform_timer();

    do_settime(get_cmos_time(), 0, NOW());

    local_irq_enable();

    return 0;
}

/* Early init function. */
void __init early_time_init(void)
{
    u64 tmp = init_pit_and_calibrate_tsc();

    set_time_scale(&this_cpu(cpu_time).tsc_scale, tmp);

    do_div(tmp, 1000);
    cpu_khz = (unsigned long)tmp;
    printk("Detected %lu.%03lu MHz processor.\n",
           cpu_khz / 1000, cpu_khz % 1000);

    setup_irq(0, &irq0);
}

/*
 * force_hpet_broadcast: if true, force the use of HPET broadcast to work
 * around the LAPIC timer stopping in deep C states while the PIT is disabled.
 */
static int force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);

/* Keep the PIT enabled so PIT broadcast keeps working while cpuidle is enabled. */
static int disable_pit_irq(void)
{
    if ( using_pit || !cpu_has_apic ||
         (xen_cpuidle && !force_hpet_broadcast) )
        return 0;

    /*
     * If we do not rely on PIT CH0 then we can use HPET for one-shot timer
     * emulation when entering deep C states.
     * XXX dom0 may rely on RTC interrupt delivery, so only enable
     * hpet_broadcast if force_hpet_broadcast.
     */
    if ( xen_cpuidle && force_hpet_broadcast )
    {
        hpet_broadcast_init();
        if ( !hpet_broadcast_is_available() )
        {
            printk("HPET broadcast init failed; falling back to PIT broadcast.\n");
            return 0;
        }
    }

    /* Disable PIT CH0 timer interrupt. */
    outb_p(0x30, PIT_MODE);
    outb_p(0, PIT_CH0);
    outb_p(0, PIT_CH0);

    return 0;
}
__initcall(disable_pit_irq);

void pit_broadcast_enter(void)
{
    cpu_set(smp_processor_id(), pit_broadcast_mask);
}

void pit_broadcast_exit(void)
{
    int cpu = smp_processor_id();

    if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
        reprogram_timer(per_cpu(timer_deadline, cpu));
}

int pit_broadcast_is_available(void)
{
    return xen_cpuidle;
}

void send_timer_event(struct vcpu *v)
{
    send_guest_vcpu_virq(v, VIRQ_TIMER);
}

/* Return secs after 00:00:00 localtime, 1 January, 1970. */
unsigned long get_localtime(struct domain *d)
{
    return wc_sec + (wc_nsec + NOW()) / 1000000000ULL
        + d->time_offset_seconds;
}

/* "cmos_utc_offset" is the difference between UTC time and CMOS time. */
static long cmos_utc_offset; /* in seconds */

int time_suspend(void)
{
    if ( smp_processor_id() == 0 )
    {
        cmos_utc_offset = -get_cmos_time();
        cmos_utc_offset += (wc_sec + (wc_nsec + NOW()) / 1000000000ULL);
        kill_timer(&calibration_timer);
    }

    /* Better to cancel the per-CPU calibration callback timer, for accuracy. */
    kill_timer(&this_cpu(cpu_calibration).softirq_callback);

    return 0;
}

int time_resume(void)
{
    /*u64 tmp = */init_pit_and_calibrate_tsc();

    disable_pit_irq();

    /* Disabled while calibrate_tsc_ap() is also skipped. */
    /*set_time_scale(&this_cpu(cpu_time).tsc_scale, tmp);*/

    resume_platform_timer();

    init_percpu_time();

    do_settime(get_cmos_time() + cmos_utc_offset, 0, NOW());

    if ( !is_idle_vcpu(current) )
        update_vcpu_system_time(current);

    return 0;
}

int dom0_pit_access(struct ioreq *ioreq)
{
    /* Is Xen using Channel 2? Then disallow direct dom0 access. */
    if ( using_pit )
        return 0;

    switch ( ioreq->addr )
    {
    case PIT_CH2:
        if ( ioreq->dir == IOREQ_READ )
            ioreq->data = inb(PIT_CH2);
        else
            outb(ioreq->data, PIT_CH2);
        return 1;

    case PIT_MODE:
        if ( ioreq->dir == IOREQ_READ )
            return 0; /* urk! */
        switch ( ioreq->data & 0xc0 )
        {
        case 0xc0: /* Read Back */
            if ( ioreq->data & 0x08 )    /* Select Channel 2? */
                outb(ioreq->data & 0xf8, PIT_MODE);
            if ( !(ioreq->data & 0x06) ) /* Select Channel 0/1? */
                return 1; /* no - we're done */
            /* Filter Channel 2 and reserved bit 0. */
            ioreq->data &= ~0x09;
            return 0; /* emulate ch0/1 readback */
        case 0x80: /* Select Counter 2 */
            outb(ioreq->data, PIT_MODE);
            return 1;
        }

    case 0x61:
        if ( ioreq->dir == IOREQ_READ )
            ioreq->data = inb(0x61);
        else
            outb((inb(0x61) & ~3) | (ioreq->data & 3), 0x61);
        return 1;
    }

    return 0;
}

struct tm wallclock_time(void)
{
    uint64_t seconds;

    if ( !wc_sec )
        return (struct tm) { 0 };

    seconds = NOW() + (wc_sec * 1000000000ull) + wc_nsec;
    do_div(seconds, 1000000000);
    return gmtime(seconds);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */