time.c
            rc = init_pmtimer(pts);

        if ( rc <= 0 )
            printk("WARNING: %s clocksource '%s'.\n",
                   (rc == 0) ? "Could not initialise" : "Unrecognised",
                   opt_clocksource);
    }

    if ( (rc <= 0) &&
         !init_cyclone(pts) &&
         !init_hpet(pts) &&
         !init_pmtimer(pts) )
        init_pit(pts);

    plt_mask = (u32)~0u >> (32 - pts->counter_bits);

    set_time_scale(&plt_scale, pts->frequency);

    overflow_period = scale_delta(1ull << (pts->counter_bits-1), &plt_scale);
    do_div(overflow_period, MILLISECS(1000/HZ));
    plt_overflow_period = overflow_period;
    plt_overflow();
    printk("Platform timer overflows in %d jiffies.\n", plt_overflow_period);

    platform_timer_stamp = plt_count64;

    printk("Platform timer is %s %s\n",
           freq_string(pts->frequency), pts->name);
}

/***************************************************************************
 * CMOS Timer functions
 ***************************************************************************/

/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08)
 */
unsigned long
mktime (unsigned int year, unsigned int mon, unsigned int day,
        unsigned int hour, unsigned int min, unsigned int sec)
{
    /* 1..12 -> 11,12,1..10: put Feb last since it has a leap day. */
    if ( 0 >= (int) (mon -= 2) )
    {
        mon += 12;
        year -= 1;
    }

    return ((((unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day)+
              year*365 - 719499
        )*24 + hour /* now have hours */
        )*60 + min  /* now have minutes */
        )*60 + sec; /* finally seconds */
}
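/*
 * A worked example of the formula above (illustrative only): for
 * 1970-01-02 00:00:00, mon -= 2 wraps to 11 and year drops to 1969, so the
 * day count is
 *   1969/4 - 1969/100 + 1969/400 + 367*11/12 + 2 + 1969*365 - 719499
 *     = 492 - 19 + 4 + 336 + 2 + 718685 - 719499 = 1,
 * giving 1*24*60*60 = 86400 seconds; the epoch itself maps to 0.
 */
#if 0
/* Hypothetical self-test, kept disabled; it exercises the cases worked out
 * above plus the leap day in 2000. */
static void __init mktime_selftest(void)
{
    ASSERT(mktime(1970, 1, 1, 0, 0, 0) == 0);
    ASSERT(mktime(1970, 1, 2, 0, 0, 0) == 86400);
    ASSERT(mktime(2000, 3, 1, 0, 0, 0) - mktime(2000, 2, 28, 0, 0, 0)
           == 2 * 86400); /* 2000 is a leap year: February has 29 days. */
}
#endif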
static unsigned long __get_cmos_time(void)
{
    unsigned int year, mon, day, hour, min, sec;

    sec  = CMOS_READ(RTC_SECONDS);
    min  = CMOS_READ(RTC_MINUTES);
    hour = CMOS_READ(RTC_HOURS);
    day  = CMOS_READ(RTC_DAY_OF_MONTH);
    mon  = CMOS_READ(RTC_MONTH);
    year = CMOS_READ(RTC_YEAR);

    if ( !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
    {
        BCD_TO_BIN(sec);
        BCD_TO_BIN(min);
        BCD_TO_BIN(hour);
        BCD_TO_BIN(day);
        BCD_TO_BIN(mon);
        BCD_TO_BIN(year);
    }

    if ( (year += 1900) < 1970 )
        year += 100;

    return mktime(year, mon, day, hour, min, sec);
}

static unsigned long get_cmos_time(void)
{
    unsigned long res, flags;
    int i;

    spin_lock_irqsave(&rtc_lock, flags);

    /* read RTC exactly on falling edge of update flag */
    for ( i = 0 ; i < 1000000 ; i++ ) /* may take up to 1 second... */
        if ( (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;
    for ( i = 0 ; i < 1000000 ; i++ ) /* must try at least 2.228 ms */
        if ( !(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;

    res = __get_cmos_time();

    spin_unlock_irqrestore(&rtc_lock, flags);
    return res;
}

/***************************************************************************
 * System Time
 ***************************************************************************/

s_time_t get_s_time(void)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    u64 tsc, delta;
    s_time_t now;

    rdtscll(tsc);
    delta = tsc - t->local_tsc_stamp;
    now = t->stime_local_stamp + scale_delta(delta, &t->tsc_scale);

    return now;
}

static inline void version_update_begin(u32 *version)
{
    /* Explicitly OR with 1 just in case version number gets out of sync. */
    *version = (*version + 1) | 1;
    wmb();
}

static inline void version_update_end(u32 *version)
{
    wmb();
    (*version)++;
}
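/*
 * version_update_begin()/version_update_end() implement a lockless
 * publication protocol: the writer makes the version odd before touching the
 * payload and even again afterwards, with write barriers ordering the stores.
 * A reader must therefore retry if it sees an odd version, or if the version
 * changes while it copies.  The sketch below shows the intended consumer-side
 * loop; it is not part of this file, and the helper name and rmb() placement
 * are assumptions about how a guest would read vcpu_time_info.
 */
#if 0
static void read_time_info_sketch(const struct vcpu_time_info *u,
                                  struct vcpu_time_info *snapshot)
{
    u32 ver;

    do {
        ver = u->version;   /* odd value => an update is in flight */
        rmb();              /* read the version before the payload */
        *snapshot = *u;     /* copy the published fields */
        rmb();              /* read the payload before re-checking */
    } while ( (ver & 1) || (ver != u->version) );
}
#endif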
void update_vcpu_system_time(struct vcpu *v)
{
    struct cpu_time       *t;
    struct vcpu_time_info *u;

    if ( v->vcpu_info == NULL )
        return;

    t = &this_cpu(cpu_time);
    u = &vcpu_info(v, time);

    if ( u->tsc_timestamp == t->local_tsc_stamp )
        return;

    version_update_begin(&u->version);

    u->tsc_timestamp     = t->local_tsc_stamp;
    u->system_time       = t->stime_local_stamp;
    u->tsc_to_system_mul = t->tsc_scale.mul_frac;
    u->tsc_shift         = (s8)t->tsc_scale.shift;

    version_update_end(&u->version);
}

void update_domain_wallclock_time(struct domain *d)
{
    spin_lock(&wc_lock);
    version_update_begin(&shared_info(d, wc_version));
    shared_info(d, wc_sec)  = wc_sec + d->time_offset_seconds;
    shared_info(d, wc_nsec) = wc_nsec;
    version_update_end(&shared_info(d, wc_version));
    spin_unlock(&wc_lock);
}

int cpu_frequency_change(u64 freq)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    u64 curr_tsc;

    /* Sanity check: CPU frequency allegedly dropping below 1MHz? */
    if ( freq < 1000000u )
    {
        gdprintk(XENLOG_WARNING, "Rejecting CPU frequency change "
                 "to %"PRIu64" Hz.\n", freq);
        return -EINVAL;
    }

    local_irq_disable();
    /* Platform time /first/, as we may be delayed by platform_timer_lock. */
    t->stime_master_stamp = read_platform_stime();
    /* TSC-extrapolated time may be bogus after frequency change. */
    /*t->stime_local_stamp = get_s_time();*/
    t->stime_local_stamp  = t->stime_master_stamp;
    rdtscll(curr_tsc);
    t->local_tsc_stamp = curr_tsc;
    set_time_scale(&t->tsc_scale, freq);
    local_irq_enable();

    update_vcpu_system_time(current);

    /* A full epoch should pass before we check for deviation. */
    set_timer(&t->calibration_timer, NOW() + EPOCH);
    if ( smp_processor_id() == 0 )
        platform_time_calibration();

    return 0;
}

/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    u64 x;
    u32 y, _wc_sec, _wc_nsec;
    struct domain *d;

    x = (secs * 1000000000ULL) + (u64)nsecs - system_time_base;
    y = do_div(x, 1000000000);

    spin_lock(&wc_lock);
    wc_sec  = _wc_sec  = (u32)x;
    wc_nsec = _wc_nsec = (u32)y;
    spin_unlock(&wc_lock);

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        update_domain_wallclock_time(d);
    rcu_read_unlock(&domlist_read_lock);
}

static void local_time_calibration(void *unused)
{
    struct cpu_time *t = &this_cpu(cpu_time);

    /*
     * System timestamps, extrapolated from local and master oscillators,
     * taken during this calibration and the previous calibration.
     */
    s_time_t prev_local_stime, curr_local_stime;
    s_time_t prev_master_stime, curr_master_stime;

    /* TSC timestamps taken during this calibration and prev calibration. */
    u64 prev_tsc, curr_tsc;

    /*
     * System time and TSC ticks elapsed during the previous calibration
     * 'epoch'. These values are down-shifted to fit in 32 bits.
     */
    u64 stime_elapsed64, tsc_elapsed64;
    u32 stime_elapsed32, tsc_elapsed32;

    /* The accumulated error in the local estimate. */
    u64 local_stime_err;

    /* Error correction to slow down a fast local clock. */
    u32 error_factor = 0;

    /* Calculated TSC shift to ensure 32-bit scale multiplier. */
    int tsc_shift = 0;

    /* The overall calibration scale multiplier. */
    u32 calibration_mul_frac;

    prev_tsc          = t->local_tsc_stamp;
    prev_local_stime  = t->stime_local_stamp;
    prev_master_stime = t->stime_master_stamp;

    /*
     * Disable IRQs to get 'instantaneous' current timestamps. We read platform
     * time first, as we may be delayed when acquiring platform_timer_lock.
     */
    local_irq_disable();
    curr_master_stime = read_platform_stime();
    curr_local_stime  = get_s_time();
    rdtscll(curr_tsc);
    local_irq_enable();

#if 0
    printk("PRE%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64"\n",
           smp_processor_id(), prev_tsc, prev_local_stime, prev_master_stime);
    printk("CUR%d: tsc=%"PRIu64" stime=%"PRIu64" master=%"PRIu64
           " -> %"PRId64"\n",
           smp_processor_id(), curr_tsc, curr_local_stime, curr_master_stime,
           curr_master_stime - curr_local_stime);
#endif

    /* Local time warps forward if it lags behind master time. */
    if ( curr_local_stime < curr_master_stime )
        curr_local_stime = curr_master_stime;

    stime_elapsed64 = curr_master_stime - prev_master_stime;
    tsc_elapsed64   = curr_tsc - prev_tsc;

    /*
     * Weirdness can happen if we lose sync with the platform timer.
     * We could be smarter here: resync platform timer with local timer?
     */
    if ( ((s64)stime_elapsed64 < (EPOCH / 2)) )
        goto out;

    /*
     * Calculate error-correction factor. This only slows down a fast local
     * clock (slow clocks are warped forwards). The scale factor is clamped
     * to >= 0.5.
     */
    if ( curr_local_stime != curr_master_stime )
    {
        local_stime_err = curr_local_stime - curr_master_stime;
        if ( local_stime_err > EPOCH )
            local_stime_err = EPOCH;
        error_factor = div_frac(EPOCH, EPOCH + (u32)local_stime_err);
    }

    /*
     * We require 0 < stime_elapsed < 2^31.
     * This allows us to binary shift a 32-bit tsc_elapsed such that:
     * stime_elapsed < tsc_elapsed <= 2*stime_elapsed
     */
    while ( ((u32)stime_elapsed64 != stime_elapsed64) ||
            ((s32)stime_elapsed64 < 0) )
    {
        stime_elapsed64 >>= 1;
        tsc_elapsed64   >>= 1;
    }

    /* stime_elapsed now fits in a 32-bit word. */
    stime_elapsed32 = (u32)stime_elapsed64;

    /* tsc_elapsed <= 2*stime_elapsed */
    while ( tsc_elapsed64 > (stime_elapsed32 * 2) )
    {
        tsc_elapsed64 >>= 1;
        tsc_shift--;
    }

    /* Local difference must now fit in 32 bits. */
    ASSERT((u32)tsc_elapsed64 == tsc_elapsed64);
    tsc_elapsed32 = (u32)tsc_elapsed64;

    /* tsc_elapsed > stime_elapsed */
    ASSERT(tsc_elapsed32 != 0);
    while ( tsc_elapsed32 <= stime_elapsed32 )
    {
        tsc_elapsed32 <<= 1;
        tsc_shift++;
    }

    calibration_mul_frac = div_frac(stime_elapsed32, tsc_elapsed32);
    if ( error_factor != 0 )
        calibration_mul_frac = mul_frac(calibration_mul_frac, error_factor);

#if 0
    printk("---%d: %08x %08x %d\n", smp_processor_id(),
           error_factor, calibration_mul_frac, tsc_shift);
#endif

    /* Record new timestamp information, atomically w.r.t. interrupts. */
    local_irq_disable();
    t->tsc_scale.mul_frac = calibration_mul_frac;
    t->tsc_scale.shift    = tsc_shift;
    t->local_tsc_stamp    = curr_tsc;
    t->stime_local_stamp  = curr_local_stime;
    t->stime_master_stamp = curr_master_stime;
    local_irq_enable();

    update_vcpu_system_time(current);

 out:
    set_timer(&t->calibration_timer, NOW() + EPOCH);

    if ( smp_processor_id() == 0 )
        platform_time_calibration();
}
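/*
 * The calibration above leaves tsc_scale.mul_frac holding the ratio
 * stime_elapsed/tsc_elapsed as a 0.32 fixed-point fraction (the shift loops
 * keep that ratio in [0.5, 1)), and tsc_scale.shift holding the binary
 * pre-shift to apply to TSC deltas.  scale_delta(), used by get_s_time(), is
 * not part of this excerpt; the sketch below shows the conversion those two
 * fields imply.  The time_scale type name and the helper itself are assumed
 * for illustration, following the field names used throughout this file.
 */
#if 0
static u64 scale_delta_sketch(u64 delta, const struct time_scale *scale)
{
    u32 lo, hi;

    /* Pre-shift the delta into the range the multiplier was computed for. */
    if ( scale->shift < 0 )
        delta >>= -scale->shift;
    else
        delta <<= scale->shift;

    /* 64x32-bit multiply by the 0.32 fraction, keeping bits 32 and above. */
    lo = (u32)delta;
    hi = (u32)(delta >> 32);
    return (((u64)lo * scale->mul_frac) >> 32) + ((u64)hi * scale->mul_frac);
}
#endif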
void init_percpu_time(void)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    unsigned long flags;
    s_time_t now;

    local_irq_save(flags);
    rdtscll(t->local_tsc_stamp);
    now = !plt_src.read_counter ? 0 : read_platform_stime();
    local_irq_restore(flags);

    t->stime_master_stamp = now;
    t->stime_local_stamp  = now;

    init_timer(&t->calibration_timer, local_time_calibration,
               NULL, smp_processor_id());
    set_timer(&t->calibration_timer, NOW() + EPOCH);
}

/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
    wc_sec = get_cmos_time();

    local_irq_disable();

    init_percpu_time();

    stime_platform_stamp = 0;
    init_platform_timer();

    local_irq_enable();

    return 0;
}

/* Early init function. */
void __init early_time_init(void)
{
    u64 tmp = init_pit_and_calibrate_tsc();

    set_time_scale(&this_cpu(cpu_time).tsc_scale, tmp);

    do_div(tmp, 1000);
    cpu_khz = (unsigned long)tmp;
    printk("Detected %lu.%03lu MHz processor.\n",
           cpu_khz / 1000, cpu_khz % 1000);

    setup_irq(0, &irq0);
}

void send_timer_event(struct vcpu *v)
{
    send_guest_vcpu_virq(v, VIRQ_TIMER);
}

/* Return secs after 00:00:00 localtime, 1 January, 1970. */
unsigned long get_localtime(struct domain *d)
{
    return wc_sec + (wc_nsec + NOW()) / 1000000000ULL
        + d->time_offset_seconds;
}

/* "cmos_utc_offset" is the difference between UTC time and CMOS time. */
static long cmos_utc_offset; /* in seconds */

int time_suspend(void)
{
    if ( smp_processor_id() == 0 )
    {
        cmos_utc_offset = -get_cmos_time();
        cmos_utc_offset += (wc_sec + (wc_nsec + NOW()) / 1000000000ULL);
    }

    /* Better to cancel calibration timer for accuracy. */
    kill_timer(&this_cpu(cpu_time).calibration_timer);

    return 0;
}

int time_resume(void)
{
    u64 tmp = init_pit_and_calibrate_tsc();

    set_time_scale(&this_cpu(cpu_time).tsc_scale, tmp);

    resume_platform_timer();

    do_settime(get_cmos_time() + cmos_utc_offset, 0, read_platform_stime());

    init_percpu_time();

    if ( !is_idle_vcpu(current) )
        update_vcpu_system_time(current);

    return 0;
}

int dom0_pit_access(struct ioreq *ioreq)
{
    /* Is Xen using Channel 2? Then disallow direct dom0 access. */
    if ( plt_src.read_counter == read_pit_count )
        return 0;

    switch ( ioreq->addr )
    {
    case PIT_CH2:
        if ( ioreq->dir == IOREQ_READ )
            ioreq->data = inb(PIT_CH2);
        else
            outb(ioreq->data, PIT_CH2);
        return 1;

    case PIT_MODE:
        if ( ioreq->dir == IOREQ_READ )
            return 0; /* urk! */
        switch ( ioreq->data & 0xc0 )
        {
        case 0xc0: /* Read Back */
            if ( ioreq->data & 0x08 )    /* Select Channel 2? */
                outb(ioreq->data & 0xf8, PIT_MODE);
            if ( !(ioreq->data & 0x06) ) /* Select Channel 0/1? */
                return 1; /* no - we're done */
            /* Filter Channel 2 and reserved bit 0. */
            ioreq->data &= ~0x09;
            return 0; /* emulate ch0/1 readback */
        case 0x80: /* Select Counter 2 */
            outb(ioreq->data, PIT_MODE);
            return 1;
        }

    case 0x61:
        if ( ioreq->dir == IOREQ_READ )
            ioreq->data = inb(0x61);
        else
            outb((inb(0x61) & ~3) | (ioreq->data & 3), 0x61);
        return 1;
    }

    return 0;
}

struct tm wallclock_time(void)
{
    uint64_t seconds;

    if ( !wc_sec )
        return (struct tm) { 0 };

    seconds = NOW() + (wc_sec * 1000000000ull) + wc_nsec;
    do_div(seconds, 1000000000);
    return gmtime(seconds);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */