📄 time.c
    return (volatile u32 *)(fix_to_virt(FIX_CYCLONE_TIMER) + offset);
}

static int init_cyclone(struct platform_timesource *pts)
{
    u32 base;

    if ( !use_cyclone )
        return 0;

    /* Find base address. */
    base = *(map_cyclone_reg(CYCLONE_CBAR_ADDR));
    if ( base == 0 )
    {
        printk(KERN_ERR "Cyclone: Could not find valid CBAR value.\n");
        return 0;
    }

    /* Enable timer and map the counter register. */
    *(map_cyclone_reg(base + CYCLONE_PMCC_OFFSET)) = 1;
    *(map_cyclone_reg(base + CYCLONE_MPCS_OFFSET)) = 1;
    cyclone_timer = map_cyclone_reg(base + CYCLONE_MPMC_OFFSET);

    pts->name = "IBM Cyclone";
    pts->frequency = CYCLONE_TIMER_FREQ;
    pts->read_counter = read_cyclone_count;
    pts->counter_bits = 32;

    return 1;
}

/************************************************************
 * PLATFORM TIMER 4: ACPI PM TIMER
 */

u32 pmtmr_ioport;

/* ACPI PM timer ticks at 3.579545 MHz. */
#define ACPI_PM_FREQUENCY 3579545

static u64 read_pmtimer_count(void)
{
    return inl(pmtmr_ioport);
}

static int init_pmtimer(struct platform_timesource *pts)
{
    if ( pmtmr_ioport == 0 )
        return 0;

    pts->name = "ACPI PM Timer";
    pts->frequency = ACPI_PM_FREQUENCY;
    pts->read_counter = read_pmtimer_count;
    pts->counter_bits = 24;

    return 1;
}

/************************************************************
 * GENERIC PLATFORM TIMER INFRASTRUCTURE
 */

static struct platform_timesource plt_src; /* details of chosen timesource  */
static u64 plt_mask;             /* hardware-width mask                     */
static u64 plt_overflow_period;  /* ns between calls to plt_overflow()      */
static struct time_scale plt_scale; /* scale: platform counter -> nanosecs  */

/* Protected by platform_timer_lock. */
static DEFINE_SPINLOCK(platform_timer_lock);
static s_time_t stime_platform_stamp; /* System time at below platform time */
static u64 platform_timer_stamp;      /* Platform time at above system time */
static u64 plt_stamp64;          /* 64-bit platform counter stamp           */
static u64 plt_stamp;            /* hardware-width platform counter stamp   */
static struct timer plt_overflow_timer;

static void plt_overflow(void *unused)
{
    u64 count;

    spin_lock_irq(&platform_timer_lock);
    count = plt_src.read_counter();
    plt_stamp64 += (count - plt_stamp) & plt_mask;
    plt_stamp = count;
    spin_unlock_irq(&platform_timer_lock);

    set_timer(&plt_overflow_timer, NOW() + plt_overflow_period);
}

static s_time_t __read_platform_stime(u64 platform_time)
{
    u64 diff = platform_time - platform_timer_stamp;
    ASSERT(spin_is_locked(&platform_timer_lock));
    return (stime_platform_stamp + scale_delta(diff, &plt_scale));
}

static s_time_t read_platform_stime(void)
{
    u64 count;
    s_time_t stime;

    ASSERT(!local_irq_is_enabled());

    spin_lock(&platform_timer_lock);
    count = plt_stamp64 + ((plt_src.read_counter() - plt_stamp) & plt_mask);
    stime = __read_platform_stime(count);
    spin_unlock(&platform_timer_lock);

    return stime;
}

static void platform_time_calibration(void)
{
    u64 count;
    s_time_t stamp;

    spin_lock_irq(&platform_timer_lock);
    count = plt_stamp64 + ((plt_src.read_counter() - plt_stamp) & plt_mask);
    stamp = __read_platform_stime(count);
    stime_platform_stamp = stamp;
    platform_timer_stamp = count;
    spin_unlock_irq(&platform_timer_lock);
}
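
/*
 * Illustrative sketch (not from the original source): the
 * "(count - plt_stamp) & plt_mask" idiom used above extends a narrow
 * free-running counter to 64 bits, as long as it is sampled at least once
 * per half wrap (which is what plt_overflow_period guarantees). The demo_*
 * names below are hypothetical; the counter is 24 bits wide, like the
 * ACPI PM timer's.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_stamp64;                        /* 64-bit accumulator */
static uint64_t demo_stamp;                          /* last raw sample    */
static const uint64_t demo_mask = (1ull << 24) - 1;  /* 24-bit hardware    */

static uint64_t demo_extend(uint64_t raw)
{
    /* Subtraction under the mask yields elapsed ticks even across a wrap. */
    demo_stamp64 += (raw - demo_stamp) & demo_mask;
    demo_stamp = raw;
    return demo_stamp64;
}

int main(void)
{
    demo_extend(0xfffff0);  /* sample just below the 24-bit wrap point */
    /* 0x10 - 0xfffff0 is 0x20 under the mask, so this prints 16777232,
     * i.e. 0xfffff0 + 0x20, rather than going backwards to 16. */
    printf("%llu\n", (unsigned long long)demo_extend(0x000010));
    return 0;
}
#endif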
static void resume_platform_timer(void)
{
    /* No change in platform_stime across suspend/resume. */
    platform_timer_stamp = plt_stamp64;
    plt_stamp = plt_src.read_counter();
}

static void init_platform_timer(void)
{
    struct platform_timesource *pts = &plt_src;
    int rc = -1;

    if ( opt_clocksource[0] != '\0' )
    {
        if ( !strcmp(opt_clocksource, "pit") )
            rc = (init_pit(pts), 1);
        else if ( !strcmp(opt_clocksource, "hpet") )
            rc = init_hpet(pts);
        else if ( !strcmp(opt_clocksource, "cyclone") )
            rc = init_cyclone(pts);
        else if ( !strcmp(opt_clocksource, "acpi") )
            rc = init_pmtimer(pts);

        if ( rc <= 0 )
            printk("WARNING: %s clocksource '%s'.\n",
                   (rc == 0) ? "Could not initialise" : "Unrecognised",
                   opt_clocksource);
    }

    if ( (rc <= 0) &&
         !init_cyclone(pts) &&
         !init_hpet(pts) &&
         !init_pmtimer(pts) )
        init_pit(pts);

    plt_mask = (u64)~0ull >> (64 - pts->counter_bits);

    set_time_scale(&plt_scale, pts->frequency);

    plt_overflow_period = scale_delta(
        1ull << (pts->counter_bits - 1), &plt_scale);
    init_timer(&plt_overflow_timer, plt_overflow, NULL, 0);
    plt_overflow(NULL);

    platform_timer_stamp = plt_stamp64;

    printk("Platform timer is %s %s\n",
           freq_string(pts->frequency), pts->name);
}

void cstate_save_tsc(void)
{
    struct cpu_time *t = &this_cpu(cpu_time);

    if ( tsc_invariant )
        return;

    t->cstate_plt_count_stamp = plt_src.read_counter();
    rdtscll(t->cstate_tsc_stamp);
}

void cstate_restore_tsc(void)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    u64 plt_count_delta, tsc_delta;

    if ( tsc_invariant )
        return;

    plt_count_delta = (plt_src.read_counter() -
                       t->cstate_plt_count_stamp) & plt_mask;
    tsc_delta = scale_delta(plt_count_delta, &plt_scale) * cpu_khz / 1000000UL;
    wrmsrl(MSR_IA32_TSC, t->cstate_tsc_stamp + tsc_delta);
}

/***************************************************************************
 * CMOS Timer functions
 ***************************************************************************/
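
/*
 * Illustrative worked check (not from the original source) of the Gauss-style
 * formula in mktime() below, for 2000-01-01 00:00:00 UTC. February is shifted
 * to the end of the year, so mon=1 becomes mon=11 with year=1999, and with
 * integer division:
 *
 *   year/4 - year/100 + year/400 + 367*mon/12 + day
 *       = 499 - 19 + 4 + 336 + 1                       = 821
 *   821 + 1999*365 - 719499 = 821 + 729635 - 719499    = 10957 days
 *   ((10957*24 + 0)*60 + 0)*60 + 0                     = 946684800 seconds
 *
 * 946684800 is indeed the Unix timestamp of 2000-01-01 00:00:00 UTC.
 */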
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08.)
 */
unsigned long
mktime (unsigned int year, unsigned int mon,
        unsigned int day, unsigned int hour,
        unsigned int min, unsigned int sec)
{
    /* 1..12 -> 11,12,1..10: put Feb last since it has a leap day. */
    if ( 0 >= (int) (mon -= 2) )
    {
        mon += 12;
        year -= 1;
    }

    return ((((unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day)+
              year*365 - 719499
        )*24 + hour /* now have hours */
        )*60 + min  /* now have minutes */
        )*60 + sec; /* finally seconds */
}

static unsigned long __get_cmos_time(void)
{
    unsigned int year, mon, day, hour, min, sec;

    sec  = CMOS_READ(RTC_SECONDS);
    min  = CMOS_READ(RTC_MINUTES);
    hour = CMOS_READ(RTC_HOURS);
    day  = CMOS_READ(RTC_DAY_OF_MONTH);
    mon  = CMOS_READ(RTC_MONTH);
    year = CMOS_READ(RTC_YEAR);

    if ( !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD )
    {
        BCD_TO_BIN(sec);
        BCD_TO_BIN(min);
        BCD_TO_BIN(hour);
        BCD_TO_BIN(day);
        BCD_TO_BIN(mon);
        BCD_TO_BIN(year);
    }

    if ( (year += 1900) < 1970 )
        year += 100;

    return mktime(year, mon, day, hour, min, sec);
}

static unsigned long get_cmos_time(void)
{
    unsigned long res, flags;
    int i;

    spin_lock_irqsave(&rtc_lock, flags);

    /* Read the RTC exactly on the falling edge of the update flag. */
    for ( i = 0; i < 1000000; i++ ) /* may take up to 1 second... */
        if ( CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP )
            break;
    for ( i = 0; i < 1000000; i++ ) /* must try at least 2.228 ms */
        if ( !(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) )
            break;

    res = __get_cmos_time();

    spin_unlock_irqrestore(&rtc_lock, flags);

    return res;
}

/***************************************************************************
 * System Time
 ***************************************************************************/

s_time_t get_s_time(void)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    u64 tsc, delta;
    s_time_t now;

    rdtscll(tsc);
    delta = tsc - t->local_tsc_stamp;
    now = t->stime_local_stamp + scale_delta(delta, &t->tsc_scale);

    return now;
}

static inline void version_update_begin(u32 *version)
{
    /* Explicitly OR with 1 just in case version number gets out of sync. */
    *version = (*version + 1) | 1;
    wmb();
}

static inline void version_update_end(u32 *version)
{
    wmb();
    (*version)++;
}
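
/*
 * Illustrative sketch (not from the original source): version_update_begin()
 * and version_update_end() above form the writer side of a seqlock-style
 * protocol -- the version is odd while an update is in flight and is bumped
 * to a new even value when it completes. A matching reader loop might look
 * like the following; demo_read_time_info is a hypothetical name, and real
 * code would read info->version with volatile/ACCESS_ONCE semantics.
 */
#if 0 /* illustration only */
static void demo_read_time_info(const struct vcpu_time_info *info,
                                struct vcpu_time_info *snapshot)
{
    u32 version;

    do {
        /* Wait for any in-flight update to finish (odd version). */
        do {
            version = info->version;
        } while ( version & 1 );
        rmb();
        *snapshot = *info;
        rmb();
        /* Retry if the writer bumped the version while we were copying. */
    } while ( version != info->version );
}
#endif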
void update_vcpu_system_time(struct vcpu *v)
{
    struct cpu_time       *t;
    struct vcpu_time_info *u;

    if ( v->vcpu_info == NULL )
        return;

    t = &this_cpu(cpu_time);
    u = &vcpu_info(v, time);

    if ( u->tsc_timestamp == t->local_tsc_stamp )
        return;

    version_update_begin(&u->version);

    u->tsc_timestamp     = t->local_tsc_stamp;
    u->system_time       = t->stime_local_stamp;
    u->tsc_to_system_mul = t->tsc_scale.mul_frac;
    u->tsc_shift         = (s8)t->tsc_scale.shift;

    version_update_end(&u->version);
}

void update_domain_wallclock_time(struct domain *d)
{
    spin_lock(&wc_lock);
    version_update_begin(&shared_info(d, wc_version));
    shared_info(d, wc_sec)  = wc_sec + d->time_offset_seconds;
    shared_info(d, wc_nsec) = wc_nsec;
    version_update_end(&shared_info(d, wc_version));
    spin_unlock(&wc_lock);
}

void domain_set_time_offset(struct domain *d, int32_t time_offset_seconds)
{
    d->time_offset_seconds = time_offset_seconds;
    if ( is_hvm_domain(d) )
        rtc_update_clock(d);
}

int cpu_frequency_change(u64 freq)
{
    struct cpu_time *t = &this_cpu(cpu_time);
    u64 curr_tsc;

    /* Sanity check: CPU frequency allegedly dropping below 1MHz? */
    if ( freq < 1000000u )
    {
        gdprintk(XENLOG_WARNING, "Rejecting CPU frequency change "
                 "to %"PRIu64" Hz.\n", freq);
        return -EINVAL;
    }

    local_irq_disable();
    /* Platform time /first/, as we may be delayed by platform_timer_lock. */
    t->stime_master_stamp = read_platform_stime();
    /* TSC-extrapolated time may be bogus after frequency change. */
    /*t->stime_local_stamp = get_s_time();*/
    t->stime_local_stamp = t->stime_master_stamp;
    rdtscll(curr_tsc);
    t->local_tsc_stamp = curr_tsc;
    set_time_scale(&t->tsc_scale, freq);
    local_irq_enable();

    update_vcpu_system_time(current);

    /* A full epoch should pass before we check for deviation. */
    if ( smp_processor_id() == 0 )
    {
        set_timer(&calibration_timer, NOW() + EPOCH);
        platform_time_calibration();
    }

    return 0;
}

/* Set clock to <secs,nsecs> after 00:00:00 UTC, 1 January, 1970. */
void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
{
    u64 x;
    u32 y, _wc_sec, _wc_nsec;
    struct domain *d;

    x = (secs * 1000000000ULL) + (u64)nsecs - system_time_base;
    y = do_div(x, 1000000000);

    spin_lock(&wc_lock);
    wc_sec  = _wc_sec  = (u32)x;
    wc_nsec = _wc_nsec = (u32)y;
    spin_unlock(&wc_lock);

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        update_domain_wallclock_time(d);
    rcu_read_unlock(&domlist_read_lock);
}

/* Per-CPU communication between rendezvous IRQ and softirq handler. */
struct cpu_calibration {
    u64 local_tsc_stamp;
    s_time_t stime_local_stamp;
    s_time_t stime_master_stamp;
    struct timer softirq_callback;
};
static DEFINE_PER_CPU(struct cpu_calibration, cpu_calibration);

/* Softirq handler for per-CPU time calibration. */
static void local_time_calibration(void *unused)
{
    struct cpu_time *t = &this_cpu(cpu_time);