/* time.c */
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        unsigned temp_idx;
        struct gettimeofday_vars *temp_varp;

        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];

        temp_varp->tb_to_xs = new_tb_to_xs;
        temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
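/*
 * Illustrative sketch, not part of the original file: the vdso_data
 * fields written above are consumed by the userspace gettimeofday code
 * following the tb_update_count protocol described in the comment.  In
 * simplified C (the real reader lives in the vDSO; the variable names
 * and barrier placement below are assumptions for illustration only),
 * the read side looks roughly like:
 *
 *      do {
 *              seq   = vdso_data->tb_update_count;
 *              rmb();
 *              stamp = vdso_data->tb_orig_stamp;
 *              xsec  = vdso_data->stamp_xsec;
 *              scale = vdso_data->tb_to_xs;
 *              rmb();
 *      } while ((seq & 1) || seq != vdso_data->tb_update_count);
 *
 *      now_xsec = xsec + mulhdu(get_tb() - stamp, scale);
 *
 * An odd count means an update is in flight; a count that changed while
 * the values were being read means they may be torn, so the reader
 * retries in both cases.
 */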
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if ( iSeries_recal_titan ) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
                unsigned long new_tb_ticks_per_jiffy =
                        (new_tb_ticks_per_sec+(HZ/2))/HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';
                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if ( tick_diff < 0 ) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if ( tick_diff ) {
                        if ( tick_diff < tb_ticks_per_jiffy/25 ) {
                                printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                                new_tb_ticks_per_jiffy, sign, tick_diff );
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec   = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
                                do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                                tb_to_xs = divres.result_low;
                                do_gtod.varp->tb_to_xs = tb_to_xs;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        }
                        else {
                                printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
                                        "                   new tb_ticks_per_jiffy = %lu\n"
                                        "                   old tb_ticks_per_jiffy = %lu\n",
                                        new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        /* Called here as now we know accurate values for the timebase */
        clocksource_init();
        return 0;
}
late_initcall(iSeries_tb_recal);
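/*
 * Worked example for the 4 percent tolerance in iSeries_tb_recal(),
 * using illustrative numbers only (assume HZ = 100 and a timebase of
 * roughly 512000000 ticks per second): tb_ticks_per_jiffy is then about
 * 5120000, so tb_ticks_per_jiffy/25 is 204800 ticks, i.e. 4 percent.
 * A measured new_tb_ticks_per_jiffy of 5121000 gives tick_diff = +1000,
 * well inside the bound, so the global calibration values are rewritten;
 * a difference beyond 204800 ticks only logs the FAILED message and
 * leaves the old calibration untouched.
 */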
/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (almost always the new decrementer value will
 * be greater than the current hardware decrementer, so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(decrementers, cpu);
        u64 now;

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions */
        set_dec(DECREMENTER_MAX);

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        now = get_tb_or_rtc();
        if (now < per_cpu(decrementer_next_tb, cpu)) {
                /* not time for this event yet */
                now = per_cpu(decrementer_next_tb, cpu) - now;
                if (now <= DECREMENTER_MAX)
                        set_dec((int)now);
                return;
        }
        old_regs = set_irq_regs(regs);
        irq_enter();

        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        if (evt->event_handler)
                evt->event_handler(evt);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}
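/*
 * Illustrative note, not part of the original file: with the same
 * assumed numbers as above (HZ = 100, tb_ticks_per_jiffy = 5120000),
 * if 3000000 ticks of the current jiffy had already elapsed when the
 * system went to sleep, wakeup_decrementer() re-arms the decrementer
 * for the remaining 2120000 ticks; if a full jiffy or more has passed,
 * it programs 1 tick so the decrementer exception is taken almost
 * immediately and normal timekeeping resumes.
 */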
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;

                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Set the time base to zero */
        mtspr(SPRN_TBWL, 0);
        mtspr(SPRN_TBWU, 0);

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}

int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

unsigned long read_persistent_clock(void)
{
        struct rtc_time tm;
        static int first = 1;

        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time)
                        return ppc_md.get_boot_time() - timezone_offset;
        }
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
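/*
 * Illustrative note, not part of the original file: get_freq() above
 * reads the calibration values from the first "cpu" node of the device
 * tree.  A hypothetical fragment that would satisfy both fallback
 * lookups in generic_calibrate_decr() might look roughly like:
 *
 *      cpu@0 {
 *              device_type = "cpu";
 *              timebase-frequency = <33333333>;
 *              clock-frequency = <1000000000>;
 *      };
 *
 * The "ibm,extended-..." variants are tried first with cells == 2, for
 * frequencies too large to fit in a single 32-bit cell.
 */

/* clocksource code */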