/* time.c */
/* * Common time routines among all ppc machines. * * Written by Cort Dougan (cort@cs.nmt.edu) to merge * Paul Mackerras' version and mine for PReP and Pmac. * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com) * * First round of bugfixes by Gabriel Paubert (paubert@iram.es) * to make clock more stable (2.4.0-test5). The only thing * that this code assumes is that the timebases have been synchronized * by firmware on SMP and are never stopped (never do sleep * on SMP then, nap and doze are OK). * * Speeded up do_gettimeofday by getting rid of references to * xtime (which required locks for consistency). (mikejc@us.ibm.com) * * TODO (not necessarily in this file): * - improve precision and reproducibility of timebase frequency * measurement at boot time. (for iSeries, we calibrate the timebase * against the Titan chip's clock.) * - for astronomical applications: add a new function to get * non ambiguous timestamps even around leap seconds. This needs * a new timestamp format and a good name. * * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

/*
 * Clocksource backed by the real-time clock (601-style RTC);
 * .mult is computed at runtime from the measured clock frequency.
 */
static cycle_t rtc_read(void);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.shift        = 22,
	.mult         = 0,	/* To be filled in */
	.read         = rtc_read,
};

/* Clocksource backed by the timebase register; .mult filled in at boot. */
static cycle_t timebase_read(void);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.shift        = 22,
	.mult         = 0,	/* To be filled in */
	.read         = timebase_read,
};

/* Largest value that can be programmed into the decrementer (31 bits). */
#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

/* One-shot clock event device driven by the decrementer interrupt. */
static struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.shift          = 16,
	.mult           = 0,	/* To be filled in */
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};

static DEFINE_PER_CPU(struct clock_event_device, decrementers);
void init_decrementer_clockevent(void);
/* Per-cpu timebase value at which the next decrementer event is due. */
static DEFINE_PER_CPU(u64, decrementer_next_tb);

#ifdef CONFIG_PPC_ISERIES
/* Titan/timebase snapshot pair used to recalibrate the TB on iSeries. */
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSereis compiles */
void __init clocksource_init(void);
#endif

/* An "xsec" is 1/2^20 of a second, the unit used by the legacy gtod path. */
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

/* Scale/shift pair for converting timebase ticks to nanoseconds. */
static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;	/* TB value at boot */

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
*/
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

/*
 * Recompute the cputime conversion factors above from tb_ticks_per_sec
 * using 128-bit division (div128_by_32); called whenever the timebase
 * frequency is (re)determined.
 */
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the purr
 */
static u64 read_spurr(u64 purr)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	return purr;
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 *
 * Runs with interrupts disabled for the duration; consumes the
 * PURR/SPURR deltas since the last snapshot stored in the paca and
 * feeds them to the generic system-time accounting.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, nowscaled, delta, deltascaled;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	nowscaled = read_spurr(now);
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;
	if (!in_interrupt()) {
		/* deltascaled includes both user and system time.
		 * Hence scale it based on the purr ratio to estimate
		 * the system time */
		if (get_paca()->user_time)
			deltascaled = deltascaled * get_paca()->system_time /
				(get_paca()->system_time + get_paca()->user_time);
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	/* Remember the deltas so account_process_tick() can estimate
	 * the SPURR/PURR ratio for scaling user time. */
	get_paca()->purrdelta = delta;
	account_system_time_scaled(tsk, deltascaled);
	get_paca()->spurrdelta = deltascaled;
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);

	/* Estimate the scaled utime by scaling the real utime based
	 * on the last spurr to purr ratio */
	/* NOTE(review): divides by purrdelta with no zero check -- this
	 * relies on account_system_vtime() having run (and set a nonzero
	 * purrdelta) before every tick; confirm that ordering holds. */
	utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
	get_paca()->spurrdelta = get_paca()->purrdelta = 0;
	account_user_time_scaled(tsk, utimescaled);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
	u64	spurr;			/* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

/*
 * Per-cpu snapshot of TB and PURR, run on each cpu via on_each_cpu()
 * from snapshot_timebases().  The wmb() orders the value stores before
 * the initialized flag so readers never see a half-filled entry.
 */
static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = get_tb_or_rtc();
	p->purr = mfspr(SPRN_PURR);
	wmb();
	p->initialized = 1;
	local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
*/
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Must be called with interrupts disabled.
 *
 * Stolen time is the portion of elapsed timebase ticks not matched by
 * PURR ticks since the last snapshot: (tb delta) - (purr delta).
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0)
		account_steal_time(current, stolen);
	/* Roll the snapshot forward for the next interval. */
	pme->tb = tb;
	pme->purr = purr;
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
/* Without virtual cpu accounting these are no-ops. */
#define calc_cputime_factors()
#define calculate_steal_time()			do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()				do { } while (0)
#endif

/*
*/