/*
 * time.c — excerpt (page 1 of 3 of a 1,200-line file) from the
 * Linux 2.6.17.4 kernel sources: common time routines for PowerPC.
 */
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
#include <asm/smp.h>	/* NOTE(review): duplicate of <asm/smp.h> included above; harmless (guarded) */

/* keep track of when we need to update the rtc */
time_t last_rtc_update;

/* NOTE(review): defined elsewhere — presumably an iSeries simulator flag; not visible in this excerpt */
extern int piranha_simulator;

#ifdef CONFIG_PPC_ISERIES
/* Recalibration snapshots: Titan clock vs. timebase (iSeries only) */
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
/* nonzero until the first settimeofday call has been handled */
static unsigned long first_settimeofday = 1;
#endif

/* The decrementer counts down by 128 every 128ns on a 601.
 */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
/* 64-bit: plain multiply/divide is cheap and exact enough */
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	(SHIFT_SCALE - 10)
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

/* scale/shift pair for converting timebase ticks to nanoseconds */
u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;

/*
 * Note that on ppc32 this only stores the bottom 32 bits of
 * the timebase value, but that's enough to tell when a jiffy
 * has passed.
 */
DEFINE_PER_CPU(unsigned long, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

/*
 * Recompute the cputime_t conversion factors from tb_ticks_per_sec.
 * Each factor is (units per second << 64) / tb_ticks_per_sec, i.e. a
 * 0.64 binary fraction taken from the low word of a 128/32 divide.
 */
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 * Charges the PURR (or TB) delta since the last snapshot in the
 * paca as system time; interrupt-context deltas are accumulated
 * in paca->system_time until we are back out of interrupt context.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;	/* restart the measurement interval */
	if (!in_interrupt()) {
		/* fold in time accumulated while we were in irq/softirq state */
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}

/*
 * Per-tick process bookkeeping: flush paca user time, run local
 * timers, RCU callbacks, the scheduler tick and POSIX CPU timers.
 * NOTE(review): presumably invoked from the timer interrupt path;
 * the caller is not visible in this excerpt.
 */
static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	scheduler_tick();
	run_posix_cpu_timers(current);
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;	/* thread is running */
	u64	tb0;		/* timebase at origin time */
	u64	purr0;		/* PURR at origin time */
	u64	tb;		/* last TB value read */
	u64	purr;		/* last PURR value read */
	u64	stolen;		/* stolen time so far */
	spinlock_t lock;
};

static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

/*
 * Record this thread's TB/PURR origin values.  Runs on each cpu via
 * on_each_cpu(); the wmb() orders the snapshot writes before the
 * initialized flag becomes visible to readers.
 */
static void snapshot_tb_and_purr(void *data)
{
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	p->tb0 = mftb();
	p->purr0 = mfspr(SPRN_PURR);
	p->tb = p->tb0;
	p->purr = 0;
	wmb();
	p->initialized = 1;
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Compute and account time stolen from this partition since the last
 * call, as (elapsed TB) - (elapsed PURR).  The cpu&~1 / cpu^1
 * arithmetic pairs this thread with its sibling — this assumes two
 * hardware threads per core sharing one PURR budget; the pair's lock
 * and stolen count live in the even-numbered thread's slot (p0).
 */
void calculate_steal_time(void)
{
	u64 tb, purr, t0;
	s64 stolen;
	struct cpu_purr_data *p0, *pme, *phim;
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	if (!pme->initialized)
		return;	/* this can happen in early boot */
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);	/* primary thread of the pair */
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);	/* sibling thread */
	spin_lock(&p0->lock);
	tb = mftb();
	purr = mfspr(SPRN_PURR) - pme->purr0;
	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
		/* no live sibling: stolen = our TB delta minus our PURR delta */
		stolen = (tb - pme->tb) - (purr - pme->purr);
	} else {
		/* sibling live: use the earlier of the two origin times */
		t0 = pme->tb0;
		if (phim->tb0 < t0)
			t0 = phim->tb0;
		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
	}
	if (stolen > 0) {
		account_steal_time(current, stolen);
		p0->stolen += stolen;	/* running total for the thread pair */
	}
	pme->tb = tb;
	pme->purr = purr;
	spin_unlock(&p0->lock);
}

/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	int cpu;
	u64 purr;
	struct cpu_purr_data *p0, *pme, *phim;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);		/* primary thread of the pair */
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);	/* sibling thread */
	spin_lock_irqsave(&p0->lock, flags);
	pme->tb = pme->tb0 = mftb();
	purr = mfspr(SPRN_PURR);
	if (!phim->initialized) {
		pme->purr = 0;
		pme->purr0 = purr;
	} else {
		/* set p->purr and p->purr0 for no change in p0->stolen */
		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
		pme->purr0 = purr - pme->purr;
	}
	pme->initialized = 1;
	spin_unlock_irqrestore(&p0->lock, flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
/* Without virtual cpu accounting, fall back to the generic tick path. */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb();
	snapshot_purr();
}

/*
 * Busy-wait for at least 'loops' ticks of the timebase (or the 601's
 * RTCL register on __USE_RTC() systems, handling its wrap at 10^9).
 * HMT_low()/HMT_medium() drop and restore SMT priority while spinning.
 */
void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

/* Busy-wait for approximately 'usecs' microseconds. */
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

static __inline__ void timer_check_rtc(void)
{
	/*
	 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second ?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls.
	 * Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts. This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 *
	 * [excerpt truncated here — the rest of timer_check_rtc() and the
	 *  remainder of the 1,200-line file are on the following pages;
	 *  code-viewer UI text removed]