/*
 * time.c
 *
 * NOTE(review): recovered from a web code-viewer page ("low-level driver
 * development" collection, C source, 882 lines, page 1 of 2). Viewer UI
 * chrome has been removed; original line breaks were lost in extraction
 * and have been reconstructed below.
 */
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
*/#include <linux/config.h>#include <linux/errno.h>#include <linux/module.h>#include <linux/sched.h>#include <linux/kernel.h>#include <linux/param.h>#include <linux/string.h>#include <linux/mm.h>#include <linux/interrupt.h>#include <linux/timex.h>#include <linux/kernel_stat.h>#include <linux/mc146818rtc.h>#include <linux/time.h>#include <linux/init.h>#include <linux/profile.h>#include <linux/cpu.h>#include <linux/security.h>#include <asm/io.h>#include <asm/processor.h>#include <asm/nvram.h>#include <asm/cache.h>#include <asm/machdep.h>#ifdef CONFIG_PPC_ISERIES#include <asm/iSeries/ItLpQueue.h>#include <asm/iSeries/HvCallXm.h>#endif#include <asm/uaccess.h>#include <asm/time.h>#include <asm/ppcdebug.h>#include <asm/prom.h>#include <asm/sections.h>#include <asm/systemcfg.h>#include <asm/firmware.h>u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;EXPORT_SYMBOL(jiffies_64);/* keep track of when we need to update the rtc */time_t last_rtc_update;extern int piranha_simulator;#ifdef CONFIG_PPC_ISERIESunsigned long iSeries_recal_titan = 0;unsigned long iSeries_recal_tb = 0; static unsigned long first_settimeofday = 1;#endif#define XSEC_PER_SEC (1024*1024)unsigned long tb_ticks_per_jiffy;unsigned long tb_ticks_per_usec = 100; /* sane default */EXPORT_SYMBOL(tb_ticks_per_usec);unsigned long tb_ticks_per_sec;unsigned long tb_to_xs;unsigned      tb_to_us;unsigned long processor_freq;DEFINE_SPINLOCK(rtc_lock);EXPORT_SYMBOL_GPL(rtc_lock);unsigned long tb_to_ns_scale;unsigned long tb_to_ns_shift;struct gettimeofday_struct do_gtod;extern unsigned long wall_jiffies;extern int smp_tb_synchronized;extern struct timezone sys_tz;void ppc_adjtimex(void);static unsigned adjusting_time = 0;unsigned long ppc_proc_freq;unsigned long ppc_tb_freq;static __inline__ void timer_check_rtc(void){        /*         * update the rtc when needed, this should be performed on the         * right fraction of a second. Half or full second ?         
* Full second works on mk48t59 clocks, others need testing.         * Note that this update is basically only used through          * the adjtimex system calls. Setting the HW clock in         * any other way is a /dev/rtc and userland business.         * This is still wrong by -0.5/+1.5 jiffies because of the         * timer interrupt resolution and possible delay, but here we          * hit a quantization limit which can only be solved by higher         * resolution timers and decoupling time management from timer         * interrupts. This is also wrong on the clocks         * which require being written at the half second boundary.         * We should have an rtc call that only sets the minutes and         * seconds like on Intel to avoid problems with non UTC clocks.         */        if (ntp_synced() &&             xtime.tv_sec - last_rtc_update >= 659 &&             abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&             jiffies - wall_jiffies == 1) {	    struct rtc_time tm;	    to_tm(xtime.tv_sec+1, &tm);	    tm.tm_year -= 1900;	    tm.tm_mon -= 1;            if (ppc_md.set_rtc_time(&tm) == 0)                last_rtc_update = xtime.tv_sec+1;            else                /* Try again one minute later */                last_rtc_update += 60;        }}/* * This version of gettimeofday has microsecond resolution. */static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val){	unsigned long sec, usec, tb_ticks;	unsigned long xsec, tb_xsec;	struct gettimeofday_vars * temp_varp;	unsigned long temp_tb_to_xs, temp_stamp_xsec;	/*	 * These calculations are faster (gets rid of divides)	 * if done in units of 1/2^20 rather than microseconds.	 
* The conversion to microseconds at the end is done	 * without a divide (and in fact, without a multiply)	 */	temp_varp = do_gtod.varp;	tb_ticks = tb_val - temp_varp->tb_orig_stamp;	temp_tb_to_xs = temp_varp->tb_to_xs;	temp_stamp_xsec = temp_varp->stamp_xsec;	tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs );	xsec = temp_stamp_xsec + tb_xsec;	sec = xsec / XSEC_PER_SEC;	xsec -= sec * XSEC_PER_SEC;	usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC;	tv->tv_sec = sec;	tv->tv_usec = usec;}void do_gettimeofday(struct timeval *tv){	__do_gettimeofday(tv, get_tb());}EXPORT_SYMBOL(do_gettimeofday);/* Synchronize xtime with do_gettimeofday */ static inline void timer_sync_xtime(unsigned long cur_tb){	struct timeval my_tv;	__do_gettimeofday(&my_tv, cur_tb);	if (xtime.tv_sec <= my_tv.tv_sec) {		xtime.tv_sec = my_tv.tv_sec;		xtime.tv_nsec = my_tv.tv_usec * 1000;	}}/* * When the timebase - tb_orig_stamp gets too big, we do a manipulation * between tb_orig_stamp and stamp_xsec. The goal here is to keep the * difference tb - tb_orig_stamp small enough to always fit inside a * 32 bits number. This is a requirement of our fast 32 bits userland * implementation in the vdso. If we "miss" a call to this function * (interrupt latency, CPU locked in a spinlock, ...) 
and we end up * with a too big difference, then the vdso will fallback to calling * the syscall */static __inline__ void timer_recalc_offset(unsigned long cur_tb){	struct gettimeofday_vars * temp_varp;	unsigned temp_idx;	unsigned long offset, new_stamp_xsec, new_tb_orig_stamp;	if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)		return;	temp_idx = (do_gtod.var_idx == 0);	temp_varp = &do_gtod.vars[temp_idx];	new_tb_orig_stamp = cur_tb;	offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp;	new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);	temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;	temp_varp->tb_orig_stamp = new_tb_orig_stamp;	temp_varp->stamp_xsec = new_stamp_xsec;	smp_mb();	do_gtod.varp = temp_varp;	do_gtod.var_idx = temp_idx;	++(systemcfg->tb_update_count);	smp_wmb();	systemcfg->tb_orig_stamp = new_tb_orig_stamp;	systemcfg->stamp_xsec = new_stamp_xsec;	smp_wmb();	++(systemcfg->tb_update_count);}#ifdef CONFIG_SMPunsigned long profile_pc(struct pt_regs *regs){	unsigned long pc = instruction_pointer(regs);	if (in_lock_functions(pc))		return regs->link;	return pc;}EXPORT_SYMBOL(profile_pc);#endif#ifdef CONFIG_PPC_ISERIES/*  * This function recalibrates the timebase based on the 49-bit time-of-day * value in the Titan chip.  The Titan is much more accurate than the value * returned by the service processor for the timebase frequency.   
*/static void iSeries_tb_recal(void){	struct div_result divres;	unsigned long titan, tb;	tb = get_tb();	titan = HvCallXm_loadTod();	if ( iSeries_recal_titan ) {		unsigned long tb_ticks = tb - iSeries_recal_tb;		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;		char sign = '+';				/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;		if ( tick_diff < 0 ) {			tick_diff = -tick_diff;			sign = '-';		}		if ( tick_diff ) {			if ( tick_diff < tb_ticks_per_jiffy/25 ) {				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",						new_tb_ticks_per_jiffy, sign, tick_diff );				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;				tb_ticks_per_sec   = new_tb_ticks_per_sec;				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;				tb_to_xs = divres.result_low;				do_gtod.varp->tb_to_xs = tb_to_xs;				systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;				systemcfg->tb_to_xs = tb_to_xs;			}			else {				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"					"                   new tb_ticks_per_jiffy = %lu\n"					"                   old tb_ticks_per_jiffy = %lu\n",					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );			}		}	}	iSeries_recal_titan = titan;	iSeries_recal_tb = tb;}#endif/* * For iSeries shared processors, we have to let the hypervisor * set the hardware decrementer.  We set a virtual decrementer * in the lppaca and call the hypervisor if the virtual * decrementer is less than the current value in the hardware * decrementer. 
(almost always the new decrementer value will * be greater than the current hardware decementer so the hypervisor * call will not be needed) */unsigned long tb_last_stamp __cacheline_aligned_in_smp;/* * timer_interrupt - gets called when the decrementer overflows, * with interrupts disabled. */int timer_interrupt(struct pt_regs * regs){	int next_dec;	unsigned long cur_tb;	struct paca_struct *lpaca = get_paca();	unsigned long cpu = smp_processor_id();	irq_enter();	profile_tick(CPU_PROFILING, regs);	lpaca->lppaca.int_dword.fields.decr_int = 0;	while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {		/*		 * We cannot disable the decrementer, so in the period		 * between this cpu's being marked offline in cpu_online_map		 * and calling stop-self, it is taking timer interrupts.		 * Avoid calling into the scheduler rebalancing code if this		 * is the case.		 */		if (!cpu_is_offline(cpu))			update_process_times(user_mode(regs));		/*		 * No need to check whether cpu is offline here; boot_cpuid		 * should have been fixed up by now.		 */		if (cpu == boot_cpuid) {			write_seqlock(&xtime_lock);			tb_last_stamp = lpaca->next_jiffy_update_tb;			timer_recalc_offset(lpaca->next_jiffy_update_tb);			do_timer(regs);			timer_sync_xtime(lpaca->next_jiffy_update_tb);			timer_check_rtc();			write_sequnlock(&xtime_lock);			if ( adjusting_time && (time_adjust == 0) )				ppc_adjtimex();		}		lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;	}		next_dec = lpaca->next_jiffy_update_tb - cur_tb;	if (next_dec > lpaca->default_decr)        	next_dec = lpaca->default_decr;	set_dec(next_dec);#ifdef CONFIG_PPC_ISERIES	if (hvlpevent_is_pending())		process_hvlpevents(regs);#endif	/* collect purr register values often, for accurate calculations */	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);		cu->current_tb = mfspr(SPRN_PURR);	}	irq_exit();	return 1;}/* * Scheduler clock - returns current time in nanosec units. 
* * Note: mulhdu(a, b) (multiply high double unsigned) returns * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b * are 64-bit unsigned numbers. */unsigned long long sched_clock(void){	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;}int do_settimeofday(struct timespec *tv){	time_t wtm_sec, new_sec = tv->tv_sec;	long wtm_nsec, new_nsec = tv->tv_nsec;	unsigned long flags;	unsigned long delta_xsec;	long int tb_delta;	unsigned long new_xsec;	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)		return -EINVAL;	write_seqlock_irqsave(&xtime_lock, flags);	/* Updating the RTC is not the job of this code. If the time is	 * stepped under NTP, the RTC will be update after STA_UNSYNC	 * is cleared. Tool like clock/hwclock either copy the RTC	 * to the system time, in which case there is no point in writing	 * to the RTC again, or write to the RTC but then they don't call	 * settimeofday to perform this operation.	 */#ifdef CONFIG_PPC_ISERIES	if ( first_settimeofday ) {		iSeries_tb_recal();		first_settimeofday = 0;	}#endif	tb_delta = tb_ticks_since(tb_last_stamp);	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;	new_nsec -= tb_delta / tb_ticks_per_usec / 1000;	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); 	set_normalized_timespec(&xtime, new_sec, new_nsec);	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);	/* In case of a large backwards jump in time with NTP, we want the 	 * clock to be updated as soon as the PLL is again in lock.	 */	last_rtc_update = new_sec - 658;	ntp_clear();	delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),			     do_gtod.varp->tb_to_xs );

/* (End of page 1 of 2. The code-viewer's shortcut-key help that appeared
 * here — copy, search, full screen, font-size keys — has been removed;
 * the remainder of this 882-line file is on page 2 of the viewer.) */