time.c

来自「LINUX 2.6.17.4的源码」· C语言 代码 · 共 1,200 行 · 第 1/3 页

C
1,200
字号
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
	/* NOTE(review): this chunk begins mid-way through do_settimeofday();
	 * the function's opening lines are outside the visible region. */
#ifdef CONFIG_PPC_ISERIES
	/* Recalibrate the iSeries timebase the first time the clock is set. */
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 * Note that since we don't increment jiffies_64 anywhere other
	 * than in do_timer (since we don't have a lost tick problem),
	 * wall_jiffies will always be the same as jiffies,
	 * and therefore the (jiffies - wall_jiffies) computation
	 * has been removed.
	 */
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	/* Keep xtime + wall_to_monotonic invariant across the jump. */
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

 	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the 
	 * clock to be updated as soon as the PLL is again in lock.
	 * NOTE(review): 658 is just under the ~11-minute (660 s) RTC sync
	 * interval, so the next RTC write becomes due almost immediately
	 * — presumably intentional; confirm against the RTC-update check
	 * in the timer interrupt path.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	/* Convert the new wall time to "xsec" units (2^-20 s fixed point)
	 * for the gettimeofday fast path. */
	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	/* Publish the timezone to the vDSO so userspace sees it too. */
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/*
 * Read the timebase ("decrementer") and processor clock frequencies
 * from the device tree "cpu" node, falling back to hardcoded defaults
 * (with a warning) when the properties are absent.  On BookE parts,
 * additionally zero the time base, clear pending timer interrupts and
 * enable the decrementer interrupt.
 */
void __init generic_calibrate_decr(void)
{
	struct device_node *cpu;
	unsigned int *fp;
	int node_found;

	/*
	 * The cpu node should have a timebase-frequency property
	 * to tell us the rate at which the decrementer counts.
	 */
	cpu = of_find_node_by_type(NULL, "cpu");

	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
	node_found = 0;
	if (cpu) {
		fp = (unsigned int *)get_property(cpu, "timebase-frequency",
						  NULL);
		if (fp) {
			node_found = 1;
			ppc_tb_freq = *fp;
		}
	}
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");

	ppc_proc_freq = DEFAULT_PROC_FREQ;
	node_found = 0;
	if (cpu) {
		fp = (unsigned int *)get_property(cpu, "clock-frequency",
						  NULL);
		if (fp) {
			node_found = 1;
			ppc_proc_freq = *fp;
		}
	}
#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");

	of_node_put(cpu);
}

/*
 * Return the boot time as seconds since the epoch.  Prefers the
 * platform's get_boot_time hook; falls back to reading the RTC and
 * converting with mktime().  Returns 0 if neither hook is available.
 */
unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	/* rtc_time stores years since 1900 and 0-based months. */
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor.
 * Calibrates the timebase, derives all tick/conversion constants,
 * initializes xtime from the boot time and seeds the gettimeofday
 * and vDSO state under xtime_lock.
 */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

        if (ppc_md.time_init != NULL)
                timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_stamp = get_rtcl();
		tb_last_jiffy = tb_last_stamp;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_INFO "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_stamp = tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

#ifdef CONFIG_PPC_ISERIES
	if (!piranha_simulator)
#endif
		tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
        }

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_stamp;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	/* Mirror the gettimeofday state into the vDSO data page. */
	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
	                        -xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}

#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

/* Days per month (February entry is temporarily patched to 29 by
 * to_tm() for leap years — NOTE(review): mutable shared state, so
 * to_tm() is not reentrant; presumably only called from single-threaded
 * boot/RTC paths — confirm at call sites. */
static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 *
 * Computes tm->tm_wday (day of week) from tm_year (full year),
 * tm_mon (1-based month, as used by to_tm below) and tm_mday.
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	/* Cumulative days at the start of each (1-based) month,
	 * non-leap year. */
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	/* Add one day if we are past February of a leap year. */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}

/* Convert seconds since STARTOFTIME (1970) into a broken-down
 * struct rtc_time.  Fills tm_hour/min/sec, tm_year (full year),
 * tm_mon (1-based), tm_mday (1-based) and tm_wday (via GregorianDay).
 * Temporarily patches month_days for leap-year February — see note
 * on month_days above. */
void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 the of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
/* Returns the 32-bit multiplier m such that mulhwu(inscale, m)
 * best approximates outscale, found by binary search over the bits
 * of m from most to least significant. */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt=0, tmp, err;

        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */
        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */
        err = inscale * (mlt+1);
        if (err <= inscale/2)
		mlt++;
        return mlt;
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 *
 * Classic schoolbook long division on 32-bit limbs: each step
 * divides (remainder << 32 | next limb) by the divisor via do_div,
 * which returns the remainder and leaves the quotient in its
 * first argument.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	/* Split the 128-bit dividend into four 32-bit limbs a:b:c:d. */
	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	/* Reassemble the quotient limbs w:x:y:z into the 128-bit result. */
	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?