time.c
linux-2.6.15.6
C
		if (i != boot_cpuid) {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}
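/*
 * A minimal userspace sketch of the 64.64 fixed-point conversion that
 * sched_clock() performs, assuming the GCC/clang unsigned __int128
 * extension.  emulated_mulhdu() stands in for the mulhdu primitive
 * described in the comment above; sketch_ticks_to_ns() then mirrors
 * mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift.  The names are
 * illustrative only.
 */
#include <stdint.h>

static uint64_t emulated_mulhdu(uint64_t a, uint64_t b)
{
	/* High 64 bits of the 128-bit product, i.e. (a * b) >> 64 */
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

static uint64_t sketch_ticks_to_ns(uint64_t ticks, uint64_t scale, unsigned shift)
{
	/*
	 * scale is (10^9 << 64) / timebase_frequency, pre-shifted right by
	 * 'shift' so it sits below 1.0 in 64.64 fixed point (see the scale
	 * factor computation in time_init() below).
	 */
	return emulated_mulhdu(ticks, scale) << shift;
}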
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	long int tb_delta;
	u64 new_xsec, tb_delta_xs;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
	tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = 0;
	if (new_nsec != 0) {
		new_xsec = (u64)new_nsec * XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
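/*
 * A small standalone sketch of the "xsec" unit used above, assuming
 * XSEC_PER_SEC is 2^20.  That assumption is consistent with the
 * "1/2^20 second units" mentioned in the ppc_adjtimex() comment below
 * and with the div128_by_32(1024*1024, ...) call that derives tb_to_xs
 * in time_init().  sketch_timespec_to_xsec() mirrors the new_xsec
 * computation in do_settimeofday() before tb_delta_xs is subtracted;
 * the SKETCH_ names are illustrative, not kernel symbols.
 */
#include <stdint.h>

#define SKETCH_XSEC_PER_SEC	(1024 * 1024)		/* 2^20 xsec per second */
#define SKETCH_NSEC_PER_SEC	1000000000ULL

static uint64_t sketch_timespec_to_xsec(uint64_t sec, uint64_t nsec)
{
	/* nsec * 2^20 stays well below 2^64 (it is under 2^50) */
	uint64_t xsec = nsec * SKETCH_XSEC_PER_SEC / SKETCH_NSEC_PER_SEC;

	return xsec + sec * SKETCH_XSEC_PER_SEC;
}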
void __init generic_calibrate_decr(void)
{
	struct device_node *cpu;
	unsigned int *fp;
	int node_found;

	/*
	 * The cpu node should have a timebase-frequency property
	 * to tell us the rate at which the decrementer counts.
	 */
	cpu = of_find_node_by_type(NULL, "cpu");

	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
	node_found = 0;
	if (cpu != 0) {
		fp = (unsigned int *)get_property(cpu, "timebase-frequency",
						  NULL);
		if (fp != 0) {
			node_found = 1;
			ppc_tb_freq = *fp;
		}
	}
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");

	ppc_proc_freq = DEFAULT_PROC_FREQ;
	node_found = 0;
	if (cpu != 0) {
		fp = (unsigned int *)get_property(cpu, "clock-frequency",
						  NULL);
		if (fp != 0) {
			node_found = 1;
			ppc_proc_freq = *fp;
		}
	}
#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");

	of_node_put(cpu);
}

unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_stamp = get_rtcl();
		tb_last_jiffy = tb_last_stamp;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_INFO "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_stamp = tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
	tb_to_xs = res.result_low;

#ifdef CONFIG_PPC64
	get_paca()->default_decr = tb_ticks_per_jiffy;
#endif

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

#ifdef CONFIG_PPC_ISERIES
	if (!piranha_simulator)
#endif
		tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_stamp;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		xtime.tv_sec -= timezone_offset;
	}

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
	                        -xtime.tv_sec, -xtime.tv_nsec);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}
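/*
 * A userspace sketch of the tb_to_ns_scale/tb_to_ns_shift derivation done
 * in time_init() above, using the GCC/clang unsigned __int128 extension in
 * place of div128_by_32().  The 33 MHz timebase frequency is just an
 * example value for the demonstration, not something the kernel assumes.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_freq = 33333333;		/* example timebase, ~33.3 MHz */
	/* 10^9 * 2^64 / tb_freq, a 64.64 fixed-point ns-per-tick value */
	unsigned __int128 full = ((unsigned __int128)1000000000u << 64) / tb_freq;
	unsigned shift = 0;
	uint64_t scale, ns;

	/* Shift right until the value drops below 1.0 (high word is zero) */
	while ((uint64_t)(full >> 64) != 0) {
		full >>= 1;
		shift++;
	}
	scale = (uint64_t)full;

	/* One second worth of ticks should convert to roughly 10^9 ns */
	ns = (uint64_t)(((unsigned __int128)tb_freq * scale) >> 64) << shift;
	printf("scale=%llu shift=%u 1s->%lluns\n",
	       (unsigned long long)scale, shift, (unsigned long long)ns);
	return 0;
}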
/*
 * After adjtimex is called, adjust the conversion of tb ticks
 * to microseconds to keep do_gettimeofday synchronized
 * with ntpd.
 *
 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
 * adjust the frequency.
 */

/* #define DEBUG_PPC_ADJTIMEX 1 */

void ppc_adjtimex(void)
{
#ifdef CONFIG_PPC64
	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
		new_tb_to_xs, new_xsec, new_stamp_xsec;
	unsigned long tb_ticks_per_sec_delta;
	long delta_freq, ltemp;
	struct div_result divres;
	unsigned long flags;
	long singleshot_ppm = 0;

	/*
	 * Compute parts per million frequency adjustment to
	 * accomplish the time adjustment implied by time_offset to be
	 * applied over the elapsed time indicated by time_constant.
	 * Use SHIFT_USEC to get it into the same units as
	 * time_freq.
	 */
	if ( time_offset < 0 ) {
		ltemp = -time_offset;
		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
		ltemp >>= SHIFT_KG + time_constant;
		ltemp = -ltemp;
	} else {
		ltemp = time_offset;
		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
		ltemp >>= SHIFT_KG + time_constant;
	}

	/* If there is a single shot time adjustment in progress */
	if ( time_adjust ) {
#ifdef DEBUG_PPC_ADJTIMEX
		printk("ppc_adjtimex: ");
		if ( adjusting_time == 0 )
			printk("starting ");
		printk("single shot time_adjust = %ld\n", time_adjust);
#endif
		adjusting_time = 1;

		/*
		 * Compute parts per million frequency adjustment
		 * to match time_adjust
		 */
		singleshot_ppm = tickadj * HZ;
		/*
		 * The adjustment should be tickadj*HZ to match the code in
		 * linux/kernel/timer.c, but experiments show that this is too
		 * large.  3/4 of tickadj*HZ seems about right
		 */
		singleshot_ppm -= singleshot_ppm / 4;
		/* Use SHIFT_USEC to get it into the same units as time_freq */
		singleshot_ppm <<= SHIFT_USEC;
		if ( time_adjust < 0 )
			singleshot_ppm = -singleshot_ppm;
	}
	else {
#ifdef DEBUG_PPC_ADJTIMEX
		if ( adjusting_time )
			printk("ppc_adjtimex: ending single shot time_adjust\n");
#endif
		adjusting_time = 0;
	}

	/* Add up all of the frequency adjustments */
	delta_freq = time_freq + ltemp + singleshot_ppm;

	/*
	 * Compute a new value for tb_ticks_per_sec based on
	 * the frequency adjustment
	 */
	den = 1000000 * (1 << (SHIFT_USEC - 8));
	if ( delta_freq < 0 ) {
		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
	}
	else {
		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
	}

#ifdef DEBUG_PPC_ADJTIMEX
	printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
	printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld  new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
#endif

	/*
	 * Compute a new value of tb_to_xs (used to convert tb to
	 * microseconds) and a new value of stamp_xsec which is the
	 * time (in 1/2^20 second units) corresponding to
	 * tb_orig_stamp.  This new value of stamp_xsec compensates
	 * for the change in frequency (implied by the new tb_to_xs)
	 * which guarantees that the current time remains the same.
	 */
	write_seqlock_irqsave( &xtime_lock, flags );
	tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
	div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
	new_tb_to_xs = divres.result_low;
	new_xsec = mulhdu(tb_ticks, new_tb_to_xs);

	old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
	new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;

	update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);

	write_sequnlock_irqrestore( &xtime_lock, flags );
#endif /* CONFIG_PPC64 */
}
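/*
 * Why the new_stamp_xsec value above keeps the reported time continuous:
 * the gettimeofday path that consumes these values works, in outline, as
 *
 *	now_xsec = stamp_xsec + mulhdu(tb - tb_orig_stamp, tb_to_xs)
 *
 * Requiring the same now_xsec immediately before and after tb_to_xs is
 * replaced,
 *
 *	stamp_xsec + old_xsec == new_stamp_xsec + new_xsec,
 *
 * gives new_stamp_xsec = stamp_xsec + old_xsec - new_xsec, which is
 * exactly the assignment made in ppc_adjtimex().
 */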
#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */

	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */

	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
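/*
 * A userspace reference for what div128_by_32() computes, assuming the
 * GCC/clang unsigned __int128 extension: the full 128-bit quotient of
 * (dividend_high:dividend_low) / divisor, returned as two 64-bit halves.
 * The kernel routine above reaches the same result with one native
 * division plus three do_div() steps, i.e. schoolbook long division in
 * base 2^32.  The _reference name is illustrative only.
 */
#include <stdint.h>

static void div128_by_32_reference(uint64_t dividend_high, uint64_t dividend_low,
				   uint32_t divisor,
				   uint64_t *result_high, uint64_t *result_low)
{
	unsigned __int128 dividend =
		((unsigned __int128)dividend_high << 64) | dividend_low;
	unsigned __int128 quotient = dividend / divisor;

	*result_high = (uint64_t)(quotient >> 64);
	*result_low  = (uint64_t)quotient;
}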
