time.c

From the Linux 2.6.17.4 kernel source · C code · 1,200 lines total · page 1 of 3

C
1,200
字号
         * We should have an rtc call that only sets the minutes and         * seconds like on Intel to avoid problems with non UTC clocks.         */        if (ppc_md.set_rtc_time && ntp_synced() &&	    xtime.tv_sec - last_rtc_update >= 659 &&	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {		struct rtc_time tm;		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);		tm.tm_year -= 1900;		tm.tm_mon -= 1;		if (ppc_md.set_rtc_time(&tm) == 0)			last_rtc_update = xtime.tv_sec + 1;		else			/* Try again one minute later */			last_rtc_update += 60;        }}/* * This version of gettimeofday has microsecond resolution. */static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val){	unsigned long sec, usec;	u64 tb_ticks, xsec;	struct gettimeofday_vars *temp_varp;	u64 temp_tb_to_xs, temp_stamp_xsec;	/*	 * These calculations are faster (gets rid of divides)	 * if done in units of 1/2^20 rather than microseconds.	 * The conversion to microseconds at the end is done	 * without a divide (and in fact, without a multiply)	 */	temp_varp = do_gtod.varp;	tb_ticks = tb_val - temp_varp->tb_orig_stamp;	temp_tb_to_xs = temp_varp->tb_to_xs;	temp_stamp_xsec = temp_varp->stamp_xsec;	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);	sec = xsec / XSEC_PER_SEC;	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);	usec = SCALE_XSEC(usec, 1000000);	tv->tv_sec = sec;	tv->tv_usec = usec;}void do_gettimeofday(struct timeval *tv){	if (__USE_RTC()) {		/* do this the old way */		unsigned long flags, seq;		unsigned int sec, nsec, usec;		do {			seq = read_seqbegin_irqsave(&xtime_lock, flags);			sec = xtime.tv_sec;			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));		usec = nsec / 1000;		while (usec >= 1000000) {			usec -= 1000000;			++sec;		}		tv->tv_sec = sec;		tv->tv_usec = usec;		return;	}	__do_gettimeofday(tv, get_tb());}EXPORT_SYMBOL(do_gettimeofday);/* * There are two copies of tb_to_xs and stamp_xsec so 
 that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	/* Fill in the copy that is NOT currently published. */
	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	/* Make the new values visible before switching varp over to
	 * them, so lock-free readers never see a half-written copy. */
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	/* Order the vdso_data stores before the closing (even) count. */
	smp_wmb();
	++(vdso_data->tb_update_count);
}

/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number. This is a requirement of our fast 32 bits userland
 * implementation in the vdso. If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) 
 and we end up
 * with a too big difference, then the vdso will fallback to calling
 * the syscall
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	/* Nothing to do while the tick length is unchanged and the
	 * offset still fits in 31 bits (vdso fast-path requirement). */
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	/* Recompute stamp_xsec from the current xtime. */
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	/* First (odd) increment of the vdso update count: userspace
	 * gettimeofday spins until update_gtod() makes it even again. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}

#ifdef CONFIG_SMP
/*
 * Return a PC suitable for profiling: if we interrupted a lock
 * routine, attribute the sample to the caller (link register).
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;
	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/* 
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.   
 */
static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	/* Skip the very first call: we need a previous (tb, titan)
	 * sample pair before we can measure a rate. */
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';		
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			/* Accept only a change below 1/25 (= 4 percent) of the
			 * current rate; larger differences are rejected as bogus. */
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	/* Remember this sample pair for the next recalibration. */
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
#endif

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. 
 (almost always the new decrementer value will
 * be greater than the current hardware decementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	irq_enter();

	profile_tick(CPU_PROFILING, regs);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	/* Account one loop iteration per elapsed jiffy; more than one
	 * iteration means we lost/delayed decrementer interrupts. */
	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		/* Only the boot cpu advances the global jiffies/xtime state. */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_last_jiffy += tb_ticks_per_jiffy;
		tb_last_stamp = per_cpu(last_jiffy, cpu);
		do_timer(regs);
		timer_recalc_offset(tb_last_jiffy);
		timer_check_rtc();
		write_sequnlock(&xtime_lock);
	}
	
	/* Program the decrementer for the remainder of the current jiffy. */
	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (hvlpevent_is_pending())
		process_hvlpevents(regs);
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
}

/*
 * Re-arm the decrementer after a sleep/wake transition.
 */
void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		/* Already past the jiffy boundary: fire (almost) immediately. */
		ticks = 1;
	set_dec(ticks);
}

#ifdef CONFIG_SMP
/*
 * Stagger the per-cpu last_jiffy stamps so that the cpus' timer
 * interrupts are spread across the jiffy instead of all firing at once.
 */
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long half = tb_ticks_per_jiffy / 2;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;
	/*
	 * The stolen time calculation for POWER5 shared-processor LPAR
	 * systems works better if the two threads' timebase interrupts
	 * are staggered by half a jiffy with respect to each other.
	 */
	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		if (i == (boot_cpuid ^ 1))
			/* Sibling thread of the boot cpu: half a jiffy earlier. */
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, boot_cpuid) - half;
		else if (i & 1)
			/* Odd cpus trail their even sibling by half a jiffy. */
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, i ^ 1) + half;
		else {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. 
 (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	/* Scale the raw timebase into nanoseconds via the precomputed
	 * tb_to_ns_scale / tb_to_ns_shift factors. */
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}

/*
 * Set the wall-clock time.  Rejects out-of-range nanoseconds,
 * then updates xtime under the xtime seqlock with interrupts off.
 * (Definition continues past the end of this page of the source.)
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?