⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 timer_tsc.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 2 页
字号:
	/*
	 * NOTE(review): tail of a function whose start lies outside this
	 * chunk — presumably time_cpufreq_notifier(), given the
	 * .notifier_call assignment just below; confirm against full file.
	 */
	return 0;
}

/* Hook into cpufreq frequency-transition notifications so TSC-derived
 * time scaling can track CPU frequency changes. */
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

/*
 * Boot-time setup: prepare the deferred cpufreq-query work item and
 * register the transition notifier above.  cpufreq_init is set only if
 * registration succeeded.  Returns cpufreq_register_notifier()'s result.
 */
static int __init cpufreq_tsc(void)
{
	int ret;

	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (!ret)
		cpufreq_init = 1;
	return ret;
}
core_initcall(cpufreq_tsc);

#else /* CONFIG_CPU_FREQ */
/* No cpufreq support: nothing to re-query, so this is a no-op stub. */
static inline void cpufreq_delayed_get(void) { return; }
#endif

/*
 * Re-measure cpu_khz and rescale the boot CPU's loops_per_jiffy to
 * match, so udelay() stays calibrated (UP kernels only).
 *
 * Returns 0 on success, -ENODEV when there is no TSC or on SMP kernels
 * (per-CPU recalibration is not handled here).
 */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned int cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		init_cpu_khz();
		cpu_data[0].loops_per_jiffy =
		    cpufreq_scale(cpu_data[0].loops_per_jiffy,
			          cpu_khz_old,
				  cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

/*
 * Timer-interrupt hook: snapshot the TSC and the PIT countdown, work out
 * how late the interrupt was serviced (delay_at_last_interrupt), detect
 * and compensate for lost ticks, and advance the monotonic clock base.
 */
static void mark_offset_tsc(void)
{
	unsigned long lost,delay;
	unsigned long delta = last_tsc_low;
	int count;
	int countmp;
	static int count1 = 0;		/* previous PIT readings, kept for */
	unsigned long long this_offset, last_offset;
	static int lost_count = 0;	/* consecutive lost-tick intervals */

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	/*
	 * It is important that these two operations happen almost at
	 * the same time. We do the RDTSC stuff first, since it's
	 * faster. To avoid any inconsistencies, we need interrupts
	 * disabled locally.
	 */

	/*
	 * Interrupts are just disabled locally since the timer irq
	 * has the SA_INTERRUPT flag set. -arca
	 */

	/* read Pentium cycle counter */
	rdtsc(last_tsc_low, last_tsc_high);

	spin_lock(&i8253_lock);
	outb_p(0x00, PIT_MODE);     /* latch the count ASAP */

	count = inb_p(PIT_CH0);    /* read the latched count */
	count |= inb(PIT_CH0) << 8;

	/*
	 * VIA686a test code... reset the latch if count > max + 1
	 * from timer_pit.c - cjb
	 */
	if (count > LATCH) {
		outb_p(0x34, PIT_MODE);
		outb_p(LATCH & 0xff, PIT_CH0);
		outb(LATCH >> 8, PIT_CH0);
		count = LATCH - 1;
	}

	spin_unlock(&i8253_lock);

	if (pit_latch_buggy) {
		/* get center value of last 3 time latch */
		if ((count2 >= count && count >= count1)
		    || (count1 >= count && count >= count2)) {
			count2 = count1; count1 = count;
		} else if ((count1 >= count2 && count2 >= count)
			   || (count >= count2 && count2 >= count1)) {
			countmp = count;count = count2;
			count2 = count1;count1 = countmp;
		} else {
			count2 = count1; count1 = count; count = count1;
		}
	}

	/* lost tick compensation */
	delta = last_tsc_low - delta;
	{
		/*
		 * delta = high 32 bits of (delta * quotient), i.e. TSC
		 * cycles scaled to microseconds — presumably the quotient
		 * is set up by calibrate_tsc(); see init_tsc() below.
		 */
		register unsigned long eax, edx;
		eax = delta;
		__asm__("mull %2"
		:"=a" (eax), "=d" (edx)
		:"rm" (fast_gettimeoffset_quotient),
		 "0" (eax));
		delta = edx;
	}
	delta += delay_at_last_interrupt;
	lost = delta/(1000000/HZ);	/* whole ticks elapsed since last mark */
	delay = delta%(1000000/HZ);	/* leftover usecs within current tick */
	if (lost >= 2) {
		jiffies_64 += lost-1;

		/* sanity check to ensure we're not always losing ticks */
		if (lost_count++ > 100) {
			printk(KERN_WARNING "Losing too many ticks!\n");
			printk(KERN_WARNING "TSC cannot be used as a timesource.  \n");
			printk(KERN_WARNING "Possible reasons for this are:\n");
			printk(KERN_WARNING "  You're running with Speedstep,\n");
			printk(KERN_WARNING "  You don't have DMA enabled for your hard disk (see hdparm),\n");
			printk(KERN_WARNING "  Incorrect TSC synchronization on an SMP system (see dmesg).\n");
			printk(KERN_WARNING "Falling back to a sane timesource now.\n");
			clock_fallback();
		}
		/* ... but give the TSC a fair chance */
		if (lost_count > 25)
			cpufreq_delayed_get();
	} else
		lost_count = 0;
	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);

	/* calculate delay_at_last_interrupt */
	count = ((LATCH-1) - count) * TICK_SIZE;
	delay_at_last_interrupt = (count + LATCH/2) / LATCH;

	/* catch corner case where tick rollover occurred
	 * between tsc and pit reads (as noted when
	 * usec delta is > 90% # of usecs/tick)
	 */
	if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
		jiffies_64++;
}

/*
 * Probe and initialize the TSC timesource.  Honors a clock= override
 * (unless HPET is enabled, which forces TSC), calibrates the TSC against
 * the HPET or the PIT, and derives cpu_khz plus the gettimeofday scaling
 * quotient.  Returns 0 when the TSC is usable, -ENODEV otherwise.
 */
static int __init init_tsc(char* override)
{
	/* check clock override */
	if (override[0] && strncmp(override,"tsc",3)) {
#ifdef CONFIG_HPET_TIMER
		if (is_hpet_enabled()) {
			printk(KERN_ERR "Warning: clock= override failed. Defaulting to tsc\n");
		} else
#endif
		{
			return -ENODEV;
		}
	}

	/*
	 * If we have APM enabled or the CPU clock speed is variable
	 * (CPU stops clock on HLT or slows clock to save power)
	 * then the TSC timestamps may diverge by up to 1 jiffy from
	 * 'real time' but nothing will break.
	 * The most frequent case is that the CPU is "woken" from a halt
	 * state by the timer interrupt itself, so we get 0 error. In the
	 * rare cases where a driver would "wake" the CPU and request a
	 * timestamp, the maximum error is < 1 jiffy. But timestamps are
	 * still perfectly ordered.
	 * Note that the TSC counter will be reset if APM suspends
	 * to disk; this won't break the kernel, though, 'cuz we're
	 * smart.  See arch/i386/kernel/apm.c.
	 */
	/*
	 *	Firstly we have to do a CPU check for chips with
	 *	a potentially buggy TSC. At this point we haven't run
	 *	the ident/bugs checks so we must run this hook as it
	 *	may turn off the TSC flag.
	 *
	 *	NOTE: this doesn't yet handle SMP 486 machines where only
	 *	some CPU's have a TSC. Thats never worked and nobody has
	 *	moaned if you have the only one in the world - you fix it!
	 */

	count2 = LATCH; /* initialize counter for mark_offset_tsc() */

	if (cpu_has_tsc) {
		unsigned long tsc_quotient;
#ifdef CONFIG_HPET_TIMER
		if (is_hpet_enabled() && hpet_use_timer) {
			unsigned long result, remain;
			printk("Using TSC for gettimeofday\n");
			tsc_quotient = calibrate_tsc_hpet(NULL);
			timer_tsc.mark_offset = &mark_offset_tsc_hpet;
			/*
			 * Math to calculate hpet to usec multiplier
			 * Look for the comments at get_offset_tsc_hpet()
			 */
			ASM_DIV64_REG(result, remain, hpet_tick,
					0, KERNEL_TICK_USEC);
			if (remain > (hpet_tick >> 1))
				result++; /* rounding the result */
			hpet_usec_quotient = result;
		} else
#endif
		{
			tsc_quotient = calibrate_tsc();
		}

		if (tsc_quotient) {
			fast_gettimeoffset_quotient = tsc_quotient;
			use_tsc = 1;
			/*
			 *	We could be more selective here I suspect
			 *	and just enable this for the next intel chips ?
			 */
			/* report CPU clock rate in Hz.
			 * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
			 * clock/second. Our precision is about 100 ppm.
			 */
			{	unsigned long eax=0, edx=1000;
				__asm__("divl %2"
				:"=a" (cpu_khz), "=d" (edx)
				:"r" (tsc_quotient),
				 "0" (eax), "1" (edx));
				printk("Detected %u.%03u MHz processor.\n",
					cpu_khz / 1000, cpu_khz % 1000);
			}
			set_cyc2ns_scale(cpu_khz);
			return 0;
		}
	}
	return -ENODEV;
}

/*
 * Resume hook: re-snapshot the TSC (and the HPET counter, when in use)
 * under the monotonic seqlock, treating it as the last mark-offset time
 * so the first tick after resume does not see a bogus huge delta.
 */
static int tsc_resume(void)
{
	write_seqlock(&monotonic_lock);
	/* Assume this is the last mark offset time */
	rdtsc(last_tsc_low, last_tsc_high);
#ifdef CONFIG_HPET_TIMER
	if (is_hpet_enabled() && hpet_use_timer)
		hpet_last = hpet_readl(HPET_COUNTER);
#endif
	write_sequnlock(&monotonic_lock);
	return 0;
}

#ifndef CONFIG_X86_TSC
/* "notsc" boot parameter: disable flag for tsc.  Takes effect by
 * clearing the TSC cpu flag in cpu/common.c */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#else
/* "notsc" boot parameter on a kernel built with CONFIG_X86_TSC: the TSC
 * cannot be disabled, so only warn. */
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
				"cannot disable TSC.\n");
	return 1;
}
#endif

__setup("notsc", tsc_setup);

/************************************************************/

/* tsc timer_opts struct */
static struct timer_opts timer_tsc = {
	.name = "tsc",
	.mark_offset = mark_offset_tsc,
	.get_offset = get_offset_tsc,
	.monotonic_clock = monotonic_clock_tsc,
	.delay = delay_tsc,
	.read_timer = read_timer_tsc,
	.resume	= tsc_resume,
};

struct init_timer_opts __initdata timer_tsc_init = {
	.init = init_tsc,
	.opts = &timer_tsc,
};

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -