
📄 time.c

📁 Source code of the linux-2.4.29 operating system
💻 C
📖 Page 1 of 2
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004  Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines. See
 * Documentation/mips/time.README.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/bootinfo.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <asm/hardirq.h>
#include <asm/div64.h>

/*
 * The integer part of the number of usecs per jiffy is taken from tick,
 * but the fractional part is not recorded, so we calculate it using the
 * initial value of HZ.  This aids systems where tick isn't really an
 * integer (e.g. for HZ = 128).
 */
#define USECS_PER_JIFFY		tick
#define USECS_PER_JIFFY_FRAC	((unsigned long)(u32)((1000000ULL << 32) / HZ))

/*
 * forward reference
 */
extern rwlock_t xtime_lock;
extern volatile unsigned long wall_jiffies;

spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;

/*
 * whether we emulate local_timer_interrupts for SMP machines.
 */
int emulate_local_timer_interrupt;

/*
 * By default we provide the null RTC ops
 */
static unsigned long null_rtc_get_time(void)
{
	return mktime(2000, 1, 1, 0, 0, 0);
}

static int null_rtc_set_time(unsigned long sec)
{
	return 0;
}

unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
int (*rtc_set_mmss)(unsigned long);

/* usecs per counter cycle, shifted to left by 32 bits */
static unsigned int sll32_usecs_per_cycle;

/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy;

/* Cycle counter value at the previous timer interrupt.. */
static unsigned int timerhi, timerlo;

/* expirelo is the count value for next CPU timer interrupt */
static unsigned int expirelo;

/*
 * Null timer ack for systems not needing one (e.g. i8254).
 */
static void null_timer_ack(void) { /* nothing */ }

/*
 * Null high precision timer functions for systems lacking one.
 */
static unsigned int null_hpt_read(void)
{
	return 0;
}

static void null_hpt_init(unsigned int count) { /* nothing */ }

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 */
static void c0_timer_ack(void)
{
	unsigned int count;

	/* Ack this timer interrupt and set the next one.  */
	expirelo += cycles_per_jiffy;
	write_c0_compare(expirelo);

	/* Check to see if we have missed any timer interrupts.  */
	count = read_c0_count();
	if ((count - expirelo) < 0x7fffffff) {
		/* missed_timer_count++; */
		expirelo = count + cycles_per_jiffy;
		write_c0_compare(expirelo);
	}
}

/*
 * High precision timer functions for a R4k-compatible timer.
 */
static unsigned int c0_hpt_read(void)
{
	return read_c0_count();
}

/* For use solely as a high precision timer.  */
static void c0_hpt_init(unsigned int count)
{
	write_c0_count(read_c0_count() - count);
}
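/*
 * Note on the R4k-style routines above: the CP0 Count register ticks at a
 * constant rate and raises the timer interrupt when it matches Compare.
 * c0_timer_ack() schedules the next tick by advancing expirelo by one
 * cycles_per_jiffy and writing it to Compare; the unsigned test
 * (count - expirelo) < 0x7fffffff relies on modulo-2^32 arithmetic to
 * detect that Count has already passed the new Compare value (a missed
 * tick), in which case the next interrupt is instead pushed a full jiffy
 * beyond the current Count.
 */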
/* For use both as a high precision timer and an interrupt source.  */
static void c0_hpt_timer_init(unsigned int count)
{
	count = read_c0_count() - count;
	expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
	write_c0_count(expirelo - cycles_per_jiffy);
	write_c0_compare(expirelo);
	write_c0_count(count);
}

int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
unsigned int (*mips_hpt_read)(void);
void (*mips_hpt_init)(unsigned int);

/*
 * timeofday services, for syscalls.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags, lost;

	read_lock_irqsave(&xtime_lock, flags);

	*tv = xtime;
	tv->tv_usec += do_gettimeoffset();

	/*
	 * xtime is atomically updated in timer_bh.  jiffies - wall_jiffies
	 * is nonzero if the timer bottom half hasn't executed yet.
	 */
	lost = jiffies - wall_jiffies;
	if (lost)
		tv->tv_usec += lost * USECS_PER_JIFFY;

	read_unlock_irqrestore(&xtime_lock, flags);

	while (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

void do_settimeofday(struct timeval *tv)
{
	write_lock_irq(&xtime_lock);

	/*
	 * This is revolting.  We need to set "xtime" correctly.  However,
	 * the value in this location is the value at the most recent update
	 * of wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	tv->tv_usec -= do_gettimeoffset();
	tv->tv_usec -= (jiffies - wall_jiffies) * USECS_PER_JIFFY;

	while (tv->tv_usec < 0) {
		tv->tv_usec += 1000000;
		tv->tv_sec--;
	}

	xtime = *tv;
	time_adjust = 0;			/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_unlock_irq(&xtime_lock);
}

/*
 * Gettimeoffset routines.  These routines returns the time duration
 * since last timer interrupt in usecs.
 *
 * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset.
 * Otherwise use calibrate_gettimeoffset()
 *
 * If the CPU does not have the counter register, you can either supply
 * your own gettimeoffset() routine, or use null_gettimeoffset(), which
 * gives the same resolution as HZ.
 */
static unsigned long null_gettimeoffset(void)
{
	return 0;
}

/* The function pointer to one of the gettimeoffset funcs.  */
unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset;

static unsigned long fixed_rate_gettimeoffset(void)
{
	u32 count;
	unsigned long res;

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu	%1,%2"
		: "=h" (res)
		: "r" (count), "r" (sll32_usecs_per_cycle)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}
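/*
 * The inline multu above forms the full 64-bit product of the elapsed
 * cycle count and sll32_usecs_per_cycle, which holds "usecs per cycle"
 * scaled by 2^32.  The "=h" constraint reads the hi register, i.e. the
 * upper 32 bits of that product, which is exactly the number of usecs
 * elapsed since the last timer tick.  The calibrate_*_gettimeoffset()
 * variants below use the same multiply, but with a quotient that is
 * recomputed once per jiffy from the observed cycle count.
 */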
/*
 * Cached "1/(clocks per usec) * 2^32" value.
 * It has to be recalculated once each jiffy.
 */
static unsigned long cached_quotient;

/* Last jiffy when calibrate_divXX_gettimeoffset() was called. */
static unsigned long last_jiffies;

/*
 * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej.
 */
static unsigned long calibrate_div32_gettimeoffset(void)
{
	u32 count;
	unsigned long res, tmp;
	unsigned long quotient;

	tmp = jiffies;

	quotient = cached_quotient;

	if (last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies != 0) {
			unsigned long r0;
			do_div64_32(r0, timerhi, timerlo, tmp);
			do_div64_32(quotient, USECS_PER_JIFFY,
				    USECS_PER_JIFFY_FRAC, r0);
			cached_quotient = quotient;
		}
	}

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu  %1,%2"
		: "=h" (res)
		: "r" (count), "r" (quotient)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}

static unsigned long calibrate_div64_gettimeoffset(void)
{
	u32 count;
	unsigned long res, tmp;
	unsigned long quotient;

	tmp = jiffies;

	quotient = cached_quotient;

	if (last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies) {
			unsigned long r0;
			__asm__(".set	push\n\t"
				".set	mips3\n\t"
				"lwu	%0,%3\n\t"
				"dsll32	%1,%2,0\n\t"
				"or	%1,%1,%0\n\t"
				"ddivu	$0,%1,%4\n\t"
				"mflo	%1\n\t"
				"dsll32	%0,%5,0\n\t"
				"or	%0,%0,%6\n\t"
				"ddivu	$0,%0,%1\n\t"
				"mflo	%0\n\t"
				".set	pop"
				: "=&r" (quotient), "=&r" (r0)
				: "r" (timerhi), "m" (timerlo),
				  "r" (tmp), "r" (USECS_PER_JIFFY),
				  "r" (USECS_PER_JIFFY_FRAC)
				: "hi", "lo", GCC_REG_ACCUM);
			cached_quotient = quotient;
		}
	}

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu	%1,%2"
		: "=h" (res)
		: "r" (count), "r" (quotient)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}

/* last time when xtime and rtc are sync'ed up */
static long last_rtc_update;

/*
 * local_timer_interrupt() does profiling and process accounting
 * on a per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 *
 * In SMP mode, it might invoked by per-CPU timer interrupt, or
 * a broadcasted inter-processor interrupt which itself is triggered
 * by the global timer interrupt.
 */
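For reference, the 32.32 fixed-point conversion performed by fixed_rate_gettimeoffset() can be reproduced outside the kernel with an ordinary 64-bit multiply. The stand-alone sketch below is not part of the kernel source; the 100 MHz counter rate and the cycle count are made-up example values, and the result rounds down slightly, just as the multu/hi form does.

/*
 * Stand-alone sketch of the 32.32 fixed-point math used by
 * fixed_rate_gettimeoffset(): the high 32 bits of
 * count * sll32_usecs_per_cycle give microseconds since the last tick.
 * Counter frequency and cycle count are hypothetical example values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t counter_hz = 100000000;	/* hypothetical CP0 Count rate */

	/* usecs per cycle, scaled by 2^32: (10^6 << 32) / Hz */
	uint32_t sll32_usecs_per_cycle =
		(uint32_t)((1000000ULL << 32) / counter_hz);

	uint32_t count = 250000;		/* cycles since the last jiffy */

	/* upper half of the 64-bit product == elapsed microseconds */
	uint32_t usecs =
		(uint32_t)(((uint64_t)count * sll32_usecs_per_cycle) >> 32);

	printf("%u cycles at %llu Hz -> %u usecs\n",
	       count, (unsigned long long)counter_hz, usecs);
	return 0;
}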
