📄 time.c
字号:
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004 Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines. See
 * Documentation/mips/time.README.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
#include <asm/time.h>

/*
 * The integer part of the number of usecs per jiffy is taken from tick,
 * but the fractional part is not recorded, so we calculate it using the
 * initial value of HZ. This aids systems where tick isn't really an
 * integer (e.g. for HZ = 128).
 */
#define USECS_PER_JIFFY		TICK_SIZE
#define USECS_PER_JIFFY_FRAC	((unsigned long)(u32)((1000000ULL << 32) / HZ))

#define TICK_SIZE	(tick_nsec / 1000)

/*
 * forward reference
 */
extern volatile unsigned long wall_jiffies;

/* Serializes access to the RTC hardware. */
DEFINE_SPINLOCK(rtc_lock);

/*
 * By default we provide the null RTC ops.  Boards with a real RTC
 * override the rtc_mips_* hooks below; until then the clock reads as
 * a constant 2000-01-01 00:00:00 and setting it is a silent no-op.
 */
static unsigned long null_rtc_get_time(void)
{
	return mktime(2000, 1, 1, 0, 0, 0);
}

static int null_rtc_set_time(unsigned long sec)
{
	return 0;
}

/* Board-overridable RTC hooks; default to the null implementations. */
unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
int (*rtc_mips_set_mmss)(unsigned long);

/* usecs per counter cycle, shifted to left by 32 bits */
static unsigned int sll32_usecs_per_cycle;

/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;

/* Cycle counter value at the previous timer interrupt.. */
static unsigned int timerhi, timerlo;

/* expirelo is the count value for next CPU timer interrupt */
static unsigned int expirelo;

/*
 * Null timer ack for systems not needing one (e.g. i8254).
 */
static void null_timer_ack(void) { /* nothing */ }

/*
 * Null high precision timer functions for systems lacking one.
 */
static unsigned int null_hpt_read(void)
{
	return 0;
}

static void null_hpt_init(unsigned int count)
{
	/* nothing */
}

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 *
 * Advances the CP0 Compare register by one jiffy's worth of cycles and
 * recovers from any interrupts that were missed in the meantime.
 */
static void c0_timer_ack(void)
{
	unsigned int count;

#ifndef CONFIG_SOC_PNX8550	/* pnx8550 resets to zero */
	/* Ack this timer interrupt and set the next one. */
	expirelo += cycles_per_jiffy;
#endif
	write_c0_compare(expirelo);

	/*
	 * Check to see if we have missed any timer interrupts.
	 * Unsigned wrap-around: (count - expirelo) < 0x7fffffff means the
	 * counter has already passed expirelo, so push the next expiry a
	 * full jiffy beyond the current count.
	 */
	while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
		/* missed_timer_count++; */
		expirelo = count + cycles_per_jiffy;
		write_c0_compare(expirelo);
	}
}

/*
 * High precision timer functions for a R4k-compatible timer.
 */
static unsigned int c0_hpt_read(void)
{
	return read_c0_count();
}

/* For use solely as a high precision timer.
 */
static void c0_hpt_init(unsigned int count)
{
	/* Rewind the counter so that "count" cycles have notionally elapsed. */
	write_c0_count(read_c0_count() - count);
}

/*
 * For use both as a high precision timer and an interrupt source.
 * Also primes expirelo/Compare for the first timer interrupt.
 */
static void c0_hpt_timer_init(unsigned int count)
{
	count = read_c0_count() - count;
	/* First expiry: the next whole-jiffy multiple after "count". */
	expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
	write_c0_count(expirelo - cycles_per_jiffy);
	write_c0_compare(expirelo);
	write_c0_count(count);
}

/* Board-overridable timer hooks (set up by platform code). */
int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
unsigned int (*mips_hpt_read)(void);
void (*mips_hpt_init)(unsigned int);

/*
 * This version of gettimeofday has microsecond resolution and better than
 * microsecond precision on fast machines with cycle counter.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq;
	unsigned long lost;
	unsigned long usec, sec;
	unsigned long max_ntp_tick;

	/* Seqlock retry loop: redo the read if a writer updated xtime. */
	do {
		seq = read_seqbegin(&xtime_lock);
		usec = do_gettimeoffset();

		lost = jiffies - wall_jiffies;

		/*
		 * If time_adjust is negative then NTP is slowing the clock
		 * so make sure not to go into next possible interval.
		 * Better to lose some accuracy than have time go backwards..
		 */
		if (unlikely(time_adjust < 0)) {
			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
			usec = min(usec, max_ntp_tick);

			if (lost)
				usec += lost * max_ntp_tick;
		} else if (unlikely(lost))
			usec += lost * (USEC_PER_SEC / HZ);

		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry(&xtime_lock, seq));

	/* Normalize: carry whole seconds out of the microsecond field. */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * Set the wall clock.  Returns 0 on success or -EINVAL if tv_nsec is out
 * of range.  Also adjusts wall_to_monotonic so the monotonic clock is
 * unaffected by the wall-clock step.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/*
	 * This is revolting. We need to set "xtime" correctly. However,
	 * the value in this location is the value at the most recent update
	 * of wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= do_gettimeoffset() * NSEC_PER_USEC;
	nsec -= (jiffies - wall_jiffies) * tick_nsec;

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* Discard any pending NTP adjustment; the clock just jumped. */
	ntp_clear();

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/*
 * Gettimeoffset routines.  These routines return the time duration
 * since the last timer interrupt, in usecs.
 *
 * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset.
 * Otherwise use calibrate_gettimeoffset().
 *
 * If the CPU does not have the counter register, you can either supply
 * your own gettimeoffset() routine, or use null_gettimeoffset(), which
 * gives the same resolution as HZ.
 */
static unsigned long null_gettimeoffset(void)
{
	return 0;
}

/* The function pointer to one of the gettimeoffset funcs. */
unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset;

static unsigned long fixed_rate_gettimeoffset(void)
{
	u32 count;
	unsigned long res;

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	/*
	 * multu leaves the 64-bit product in hi/lo; res is taken from hi,
	 * i.e. (count * sll32_usecs_per_cycle) >> 32 = elapsed usecs.
	 */
	__asm__("multu %1,%2"
		: "=h" (res)
		: "r" (count), "r" (sll32_usecs_per_cycle)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}

/*
 * Cached "1/(clocks per usec) * 2^32" value.
 * It has to be recalculated once each jiffy.
 */
static unsigned long cached_quotient;

/* Last jiffy when calibrate_divXX_gettimeoffset() was called. */
static unsigned long last_jiffies;

/*
 * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej.
 */
static unsigned long calibrate_div32_gettimeoffset(void)
{
	u32 count;
	unsigned long res, tmp;
	unsigned long quotient;

	tmp = jiffies;

	quotient = cached_quotient;

	/* Recompute the usecs-per-cycle quotient once per jiffy. */
	if (last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies != 0) {
			unsigned long r0;
			/*
			 * NOTE(review): assumes do_div64_32(res, hi, lo, div)
			 * computes the 64-bit (hi:lo) / div -- confirm
			 * against asm/div64.h.
			 */
			do_div64_32(r0, timerhi, timerlo, tmp);
			do_div64_32(quotient, USECS_PER_JIFFY,
				    USECS_PER_JIFFY_FRAC, r0);
			cached_quotient = quotient;
		}
	}

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	/* res = high 32 bits of count * quotient, i.e. elapsed usecs. */
	__asm__("multu %1,%2"
		: "=h" (res)
		: "r" (count), "r" (quotient)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;

	return res;
}

/*
 * 64-bit variant of the above: uses MIPS III doubleword division to
 * recompute the per-jiffy quotient from the full 64-bit cycle count.
 */
static unsigned long calibrate_div64_gettimeoffset(void)
{
	u32 count;
	unsigned long res, tmp;
	unsigned long quotient;

	tmp = jiffies;

	quotient = cached_quotient;

	/* Recompute the quotient once per jiffy. */
	if (last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies) {
			unsigned long r0;
			/*
			 * r0 = (timerhi:timerlo) / jiffies,
			 * quotient = (USECS_PER_JIFFY:FRAC) / r0,
			 * computed with 64-bit ddivu.
			 */
			__asm__(".set push\n\t"
				".set mips3\n\t"
				"lwu %0,%3\n\t"
				"dsll32 %1,%2,0\n\t"
				"or %1,%1,%0\n\t"
				"ddivu $0,%1,%4\n\t"
				"mflo %1\n\t"
				"dsll32 %0,%5,0\n\t"
				"or %0,%0,%6\n\t"
				"ddivu $0,%0,%1\n\t"
				"mflo %0\n\t"
				".set pop"
				: "=&r" (quotient), "=&r" (r0)
				: "r" (timerhi), "m" (timerlo),
				  "r" (tmp), "r" (USECS_PER_JIFFY),
				  "r" (USECS_PER_JIFFY_FRAC)
				: "hi", "lo", GCC_REG_ACCUM);
			cached_quotient = quotient;
		}
	}

	/* Get last timer tick in absolute kernel time */
	count = mips_hpt_read();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	/* res = high 32 bits of count * quotient, i.e. elapsed usecs. */
	__asm__("multu %1,%2"
		: "=h" (res)
		: "r" (count), "r" (quotient)
		: "lo", GCC_REG_ACCUM);

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY - 1;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -