/*
 *  arch/s390/kernel/time.c
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/div64.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/timer.h>
#include <asm/etr.h>

/* change this if you have some constant time drift */
#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

/*
 * Create a small time difference between the timer interrupts
 * on the different cpus to avoid lock contention.
 */
#define CPU_DEVIATION       (smp_processor_id() << 12)

#define TICK_SIZE tick

static ext_int_info_t ext_int_info_cc;
static ext_int_info_t ext_int_etr_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;
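
/*
 * Background on the TOD format (architectural fact, not in the
 * original comments): the TOD clock counts in units of 2^-12
 * microseconds, i.e. bit 51 ticks once per microsecond. A TOD delta
 * thus converts to nanoseconds as delta * 1000 / 4096, which equals
 * delta * 125 / 512; sched_clock() below computes it as
 * (delta * 125) >> 9 to avoid a division, and the clocksource
 * mult/shift pair (1000, 12) further down encodes the same ratio.
 * TOD_UNIX_EPOCH above is the 70-year offset from the TOD epoch
 * (1900-01-01) to the Unix epoch, expressed in these units.
 */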

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
        return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
}

/*
 * Monotonic_clock - returns # of nanoseconds passed since time_init()
 */
unsigned long long monotonic_clock(void)
{
        return sched_clock();
}
EXPORT_SYMBOL(monotonic_clock);

void tod_to_timeval(__u64 todval, struct timespec *xtime)
{
        unsigned long long sec;

        sec = todval >> 12;
        do_div(sec, 1000000);
        xtime->tv_sec = sec;
        todval -= (sec * 1000000) << 12;
        xtime->tv_nsec = ((todval * 1000) >> 12);
}

#ifdef CONFIG_PROFILING
#define s390_do_profile()       profile_tick(CPU_PROFILING)
#else
#define s390_do_profile()       do { ; } while(0)
#endif /* CONFIG_PROFILING */

/*
 * Advance the per cpu tick counter up to the time given with the
 * "time" argument. The per cpu update consists of accounting
 * the virtual cpu time, calling update_process_times and calling
 * the profiling hook. If xtime is before time it is advanced as well.
 */
void account_ticks(u64 time)
{
        __u32 ticks;
        __u64 tmp;

        /* Calculate how many ticks have passed. */
        if (time < S390_lowcore.jiffy_timer)
                return;
        tmp = time - S390_lowcore.jiffy_timer;
        if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
                ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
                S390_lowcore.jiffy_timer +=
                        CLK_TICKS_PER_JIFFY * (__u64) ticks;
        } else if (tmp >= CLK_TICKS_PER_JIFFY) {
                ticks = 2;
                S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
        } else {
                ticks = 1;
                S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
        }

#ifdef CONFIG_SMP
        /*
         * Do not rely on the boot cpu to do the calls to do_timer.
         * Spread it over all cpus instead.
         */
        write_seqlock(&xtime_lock);
        if (S390_lowcore.jiffy_timer > xtime_cc) {
                __u32 xticks;

                tmp = S390_lowcore.jiffy_timer - xtime_cc;
                if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
                        xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
                        xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
                } else {
                        xticks = 1;
                        xtime_cc += CLK_TICKS_PER_JIFFY;
                }
                do_timer(xticks);
        }
        write_sequnlock(&xtime_lock);
#else
        do_timer(ticks);
#endif

        while (ticks--)
                update_process_times(user_mode(get_irq_regs()));

        s390_do_profile();
}
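
/*
 * Worked example for the tick arithmetic above, assuming HZ=100:
 * USECS_PER_JIFFY = 1000000/100 = 10000, so CLK_TICKS_PER_JIFFY =
 * 10000 << 12 = 40960000 TOD units per jiffy. A delta of, say,
 * 100000000 TOD units is more than two jiffies and yields
 * ticks = __div(100000000, 40960000) + 1 = 3, advancing jiffy_timer
 * by 3 * 40960000 units. The same per-jiffy constant bounds the
 * overflow check in stop_hz_timer() below.
 */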

#ifdef CONFIG_NO_IDLE_HZ

#ifdef CONFIG_NO_IDLE_HZ_INIT
int sysctl_hz_timer = 0;
#else
int sysctl_hz_timer = 1;
#endif

/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static void stop_hz_timer(void)
{
        unsigned long flags;
        unsigned long seq, next;
        __u64 timer, todval;
        int cpu = smp_processor_id();

        if (sysctl_hz_timer != 0)
                return;

        cpu_set(cpu, nohz_cpu_mask);

        /*
         * Leave the clock comparator set up for the next timer
         * tick if either rcu or a softirq is pending.
         */
        if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
                cpu_clear(cpu, nohz_cpu_mask);
                return;
        }

        /*
         * This cpu is going really idle. Set up the clock comparator
         * for the next event.
         */
        next = next_timer_interrupt();
        do {
                seq = read_seqbegin_irqsave(&xtime_lock, flags);
                timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
        todval = -1ULL;
        /* Be careful about overflows. */
        if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
                timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
                if (timer >= jiffies_timer_cc)
                        todval = timer;
        }
        set_clock_comparator(todval);
}

/*
 * Start the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static void start_hz_timer(void)
{
        BUG_ON(!in_interrupt());

        if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
                return;
        account_ticks(get_clock());
        set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
        cpu_clear(smp_processor_id(), nohz_cpu_mask);
}

static int nohz_idle_notify(struct notifier_block *self,
                            unsigned long action, void *hcpu)
{
        switch (action) {
        case S390_CPU_IDLE:
                stop_hz_timer();
                break;
        case S390_CPU_NOT_IDLE:
                start_hz_timer();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block nohz_idle_nb = {
        .notifier_call = nohz_idle_notify,
};

static void __init nohz_init(void)
{
        if (register_idle_notifier(&nohz_idle_nb))
                panic("Couldn't register idle notifier");
}

#endif

/*
 * Set up per cpu jiffy timer and set the clock comparator.
 */
static void setup_jiffy_timer(void)
{
        /* Set up clock comparator to next jiffy. */
        S390_lowcore.jiffy_timer =
                jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
        set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
}

/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
        setup_jiffy_timer();

        /* Enable clock comparator timer interrupt. */
        __ctl_set_bit(0, 11);

        /* Always allow ETR external interrupts, even without an ETR. */
        __ctl_set_bit(0, 4);
}

static void clock_comparator_interrupt(__u16 code)
{
        /* set clock comparator for next tick */
        set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
}

static void etr_reset(void);
static void etr_ext_handler(__u16);

/*
 * Get the TOD clock running.
 */
static u64 __init reset_tod_clock(void)
{
        u64 time;

        etr_reset();
        if (store_clock(&time) == 0)
                return time;
        /* TOD clock not running. Set the clock to Unix Epoch. */
        if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
                panic("TOD clock not operational.");

        return TOD_UNIX_EPOCH;
}

static cycle_t read_tod_clock(void)
{
        return get_clock();
}

static struct clocksource clocksource_tod = {
        .name           = "tod",
        .rating         = 400,
        .read           = read_tod_clock,
        .mask           = -1ULL,
        .mult           = 1000,
        .shift          = 12,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
        init_timer_cc = reset_tod_clock();
        xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
        jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;

        /* set xtime */
        tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        /* request the clock comparator external interrupt */
        if (register_early_external_interrupt(0x1004,
                                              clock_comparator_interrupt,
                                              &ext_int_info_cc) != 0)
                panic("Couldn't request external interrupt 0x1004");

        if (clocksource_register(&clocksource_tod) != 0)
                panic("Could not register TOD clock source");

        /* request the etr external interrupt */
        if (register_early_external_interrupt(0x1406, etr_ext_handler,
                                              &ext_int_etr_cc) != 0)
                panic("Couldn't request external interrupt 0x1406");

        /* Enable TOD clock interrupts on the boot cpu. */
        init_cpu_timer();

#ifdef CONFIG_NO_IDLE_HZ
        nohz_init();
#endif

#ifdef CONFIG_VIRT_TIMER
        vtime_init();
#endif
}

/*
 * External Time Reference (ETR) code.
 */
static int etr_port0_online;
static int etr_port1_online;

static int __init early_parse_etr(char *p)
{
        if (strncmp(p, "off", 3) == 0)
                etr_port0_online = etr_port1_online = 0;
        else if (strncmp(p, "port0", 5) == 0)
                etr_port0_online = 1;
        else if (strncmp(p, "port1", 5) == 0)
                etr_port1_online = 1;
        else if (strncmp(p, "on", 2) == 0)
                etr_port0_online = etr_port1_online = 1;
        return 0;
}
early_param("etr", early_parse_etr);

enum etr_event {
        ETR_EVENT_PORT0_CHANGE,
        ETR_EVENT_PORT1_CHANGE,
        ETR_EVENT_PORT_ALERT,
        ETR_EVENT_SYNC_CHECK,
        ETR_EVENT_SWITCH_LOCAL,
        ETR_EVENT_UPDATE,
};

enum etr_flags {
        ETR_FLAG_ENOSYS,
        ETR_FLAG_EACCES,
        ETR_FLAG_STEAI,
};

/*
 * Valid bit combinations of the eacr register are (x = don't care):
 * e0 e1 dp p0 p1 ea es sl
 *  0  0  x  0  0  0  0  0  initial, disabled state
 *  0  0  x  0  1  1  0  0  port 1 online
 *  0  0  x  1  0  1  0  0  port 0 online
 *  0  0  x  1  1  1  0  0  both ports online
 *  0  1  x  0  1  1  0  0  port 1 online and usable, ETR or PPS mode
 *  0  1  x  0  1  1  0  1  port 1 online, usable and ETR mode
 *  0  1  x  0  1  1  1  0  port 1 online, usable, PPS mode, in-sync
 *  0  1  x  0  1  1  1  1  port 1 online, usable, ETR mode, in-sync
 *  0  1  x  1  1  1  0  0  both ports online, port 1 usable
 *  0  1  x  1  1  1  1  0  both ports online, port 1 usable, PPS mode, in-sync
 *  0  1  x  1  1  1  1  1  both ports online, port 1 usable, ETR mode, in-sync
 *  1  0  x  1  0  1  0  0  port 0 online and usable, ETR or PPS mode
 *  1  0  x  1  0  1  0  1  port 0 online, usable and ETR mode
 *  1  0  x  1  0  1  1  0  port 0 online, usable, PPS mode, in-sync
 *  1  0  x  1  0  1  1  1  port 0 online, usable, ETR mode, in-sync
 *  1  0  x  1  1  1  0  0  both ports online, port 0 usable
 *  1  0  x  1  1  1  1  0  both ports online, port 0 usable, PPS mode, in-sync
 *  1  0  x  1  1  1  1  1  both ports online, port 0 usable, ETR mode, in-sync
 *  1  1  x  1  1  1  1  0  both ports online & usable, ETR, in-sync
 *  1  1  x  1  1  1  1  1  both ports online & usable, ETR, in-sync
 */
static struct etr_eacr etr_eacr;
static u64 etr_tolec;                   /* time of last eacr update */
static unsigned long etr_flags;
static struct etr_aib etr_port0;
static int etr_port0_uptodate;
static struct etr_aib etr_port1;
static int etr_port1_uptodate;
static unsigned long etr_events;
static struct timer_list etr_timer;
static DEFINE_PER_CPU(atomic_t, etr_sync_word);

static void etr_timeout(unsigned long dummy);
static void etr_work_fn(struct work_struct *work);
static DECLARE_WORK(etr_work, etr_work_fn);
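
/*
 * How the per-cpu etr_sync_word is used (a summary derived from the
 * code below): bit 2^31 is the "clock is in sync" flag, and the
 * remaining bits act as a sequence counter that is bumped whenever
 * the sync state changes. get_sync_clock() samples the word, reads
 * the clock, then samples the word again; the clock value is trusted
 * only if both samples are equal and the in-sync bit is set, so a
 * sync-state change cannot race undetected with the clock read.
 */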

/*
 * The etr get_clock function. It writes the current clock value to
 * the clock pointer and returns 0 if the clock is in sync with the
 * external time source. If the clock mode is local it returns
 * -ENOSYS, and it returns -EAGAIN if the clock is not in sync with
 * the external reference. This function is what ETR is all about.
 */
int get_sync_clock(unsigned long long *clock)
{
        atomic_t *sw_ptr;
        unsigned int sw0, sw1;

        sw_ptr = &get_cpu_var(etr_sync_word);
        sw0 = atomic_read(sw_ptr);
        *clock = get_clock();
        sw1 = atomic_read(sw_ptr);
        put_cpu_var(etr_sync_word);
        if (sw0 == sw1 && (sw0 & 0x80000000U))
                /* Success: time is in sync. */
                return 0;
        if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
                return -ENOSYS;
        if (test_bit(ETR_FLAG_EACCES, &etr_flags))
                return -EACCES;
        return -EAGAIN;
}
EXPORT_SYMBOL(get_sync_clock);

/*
 * Make get_sync_clock return -EAGAIN.
 */
static void etr_disable_sync_clock(void *dummy)
{
        atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);

        /*
         * Clear the in-sync bit 2^31. All get_sync_clock calls will
         * fail until the sync bit is turned back on. In addition
         * increase the "sequence" counter to avoid the race of an
         * etr event and the complete recovery against get_sync_clock.
         */
        atomic_clear_mask(0x80000000, sw_ptr);
        atomic_inc(sw_ptr);
}
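
/*
 * Illustrative caller of get_sync_clock(), not part of the original
 * file: take an ETR-synchronized timestamp when possible and fall
 * back to the unsynchronized local TOD clock otherwise.
 *
 *      unsigned long long clk;
 *
 *      if (get_sync_clock(&clk) != 0)
 *              clk = get_clock();
 *
 * A caller that must have a synchronized value can instead retry on
 * -EAGAIN and treat -ENOSYS and -EACCES as hard errors.
 */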