hrtimer.c
/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *      based on kernel/timer.c
 *
 *      Help, testing, suggestions, bugfixes, improvements were
 *      provided by:
 *
 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *      et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock ids.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
        .clock_base =
        {
                {
                        .index = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                        .resolution = KTIME_LOW_RES,
                },
                {
                        .index = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                        .resolution = KTIME_LOW_RES,
                },
        }
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
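/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * It shows how the accessors above are typically used: sample the
 * monotonic clock twice and convert the difference to nanoseconds.
 * ktime_sub() and ktime_to_ns() are the usual <linux/ktime.h> helpers;
 * the function name itself is a hypothetical example.
 */
static inline s64 example_elapsed_ns(ktime_t start)
{
        /* ktime_get() returns the monotonic time, as documented above */
        return ktime_to_ns(ktime_sub(ktime_get(), start));
}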
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
        ktime_t xtim, tomono;
        struct timespec xts, tom;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
#ifdef CONFIG_NO_HZ
                getnstimeofday(&xts);
#else
                xts = xtime;
#endif
                tom = wall_to_monotonic;
        } while (read_seqretry(&xtime_lock, seq));

        xtim = timespec_to_ktime(xts);
        tomono = timespec_to_ktime(tom);
        base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
        base->clock_base[CLOCK_MONOTONIC].softirq_time =
                ktime_add(xtim, tomono);
}

/*
 * Helper function to check whether the timer is currently running its
 * callback function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
        return timer->state & HRTIMER_STATE_CALLBACK;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
                                             unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;

        new_cpu_base = &__get_cpu_var(hrtimer_bases);
        new_base = &new_cpu_base->clock_base[base->index];

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change the timer's base while it is
                 * running, so we keep it on the same CPU. No hassle vs.
                 * reprogramming the event source in the high resolution
                 * case. The softirq code will take care of this when the
                 * timer function has completed. There is no conflict as
                 * we hold the lock until the timer is enqueued.
                 */
                if (unlikely(hrtimer_callback_running(timer)))
                        return base;

                /* See the comment in lock_timer_base() */
                timer->base = NULL;
                spin_unlock(&base->cpu_base->lock);
                spin_lock(&new_base->cpu_base->lock);
                timer->base = new_base;
        }
        return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_clock_base *base = timer->base;

        spin_lock_irqsave(&base->cpu_base->lock, *flags);

        return base;
}

# define switch_hrtimer_base(t, b)      (b)

#endif /* !CONFIG_SMP */
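/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A typical caller pattern for the helpers above: pin down the timer's
 * base, operate on the timer, then release the per-cpu base lock that
 * lock_hrtimer_base() acquired:
 *
 *      unsigned long flags;
 *      struct hrtimer_clock_base *base;
 *
 *      base = lock_hrtimer_base(timer, &flags);
 *      ... dequeue/requeue the timer, possibly via switch_hrtimer_base() ...
 *      spin_unlock_irqrestore(&timer->base->cpu_base->lock, flags);
 */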
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64

# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:         addend
 * @nsec:       the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
unsigned long ktime_divns(const ktime_t kt, s64 div)
{
        u64 dclc, inc, dns;
        int sft = 0;

        dclc = dns = ktime_to_ns(kt);
        inc = div;
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        dclc >>= sft;
        do_div(dclc, (unsigned long) div);

        return (unsigned long) dclc;
}
#endif /* BITS_PER_LONG < 64 */

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
        if (!strcmp(str, "off"))
                hrtimer_hres_enabled = 0;
        else if (!strcmp(str, "on"))
                hrtimer_hres_enabled = 1;
        else
                return 0;
        return 1;
}

__setup("highres=", setup_hrtimer_hres);
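/*
 * Editor's note: the __setup() hook above ties hrtimer_hres_enabled to
 * the kernel command line; for example, booting with "highres=off"
 * keeps the system in low resolution mode even when
 * CONFIG_HIGH_RES_TIMERS is enabled.
 */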
/*
 * hrtimer_high_res_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
        return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
        return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source, checking both clock bases for the next event.
 *
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
        int i;
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t expires;

        cpu_base->expires_next.tv64 = KTIME_MAX;

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;

                if (!base->first)
                        continue;
                timer = rb_entry(base->first, struct hrtimer, node);
                expires = ktime_sub(timer->expires, base->offset);
                if (expires.tv64 < cpu_base->expires_next.tv64)
                        cpu_base->expires_next = expires;
        }

        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
{
        ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
        ktime_t expires = ktime_sub(timer->expires, base->offset);
        int res;

        /*
         * When the callback is running, we do not reprogram the clock event
         * device. The timer callback is either running on a different CPU or
         * the callback is executed in the hrtimer_interrupt context. The
         * reprogramming is handled either by the softirq, which called the
         * callback, or at the end of the hrtimer_interrupt.
         */
        if (hrtimer_callback_running(timer))
                return 0;

        if (expires.tv64 >= expires_next->tv64)
                return 0;

        /*
         * Clockevents returns -ETIME when the event was in the past.
         */
        res = tick_program_event(expires, 0);
        if (!IS_ERR_VALUE(res))
                *expires_next = expires;

        return res;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
        struct hrtimer_cpu_base *base;
        struct timespec realtime_offset;
        unsigned long seq;

        if (!hrtimer_hres_active())
                return;

        do {
                seq = read_seqbegin(&xtime_lock);
                set_normalized_timespec(&realtime_offset,
                                        -wall_to_monotonic.tv_sec,
                                        -wall_to_monotonic.tv_nsec);
        } while (read_seqretry(&xtime_lock, seq));

        base = &__get_cpu_var(hrtimer_bases);

        /* Adjust CLOCK_REALTIME offset */
        spin_lock(&base->lock);
        base->clock_base[CLOCK_REALTIME].offset =
                timespec_to_ktime(realtime_offset);

        hrtimer_force_reprogram(base);
        spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
        /* Retrigger the CPU local events everywhere */
        on_each_cpu(retrigger_next_event, NULL, 0, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
        WARN_ON_ONCE(num_online_cpus() > 1);

        /* Retrigger the CPU local events: */
        retrigger_next_event(NULL);
}

/*
 * Check whether the timer is on the callback pending list
 */
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
{
        return timer->state & HRTIMER_STATE_PENDING;
}
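/*
 * Editor's note: an illustrative usage sketch, not part of the original
 * file, showing how client code (itimers, nanosleep, drivers) drives the
 * machinery implemented above. All "example_*" names are hypothetical;
 * hrtimer_init(), hrtimer_start(), HRTIMER_MODE_REL and HRTIMER_NORESTART
 * come from <linux/hrtimer.h>.
 */
static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
        /* Runs in timer context when the timer expires; must not sleep. */
        return HRTIMER_NORESTART;       /* one-shot: do not restart */
}

static void example_arm_timer(void)
{
        hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        example_timer.function = example_timer_fn;

        /* Expire 500us from now, relative to the monotonic clock base. */
        hrtimer_start(&example_timer, ktime_set(0, 500 * NSEC_PER_USEC),
                      HRTIMER_MODE_REL);
}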