📄 hrtimer.c
字号:
/*
 * Remove a timer from the callback pending list
 */
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
{
	list_del_init(&timer->cb_entry);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	/* No event programmed yet: next expiry is "never" */
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
	INIT_LIST_HEAD(&base->cb_pending);
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
	INIT_LIST_HEAD(&timer->cb_entry);
}

/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 *
 * Returns 1 when the timer was already expired and has been handled
 * according to its callback mode (so the caller must NOT enqueue it),
 * 0 otherwise.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this.
			 */
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			raise_softirq(HRTIMER_SOFTIRQ);
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}

/*
 * Switch to high resolution mode
 *
 * Returns 1 when high resolution mode is (now) active, 0 when the
 * switch failed in tick_init_highres().
 */
static int hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
	unsigned long flags;

	/* Already switched on this CPU */
	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

/* !CONFIG_HIGH_RES_TIMERS: all high resolution hooks become no-ops */
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; }
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
/*
 * Record where a timer was started from for /proc timer statistics.
 * Only the first start site is recorded; restarts keep the original.
 */
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward
 - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	/* Expiry is still in the future: nothing to forward */
	if (delta.tv64 < 0)
		return 0;

	/* Clamp the interval to the clock resolution */
	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);
	/*
	 * Make sure, that the result did not wrap with a very large
	 * interval.
	 */
	if (timer->expires.tv64 < 0)
		timer->expires = ktime_set(KTIME_SEC_MAX, 0);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base, int reprogram)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We dont care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (!base->first ||
	    timer->expires.tv64 <
	    rb_entry(base->first, struct hrtimer, node)->expires.tv64) {
		/*
		 * Reprogram the clock event device. When the timer is already
		 * expired hrtimer_enqueue_reprogram has either called the
		 * callback or added it to the pending list and raised the
		 * softirq.
		 *
		 * This is a NOP for !HIGHRES
		 */
		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
			return;

		base->first = &timer->node;
	}

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	/* High res. callback list. NOP for !HIGHRES */
	if (hrtimer_cb_pending(timer))
		hrtimer_remove_cb_pending(timer);
	else {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device. if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 *
 * Returns 1 when a queued timer was removed, 0 when it was not queued.
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		/* Convert the relative expiry to absolute time */
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		/* Callback is running: spin until it can be cancelled */
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	/*
	 * Only scan the clock bases when high resolution mode is
	 * inactive; otherwise mindelta stays at KTIME_MAX.
	 */
	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			/* No timers queued on this clock base */
			if (!base->first)
				continue;

			/* base->first is the earliest expiring timer */
			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = timer->expires.tv64;
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* An already expired timer means: expire now */
	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;

	return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -