📄 hrtimer.c
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
{
	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		debug_hrtimer_deactivate(timer);
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			struct hrtimer_clock_base *base = timer->base;

			if (base->first == &timer->node &&
			    hrtimer_reprogram(timer, base)) {
				/*
				 * Timer is expired. Thus move it from tree to
				 * pending list again.
				 */
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
			}
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);

	fn = timer->function;
	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
		/*
		 * Used for scheduler timers, avoid lock inversion with
		 * rq->lock and tasklist_lock.
		 *
		 * These timers are required to deal with enqueue expiry
		 * themselves and are not allowed to migrate.
		 */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else
		restart = fn(timer);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
	 * reprogramming of the event hardware. This happens at the end of this
	 * function anyway.
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base, 0);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
}

#endif	/* CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT its the fall back code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();

	run_hrtimer_pending(cpu_base);
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (base->get_softirq_time)
			base->softirq_time = base->get_softirq_time();
		else if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <= timer->expires.tv64)
				break;

			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
#endif
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = ktime_sub(timer->expires, timer->base->get_time());
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
			      HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = restart->nanosleep.expires;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.index = t.timer.base->index;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = t.timer.expires.tv64;

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	INIT_LIST_HEAD(&cpu_base->cb_pending);
	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base, int dcpu)
{
	struct hrtimer *timer;
	struct rb_node *node;
	int raise = 0;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);

		/*
		 * Should not happen. Per CPU timers should be
		 * canceled _before_ the migration code is called
		 */
		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
			__remove_hrtimer(timer, old_base,
					 HRTIMER_STATE_INACTIVE, 0);
			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
			     timer, timer->function, dcpu);
			continue;
		}

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);

#ifdef CONFIG_HIGH_RES_TIMERS
		/*
		 * Happens with high res enabled when the timer was
		 * already expired and the callback mode is
		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
		 * enqueue code does not move them to the soft irq
		 * pending list for performance/latency reasons, but
		 * in the migration state, we need to do that
		 * otherwise we end up with a stale timer.
		 */
		if (timer->state == HRTIMER_STATE_MIGRATE) {
			timer->state = HRTIMER_STATE_PENDING;
			list_add_tail(&timer->cb_entry,
				      &new_base->cpu_base->cb_pending);
			raise = 1;
		}
#endif
		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
	return raise;
}

#ifdef CONFIG_HIGH_RES_TIMERS
static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
				   struct hrtimer_cpu_base *new_base)
{
	struct hrtimer *timer;
	int raise = 0;

	while (!list_empty(&old_base->cb_pending)) {
		timer = list_entry(old_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
		timer->base = &new_base->clock_base[timer->base->index];
		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
		raise = 1;
	}
	return raise;
}
#else
static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
				   struct hrtimer_cpu_base *new_base)
{
	return 0;
}
#endif

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i, raise = 0;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		if (migrate_hrtimer_list(&old_base->clock_base[i],
					 &new_base->clock_base[i], cpu))
			raise = 1;
	}

	if (migrate_hrtimer_pending(old_base, new_base))
		raise = 1;

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(hrtimer_bases);

	if (raise)
		hrtimer_raise_softirq();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}
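
For context, here is a minimal sketch of how client code in this kernel generation would arm one of the timers that the expiry paths above (__run_hrtimer(), run_hrtimer_pending(), hrtimer_interrupt()) eventually fire. This is not part of hrtimer.c: the demo_timer name, the demo_timer_fn callback, and the 100 ms expiry are illustrative assumptions.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical timer used only for illustration. */
static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* Runs when the timer expires; a one-shot timer is not re-enqueued. */
	return HRTIMER_NORESTART;
}

static void demo_timer_arm(void)
{
	/* Bind the timer to the monotonic clock base and install the callback. */
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;

	/* Expire 100 ms from now; HRTIMER_MODE_REL means relative to "now". */
	hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}

A periodic user would instead advance the expiry from the callback (e.g. via hrtimer_forward()) and return HRTIMER_RESTART, which is the re-enqueue case handled in run_hrtimer_pending() and __run_hrtimer() above.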