
📄 hrtimer.c

📁 Linux 2.6.19 kernel source code before patching
💻 C
📖 Page 1 of 3
	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__remove_hrtimer(timer, base,
					 HRTIMER_STATE_CALLBACK, 0);
			timer_stats_account_hrtimer(timer);

			/*
			 * Note: We clear the CALLBACK bit after
			 * enqueue_hrtimer to avoid reprogramming of
			 * the event hardware. This happens at the end
			 * of this function anyway.
			 */
			if (timer->function(timer) != HRTIMER_NORESTART) {
				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
				enqueue_hrtimer(timer, base, 0);
			}
			timer->state &= ~HRTIMER_STATE_CALLBACK;
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			if (timer->base->first == &timer->node)
				hrtimer_reprogram(timer, timer->base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

#endif	/* CONFIG_HIGH_RES_TIMERS */

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		enum hrtimer_restart (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

#ifdef CONFIG_HIGH_RES_TIMERS
		WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
#endif
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart != HRTIMER_NORESTART) {
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base, 0);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT its the fall back code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	int i;

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		if (hrtimer_switch_to_hres())
			return;

	hrtimer_get_softirq_time(cpu_base);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		run_hrtimer_queue(cpu_base, i);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART;
#endif
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	struct timespec tu;
	ktime_t time;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		return 0;

	rmtp = (struct timespec __user *) restart->arg1;
	if (rmtp) {
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(time);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart->fn = hrtimer_nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(rem);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);
	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
#endif
}
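Usage note (not part of hrtimer.c): the listing above implements the core hrtimer machinery that callers reach through hrtimer_init(), hrtimer_start(), hrtimer_cancel(), and an expiry callback returning an enum hrtimer_restart value. As a rough illustration only, a kernel module of this era might arm a one-shot high resolution timer roughly as sketched below; the names my_timer, my_timer_fn, my_init, and my_exit are hypothetical, and the exact prototypes should be checked against include/linux/hrtimer.h for the kernel in question.

/* Minimal sketch, assuming the 2.6-era hrtimer API shown in the listing. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;	/* hypothetical example timer */

/* Expiry callback; HRTIMER_NORESTART keeps the timer one-shot. */
static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	printk(KERN_INFO "hrtimer expired\n");
	return HRTIMER_NORESTART;
}

static int __init my_init(void)
{
	/* Bind the timer to CLOCK_MONOTONIC with a relative expiry mode. */
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	/* Fire roughly 500 ms from now. */
	hrtimer_start(&my_timer, ktime_set(0, 500 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	return 0;
}

static void __exit my_exit(void)
{
	/* Cancel in case the timer is still queued or running. */
	hrtimer_cancel(&my_timer);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Returning a value other than HRTIMER_NORESTART from the callback causes the timer to be re-enqueued, which is exactly the branch handled in hrtimer_interrupt() and run_hrtimer_queue() above; a periodic timer would push timer->expires forward before doing so.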
