⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 timer.c

📁 Timer code from the Linux kernel (kernel/timer.c)
💻 C
📖 第 1 页 / 共 3 页
字号:
		base = tbase_get_base(prelock_base);		if (likely(base != NULL)) {			spin_lock_irqsave(&base->lock, *flags);			if (likely(prelock_base == timer->base))				return base;			/* The timer has migrated to another CPU */			spin_unlock_irqrestore(&base->lock, *flags);		}		cpu_relax();	}}int __mod_timer(struct timer_list *timer, unsigned long expires){	struct tvec_base *base, *new_base;	unsigned long flags;	int ret = 0;	timer_stats_timer_set_start_info(timer);	BUG_ON(!timer->function);	base = lock_timer_base(timer, &flags);	if (timer_pending(timer)) {		detach_timer(timer, 0);		ret = 1;	}	debug_timer_activate(timer);	new_base = __get_cpu_var(tvec_bases);	if (base != new_base) {		/*		 * We are trying to schedule the timer on the local CPU.		 * However we can't change timer's base while it is running,		 * otherwise del_timer_sync() can't detect that the timer's		 * handler yet has not finished. This also guarantees that		 * the timer is serialized wrt itself.		 */		if (likely(base->running_timer != timer)) {			/* See the comment in lock_timer_base() */			timer_set_base(timer, NULL);			spin_unlock(&base->lock);			base = new_base;			spin_lock(&base->lock);			timer_set_base(timer, base);		}	}	timer->expires = expires;	internal_add_timer(base, timer);	spin_unlock_irqrestore(&base->lock, flags);	return ret;}EXPORT_SYMBOL(__mod_timer);/** * add_timer_on - start a timer on a particular CPU * @timer: the timer to be added * @cpu: the CPU to start it on * * This is not very scalable on SMP. Double adds are not possible. */void add_timer_on(struct timer_list *timer, int cpu){	struct tvec_base *base = per_cpu(tvec_bases, cpu);	unsigned long flags;	timer_stats_timer_set_start_info(timer);	BUG_ON(timer_pending(timer) || !timer->function);	spin_lock_irqsave(&base->lock, flags);	timer_set_base(timer, base);	debug_timer_activate(timer);	internal_add_timer(base, timer);	/*	 * Check whether the other CPU is idle and needs to be	 * triggered to reevaluate the timer wheel when nohz is	 * active. 
We are protected against the other CPU fiddling	 * with the timer by holding the timer base lock. This also	 * makes sure that a CPU on the way to idle can not evaluate	 * the timer wheel.	 */	wake_up_idle_cpu(cpu);	spin_unlock_irqrestore(&base->lock, flags);}/** * mod_timer - modify a timer's timeout * @timer: the timer to be modified * @expires: new timeout in jiffies * * mod_timer() is a more efficient way to update the expire field of an * active timer (if the timer is inactive it will be activated) * * mod_timer(timer, expires) is equivalent to: * *     del_timer(timer); timer->expires = expires; add_timer(timer); * * Note that if there are multiple unserialized concurrent users of the * same timer, then mod_timer() is the only safe way to modify the timeout, * since add_timer() cannot modify an already running timer. * * The function returns whether it has modified a pending timer or not. * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an * active timer returns 1.) */int mod_timer(struct timer_list *timer, unsigned long expires){	BUG_ON(!timer->function);	timer_stats_timer_set_start_info(timer);	/*	 * This is a common optimization triggered by the	 * networking code - if the timer is re-modified	 * to be the same thing then just return:	 */	if (timer->expires == expires && timer_pending(timer))		return 1;	return __mod_timer(timer, expires);}EXPORT_SYMBOL(mod_timer);/** * del_timer - deactive a timer. * @timer: the timer to be deactivated * * del_timer() deactivates a timer - this works on both active and inactive * timers. * * The function returns whether it has deactivated a pending timer or not. * (ie. del_timer() of an inactive timer returns 0, del_timer() of an * active timer returns 1.) 
*/int del_timer(struct timer_list *timer){	struct tvec_base *base;	unsigned long flags;	int ret = 0;	timer_stats_timer_clear_start_info(timer);	if (timer_pending(timer)) {		base = lock_timer_base(timer, &flags);		if (timer_pending(timer)) {			detach_timer(timer, 1);			ret = 1;		}		spin_unlock_irqrestore(&base->lock, flags);	}	return ret;}EXPORT_SYMBOL(del_timer);#ifdef CONFIG_SMP/** * try_to_del_timer_sync - Try to deactivate a timer * @timer: timer do del * * This function tries to deactivate a timer. Upon successful (ret >= 0) * exit the timer is not queued and the handler is not running on any CPU. * * It must not be called from interrupt contexts. */int try_to_del_timer_sync(struct timer_list *timer){	struct tvec_base *base;	unsigned long flags;	int ret = -1;	base = lock_timer_base(timer, &flags);	if (base->running_timer == timer)		goto out;	ret = 0;	if (timer_pending(timer)) {		detach_timer(timer, 1);		ret = 1;	}out:	spin_unlock_irqrestore(&base->lock, flags);	return ret;}EXPORT_SYMBOL(try_to_del_timer_sync);/** * del_timer_sync - deactivate a timer and wait for the handler to finish. * @timer: the timer to be deactivated * * This function only differs from del_timer() on SMP: besides deactivating * the timer it also makes sure the handler has finished executing on other * CPUs. * * Synchronization rules: Callers must prevent restarting of the timer, * otherwise this function is meaningless. It must not be called from * interrupt contexts. The caller must not hold locks which would prevent * completion of the timer's handler. The timer's handler must not call * add_timer_on(). Upon exit the timer is not queued and the handler is * not running on any CPU. * * The function returns whether it has deactivated a pending timer or not. 
*/int del_timer_sync(struct timer_list *timer){	for (;;) {		int ret = try_to_del_timer_sync(timer);		if (ret >= 0)			return ret;		cpu_relax();	}}EXPORT_SYMBOL(del_timer_sync);#endifstatic int cascade(struct tvec_base *base, struct tvec *tv, int index){	/* cascade all the timers from tv up one level */	struct timer_list *timer, *tmp;	struct list_head tv_list;	list_replace_init(tv->vec + index, &tv_list);	/*	 * We are removing _all_ timers from the list, so we	 * don't have to detach them individually.	 */	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {		BUG_ON(tbase_get_base(timer->base) != base);		internal_add_timer(base, timer);	}	return index;}#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)/** * __run_timers - run all expired timers (if any) on this CPU. * @base: the timer vector to be processed. * * This function cascades all vectors and executes all expired timer * vectors. */static inline void __run_timers(struct tvec_base *base){	struct timer_list *timer;	spin_lock_irq(&base->lock);	while (time_after_eq(jiffies, base->timer_jiffies)) {		struct list_head work_list;		struct list_head *head = &work_list;		int index = base->timer_jiffies & TVR_MASK;		/*		 * Cascade timers:		 */		if (!index &&			(!cascade(base, &base->tv2, INDEX(0))) &&				(!cascade(base, &base->tv3, INDEX(1))) &&					!cascade(base, &base->tv4, INDEX(2)))			cascade(base, &base->tv5, INDEX(3));		++base->timer_jiffies;		list_replace_init(base->tv1.vec + index, &work_list);		while (!list_empty(head)) {			void (*fn)(unsigned long);			unsigned long data;			timer = list_first_entry(head, struct timer_list,entry);			fn = timer->function;			data = timer->data;			timer_stats_account_timer(timer);			set_running_timer(base, timer);			detach_timer(timer, 1);			spin_unlock_irq(&base->lock);			{				int preempt_count = preempt_count();				fn(data);				if (preempt_count != preempt_count()) {					printk(KERN_ERR "huh, entered %p "					       "with preempt_count %08x, 
exited"					       " with %08x?\n",					       fn, preempt_count,					       preempt_count());					BUG();				}			}			spin_lock_irq(&base->lock);		}	}	set_running_timer(base, NULL);	spin_unlock_irq(&base->lock);}#ifdef CONFIG_NO_HZ/* * Find out when the next timer event is due to happen. This * is used on S/390 to stop all activity when a cpus is idle. * This functions needs to be called disabled. */static unsigned long __next_timer_interrupt(struct tvec_base *base){	unsigned long timer_jiffies = base->timer_jiffies;	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;	int index, slot, array, found = 0;	struct timer_list *nte;	struct tvec *varray[4];	/* Look for timer events in tv1. */	index = slot = timer_jiffies & TVR_MASK;	do {		list_for_each_entry(nte, base->tv1.vec + slot, entry) {			if (tbase_get_deferrable(nte->base))				continue;			found = 1;			expires = nte->expires;			/* Look at the cascade bucket(s)? */			if (!index || slot < index)				goto cascade;			return expires;		}		slot = (slot + 1) & TVR_MASK;	} while (slot != index);cascade:	/* Calculate the next cascade event */	if (index)		timer_jiffies += TVR_SIZE - index;	timer_jiffies >>= TVR_BITS;	/* Check tv2-tv5. */	varray[0] = &base->tv2;	varray[1] = &base->tv3;	varray[2] = &base->tv4;	varray[3] = &base->tv5;	for (array = 0; array < 4; array++) {		struct tvec *varp = varray[array];		index = slot = timer_jiffies & TVN_MASK;		do {			list_for_each_entry(nte, varp->vec + slot, entry) {				found = 1;				if (time_before(nte->expires, expires))					expires = nte->expires;			}			/*			 * Do we still search for the first timer or are			 * we looking up the cascade buckets ?			 */			if (found) {				/* Look at the cascade bucket(s)? 
*/				if (!index || slot < index)					break;				return expires;			}			slot = (slot + 1) & TVN_MASK;		} while (slot != index);		if (index)			timer_jiffies += TVN_SIZE - index;		timer_jiffies >>= TVN_BITS;	}	return expires;}/* * Check, if the next hrtimer event is before the next timer wheel * event: */static unsigned long cmp_next_hrtimer_event(unsigned long now,					    unsigned long expires){	ktime_t hr_delta = hrtimer_get_next_event();	struct timespec tsdelta;	unsigned long delta;	if (hr_delta.tv64 == KTIME_MAX)		return expires;	/*	 * Expired timer available, let it expire in the next tick	 */	if (hr_delta.tv64 <= 0)		return now + 1;	tsdelta = ktime_to_timespec(hr_delta);	delta = timespec_to_jiffies(&tsdelta);	/*	 * Limit the delta to the max value, which is checked in	 * tick_nohz_stop_sched_tick():	 */	if (delta > NEXT_TIMER_MAX_DELTA)		delta = NEXT_TIMER_MAX_DELTA;	/*	 * Take rounding errors in to account and make sure, that it	 * expires in the next tick. Otherwise we go into an endless	 * ping pong due to tick_nohz_stop_sched_tick() retriggering	 * the timer softirq	 */	if (delta < 1)		delta = 1;	now += delta;	if (time_before(now, expires))		return now;	return expires;}/** * get_next_timer_interrupt - return the jiffy of the next pending timer * @now: current time (in jiffies) */unsigned long get_next_timer_interrupt(unsigned long now){	struct tvec_base *base = __get_cpu_var(tvec_bases);	unsigned long expires;	spin_lock(&base->lock);	expires = __next_timer_interrupt(base);	spin_unlock(&base->lock);	if (time_before_eq(expires, now))		return now;	return cmp_next_hrtimer_event(now, expires);}#endif#ifndef CONFIG_VIRT_CPU_ACCOUNTINGvoid account_process_tick(struct task_struct *p, int user_tick){	cputime_t one_jiffy = jiffies_to_cputime(1);	if (user_tick) {		account_user_time(p, one_jiffy);		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));	} else {		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);		account_system_time_scaled(p, 
cputime_to_scaled(one_jiffy));	}}#endif/* * Called from the timer interrupt handler to charge one tick to the current * process.  user_tick is 1 if the tick is user time, 0 for system. */void update_process_times(int user_tick){	struct task_struct *p = current;	int cpu = smp_processor_id();	/* Note: this timer irq context must be accounted for as well. */	account_process_tick(p, user_tick);	run_local_timers();	if (rcu_pending(cpu))		rcu_check_callbacks(cpu, user_tick);	scheduler_tick();	run_posix_cpu_timers(p);}/* * Nr of active tasks - counted in fixed-point numbers */static unsigned long count_active_tasks(void){	return nr_active() * FIXED_1;}/* * Hmm.. Changed this, as the GNU make sources (load.c) seems to * imply that avenrun[] is the standard name for this kind of thing. * Nothing else seems to be standardized: the fractional size etc * all seem to differ on different machines. * * Requires xtime_lock to access. */unsigned long avenrun[3];EXPORT_SYMBOL(avenrun);/* * calc_load - given tick count, update the avenrun load estimates. * This is called while holding a write_lock on xtime_lock. */static inline void calc_load(unsigned long ticks){	unsigned long active_tasks; /* fixed-point */	static int count = LOAD_FREQ;	count -= ticks;	if (unlikely(count < 0)) {		active_tasks = count_active_tasks();		do {			CALC_LOAD(avenrun[0], EXP_1, active_tasks);			CALC_LOAD(avenrun[1], EXP_5, active_tasks);			CALC_LOAD(avenrun[2], EXP_15, active_tasks);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -