⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 timer.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 第 1 页 / 共 3 页
字号:
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	/*
	 * The handler is executing on this base right now; we cannot
	 * deactivate it without waiting, so report failure (-1) and let
	 * the caller decide whether to retry.
	 */
	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		/* Second argument 1: poison the detached list entry. */
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	/* Spin until the handler is no longer running on another CPU. */
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);

#endif

/*
 * Move all timers from bucket @index of vector @tv one level down the
 * wheel by re-inserting each relative to base->timer_jiffies.
 * Returns @index so the caller can chain cascades while it is zero.
 */
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

/* Bucket index for wheel level N+2 (tv2..tv5) at base->timer_jiffies. */
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers: whenever tv1 wraps (index == 0), pull
		 * timers down from the higher-level vectors; each further
		 * level is cascaded only if the one below also wrapped.
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			/* Run the handler unlocked; it may re-add timers. */
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				/* A handler leaking a preempt count is fatal. */
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)

/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	/* Default: no timer found within the maximum lookahead window. */
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	tvec_t *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			/* Deferrable timers do not force a CPU wakeup. */
			if (tbase_get_deferrable(nte->base))
				continue;
			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		tvec_t *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	/* No hrtimer pending at all: the wheel expiry stands. */
	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 *
 * Returns @now itself if a timer is already due, otherwise the earlier
 * of the next wheel expiry and the next hrtimer event.
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
/* Legacy wrapper: next pending timer relative to the current jiffy. */
unsigned long next_timer_interrupt(void)
{
	return get_next_timer_interrupt(jiffies);
}
#endif

#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
*/void update_process_times(int user_tick){	struct task_struct *p = current;	int cpu = smp_processor_id();	/* Note: this timer irq context must be accounted for as well. */	if (user_tick)		account_user_time(p, jiffies_to_cputime(1));	else		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));	run_local_timers();	if (rcu_pending(cpu))		rcu_check_callbacks(cpu, user_tick);	scheduler_tick(); 	run_posix_cpu_timers(p);}/* * Nr of active tasks - counted in fixed-point numbers */static unsigned long count_active_tasks(void){	return nr_active() * FIXED_1;}/* * Hmm.. Changed this, as the GNU make sources (load.c) seems to * imply that avenrun[] is the standard name for this kind of thing. * Nothing else seems to be standardized: the fractional size etc * all seem to differ on different machines. * * Requires xtime_lock to access. */unsigned long avenrun[3];EXPORT_SYMBOL(avenrun);/* * calc_load - given tick count, update the avenrun load estimates. * This is called while holding a write_lock on xtime_lock. */static inline void calc_load(unsigned long ticks){	unsigned long active_tasks; /* fixed-point */	static int count = LOAD_FREQ;	count -= ticks;	if (unlikely(count < 0)) {		active_tasks = count_active_tasks();		do {			CALC_LOAD(avenrun[0], EXP_1, active_tasks);			CALC_LOAD(avenrun[1], EXP_5, active_tasks);			CALC_LOAD(avenrun[2], EXP_15, active_tasks);			count += LOAD_FREQ;		} while (count < 0);	}}/* * This function runs timers and the timer-tq in bottom half context. */static void run_timer_softirq(struct softirq_action *h){	tvec_base_t *base = __get_cpu_var(tvec_bases);	hrtimer_run_queues();	if (time_after_eq(jiffies, base->timer_jiffies))		__run_timers(base);}/* * Called by the local, per-CPU timer interrupt on SMP. */void run_local_timers(void){	raise_softirq(TIMER_SOFTIRQ);	softlockup_tick();}/* * Called by the timer interrupt. xtime_lock must already be taken * by the timer IRQ! 
*/static inline void update_times(unsigned long ticks){	update_wall_time();	calc_load(ticks);}  /* * The 64-bit jiffies value is not atomic - you MUST NOT read it * without sampling the sequence number in xtime_lock. * jiffies is defined in the linker script... */void do_timer(unsigned long ticks){	jiffies_64 += ticks;	update_times(ticks);}#ifdef __ARCH_WANT_SYS_ALARM/* * For backwards compatibility?  This can be done in libc so Alpha * and all newer ports shouldn't need it. */asmlinkage unsigned long sys_alarm(unsigned int seconds){	return alarm_setitimer(seconds);}#endif#ifndef __alpha__/* * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this * should be moved into arch/i386 instead? *//** * sys_getpid - return the thread group id of the current process * * Note, despite the name, this returns the tgid not the pid.  The tgid and * the pid are identical unless CLONE_THREAD was specified on clone() in * which case the tgid is the same in all threads of the same group. * * This is SMP safe as current->tgid does not change. */asmlinkage long sys_getpid(void){	return current->tgid;}/* * Accessing ->real_parent is not SMP-safe, it could * change from under us. However, we can use a stale * value of ->real_parent under rcu_read_lock(), see * release_task()->call_rcu(delayed_put_task_struct). 
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	/* A stale ->real_parent is acceptable here; see comment above. */
	pid = rcu_dereference(current->real_parent)->tgid;
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

/*
 * Timer callback for schedule_timeout(): ->data carries the sleeping
 * task's pointer; wake it when the timeout expires.
 */
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -