
📄 timer.c

📁 Timer code from the Linux kernel source (linux/kernel/timer.c)
💻 C
📖 Page 1 of 3
/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned and lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
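/*
 * Illustrative sketch (not part of the original timer.c): how the deferrable
 * flag rides in bit 0 of timer->base.  Because every tvec_base is at least
 * 2-byte aligned, the low bit of the pointer is always zero, so it can carry
 * the flag while tbase_get_base() recovers the real pointer:
 *
 *	struct timer_list t;
 *	t.base = per_cpu_base;		   (bit 0 == 0: ordinary timer)
 *	timer_set_deferrable(&t);	   (bit 0 == 1: deferrable timer)
 *	tbase_get_deferrable(t.base)	== 1
 *	tbase_get_base(t.base)		== per_cpu_base
 *
 * "per_cpu_base" is a hypothetical pointer used only for this example.
 */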
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 */
	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	/*
	 * In theory the following code can skip a jiffy in case jiffies
	 * increments right between the addition and the later subtraction.
	 * However since the entire point of this function is to use
	 * approximate timeouts, it's entirely ok to not handle that.
	 */
	return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
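/*
 * Worked example (illustrative, not part of the original source), assuming
 * HZ == 1000 and cpu == 0 so the 3 * cpu skew cancels out:
 *
 *	__round_jiffies(5120, 0):  rem = 120, 120 <  HZ/4 -> rounds down to 5000
 *	__round_jiffies(5600, 0):  rem = 600, 600 >= HZ/4 -> rounds up   to 6000
 *
 * If rounding would push the timeout to or before the current jiffies value,
 * the original, unrounded value is returned instead.
 */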
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
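/*
 * Worked example for internal_add_timer() above (illustrative, not part of
 * the original source), assuming !CONFIG_BASE_SMALL so TVR_BITS == 8 and
 * TVN_BITS == 6, with base->timer_jiffies == 1000:
 *
 *	expires == 1100:  idx == 100,  idx < 256      -> tv1.vec[1100 & 255]
 *	expires == 5000:  idx == 4000, idx < 1 << 14  -> tv2.vec[(5000 >> 8) & 63]
 *	expires == 900:   (signed long)idx < 0        -> tv1.vec[1000 & 255],
 *	                  i.e. it is queued to fire on the next tick processed.
 *
 * Each outer vector (tv2..tv5) is coarser by a factor of TVN_SIZE; its buckets
 * are cascaded back into tv1 as base->timer_jiffies advances.
 */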
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically
 *   initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer);

void init_timer_on_stack(struct timer_list *timer)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static void __init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior calling *any* of the
 * other timer functions.
 */
void init_timer(struct timer_list *timer)
{
	debug_timer_init(timer);
	__init_timer(timer);
}
EXPORT_SYMBOL(init_timer);

void init_timer_deferrable(struct timer_list *timer)
{
	init_timer(timer);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);
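/*
 * Minimal usage sketch (illustrative only, not part of the original file).
 * "my_timer" and "my_timer_fn" are hypothetical names; add_timer(),
 * mod_timer() and del_timer_sync() belong to the same kernel timer API
 * (declared in linux/timer.h; the implementations continue on the
 * following pages of this file):
 *
 *	static struct timer_list my_timer;
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		(runs in softirq context when the timer expires)
 *	}
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_timer_fn;
 *	my_timer.data = 0;
 *	my_timer.expires = round_jiffies(jiffies + 5 * HZ);
 *	add_timer(&my_timer);
 *	...
 *	del_timer_sync(&my_timer);
 */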
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_timer_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
