
📄 timer.c

📁 Timer management code from the Linux kernel (kernel/timer.c)
💻 C
📖 Page 1 of 3
/* (The listing begins mid-file: this is the tail of calc_load()'s load-average loop.) */
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset(info, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying.  The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		monotonic_to_bootbased(&tp);
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		info->procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;

	do_sysinfo(&val);
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);

	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
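
As a quick illustration of how the sleeping primitives above are meant to be used from other kernel code, here is a minimal sketch of a kernel thread that delays with msleep() and with the lower-level set_current_state() + schedule_timeout() pairing that msleep() wraps. This module is not part of timer.c; the names demo_thread, demo_task, "timer-demo", and the module hooks are hypothetical.

/* Hypothetical example module; not part of timer.c. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* Sleep ~100ms; uninterruptible, so signals are ignored. */
		msleep(100);

		/*
		 * Equivalent low-level form: the task state must be set
		 * *before* schedule_timeout() arms the one-shot timer,
		 * exactly as the schedule_timeout() kernel-doc requires.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "timer-demo");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note the design point the kernel-doc spells out: if the task state is left at TASK_RUNNING, schedule_timeout() returns immediately, which is why the helpers schedule_timeout_interruptible() and friends set the state themselves.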
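
From userspace, the sys_sysinfo() path above is normally reached through the sysinfo(2) wrapper in libc. A minimal sketch of a caller, assuming a glibc environment (nothing from this file is called directly):

/* Hypothetical userspace caller of the sysinfo(2) syscall shown above. */
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo info;

	if (sysinfo(&info) != 0) {
		perror("sysinfo");
		return 1;
	}

	/* loads[] is fixed-point, scaled by 2^SI_LOAD_SHIFT (65536). */
	printf("uptime: %ld s, load1: %.2f, totalram: %lu bytes\n",
	       info.uptime, info.loads[0] / 65536.0,
	       info.totalram * (unsigned long)info.mem_unit);
	return 0;
}

Multiplying by info.mem_unit is the portable reading: as the do_sysinfo() comment explains, the kernel folds mem_unit into the memory fields (setting it to 1) only when the totals fit in an unsigned long.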
