⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 posix-timers.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 第 1 页 / 共 2 页
字号:
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		new_timer->it_sigev_notify = event.sigev_notify;
		new_timer->it_sigev_signo = event.sigev_signo;
		new_timer->it_sigev_value = event.sigev_value;

		read_lock(&tasklist_lock);
		if ((process = good_sigevent(&event))) {
			/*
			 * We may be setting up this process for another
			 * thread.  It may be exiting.  To catch this
			 * case we check the PF_EXITING flag.  If
			 * the flag is not set, the siglock will catch
			 * him before it is too late (in exit_itimers).
			 *
			 * The exec case is a bit more involved but easy
			 * to code.  If the process is in our thread
			 * group (and it must be or we would not allow
			 * it here) and is doing an exec, it will cause
			 * us to be killed.  In this case it will wait
			 * for us to die which means we can finish this
			 * linkage with our last gasp. I.e. no code :)
			 */
			spin_lock_irqsave(&process->sighand->siglock, flags);
			if (!(process->flags & PF_EXITING)) {
				new_timer->it_process = process;
				list_add(&new_timer->list,
					 &process->signal->posix_timers);
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				/*
				 * Hold a task ref while the timer targets a
				 * specific thread; dropped on timer deletion.
				 */
				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
					get_task_struct(process);
			} else {
				spin_unlock_irqrestore(&process->sighand->siglock, flags);
				process = NULL;
			}
		}
		read_unlock(&tasklist_lock);
		if (!process) {
			error = -EINVAL;
			goto out;
		}
	} else {
		/* No sigevent given: default to SIGALRM at the group leader. */
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->it_sigev_signo = SIGALRM;
		new_timer->it_sigev_value.sival_int = new_timer->it_id;
		process = current->group_leader;
		spin_lock_irqsave(&process->sighand->siglock, flags);
		new_timer->it_process = process;
		list_add(&new_timer->list, &process->signal->posix_timers);
		spin_unlock_irqrestore(&process->sighand->siglock, flags);
	}

 	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	if (error)
		release_posix_timer(new_timer, it_id_set);

	return error;
}

/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a dead lock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;
	/*
	 * Watch out here.  We do an irqsave on the idr_lock and pass the
	 * flags part over to the timer lock.  Must not let interrupts in
	 * while we are moving the lock.
	 */

	spin_lock_irqsave(&idr_lock, *flags);
	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
	if (timr) {
		spin_lock(&timr->it_lock);
		spin_unlock(&idr_lock);

		/*
		 * Reject a stale id, a timer already torn down, or a
		 * timer that belongs to another thread group.
		 */
		if ((timr->it_id != timer_id) || !(timr->it_process) ||
				timr->it_process->tgid != current->tgid) {
			unlock_timer(timr, *flags);
			timr = NULL;
		}
	} else
		spin_unlock_irqrestore(&idr_lock, *flags);

	return timr;
}

/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(struct itimerspec));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv.tv64)
		cur_setting->it_interval = ktime_to_timespec(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;	/* one-shot, not armed: report all-zero setting */

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += hrtimer_forward(timer, now, iv);

	remaining = ktime_sub(timer->expires, now);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining.tv64 <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired !
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec(remaining);
}

/* Get the time remaining on a POSIX.1b interval timer. */
asmlinkage long
sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
{
	struct k_itimer *timr;
	struct itimerspec cur_setting;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));

	unlock_timer(timr, flags);

	if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return 0;
}

/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer().  So all we need to do is
 * to pick up the frozen overrun.
 */
asmlinkage long
sys_timer_getoverrun(timer_t timer_id)
{
	struct k_itimer *timr;
	int overrun;
	long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}

/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.interval.tv64 = 0;
	/*
	 * careful here.  If smp we could be in the "fire" routine which will
	 * be spinning as we hold the lock.  But this is ONLY an SMP issue.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;	/* caller drops the lock and retries */

	/* Bump the sequence counter and clear any pending requeue. */
	timr->it_requeue_pending = (timr->it_requeue_pending + 2) & 
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	timer->expires = timespec_to_ktime(new_setting->it_value);

	/* Convert interval */
	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

	/* SIGEV_NONE timers are not queued ! See common_timer_get */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL)
			timer->expires = ktime_add(timer->expires,
						   timer->base->get_time());
		return 0;
	}

	hrtimer_start(timer, timer->expires, mode);
	return 0;
}

/* Set a POSIX.1b interval timer */
asmlinkage long
sys_timer_settime(timer_t timer_id, int flags,
		  const struct itimerspec __user *new_setting,
		  struct itimerspec __user *old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
			       (timr, flags, &new_spec, rtn));

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	// We already got the old time...
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}

/* Cancel the hrtimer backing a timer; TIMER_RETRY if its callback runs. */
static inline int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval.tv64 = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}

/* Dispatch deletion to the clock-specific timer_del implementation. */
static inline int timer_delete_hook(struct k_itimer *timer)
{
	return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}

/* Delete a POSIX.1b interval timer. */
asmlinkage long
sys_timer_delete(timer_t timer_id)
{
	struct k_itimer *timer;
	long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	if (timer->it_process) {
		if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
			put_task_struct(timer->it_process);
		timer->it_process = NULL;
	}
	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}

/*
 * return timer owned by the process, used by exit_itimers
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	if (timer->it_process) {
		if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
			put_task_struct(timer->it_process);
		timer->it_process = NULL;
	}
	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}

/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}

/* Not available / possible... functions */
int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);

int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
			       struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
	return -EOPNOTSUPP;	/* aka ENOTSUP in userland for POSIX */
#else  /*  parisc does define it separately.  */
	return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);

/* Set the time of a clock; dispatched to the clock's clock_set op. */
asmlinkage long sys_clock_settime(const clockid_t which_clock,
				  const struct timespec __user *tp)
{
	struct timespec new_tp;

	if (invalid_clockid(which_clock))
		return -EINVAL;
	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}

/* Read the current time of a clock into the user-supplied timespec. */
asmlinkage long
sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
{
	struct timespec kernel_tp;
	int error;

	if (invalid_clockid(which_clock))
		return -EINVAL;
	error = CLOCK_DISPATCH(which_clock, clock_get,
			       (which_clock, &kernel_tp));
	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}

/* Report a clock's resolution; tp may be NULL (resolution discarded). */
asmlinkage long
sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
{
	struct timespec rtn_tp;
	int error;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	error = CLOCK_DISPATCH(which_clock, clock_getres,
			       (which_clock, &rtn_tp));

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
		error = -EFAULT;
	}

	return error;
}

/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

/* High-resolution sleep on a given clock; dispatched per clock. */
asmlinkage long
sys_clock_nanosleep(const clockid_t which_clock, int flags,
		    const struct timespec __user *rqtp,
		    struct timespec __user *rmtp)
{
	struct timespec t;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return CLOCK_DISPATCH(which_clock, nsleep,
			      (which_clock, flags, &t, rmtp));
}

/*
 * nanosleep_restart for monotonic and realtime clocks
 */
static int common_nsleep_restart(struct restart_block *restart_block)
{
	return hrtimer_nanosleep_restart(restart_block);
}

/*
 * This will restart clock_nanosleep. This is required only by
 * compat_clock_nanosleep_restart for now.
 */
long
clock_nanosleep_restart(struct restart_block *restart_block)
{
	/* arg0 holds the clockid stashed when the sleep was interrupted */
	clockid_t which_clock = restart_block->arg0;

	return CLOCK_DISPATCH(which_clock, nsleep_restart,
			      (restart_block));
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -