⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 signal.c

📁 Kernel code of linux kernel
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

/* Return the handler installed for @sig in @t's shared sighand table. */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

/* Does @handler mean @sig is dropped on delivery? */
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Would @sig be discarded if sent to @t right now?  Takes blocking,
 * the installed handler, and any attached tracer into account.
 */
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	handler = sig_handler(t, sig);
	if (!sig_handler_ignored(handler, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	/* Unrolled for the common small values of _NSIG_WORDS. */
	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready !=	0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Set TIF_SIGPENDING on @t if it has anything deliverable (a group stop
 * in progress, or unblocked pending signals, private or shared).
 * Returns 1 if the flag was set, 0 otherwise.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

/*
 * Recompute TIF_SIGPENDING for current.  The flag is only cleared when
 * nothing is deliverable and the task is not being frozen, and a
 * tracehook override can force it on unconditionally.
 */
void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	/* Unrolled for the common small values of _NSIG_WORDS. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

/*
 * Allocate a sigqueue entry for @t, charging it against t->user's
 * pending-signal count.  The RLIMIT_SIGPENDING limit is enforced unless
 * @override_rlimit is set (used so e.g. SIGKILL cannot be lost to the
 * rlimit).  Returns NULL on failure with the charge backed out.
 */
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

/*
 * Release a sigqueue entry and drop its accounting.  Preallocated
 * entries (SIGQUEUE_PREALLOC, posix-timer owned) are left alone.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

/* Discard every queued signal on @queue and clear its pending set. */
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Drop only the SI_TIMER (posix-timer) entries from @pending, retaining
 * everything else.  A signal's bit stays set if any non-timer instance
 * of it remains queued.  Caller must hold the siglock.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

/* Flush pending timer signals from both private and shared queues. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

/* Set every handler to SIG_IGN and discard whatever is already queued. */
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		/* SIG_IGN survives unless force_default is set. */
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/*
 * Is @sig unhandled for @tsk?  Global init counts as always unhandled;
 * a user-installed handler means handled; otherwise defer to any tracer.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig, handler);
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.
 * If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Dequeue one queued instance of @sig from @list into @info.  The
 * signal's bit in the pending set is cleared only when no further
 * instance remains queued.  Caller must hold the siglock.
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

/*
 * Pick and collect the next deliverable signal from @pending, honouring
 * any block_all_signals() notifier installed by a driver: if the
 * notifier vetoes the signal, nothing is dequeued and 0 is returned.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}

	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -