⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 signal.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	/*
	 * Permission check for user-originated signals: `^` is used as
	 * "!=" here -- sender's euid/uid must match the target's uid or
	 * suid, with SIGCONT within the same session and CAP_KILL as
	 * exceptions.
	 */
	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	return security_task_kill(t, info, sig, 0);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			/*
			 * Drop the siglock across the parent
			 * notification; it is reacquired right after.
			 */
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			/* Siglock dropped for the notification, as above. */
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

/*
 * Queue @sig with @info on the pending set @signals belonging to @t.
 * Returns 0 on success, or -EAGAIN when a sigqueue allocation fails
 * for an rt signal whose info must not be dropped (see below).
 * Must be called with the sighand lock held (required by
 * signalfd_notify(), per the comment on it).
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		/* info is one of the SEND_SIG_* magic cookies or a real pointer. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

/* True when @sig is a legacy (non-rt) signal already pending in @sigptr. */
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

/*
 * Send a signal to a single task's private pending set.  Caller must
 * hold t->sighand->siglock with interrupts disabled (asserted below).
 */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/* Force @sig on @t with no siginfo (fast path via SEND_SIG_FORCED). */
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

/*
 * @sig has already been put on @p's shared-pending queue; pick a thread
 * to dequeue it and wake that thread, escalating to group-wide death or
 * core-dump handling if the signal is fatal.
 */
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target round-robins so successive signals are
		 * balanced across the group's threads.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

/*
 * Send a signal to the whole thread group: queue it on the shared
 * pending set and have __group_complete_signal() pick a thread to
 * take it.  Caller must hold p->sighand->siglock (asserted below).
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);

	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 *
 * Returns the task's sighand with its siglock held (interrupts saved
 * into *flags), or NULL if the sighand is already gone.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		/* tsk->sighand changed before we got the lock; retry. */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

/*
 * Permission-checked entry point for sending @sig to the thread group
 * of @p.  Returns the permission error, -ESRCH when the target's
 * sighand is already gone, or the result of __group_send_sig_info().
 * sig == 0 is a pure existence/permission probe: no signal is sent.
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

/*
 * Signal every member of @pgrp.  Succeeds (returns 0) if at least one
 * member was signalled; otherwise returns the last error (or -ESRCH
 * when the group had no members).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

/* NOTE(review): function truncated in this chunk; body continues past it. */
int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -