
signal.c
linux 2.6.19 kernel source code before patching
Language: C
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);
	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	rcu_read_lock();
	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}
	rcu_read_unlock();
	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();
	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
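As an aside, the exported kill_pgrp()/kill_pid() wrappers above are the entry points intended for other kernel code. Below is a minimal sketch of a caller; the helper name terminate_by_pid() is hypothetical and not part of this file. It resolves a numeric pid to a struct pid under rcu_read_lock(), exactly as kill_proc_info() and kill_something_info() do above, and passes priv=1 so that __si_special() expands to SEND_SIG_PRIV.

/*
 * Illustrative sketch only, not part of signal.c.
 * terminate_by_pid() is a hypothetical in-kernel caller of the
 * exported kill_pid() entry point above.
 */
#include <linux/errno.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int terminate_by_pid(pid_t nr)
{
	struct pid *pid;
	int err = -ESRCH;

	/* find_pid() must run under rcu_read_lock(), as in kill_proc_info(). */
	rcu_read_lock();
	pid = find_pid(nr);
	if (pid)
		err = kill_pid(pid, SIGTERM, 1);	/* priv=1 -> SEND_SIG_PRIV */
	rcu_read_unlock();
	return err;
}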
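The preallocated-sigqueue comment above describes a three-phase lifecycle: allocate at timer creation (so a later expiry cannot fail for lack of memory), requeue the same entry at each expiry, and free at deletion. The following sketch, loosely modelled on what kernel/posix-timers.c does with these helpers, makes that lifecycle explicit; the struct and the my_-prefixed function names are hypothetical.

/*
 * Illustrative sketch only, not part of signal.c: the preallocated
 * sigqueue lifecycle used by POSIX timers, per the comment above.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/signal.h>

struct my_timer {
	struct sigqueue *sigq;		/* preallocated at creation time */
	struct task_struct *target;	/* thread to signal on expiry */
	int sig;
};

/* Creation: preallocate now; a failure is reported as -EAGAIN. */
static int my_timer_create(struct my_timer *t)
{
	t->sigq = sigqueue_alloc();
	if (!t->sigq)
		return -EAGAIN;
	t->sigq->info.si_code = SI_TIMER;	/* send_sigqueue() BUG_ON()s otherwise */
	t->sigq->info.si_signo = t->sig;
	t->sigq->info.si_overrun = 0;
	return 0;
}

/* Expiry: requeue the same entry; if still queued, only si_overrun is bumped. */
static void my_timer_expire(struct my_timer *t)
{
	send_sigqueue(t->sig, t->sigq, t->target);
}

/* Deletion: sigqueue_free() also dequeues the entry if it is still pending. */
static void my_timer_delete(struct my_timer *t)
{
	sigqueue_free(t->sigq);
}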
