⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 signal.c

📁 Signal handling code (signal.c) from the Linux kernel
💻 C
📖 第 1 页 / 共 5 页
字号:
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	/*
	 * Try every task in the process group: succeed if the signal
	 * reached at least one member, otherwise report the last error.
	 */
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

/*
 * Send @sig with @info to the thread group identified by @pid.
 * Returns 0 on success or a negative errno (-ESRCH if the task is gone).
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

/*
 * Same as kill_pid_info(), but takes a numeric pid which is looked up
 * in the caller's pid namespace under RCU.
 */
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* User-originated signals must pass the uid/euid permission check. */
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	/* ->sighand may already be gone if the target is exiting. */
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		/* Positive pid: one specific thread group. */
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0: our own process group; pid < -1: the group -pid. */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		/* pid == -1: every process except init and ourselves. */
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}

/* Map a "priv" flag to the special siginfo constant it stands for. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		/* Reset the handler so the forced SIGSEGV cannot be caught. */
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

/* Send a (possibly privileged) signal to every member of process group @pid. */
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

/* Send a (possibly privileged) signal to the thread group @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

/* Release a preallocated sigqueue that is no longer going to be sent. */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

/*
 * Queue the preallocated sigqueue @q as a signal for @t (or for its
 * whole thread group when @group is set).  Returns 0 on success, 1 if
 * the signal is ignored, -1 if the target's sighand is already gone.
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* Cumulative CPU times of the exiting thread group, for the waiter. */
	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
						       tsk->signal->stime));

	/* Translate the raw exit code into a CLD_* reason and status. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

/*
 * Tell the parent that @tsk stopped, continued or was trapped, by
 * sending SIGCHLD with the CLD_* reason in @why (unless the parent
 * opted out via SIG_IGN/SA_NOCLDSTOP) and waking its wait* sleepers.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	/* Unless ptraced, the notification goes to the real parent. */
	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL.
	 * Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.
	 */
	/* Go TRACED while still holding the siglock, then drop it. */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -