
📄 signal.c

📁 Signal handling code from the Linux kernel (kernel/signal.c)
💻 C
📖 Page 1 of 5
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		if (unlikely(!tracehook_notify_jctl(1, why)))
			goto relock;

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
