
exit.c

Process-exit handling from the Linux kernel (kernel/exit.c)
Language: C
Page 1 of 4 (the excerpt starts midway through exit_notify() and breaks off inside wait_task_zombie())
	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id) &&
	    !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->group_exit_task &&
	    tsk->signal->notify_count < 0)
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	tracehook_report_exit(&code);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;
	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}
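complete_and_exit() above is typically used by module-owned kernel threads: it lets the thread signal its creator and die without ever returning into module text that may already be unloaded, since both the complete() and the do_exit() run from core kernel code. A minimal sketch of the pattern (hypothetical module-side code, not part of exit.c; worker_done and worker_fn are made-up names):

static DECLARE_COMPLETION(worker_done);	/* signalled once the thread is gone */

static int worker_fn(void *unused)
{
	/* ... the thread's actual work ... */

	/* Wake anyone blocked in wait_for_completion(&worker_done),
	 * then exit. complete_and_exit() never returns, so after the
	 * wait finishes the module can safely be torn down. */
	complete_and_exit(&worker_done, 0);
}

The module's cleanup path would call wait_for_completion(&worker_done) before freeing anything the thread uses.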
/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = NULL;

	if (type == PIDTYPE_PID)
		pid = task->pids[type].pid;
	else if (type < PIDTYPE_MAX)
		pid = task->group_leader->pids[type].pid;
	return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
			  struct task_struct *p)
{
	int err;

	if (type < PIDTYPE_MAX) {
		if (task_pid_type(p, type) != pid)
			return 0;
	}

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);

	if (!likely(options & WEXITED))
		return 0;

	if (unlikely(options & WNOWAIT)) {
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.
		 * We do need to protect the access to
		 * p->parent->signal fields, as other threads in the
		 * parent group can be right here reaping other
		 * children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
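The status word that wait_task_zombie() is decoding when this page breaks off uses the same layout userspace takes apart with the W* macros: bits 0..6 hold the terminating signal (zero means a normal exit, the CLD_EXITED case), bit 0x80 is the core-dump flag, and bits 8..15 carry the exit code that sys_exit_group() shifted into place. A minimal userspace demo of that encoding (hypothetical standalone program, not part of exit.c):

#define _DEFAULT_SOURCE		/* for WCOREDUMP() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0)
		exit(300);	/* 300 & 0xff == 44: only the low byte survives */

	waitpid(child, &status, 0);
	if (WIFEXITED(status))		/* (status & 0x7f) == 0 */
		printf("exited, code %d (raw status 0x%x)\n",
		       WEXITSTATUS(status), status);	/* prints 44, 0x2c00 */
	else if (WIFSIGNALED(status))	/* status & 0x7f is the signal number */
		printf("killed by signal %d%s\n", WTERMSIG(status),
		       WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}

Passing 300 to exit() makes the `& 0xff` truncation in sys_exit_group() visible: only the low byte reaches the parent, which observes 44.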
