
exit.c

linux 2.6.19 kernel source code before patching
C
Page 1 of 3
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state);
	p->real_parent = reaper;
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (p->real_parent->group_leader == father->group_leader)
		return;

	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((task_pgrp(p) != task_pgrp(father)) &&
	    (task_session(p) == task_session(father))) {
		struct pid *pgrp = task_pgrp(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;

		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper, real father it's us */
			choose_new_parent(p, reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * if the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * zombie forever since we prevented it from self-reap itself
		 * while it was being traced by us, to be able to see it in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;
	struct pid *pgrp;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING))
				recalc_sigpending_and_wake(t);
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */
	t = tsk->real_parent;
	pgrp = task_pgrp(tsk);
	if ((task_pgrp(t) != pgrp) &&
	    (task_session(t) == task_session(tsk)) &&
	    will_become_orphaned_pgrp(pgrp, tsk) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id  ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk == child_reaper(tsk))) {
		if (tsk->nsproxy->pid_ns != &init_pid_ns)
			tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
		else
			panic("Attempted to kill init!");
	}

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait for ever nirwana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	spin_lock_irq(&tsk->pi_lock);
	tsk->flags |= PF_EXITING;
	spin_unlock_irq(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
	if (unlikely(tsk->audit_context))
		audit_free(tsk);
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_task_namespaces(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;

	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
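The listing above is page 1 of 3 and breaks off at the start of wait_task_zombie(); the wait4()-side code continues on the later pages. Purely as an illustration (this sketch is not part of exit.c), the short userspace program below shows the behaviour these kernel paths implement on a typical glibc/NPTL system: the child's exit() ends in the exit_group system call, so it reaches sys_exit_group() -> do_group_exit() -> do_exit(), exit_notify() signals the parent, and the zombie child is reaped by the parent's waitpid() via the wait_task_zombie() path. The exit status 42 is an arbitrary example value.

/* Userspace illustration only -- not kernel code.
 * The child's exit(42) becomes (42 & 0xff) << 8 inside sys_exit_group();
 * waitpid()/WEXITSTATUS() recover the original 42 from that encoding. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0)
		exit(42);	/* child: torn down via do_group_exit()/do_exit() */

	int status;
	if (waitpid(pid, &status, 0) == pid && WIFEXITED(status))
		printf("child %d exited with status %d\n",
		       (int)pid, WEXITSTATUS(status));
	return 0;
}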
