
📄 exit.c

📁 linux 2.6.19 kernel source code before patching
💻 C
📖 Page 1 of 3
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/signalfd.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}
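
Note how __unhash_process() detaches the PGID and SID links only for the thread-group leader: every thread in a process shares the leader's PID as its thread-group id, so only the leader carries the group and session hashes. A minimal userspace sketch of that relationship (hypothetical demo code, not part of this kernel file; build with gcc -pthread):

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>

static void *worker(void *arg)
{
	(void)arg;
	/* gettid() has no glibc wrapper on older systems; use syscall(). */
	printf("thread: pid=%d tid=%ld\n",
	       (int)getpid(), (long)syscall(SYS_gettid));
	return NULL;
}

int main(void)
{
	pthread_t t;
	/* The main thread is the thread-group leader: its tid == its pid. */
	printf("leader: pid=%d tid=%ld\n",
	       (int)getpid(), (long)syscall(SYS_gettid));
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}

Both lines print the same pid (the thread-group id) but different tids, which is exactly the distinction PIDTYPE_PID vs. thread_group_leader() draws above.
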
/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	/*
	 * Notify that this sighand has been detached. This must
	 * be called with the tsk->sighand lock held. Also, this
	 * access tsk->sighand internally, so it must be called
	 * before tsk->sighand is reset.
	 */
	signalfd_detach_locked(tsk);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;	/* (sighing) "Often!" */
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}
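
will_become_orphaned_pgrp() above encodes the POSIX rule: a process group stays non-orphaned only while some member has a parent in a different process group of the same session. A hedged userspace illustration of a group becoming orphaned (hypothetical demo, not from the kernel tree):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		/* Child: become leader of a brand-new process group. */
		setpgid(0, 0);
		sleep(1);	/* give the parent time to exit */
		/* With the parent gone, no member of this one-task group
		 * has a parent in another group of the same session, so
		 * the group is orphaned in the sense checked by
		 * will_become_orphaned_pgrp(). */
		printf("pgrp=%d sid=%d ppid=%d\n",
		       (int)getpgrp(), (int)getsid(0), (int)getppid());
		return 0;
	}
	return 0;	/* parent exits at once, orphaning the child's group */
}

Had the newly orphaned group contained stopped jobs, it would be due the SIGHUP and SIGCONT that the comment above describes.
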
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (process_session(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_signal_session(curr->signal, session);
		attach_pid(curr, PIDTYPE_SID, find_pid(session));
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
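
allow_signal() exists for exactly the pattern its comment describes: daemonize() (defined just below) blocks every signal, and a kernel thread then re-enables the few it wants to see. A sketch under 2.6-era APIs — my_loop_thread() and its kernel_thread() launch are hypothetical names, not part of this file:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int my_loop_thread(void *unused)
{
	/* Shed user-space resources and block every signal ... */
	daemonize("my_loop_thread");
	/* ... then opt back in to the one signal we want delivered. */
	allow_signal(SIGKILL);

	while (!signal_pending(current)) {
		/* periodic work would go here */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;	/* SIGKILL received: exit cleanly */
}

/* Launched from module init in the pre-kthread_run style, e.g.:
 *	kernel_thread(my_loop_thread, NULL, CLONE_FS | CLONE_FILES);
 */
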
/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */
	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_task_namespaces(current);
	current->nsproxy = init_task.nsproxy;
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
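
The listing breaks off here, inside exit_mm()'s coredump-serialization comment, at the end of page 1 of 3. Before the break, close_files() and put_files_struct() show that the fd table is reference-counted and torn down only by its last user. A hedged userspace sketch of the same rule via clone(CLONE_FILES), which makes two tasks share one files_struct (hypothetical demo, not from the kernel; assumes a downward-growing stack):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
	(void)arg;
	/* Opens in the shared table, so the parent sees this fd too. */
	open("/dev/null", O_RDONLY);
	return 0;
}

int main(void)
{
	char *stack = malloc(64 * 1024);
	if (!stack)
		return 1;
	/* CLONE_FILES: parent and child share one fd table, like two
	 * threads sharing a files_struct; the child's exit_files() then
	 * only drops a reference, it does not trigger close_files(). */
	pid_t pid = clone(child_fn, stack + 64 * 1024,
			  CLONE_FILES | SIGCHLD, NULL);
	if (pid < 0)
		return 1;
	waitpid(pid, NULL, 0);
	/* The fd opened by the now-dead child is still valid here. */
	printf("fd 3 valid? %s\n", fcntl(3, F_GETFD) >= 0 ? "yes" : "no");
	free(stack);
	return 0;
}
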
