
📄 exit.c

📁 Process-exit handling code from the Linux kernel (kernel/exit.c)
💻 C
📖 Page 1 of 4
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		path_put(&fs->root);
		path_put(&fs->pwd);
		kmem_cache_free(fs_cachep, fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		put_fs_struct(fs);
	}
}

EXPORT_SYMBOL_GPL(exit_fs);

#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, lets find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL,
	 * so that subsystems can understand the callback and take action.
	 */
	down_write(&mm->mmap_sem);
	cgroup_mm_owner_callbacks(mm->owner, NULL);
	mm->owner = NULL;
	up_write(&mm->mmap_sem);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	cgroup_mm_owner_callbacks(mm->owner, c);
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}

/*
 * Return nonzero if @parent's children should reap themselves.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static int ignoring_children(struct task_struct *parent)
{
	int ret;
	struct sighand_struct *psig = parent->sighand;
	unsigned long flags;
	spin_lock_irqsave(&psig->siglock, flags);
	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
	spin_unlock_irqrestore(&psig->siglock, flags);
	return ret;
}

/*
 * Detach all tasks we were using ptrace on.
 * Any that need to be release_task'd are put on the @dead list.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
{
	struct task_struct *p, *n;
	int ign = -1;

	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
		__ptrace_unlink(p);

		if (p->exit_state != EXIT_ZOMBIE)
			continue;

		/*
		 * If it's a zombie, our attachedness prevented normal
		 * parent notification or self-reaping.  Do notification
		 * now if it would have happened earlier.  If it should
		 * reap itself, add it to the @dead list.  We can't call
		 * release_task() here because we already hold tasklist_lock.
		 *
		 * If it's our own child, there is no notification to do.
		 * But if our normal children self-reap, then this child
		 * was prevented by ptrace and we must reap it now.
		 */
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, parent))
				do_notify_parent(p, p->exit_signal);
			else {
				if (ign < 0)
					ign = ignoring_children(parent);
				if (ign)
					p->exit_signal = -1;
			}
		}

		if (task_detached(p)) {
			/*
			 * Mark it as in the process of being reaped.
			 */
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}
}

/*
 * Finish up exit-time ptrace cleanup.
 *
 * Called without locks.
 */
static void ptrace_exit_finish(struct task_struct *parent,
			       struct list_head *dead)
{
	struct task_struct *p, *n;

	BUG_ON(!list_empty(&parent->ptraced));

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

static void reparent_thread(struct task_struct *p, struct task_struct *father)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	list_move_tail(&p->sibling, &p->real_parent->children);

	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	if (!task_detached(p))
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!ptrace_reparented(p) &&
	    p->exit_state == EXIT_ZOMBIE &&
	    !task_detached(p) && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	kill_orphaned_pgrp(p, father);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We can not clear ->child_reaper or leave it alone.
		 * There may by stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}

static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(ptrace_dead);

	write_lock_irq(&tasklist_lock);
	reaper = find_new_reaper(father);
	/*
	 * First clean up ptrace if we were using it.
	 */
	ptrace_exit(father, &ptrace_dead);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		p->real_parent = reaper;
		if (p->parent == father) {
			BUG_ON(p->ptrace);
			p->parent = p->real_parent;
		}
		reparent_thread(p, father);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&father->children));

	ptrace_exit_finish(father, &ptrace_dead);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);
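
The get_files_struct()/put_files_struct() pair above is a plain reference-counting idiom: each taker bumps files->count, and whichever caller drops the count to zero (atomic_dec_and_test()) performs the cleanup. Below is a minimal userspace sketch of the same idiom using C11 atomics; it is not part of exit.c, and the names files_demo, files_get and files_put are invented for illustration only.

/* Illustrative sketch only -- not kernel code.  Mirrors the
 * get_files_struct()/put_files_struct() idiom with C11 atomics:
 * the caller that drops the last reference performs the cleanup. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct files_demo {
	atomic_int count;
	/* the real struct carries the fd table; omitted here */
};

static struct files_demo *files_get(struct files_demo *f)
{
	atomic_fetch_add(&f->count, 1);	/* like atomic_inc(&files->count) */
	return f;
}

static void files_put(struct files_demo *f)
{
	/* like atomic_dec_and_test(): only the final put frees */
	if (atomic_fetch_sub(&f->count, 1) == 1) {
		puts("last reference gone: close files and free");
		free(f);
	}
}

int main(void)
{
	struct files_demo *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	atomic_init(&f->count, 1);	/* creator holds the first reference */

	files_get(f);	/* a second user, e.g. another task sharing the table */
	files_put(f);	/* that user exits: 2 -> 1, nothing freed */
	files_put(f);	/* creator exits: 1 -> 0, cleanup runs here */
	return 0;
}

exit_files() in the listing follows the same shape: it detaches tsk->files under task_lock() and then calls put_files_struct(), so the file table is only torn down once the last task referencing it has dropped its reference.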
