
📄 fork.c

📁 Process-creation (fork) code from the Linux kernel
💻 C
📖 Page 1 of 3
	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* Our parent execution domain becomes current domain
	   These must match for thread signalling to apply */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids alot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = current->signal->tty;
			set_task_pgrp(p, task_pgrp_nr(current));
			set_task_session(p, task_session_nr(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		tracehook_report_clone(trace, regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
				CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
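
The second half of this page implements sys_unshare(). As a hedged illustration (not part of fork.c), the sketch below shows one way a userspace program would reach that path through the standard glibc unshare(2) wrapper; the program itself is only an assumed usage example, and unsharing the mount namespace with CLONE_NEWNS normally requires CAP_SYS_ADMIN.

/* Minimal userspace sketch: trigger sys_unshare() via the unshare(2) wrapper. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* In the kernel, check_unshare_flags() will also force CLONE_FS
	 * when CLONE_NEWNS is requested, then the new namespace is
	 * swapped in under task_lock(current). */
	if (unshare(CLONE_NEWNS) == -1) {
		perror("unshare");
		return 1;
	}
	/* Mounts made from here on stay private to this process's namespace. */
	printf("now in a private mount namespace\n");
	return 0;
}

Run as root (or with CAP_SYS_ADMIN); an unprivileged call fails with EPERM, which corresponds to the error paths returned from sys_unshare() above.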
