📄 fork.c
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespaces;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/* Our parent execution domain becomes current domain.
	   These must match for thread signalling to apply. */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
	p->ioprio = current->ioprio;

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * the parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespaces;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires, cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			set_signal_session(p->signal, process_session(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
				&init_struct_pid);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}
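fork_traceflag() only selects which PTRACE_EVENT_* code do_fork() will report; the stop itself is delivered below via ptrace_notify((trace << 8) | SIGTRAP), after the new pid has been stored in current->ptrace_message. A minimal userspace sketch of the tracer side of that handshake, assuming a glibc whose <sys/ptrace.h> defines PTRACE_O_TRACEFORK and PTRACE_EVENT_FORK; error handling is omitted:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* wait for the tracer to set options */
		fork();			/* fork_traceflag() -> PTRACE_EVENT_FORK */
		_exit(0);
	}

	waitpid(child, &status, 0);	/* initial SIGSTOP */
	ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACEFORK);
	ptrace(PTRACE_CONT, child, NULL, NULL);

	waitpid(child, &status, 0);	/* the fork event stop */
	if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
		unsigned long new_pid;	/* do_fork() put it in ptrace_message */
		ptrace(PTRACE_GETEVENTMSG, child, NULL, &new_pid);
		printf("traced fork created pid %lu\n", new_pid);
	}
	ptrace(PTRACE_CONT, child, NULL, NULL);
	return 0;
}

PTRACE_GETEVENTMSG reads back exactly the nr that do_fork() stores in current->ptrace_message before calling ptrace_notify(). The new child itself starts out stopped, since CLONE_PTRACE was forced on; detaching from it is left out of this sketch.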
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			parent_tidptr, child_tidptr, pid);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = nr;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	INIT_LIST_HEAD(&sighand->signalfd_list);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
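proc_caches_init() uses the six-argument kmem_cache_create() of this kernel vintage, taking constructor and destructor callbacks. For comparison, here is a minimal module-sized sketch of the same pattern; struct demo_obj, the cache name, and the demo_* identifiers are invented for illustration, with the constructor doing one-time initialization just as sighand_ctor() does above:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	spinlock_t lock;
	int value;
};

static struct kmem_cache *demo_cachep;

/* Runs once per backing page object, not on every allocation. */
static void demo_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
{
	struct demo_obj *obj = data;

	spin_lock_init(&obj->lock);
}

static int __init demo_init(void)
{
	demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
			0, SLAB_HWCACHE_ALIGN, demo_ctor, NULL);
	if (!demo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Objects would then come from kmem_cache_alloc(demo_cachep, GFP_KERNEL) and go back via kmem_cache_free(); proc_caches_init() skips the destroy path entirely because its caches live for the lifetime of the kernel (hence SLAB_PANIC).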
/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) &&
	    atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}
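Taken together, these helpers define what this kernel version will accept: CLONE_THREAD and CLONE_SYSVSEM are rejected with -EINVAL unconditionally, while CLONE_SIGHAND and CLONE_VM fail only when the corresponding structure is actually shared. A hedged userspace probe of that behavior, assuming the glibc unshare(2) wrapper; from a single-threaded caller the FS/FILES cases are expected to succeed, possibly as no-ops:

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sched.h>

int main(void)
{
	struct probe { const char *name; int flag; } probes[] = {
		{ "CLONE_FS",      CLONE_FS },	/* copied if shared */
		{ "CLONE_FILES",   CLONE_FILES },	/* copied if shared */
		{ "CLONE_SYSVSEM", CLONE_SYSVSEM },	/* always -EINVAL here */
		{ "CLONE_THREAD",  CLONE_THREAD },	/* always -EINVAL here */
	};
	unsigned int i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
		if (unshare(probes[i].flag) == 0)
			printf("%-13s ok\n", probes[i].name);
		else
			printf("%-13s %s\n", probes[i].name, strerror(errno));
	}
	return 0;
}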
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;
	struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_semundo;

	if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

		task_lock(current);

		if (new_nsproxy) {
			old_nsproxy = current->nsproxy;
			current->nsproxy = new_nsproxy;
			new_nsproxy = old_nsproxy;
		}

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
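A representative caller of sys_unshare() from userspace is a process requesting a private mount namespace. A sketch, assuming the caller holds CAP_SYS_ADMIN (required for CLONE_NEWNS) and using /mnt purely as an example mount point; note that check_unshare_flags() above silently adds CLONE_FS to the request:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <sys/mount.h>

int main(void)
{
	if (unshare(CLONE_NEWNS) == -1) {
		perror("unshare(CLONE_NEWNS)");	/* likely EPERM without privilege */
		return 1;
	}

	/* Mounts made from here on belong to the new namespace and are
	 * invisible to the rest of the system, subject to mount
	 * propagation settings. */
	if (mount("tmpfs", "/mnt", "tmpfs", 0, NULL) == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}

The copy-then-swap shape of sys_unshare() is the point of the helpers above: every new structure is fully built before task_lock(current) is taken, so the swap window is short, and the error labels unwind only the copies that were actually made.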