fork.c
	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		list_del(&mm->mmlist);
		mmlist_nr--;
		spin_unlock(&mmlist_lock);
		exit_aio(mm);
		exit_mmap(mm);
		mmdrop(mm);
	}
}

/*
 * Checks if the use count of an mm is non-zero and if so
 * returns a reference to it after bumping up the use count.
 * If the use count is zero, it means this mm is going away,
 * so return NULL.
 */
struct mm_struct *mmgrab(struct mm_struct *mm)
{
	spin_lock(&mmlist_lock);
	if (!atomic_read(&mm->mm_users))
		mm = NULL;
	else
		atomic_inc(&mm->mm_users);
	spin_unlock(&mmlist_lock);
	return mm;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
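/*
 * The clear_child_tid/futex step in mm_release() above is the kernel half
 * of a pthread_join()-style wait: the exiting thread's last act on its
 * shared mm is to zero the registered TID word and futex-wake one waiter.
 * Below is a minimal userspace sketch of the waiter side, assuming Linux
 * with glibc's clone() wrapper (the stack size and child_fn are
 * illustrative, not from this file).  As in NPTL, CLONE_PARENT_SETTID and
 * CLONE_CHILD_CLEARTID point at the same word, so the word is valid before
 * clone() returns and cleared only when the child has really gone.
 */

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile pid_t child_tid;	/* written by the kernel, not by us */

static int child_fn(void *arg)
{
	return 0;	/* exiting runs mm_release(): clear tid + FUTEX_WAKE */
}

int main(void)
{
	char *stack = malloc(64 * 1024);
	pid_t tid;

	if (!stack)
		return 1;
	/* CLONE_PARENT_SETTID stores the tid to &child_tid before clone()
	 * returns; CLONE_CHILD_CLEARTID makes mm_release() store 0 there
	 * and futex-wake one waiter when the child exits. */
	tid = clone(child_fn, stack + 64 * 1024,
		    CLONE_VM | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID |
		    SIGCHLD, NULL, (pid_t *)&child_tid, NULL,
		    (pid_t *)&child_tid);
	if (tid < 0)
		return 1;
	/* Sleep until the kernel clears the tid word: true child exit. */
	while (child_tid == tid)
		syscall(SYS_futex, &child_tid, FUTEX_WAIT, tid, NULL, NULL, 0);
	printf("child %d fully exited\n", tid);
	free(stack);
	return 0;
}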
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->cmin_flt = tsk->cmaj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = tsk->cnvcsw = tsk->cnivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		/*
		 * There are cases where the PTL is held to ensure no
		 * new threads start up in user mode using an mm, which
		 * allows optimizing out ipis; the tlb_gather_mmu code
		 * is an example.
		 */
		spin_unlock_wait(&oldmm->page_table_lock);
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	retval = dup_mmap(mm, oldmm);
	if (retval)
		goto free_pt;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		fs->lock = RW_LOCK_UNLOCKED;
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct files_struct *files, int size)
{
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (files->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}
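/*
 * Worked example of the count_open_files() arithmetic above: the bitmap is
 * scanned backwards one long-word at a time, and the result is rounded up
 * to whole-word granularity, so the copy loop in copy_files() may visit a
 * few never-opened slots but never misses an open one.  A standalone
 * sketch of the same scan (last_open_fd_bound is an illustrative name,
 * not from fork.c):
 */

#include <stdio.h>

static int last_open_fd_bound(const unsigned long *fds_bits, int size)
{
	int i;

	/* Walk backwards; stop at the last word with any fd bit set. */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fds_bits[--i])
			break;
	}
	/* Round up to a whole word of fds, exactly as in the kernel. */
	return (i + 1) * 8 * sizeof(long);
}

int main(void)
{
	unsigned long bits[4] = { 0 };
	int nbits = sizeof(bits) * 8;

	bits[0] = 0x7;	/* fds 0..2 open */
	bits[2] = 0x1;	/* one more fd open in the third word */
	/* With 64-bit longs this prints 192: three full words get copied,
	 * the untouched fourth word is skipped. */
	printf("%d\n", last_open_fd_bound(bits, nbits));
	return 0;
}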
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	struct file **old_fds, **new_fds;
	int open_files, nfds, size, i, error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	error = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	newf->file_lock = SPIN_LOCK_UNLOCKED;
	newf->next_fd = 0;
	newf->max_fds = NR_OPEN_DEFAULT;
	newf->max_fdset = __FD_SETSIZE;
	newf->close_on_exec = &newf->close_on_exec_init;
	newf->open_fds = &newf->open_fds_init;
	newf->fd = &newf->fd_array[0];

	/* We don't yet have the oldf readlock, but even if the old
	   fdset gets grown now, we'll only copy up to "size" fds */
	size = oldf->max_fdset;
	if (size > __FD_SETSIZE) {
		newf->max_fdset = 0;
		spin_lock(&newf->file_lock);
		error = expand_fdset(newf, size - 1);
		spin_unlock(&newf->file_lock);
		if (error)
			goto out_release;
	}
	spin_lock(&oldf->file_lock);

	open_files = count_open_files(oldf, size);

	/*
	 * Check whether we need to allocate a larger fd array.
	 * Note: we're not a clone task, so the open count won't
	 * change.
	 */
	nfds = NR_OPEN_DEFAULT;
	if (open_files > nfds) {
		spin_unlock(&oldf->file_lock);
		newf->max_fds = 0;
		spin_lock(&newf->file_lock);
		error = expand_fd_array(newf, open_files - 1);
		spin_unlock(&newf->file_lock);
		if (error)
			goto out_release;
		nfds = newf->max_fds;
		spin_lock(&oldf->file_lock);
	}

	old_fds = oldf->fd;
	new_fds = newf->fd;

	memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits,
			open_files / 8);
	memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits,
			open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f)
			get_file(f);
		*new_fds++ = f;
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (newf->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (newf->max_fdset > open_files) {
		int left = (newf->max_fdset - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&newf->open_fds->fds_bits[start], 0, left);
		memset(&newf->close_on_exec->fds_bits[start], 0, left);
	}

	tsk->files = newf;
	error = 0;
out:
	return error;

out_release:
	free_fdset(newf->close_on_exec, newf->max_fdset);
	free_fdset(newf->open_fds, newf->max_fdset);
	kmem_cache_free(files_cachep, newf);
	goto out;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	if (!files)
		BUG();

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	tsk->sighand = sig;
	if (!sig)
		return -ENOMEM;
	spin_lock_init(&sig->siglock);
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	sig->group_exit = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	sig->tty = current->signal->tty;
	sig->pgrp = process_group(current);
	sig->session = current->signal->session;
	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = 0;

	return 0;
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
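/*
 * A closing illustration of the share-vs-copy decision in copy_files()
 * above: with CLONE_FILES the child's open() lands in the parent's fd
 * table, because copy_files() only bumps oldf->count; without it the
 * child works on a private duplicate.  A hedged userspace sketch,
 * assuming Linux with glibc's clone() wrapper (try_flags and the
 * /dev/null probing are illustrative, not from the kernel source):
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int open_in_child(void *arg)
{
	/* With CLONE_FILES this slot appears in the parent's table too;
	 * without it, copy_files() gave this child a private copy. */
	int fd = open("/dev/null", O_RDONLY);
	return fd < 0;
}

static void try_flags(int flags, const char *label)
{
	char *stack = malloc(64 * 1024);
	int before, probe, tid;

	if (!stack)
		return;
	before = open("/dev/null", O_RDONLY);	/* lowest free fd slot */
	close(before);
	tid = clone(open_in_child, stack + 64 * 1024, flags | SIGCHLD, NULL);
	if (tid > 0)
		waitpid(tid, NULL, 0);
	/* If that slot is now busy, the child's open() hit our table. */
	probe = open("/dev/null", O_RDONLY);
	printf("%s: child's fd %s visible in parent\n", label,
	       probe == before ? "is not" : "is");
	close(probe);
	free(stack);
}

int main(void)
{
	try_flags(CLONE_FILES, "CLONE_FILES");
	try_flags(0, "no CLONE_FILES");
	return 0;
}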