
fork.c

Source code implementing the fork function, which creates a new process.

Language: C

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
				 unsigned long stack_start,
				 struct pt_regs *regs,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(p->thread_info->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	copy_flags(clone_flags, p);
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->proc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->utime = p->stime = 0;
	p->cutime = p->cstime = 0;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = get_jiffies_64();
	p->security = NULL;
	p->io_context = NULL;
	p->audit_context = NULL;
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup;
	}
#endif

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);

	/* Our parent execution domain becomes current domain
	   These must match for thread signalling to apply */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;

	/* Perform scheduler related setup */
	sched_fork(p);

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & CLONE_PARENT)
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EAGAIN;
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, process_group(p));
		attach_pid(p, PIDTYPE_SID, p->signal->session);
		if (p->pid)
			__get_cpu_var(process_counts)++;
	} else
		link_pid(p, p->pids + PIDTYPE_TGID, &p->group_leader->pids[PIDTYPE_TGID].pid);

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
	if (p->active_mm)
		mmdrop(p->active_mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
#endif
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(p->thread_info->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
	goto fork_out;
}

static inline int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & (CLONE_UNTRACED | CLONE_IDLETASK))
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long pid;

	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	pid = IS_ERR(p) ? PTR_ERR(p) : p->pid;

	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED)) {
			/*
			 * Do the wakeup last. On SMP we treat fork() and
			 * CLONE_VM separately, because fork() has already
			 * created cache footprint on this CPU (due to
			 * copying the pagetables), hence migration would
			 * probably be costly. Threads on the other hand
			 * have less traction to the current CPU, and if
			 * there's an imbalance then the scheduler can
			 * migrate this fresh thread now, before it
			 * accumulates a larger cache footprint:
			 */
			if (clone_flags & CLONE_VM)
				wake_up_forked_thread(p);
			else
				wake_up_forked_process(p);
		} else {
			int cpu = get_cpu();

			p->state = TASK_STOPPED;
			if (cpu_is_offline(task_cpu(p)))
				set_task_cpu(p, cpu);
			put_cpu();
		}
		++total_forks;

		if (unlikely(trace)) {
			current->ptrace_message = pid;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE))
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
		} else
			/*
			 * Let the child process run first, to avoid most of the
			 * COW overhead when the child exec()s afterwards.
			 */
			set_need_resched();
	}
	return pid;
}

/* SLAB cache for signal_struct structures (tsk->signal) */
kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
kmem_cache_t *mm_cachep;

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
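
For context, the listing above is the kernel side of what runs when a userspace program calls fork(2): the syscall entry hands off to do_fork(), which builds the child via copy_process(). Below is a minimal userspace sketch of typical fork() usage; it is not part of this file, just the standard POSIX API that exercises this path:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();	/* kernel path: syscall entry -> do_fork() -> copy_process() */

	if (pid < 0) {		/* e.g. -EAGAIN from the RLIMIT_NPROC check in copy_process() */
		perror("fork");
		exit(EXIT_FAILURE);
	}
	if (pid == 0) {
		/* Child: do_fork() deliberately lets the child run first
		 * (see set_need_resched() above), so an immediate exec()
		 * avoids most copy-on-write faults. */
		execlp("echo", "echo", "hello from the child", (char *)NULL);
		_exit(127);	/* only reached if exec fails */
	}
	/* Parent: reap the child. */
	waitpid(pid, NULL, 0);
	return 0;
}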
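The flag checks at the top of copy_process() are also visible from userspace: clone(2) fails with EINVAL when CLONE_THREAD is requested without CLONE_SIGHAND, or CLONE_SIGHAND without CLONE_VM. A hedged sketch using the glibc clone() wrapper; the child function, stack size, and flag choice here are illustrative, not taken from this file:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#define STACK_SIZE (1024 * 1024)	/* illustrative child stack size */

static int child_fn(void *arg)
{
	printf("child sharing the parent's VM, pid %d\n", getpid());
	return 0;
}

int main(void)
{
	char *stack = malloc(STACK_SIZE);
	if (!stack)
		exit(EXIT_FAILURE);

	/* CLONE_SIGHAND without CLONE_VM would be rejected with -EINVAL
	 * by the checks at the top of copy_process(). */
	pid_t pid = clone(child_fn, stack + STACK_SIZE,	/* stack grows down on x86 */
			  CLONE_VM | CLONE_SIGHAND | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		exit(EXIT_FAILURE);
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}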
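Likewise, the CLONE_VFORK branch in do_fork() (init_completion() followed by wait_for_completion(&vfork)) is what makes vfork(2) block the parent until the child execs or exits. A minimal sketch of that behavior from userspace:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = vfork();	/* parent sleeps on p->vfork_done until the child execs or exits */

	if (pid < 0) {
		perror("vfork");
		exit(EXIT_FAILURE);
	}
	if (pid == 0) {
		/* Child: after vfork() it may only exec or _exit, since it
		 * borrows the parent's address space in the meantime. */
		execlp("date", "date", (char *)NULL);
		_exit(127);
	}
	/* Parent resumes here only after the child's exec/exit completes
	 * the vfork completion in do_fork(). */
	waitpid(pid, NULL, 0);
	return 0;
}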
