
fork.c

linux 2.6.19 kernel source code before patching
Language: C
Page 1 of 3
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table
		 * who knows it may have a new bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
		old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits,
		old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use a optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct files_struct *files  = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if(atomic_read(&files->count) == 1)
	{
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if(rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					struct pid *pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid_nr(pid);
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
#ifdef CONFIG_TASK_XACCT
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
#endif
	task_io_accounting_init(p);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cpuset;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
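
The three EINVAL checks at the top of copy_process() above are the kernel side of the clone(2) flag rules: CLONE_NEWNS and CLONE_FS cannot be combined, CLONE_THREAD requires CLONE_SIGHAND, and CLONE_SIGHAND requires CLONE_VM. A minimal, hypothetical userspace sketch (not part of fork.c; the try_clone helper, stack size, and flag selections are illustrative assumptions) showing how those checks surface as errno values from clone():

/* Hypothetical userspace demo -- not part of fork.c.  It pokes the flag
 * checks at the top of copy_process() and prints the resulting errno. */
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int child_fn(void *arg)
{
	return 0;		/* never reached: every flag set below is rejected */
}

static void try_clone(const char *what, int flags)
{
	const size_t stack_size = 64 * 1024;	/* arbitrary child stack size */
	char *stack = malloc(stack_size);
	int ret;

	if (!stack)
		return;
	/* clone() wants a pointer to the top of the child stack. */
	ret = clone(child_fn, stack + stack_size, flags, NULL);
	if (ret == -1)
		printf("%-24s -> %s\n", what, strerror(errno));
	else
		printf("%-24s -> ok (tid %d)\n", what, ret);
	free(stack);
}

int main(void)
{
	/* Rejected: thread groups must share signal handlers. */
	try_clone("CLONE_THREAD alone", CLONE_THREAD);
	/* Rejected: shared signal handlers imply shared VM. */
	try_clone("CLONE_SIGHAND alone", CLONE_SIGHAND);
	/* Rejected: a new mount namespace cannot share fs_struct. */
	try_clone("CLONE_NEWNS|CLONE_FS", CLONE_NEWNS | CLONE_FS);
	return 0;
}

Each call should report EINVAL, since copy_process() rejects these combinations before any task state is copied; a thread-style clone would instead pass CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD, the combination these checks allow.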
