⭐ 虫虫下载站

📄 fork.c

📁 Source code implementing the fork routines used to create new processes
💻 C
📖 Page 1 of 3
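For orientation before the kernel listing below: from user space, fork() duplicates the calling process, and the child gets its own copy of the parent's address space (built by dup_mmap() and copy_page_range() in this file), so a write in one process is not visible in the other. The short program that follows is only an illustrative sketch using standard POSIX calls; it is not part of fork.c.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int counter = 0;		/* duplicated into the child's address space */
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		exit(EXIT_FAILURE);
	} else if (pid == 0) {
		/* child: the write lands in a private copy-on-write page */
		counter = 42;
		printf("child  pid=%d counter=%d\n", getpid(), counter);
		_exit(EXIT_SUCCESS);
	}

	/* parent: its copy of counter is unaffected by the child's write */
	waitpid(pid, NULL, 0);
	printf("parent pid=%d counter=%d\n", getpid(), counter);
	return 0;
}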
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/rmap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* The idle threads do not count..
 * Protected by write_lock_irq(&tasklist_lock)
 */
int nr_threads;

int max_threads;

unsigned long total_forks;	/* Handle normal Linux uptimes. */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */

EXPORT_SYMBOL(tasklist_lock);

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

static void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	free_task_struct(tsk);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->state & (TASK_DEAD | TASK_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	if (unlikely(tsk->audit_context))
		audit_free(tsk);
	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	free_task(tsk);
}

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

EXPORT_SYMBOL(add_wait_queue);

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

EXPORT_SYMBOL(remove_wait_queue);

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}

EXPORT_SYMBOL(prepare_to_wait);

void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}

EXPORT_SYMBOL(prepare_to_wait_exclusive);

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}

EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}

EXPORT_SYMBOL(autoremove_wake_function);

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;
	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*ti = *orig->thread_info;
	*tsk = *orig;
	tsk->thread_info = ti;
	ti->task = tsk;

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage,2);
	return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
{
	struct vm_area_struct * mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(current->mm);
	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->map_count = 0;
	mm->rss = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	/*
	 * Add it to the mmlist after the parent.
	 * Doing it this way means that we can order the list,
	 * and fork() won't mess up the ordering significantly.
	 * Add it first so that swapoff can see any swap entries.
	 */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &current->mm->mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY)
			continue;
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		vma_prio_tree_init(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries:
		 * link in first so that swapoff can see swap entries,
		 * and try_to_unmap_one's find_vma find the new vma.
		 */
		spin_lock(&mm->page_table_lock);
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, current->mm, tmp);
		spin_unlock(&mm->page_table_lock);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	retval = 0;

out:
	flush_tlb_mm(current->mm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm->pgd);
}

#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
int mmlist_nr;

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	mm->core_waiters = 0;
	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
	mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
	mm->ioctx_list = NULL;
	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
	mm->free_area_cache = TASK_UNMAPPED_BASE;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;
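The listing breaks off inside mm_alloc() and continues on page 2. As a usage note for the wait-queue helpers exported above (prepare_to_wait(), finish_wait(), autoremove_wake_function()): a sleeper typically loops as sketched below, and the waker simply sets the condition and calls wake_up(). This is an illustrative sketch only; my_wq, my_condition and my_wait_for_event() are hypothetical names, not symbols defined in fork.c.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait-queue head */
static int my_condition;		/* hypothetical wake-up condition */

static void my_wait_for_event(void)
{
	DEFINE_WAIT(wait);		/* wakes via autoremove_wake_function() */

	for (;;) {
		/* prepare_to_wait() queues the entry and only then sets the task
		 * state, relying on the memory barrier discussed in the comment
		 * above prepare_to_wait(). */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();
	}
	/* finish_wait() restores TASK_RUNNING and dequeues the entry if the
	 * waker's autoremove_wake_function() has not already done so. */
	finish_wait(&my_wq, &wait);
}

The waker side sets my_condition = 1 and calls wake_up(&my_wq).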
