
mmap.c

Source code of the Linux kernel memory-management module (latest stable release)
Language: C
Page 1 of 5
				if (vma_tmp->vm_end > addr) {
					vma = vma_tmp;
					if (vma_tmp->vm_start <= addr)
						break;
					rb_node = rb_node->rb_left;
				} else
					rb_node = rb_node->rb_right;
			}
			if (vma)
				mm->mmap_cache = vma;
		}
	}
	return vma;
}

EXPORT_SYMBOL(find_vma);

/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma = NULL, *prev = NULL;
	struct rb_node *rb_node;

	if (!mm)
		goto out;

	/* Guard against addr being lower than the first VMA */
	vma = mm->mmap;

	/* Go through the RB tree quickly. */
	rb_node = mm->mm_rb.rb_node;

	while (rb_node) {
		struct vm_area_struct *vma_tmp;
		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

		if (addr < vma_tmp->vm_end) {
			rb_node = rb_node->rb_left;
		} else {
			prev = vma_tmp;
			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
				break;
			rb_node = rb_node->rb_right;
		}
	}

out:
	*pprev = prev;
	return prev ? prev->vm_next : vma;
}

/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	struct rlimit *rlim = current->signal->rlim;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlim[RLIMIT_STACK].rlim_cur)
		return -ENOMEM;

	/* mlock limit tests */
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked;
		unsigned long limit;
		locked = mm->locked_vm + grow;
		limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
		if (locked > limit && !capable(CAP_IPC_LOCK))
			return -ENOMEM;
	}

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory(grow))
		return -ENOMEM;

	/* Ok, everything looks good - let it rip */
	mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		mm->locked_vm += grow;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
	return 0;
}

#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */
#ifndef CONFIG_IA64
static
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	int error;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	anon_vma_lock(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 * Also guard against wrapping around to address 0.
	 */
	if (address < PAGE_ALIGN(address+4))
		address = PAGE_ALIGN(address+4);
	else {
		anon_vma_unlock(vma);
		return -ENOMEM;
	}
	error = 0;

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = acct_stack_growth(vma, size, grow);
		if (!error)
			vma->vm_end = address;
	}
	anon_vma_unlock(vma);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */

/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 */
static int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	int error;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	address &= PAGE_MASK;
	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
	if (error)
		return error;

	anon_vma_lock(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = acct_stack_growth(vma, size, grow);
		if (!error) {
			vma->vm_start = address;
			vma->vm_pgoff -= grow;
		}
	}
	anon_vma_unlock(vma);
	return error;
}

int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED) {
		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
			return NULL;	/* vma gone! */
	}
	return prev;
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct * vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm,addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED) {
		if (mlock_vma_pages_range(vma, addr, start) < 0)
			return NULL;	/* vma gone! */
	}
	return vma;
}
#endif

/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	do {
		long nrpages = vma_pages(vma);

		mm->total_vm -= nrpages;
		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
		vma = remove_vma(vma);
	} while (vma);
	validate_mm(mm);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);
	free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
				 next? next->vm_start: 0);
	tlb_finish_mmu(tlb, start, end);
}

/*
 * Create a list of vma's touched by the unmap, removing them from the mm's
 * vma list as we go..
 */
static void
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;
	unsigned long addr;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	do {
		rb_erase(&vma->vm_rb, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	*insertion_point = vma;
	tail_vma->vm_next = NULL;
	if (mm->unmap_area == arch_unmap_area)
		addr = prev ? prev->vm_end : mm->mmap_base;
	else
		addr = vma ?  vma->vm_start : mm->mmap_base;
	mm->unmap_area(mm, addr);
	mm->mmap_cache = NULL;		/* Kill the cache. */
}

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
	      unsigned long addr, int new_below)
{
	struct mempolicy *pol;
	struct vm_area_struct *new;

	if (is_vm_hugetlb_page(vma) && (addr &
					~(huge_page_mask(hstate_vma(vma)))))
		return -EINVAL;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	pol = mpol_dup(vma_policy(vma));
	if (IS_ERR(pol)) {
		kmem_cache_free(vm_area_cachep, new);
		return PTR_ERR(pol);
	}
	vma_set_policy(new, pol);

	if (new->vm_file) {
		get_file(new->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			added_exe_file_vma(mm);
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	if (new_below)
		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	return 0;
}

/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		return 0;
	/* we have  start < vma->vm_end  */

	/* if it doesn't overlap, we have nothing.. */
	end = start + len;
	if (vma->vm_start >= end)
		return 0;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error = split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = prev? prev->vm_next: mm->mmap;

	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
	if (mm->locked_vm) {
		struct vm_area_struct *tmp = vma;
		while (tmp && tmp->vm_start < end) {
			if (tmp->vm_flags & VM_LOCKED) {
				mm->locked_vm -= vma_pages(tmp);
				munlock_vma_pages_all(tmp);
			}
			tmp = tmp->vm_next;
		}
	}

	/*
	 * Remove the vma's, and unmap the actual pages
	 */
	detach_vmas_to_be_unmapped(mm, vma, prev, end);
	unmap_region(mm, vma, prev, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return 0;
}

EXPORT_SYMBOL(do_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	profile_munmap(addr);
	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
		WARN_ON(1);
		up_read(&mm->mmap_sem);
	}
#endif
}

/*
 *  this is really a simplified "do_mmap".  it only handles
 *  anonymous maps.  eventually we may be able to do some
 *  brk-specific accounting here.
 */
unsigned long do_brk(unsigned long addr, unsigned long len)
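As a quick illustration of the partial-unmap path that do_munmap() and split_vma() above implement, here is a minimal userspace sketch. It is not part of mmap.c; the three-page anonymous mapping and the middle-page hole are arbitrary choices for demonstration. Unmapping the middle page forces the kernel to split the original VMA, leaving two separate mappings behind, while an unaligned start address is rejected with EINVAL by the first check in do_munmap().

/*
 * Userspace sketch (assumes Linux with glibc): exercises the
 * page-alignment check and the partial-unmap/split path of do_munmap().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	memset(p, 0xaa, 3 * page);

	/* do_munmap() rejects a start address that is not page aligned. */
	if (munmap(p + 1, page) == -1 && errno == EINVAL)
		puts("unaligned munmap -> EINVAL, as expected");

	/*
	 * Punch a hole in the middle page: do_munmap() uses split_vma(),
	 * and the single anonymous VMA becomes two separate mappings
	 * (visible in /proc/self/maps).
	 */
	if (munmap(p + page, page) != 0) {
		perror("munmap");
		return EXIT_FAILURE;
	}

	/* The first and the last page are still mapped and usable. */
	printf("first byte %#x, last byte %#x\n",
	       (unsigned char)p[0], (unsigned char)p[3 * page - 1]);

	munmap(p, page);
	munmap(p + 2 * page, page);
	return 0;
}

Printing or diffing /proc/self/maps before and after the middle-page munmap is an easy way to see the split that split_vma() performs.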
