⭐ 虫虫下载站

📄 mmap.c

📁 Latest stable Linux memory-management module source code
💻 C
📖 Page 1 of 5
	if (file) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(inode))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op || !file->f_op->mmap)
				return -ENODEV;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (error)
		return error;

	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma)
{
	unsigned int vm_flags = vma->vm_flags;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		return 1;

	/* The open routine did something to the protections already? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(vm_get_page_prot(vm_flags)))
		return 0;

	/* Specialty mapping? */
	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

unsigned long mmap_region(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long flags,
			  unsigned int vm_flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	int correct_wcount = 0;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;
	struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;

	/* Clear old maps */
	error = -ENOMEM;
munmap_back:
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limit. */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if ((flags & MAP_NORESERVE)) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped; any overlapping maps have been removed from the list.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		error = -EINVAL;
		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
			goto free_vma;
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
			correct_wcount = 1;
		}
		vma->vm_file = file;
		get_file(file);
		error = file->f_op->mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;
		if (vm_flags & VM_EXECUTABLE)
			added_exe_file_vma(mm);
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}

	/* Can addr have changed??
	 *
	 * Answer: Yes, several device drivers can do it in their
	 *         f_op->mmap method. -DaveM
	 */
	addr = vma->vm_start;
	pgoff = vma->vm_pgoff;
	vm_flags = vma->vm_flags;

	if (vma_wants_writenotify(vma))
		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);

	vma_link(mm, vma, prev, rb_link, rb_parent);
	file = vma->vm_file;

	/* Once vma denies write, undo our temporary denial count */
	if (correct_wcount)
		atomic_inc(&inode->i_writecount);
out:
	mm->total_vm += len >> PAGE_SHIFT;
	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		/*
		 * makes pages present; downgrades, drops, reacquires mmap_sem
		 */
		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
		if (nr_pages < 0)
			return nr_pages;	/* vma gone! */
		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
		make_pages_present(addr, addr + len);
	return addr;

unmap_and_free_vma:
	if (correct_wcount)
		atomic_inc(&inode->i_writecount);
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}

/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				addr = TASK_UNMAPPED_BASE;
				start_addr = addr;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}
}
#endif

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Is this a new hole at the lowest possible address?
	 */
	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
		mm->free_area_cache = addr;
		mm->cached_hole_size = ~0UL;
	}
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application
	 * failure, so fall back to the bottom-up function here. This
	 * scenario can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif

void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Is this a new hole at the highest possible address?
	 */
	if (addr > mm->free_area_cache)
		mm->free_area_cache = addr;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > mm->mmap_base)
		mm->free_area_cache = mm->mmap_base;
}

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (addr & ~PAGE_MASK)
		return -EINVAL;

	return arch_rebalance_pgtables(addr, len);
}

EXPORT_SYMBOL(get_unmapped_area);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = NULL;

	if (mm) {
		/* Check the cache first. */
		/* (Cache hit rate is typically around 35%.) */
		vma = mm->mmap_cache;
		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
			struct rb_node *rb_node;

			rb_node = mm->mm_rb.rb_node;
			vma = NULL;

			while (rb_node) {
				struct vm_area_struct *vma_tmp;

				vma_tmp = rb_entry(rb_node,
						struct vm_area_struct, vm_rb);
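
The listing above ends mid-way through find_vma(); the rest of the file continues on the following pages. As a small illustration of the MAP_SHARED/MAP_PRIVATE checks in do_mmap_pgoff() seen from userspace, here is a minimal sketch (not part of mmap.c; the file path is a placeholder, any file opened O_RDONLY will do):

/* Userspace sketch: the MAP_SHARED branch rejects a writable shared
 * mapping of a read-only descriptor, while MAP_PRIVATE only needs
 * FMODE_READ. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* placeholder path */
	if (fd < 0)
		return 1;

	/* PROT_WRITE + MAP_SHARED on a read-only fd: do_mmap_pgoff()
	 * returns -EACCES because f_mode lacks FMODE_WRITE. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		perror("writable MAP_SHARED mapping of read-only fd");

	/* A private copy-on-write mapping of the same fd is allowed. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p != MAP_FAILED)
		munmap(p, 4096);

	close(fd);
	return 0;
}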

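The VM_NORESERVE / accountable_mapping() path in mmap_region() can also be observed from userspace. The sketch below assumes default overcommit settings and a hypothetical 1 GiB size chosen only for illustration: a private, writable, anonymous mapping is normally charged against the commit limit (VM_ACCOUNT), whereas MAP_NORESERVE asks the kernel to skip that reservation, so pages are charged only as they are actually touched.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB; adjust for the machine at hand */

	/* Without MAP_NORESERVE this mapping would be accounted up front;
	 * with it, the reservation check in mmap_region() is skipped. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	((char *)p)[0] = 1;	/* first touch actually allocates a page */

	munmap(p, len);
	return 0;
}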