
nommu.c

The latest and most stable Linux memory-management (NOMMU) module source code.
Language: C
Page 1 of 4 (this page picks up partway through split_vma()).
	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(atomic_read(&region->vm_usage) != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	struct rb_node *rb;
	unsigned long end = start + len;
	int ret;

	kenter(",%lx,%zx", start, len);

	if (len == 0)
		return -EINVAL;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		printk(KERN_WARNING
		       "munmap of memory not mmapped by process %d (%s):"
		       " 0x%lx-0x%lx\n",
		       current->pid, current->comm, start, start + len - 1);
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			rb = rb_next(&vma->vm_rb);
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
		} while (rb);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
	}

	kleave("");
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;
	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * ask for an unmapped area at which to create a mapping on a file
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
				  unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;

	if (!get_area)
		return -ENOSYS;

	return get_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(get_unmapped_area);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_long_read(&vm_committed_space) < (long)allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;
}
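The NOMMU restrictions documented in the comments above are easiest to see from userspace. The following is a minimal sketch, assuming a NOMMU kernel of this vintage (the sizes and output are illustrative, not from the listing): do_mremap() can only resize a private mapping in place, so growing beyond the region allocated by do_mmap_private() fails with ENOMEM, while do_munmap() will shrink or split an anonymous VMA as long as the boundaries are page-aligned.

#define _GNU_SOURCE		/* for mremap() */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* private anonymous mapping: one VMA backed by one vm_region */
	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* growing past the backing region: do_mremap() returns -ENOMEM;
	 * MREMAP_MAYMOVE would not help, as this code never moves a VMA */
	if (mremap(p, 8192, 1 << 20, 0) == MAP_FAILED)
		printf("grow refused: %s\n", strerror(errno));

	/* unmapping the page-aligned second half of an anonymous VMA is
	 * allowed: do_munmap() takes the shrink_vma() path */
	if (munmap(p + 4096, 4096) != 0)
		printf("shrink failed: %s\n", strerror(errno));

	/* unmapping what is left erases the whole VMA */
	return munmap(p, 4096);
}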
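For the strict policy (OVERCOMMIT_NEVER), the tail of __vm_enough_memory() reduces to a handful of arithmetic steps. Here is a worked example for a non-root caller; every input value is assumed for illustration, not taken from a real system.

#include <stdio.h>

/* mirrors the OVERCOMMIT_NEVER arithmetic in __vm_enough_memory()
 * for a non-root caller (cap_sys_admin == 0); all inputs are assumed */
int main(void)
{
	unsigned long totalram_pages   = 16384;	/* 64 MiB in 4 KiB pages */
	unsigned long total_swap_pages = 0;	/* common on NOMMU targets */
	unsigned long overcommit_ratio = 50;	/* vm.overcommit_ratio */
	unsigned long total_vm = 256;		/* this process's mm->total_vm */

	unsigned long allowed = totalram_pages * overcommit_ratio / 100;
	allowed -= allowed / 32;	/* leave the last 3% for root */
	allowed += total_swap_pages;
	allowed -= total_vm / 32;	/* leave 3% of this process's size */

	/* 16384 * 50 / 100 = 8192; 8192 - 256 = 7936; 7936 - 8 = 7928 */
	printf("allowed = %lu pages\n", allowed);
	return 0;
}

A new charge of `pages` is then granted only while vm_committed_space remains below this `allowed` figure; otherwise the function unaccounts the charge and returns -ENOMEM.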
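Finally, access_process_vm() is the building block behind debugger peek/poke paths such as ptrace. A non-runnable kernel-side sketch of a typical caller follows; peek_word() is a hypothetical helper, not part of nommu.c, and uses only the signature shown above.

/* hypothetical caller: read one word out of another task's address space;
 * the destination buffer must be in kernel space, per the comment above */
static int peek_word(struct task_struct *child, unsigned long addr,
		     unsigned long *dest)
{
	/* write == 0 selects the copy_from_user() direction */
	int copied = access_process_vm(child, addr, dest, sizeof(*dest), 0);

	return copied == sizeof(*dest) ? 0 : -EIO;
}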
