
nommu.c

The latest and most stable Linux memory management module source code
Language: C
Page 1 of 4
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	if (unlikely(last->vm_end <= last->vm_start))
		BUG();
	if (unlikely(last->vm_top < last->vm_end))
		BUG();

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		if (unlikely(region->vm_end <= region->vm_start))
			BUG();
		if (unlikely(region->vm_top < region->vm_end))
			BUG();
		if (unlikely(region->vm_start < last->vm_top))
			BUG();

		lastp = p;
	}
}
#else
#define validate_nommu_regions() do {} while(0)
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	BUG_ON(region->vm_start & ~PAGE_MASK);

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p [%d]", page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, atomic_read(&region->vm_usage));

	BUG_ON(!nommu_region_tree.rb_node);

	if (atomic_dec_and_test(&region->vm_usage)) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, **pp;
	struct address_space *mapping;
	struct rb_node **p, *parent;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the tree */
	parent = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start)
			p = &(*p)->rb_right;
		else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end)
			p = &(*p)->rb_right;
		else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
		if (pvma->vm_start > vma->vm_start)
			break;
		if (pvma->vm_start < vma->vm_start)
			continue;
		if (pvma->vm_end < vma->vm_end)
			break;
	}

	vma->vm_next = *pp;
	*pp = vma;
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct vm_area_struct **pp;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
		if (*pp == vma) {
			*pp = vma->vm_next;
			break;
		}
	}

	vma->vm_mm = NULL;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	}
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		}
		else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		}
		else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
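
The listing above keeps each process's VMAs ordered by start address, then end address, then struct address, and resolves an address with a "first region whose [vm_start, vm_end) range contains addr" rule. The following is a minimal userspace sketch of those two disciplines only, not kernel code: struct region, insert_region() and find_region() are hypothetical stand-ins for vm_area_struct, add_vma_to_mm() and find_vma(), and a plain sorted list stands in for the kernel's rbtree.

/* region_demo.c - hypothetical userspace analogue; build with: cc -Wall region_demo.c */
#include <stdio.h>

struct region {
	unsigned long start;		/* inclusive, like vma->vm_start */
	unsigned long end;		/* exclusive, like vma->vm_end */
	struct region *next;		/* sorted list standing in for the rbtree */
};

/* insert keeping the list sorted by start addr, end addr, struct addr,
 * mirroring the comparison order used in add_vma_to_mm() above */
static void insert_region(struct region **head, struct region *new)
{
	struct region **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		struct region *p = *pp;

		if (p->start > new->start)
			break;
		if (p->start < new->start)
			continue;
		if (p->end > new->end)
			break;
		if (p->end < new->end)
			continue;
		if (p > new)
			break;
	}
	new->next = *pp;
	*pp = new;
}

/* return the first region whose [start, end) range contains addr,
 * mirroring the sorted walk in find_vma(): once start passes addr,
 * nothing later can match */
static struct region *find_region(struct region *head, unsigned long addr)
{
	struct region *r;

	for (r = head; r; r = r->next) {
		if (r->start > addr)
			return NULL;
		if (r->end > addr)
			return r;
	}
	return NULL;
}

int main(void)
{
	struct region a = { 0x1000, 0x3000, NULL };
	struct region b = { 0x5000, 0x7000, NULL };
	struct region c = { 0x9000, 0xa000, NULL };
	struct region *head = NULL;
	unsigned long probes[] = { 0x1000, 0x2fff, 0x4000, 0x6000, 0xa000 };
	unsigned int i;

	/* insert out of order; the list still ends up sorted by start address */
	insert_region(&head, &c);
	insert_region(&head, &a);
	insert_region(&head, &b);

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
		struct region *r = find_region(head, probes[i]);

		if (r)
			printf("0x%lx -> [0x%lx, 0x%lx)\n", probes[i], r->start, r->end);
		else
			printf("0x%lx -> no mapping\n", probes[i]);
	}
	return 0;
}

Run as a standalone program, it reports which probe addresses fall inside a region and which miss, including the exclusive-end case at 0xa000.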
