
📄 mmap.c

📁 ARM Embedded System Design and Example Development, Lab Textbook II source code
💻 C
📖 Page 1 of 3
/*
 *	linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

extern void unmap_page_range(mmu_gather_t *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);

/*
 * WARNING: the debugging will use recursive algorithms so never enable this
 * unless you know what you are doing.
 */
#undef DEBUG_MM_RB

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

int sysctl_overcommit_memory;

/* Check that a process has enough memory to allocate a
 * new virtual mapping.
 */
int vm_enough_memory(long pages)
{
	/* Stupid algorithm to decide if we have enough memory: while
	 * simple, it hopefully works in most obvious cases.. Easy to
	 * fool it, but this should catch most mistakes.
	 */
	/* 23/11/98 NJC: Somewhat less stupid version of algorithm,
	 * which tries to do "TheRightThing".  Instead of using half of
	 * (buffers+cache), use the minimum values.  Allow an extra 2%
	 * of num_physpages for safety margin.
	 */
	unsigned long free;

	/* Sometimes we want to use more memory than we have. */
	if (sysctl_overcommit_memory)
	    return 1;

	/* The page cache contains buffer pages these days.. */
	free = atomic_read(&page_cache_size);
	free += nr_free_pages();
	free += nr_swap_pages;

	/*
	 * This double-counts: the nrpages are both in the page-cache
	 * and in the swapper space. At the same time, this compensates
	 * for the swap-space over-allocation (ie "nr_swap_pages" being
	 * too small).
	 */
	free += swapper_space.nrpages;

	/*
	 * The code below doesn't account for free space in the inode
	 * and dentry slab cache, slab cache fragmentation, inodes and
	 * dentries which will become freeable under VM load, etc.
	 * Lets just hope all these (complex) factors balance out...
	 */
	free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
	free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;

	return free > pages;
}
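/*
 * Worked example of the heuristic above -- a minimal sketch with made-up
 * counter values and assumed struct sizes (sizeof(struct dentry) = 128,
 * sizeof(struct inode) = 512); none of these numbers come from a real
 * system. With PAGE_SHIFT = 12 (4 KiB pages):
 *
 *	page_cache_size		 5000 pages
 *	nr_free_pages()		 2000 pages
 *	nr_swap_pages		 8000 pages
 *	swapper_space.nrpages	  500 pages
 *	(10000 * 128) >> 12	  312 pages	(unused dentries)
 *	(4000 * 512) >> 12	  500 pages	(unused inodes)
 *	---------------------------------------
 *	free			16312 pages
 *
 * A request for 4096 pages (16 MiB) would then succeed, while one for
 * 20000 pages would fail unless sysctl_overcommit_memory is nonzero,
 * in which case the function returns 1 unconditionally.
 */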
/* Remove one vm structure from the inode's i_mapping address space. */
static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;

	if (file) {
		struct inode *inode = file->f_dentry->d_inode;
		if (vma->vm_flags & VM_DENYWRITE)
			atomic_inc(&inode->i_writecount);
		if(vma->vm_next_share)
			vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
		*vma->vm_pprev_share = vma->vm_next_share;
	}
}

static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
{
	lock_vma_mappings(vma);
	__remove_shared_vm_struct(vma);
	unlock_vma_mappings(vma);
}

void lock_vma_mappings(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	mapping = NULL;
	if (vma->vm_file)
		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
	if (mapping)
		spin_lock(&mapping->i_shared_lock);
}

void unlock_vma_mappings(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	mapping = NULL;
	if (vma->vm_file)
		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
	if (mapping)
		spin_unlock(&mapping->i_shared_lock);
}

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against rlimit.. */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Check if we have enough memory.. */
	if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
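/*
 * Minimal user-space sketch (not part of the original file) showing how
 * sys_brk() above is typically reached through the libc sbrk() wrapper:
 *
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		void *old = sbrk(0);		// query the current break
 *		if (sbrk(4096) == (void *)-1)	// grow the heap by one page
 *			perror("sbrk");
 *		printf("break: %p -> %p\n", old, sbrk(0));
 *		return 0;
 *	}
 *
 * Note that sys_brk() reports failure by returning the old break rather
 * than an error code, so the wrapper decides success by comparing the
 * returned value against the address it asked for.
 */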
/* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
 * into "VM_xxx".
 */
static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
{
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

	unsigned long prot_bits, flag_bits;
	prot_bits =
		_trans(prot, PROT_READ, VM_READ) |
		_trans(prot, PROT_WRITE, VM_WRITE) |
		_trans(prot, PROT_EXEC, VM_EXEC);
	flag_bits =
		_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
		_trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
		_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
	return prot_bits | flag_bits;
#undef _trans
}

#ifdef DEBUG_MM_RB
static int browse_rb(rb_node_t * rb_node) {
	int i = 0;
	if (rb_node) {
		i++;
		i += browse_rb(rb_node->rb_left);
		i += browse_rb(rb_node->rb_right);
	}
	return i;
}

static void validate_mm(struct mm_struct * mm) {
	int bug = 0;
	int i = 0;
	struct vm_area_struct * tmp = mm->mmap;
	while (tmp) {
		tmp = tmp->vm_next;
		i++;
	}
	if (i != mm->map_count)
		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
	i = browse_rb(mm->mm_rb.rb_node);
	if (i != mm->map_count)
		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
	if (bug)
		BUG();
}
#else
#define validate_mm(mm) do { } while (0)
#endif

static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
						struct vm_area_struct ** pprev,
						rb_node_t *** rb_link, rb_node_t ** rb_parent)
{
	struct vm_area_struct * vma;
	rb_node_t ** __rb_link, * __rb_parent, * rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;
	vma = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			vma = vma_tmp;
			if (vma_tmp->vm_start <= addr)
				return vma;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return vma;
}

static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
				   rb_node_t * rb_parent)
{
	if (prev) {
		vma->vm_next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			vma->vm_next = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
		else
			vma->vm_next = NULL;
	}
}

static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
				 rb_node_t ** rb_link, rb_node_t * rb_parent)
{
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
}

static inline void __vma_link_file(struct vm_area_struct * vma)
{
	struct file * file;

	file = vma->vm_file;
	if (file) {
		struct inode * inode = file->f_dentry->d_inode;
		struct address_space *mapping = inode->i_mapping;
		struct vm_area_struct **head;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&inode->i_writecount);

		head = &mapping->i_mmap;
		if (vma->vm_flags & VM_SHARED)
			head = &mapping->i_mmap_shared;

		/* insert vma into inode's share list */
		if((vma->vm_next_share = *head) != NULL)
			(*head)->vm_pprev_share = &vma->vm_next_share;
		*head = vma;
		vma->vm_pprev_share = head;
	}
}

static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma,  struct vm_area_struct * prev,
		       rb_node_t ** rb_link, rb_node_t * rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
	__vma_link_file(vma);
}
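/*
 * Illustrative example (not in the original source) of what
 * find_vma_prepare() above computes. Suppose the mm has two VMAs,
 * [0x1000, 0x2000) and [0x8000, 0x9000):
 *
 *	find_vma_prepare(mm, 0x1800, ...) returns the first VMA, which
 *	contains 0x1800, returning early without filling in *pprev,
 *	*rb_link or *rb_parent;
 *
 *	find_vma_prepare(mm, 0x4000, ...) returns the second VMA (the
 *	first one ending above 0x4000), sets *pprev to the first VMA,
 *	and leaves *rb_link pointing at the empty rb-tree slot where a
 *	new VMA covering 0x4000 would be inserted.
 *
 * In other words it behaves like find_vma() but additionally prepares
 * the list and rb-tree linkage that vma_link() below consumes.
 */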
static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
			    rb_node_t ** rb_link, rb_node_t * rb_parent)
{
	lock_vma_mappings(vma);
	spin_lock(&mm->page_table_lock);
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	spin_unlock(&mm->page_table_lock);
	unlock_vma_mappings(vma);

	mm->map_count++;
	validate_mm(mm);
}

static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
		     rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
{
	spinlock_t * lock = &mm->page_table_lock;
	if (!prev) {
		prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
		goto merge_next;
	}
	if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
		struct vm_area_struct * next;

		spin_lock(lock);
		prev->vm_end = end;
		next = prev->vm_next;
		if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
			prev->vm_end = next->vm_end;
			__vma_unlink(mm, next, prev);
			spin_unlock(lock);

			mm->map_count--;
			kmem_cache_free(vm_area_cachep, next);
			return 1;
		}
		spin_unlock(lock);
		return 1;
	}

	prev = prev->vm_next;
	if (prev) {
 merge_next:
		if (!can_vma_merge(prev, vm_flags))
			return 0;
		if (end == prev->vm_start) {
			spin_lock(lock);
			prev->vm_start = addr;
			spin_unlock(lock);
			return 1;
		}
	}

	return 0;
}

unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long pgoff)
{
	struct mm_struct * mm = current->mm;
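/*
 * Illustrative note (not part of the original file): do_mmap_pgoff() is
 * the common backend of the mmap(2) family, so a user-space call such as
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * arrives here with file = NULL, len = 8192 and pgoff = 0, after which
 * the prot/flags pair is translated into VM_* bits by calc_vm_flags()
 * above and the new region is merged or linked into the address space
 * via vma_merge()/vma_link().
 */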
