/*
 * mem.c — captured from a web viewer of the Linux kernel source
 * ("linux kernel source code" · C · 915 lines total · page 1 of 2).
 */
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__) && !defined(__arch_um__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__) && !defined(__arch_um__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruptions
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* same behaviour as i386. PAT always set to cached and MTRRs control the
	   caching behaviour.
	   Hopefully a full PAT implementation will fix that soon. */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through a file pointer
	 * that was marked O_SYNC will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/* Generic check: a physical range is valid if it lies below high_memory. */
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

/* Generic fallback: any pfn range may be mmapped. */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

/*
 * This function writes to the *physical* memory; f_pos is the target
 * physical address.
 */
static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			/* Report a partial write, fail only if nothing copied. */
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
/* Pick the page protection for a physical mapping: non-cached if needed. */
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

/* mmap of /dev/mem: map the requested physical range into the vma. */
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}

/* mmap of /dev/kmem: translate the kernel-virtual offset, then reuse mmap_mem. */
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;
	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;

/*
 * NOTE(review): end of page 1 of 2 of this capture — the file continues on
 * page 2; do_write_kmem is truncated at this point.
 */