mem.c
From "Linux Kernel 2.6.9 for OMAP1710" · C source code · 734 lines total · page 1 of 2
C
734 行
if (verify_area(VERIFY_READ,buf,count)) return -EFAULT; while (count-- > 0 && i < 65536) { char c; if (__get_user(c, tmp)) return -EFAULT; outb(c,i); i++; tmp++; } *ppos = i; return tmp-buf;}#endifstatic ssize_t read_null(struct file * file, char __user * buf, size_t count, loff_t *ppos){ return 0;}static ssize_t write_null(struct file * file, const char __user * buf, size_t count, loff_t *ppos){ return count;}#ifdef CONFIG_MMU/* * For fun, we are using the MMU for this. */static inline size_t read_zero_pagealigned(char __user * buf, size_t size){ struct mm_struct *mm; struct vm_area_struct * vma; unsigned long addr=(unsigned long)buf; mm = current->mm; /* Oops, this was forgotten before. -ben */ down_read(&mm->mmap_sem); /* For private mappings, just map in zero pages. */ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { unsigned long count; if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0) goto out_up; if (vma->vm_flags & (VM_SHARED | VM_HUGETLB)) break; count = vma->vm_end - addr; if (count > size) count = size; zap_page_range(vma, addr, count, NULL); zeromap_page_range(vma, addr, count, PAGE_COPY); size -= count; buf += count; addr += count; if (size == 0) goto out_up; } up_read(&mm->mmap_sem); /* The shared case is hard. Let's do the conventional zeroing. */ do { unsigned long unwritten = clear_user(buf, PAGE_SIZE); if (unwritten) return size + unwritten - PAGE_SIZE; cond_resched(); buf += PAGE_SIZE; size -= PAGE_SIZE; } while (size); return size;out_up: up_read(&mm->mmap_sem); return size;}static ssize_t read_zero(struct file * file, char __user * buf, size_t count, loff_t *ppos){ unsigned long left, unwritten, written = 0; if (!count) return 0; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; left = count; /* do we want to be clever? Arbitrary cut-off */ if (count >= PAGE_SIZE*4) { unsigned long partial; /* How much left of the page? 
*/ partial = (PAGE_SIZE-1) & -(unsigned long) buf; unwritten = clear_user(buf, partial); written = partial - unwritten; if (unwritten) goto out; left -= partial; buf += partial; unwritten = read_zero_pagealigned(buf, left & PAGE_MASK); written += (left & PAGE_MASK) - unwritten; if (unwritten) goto out; buf += left & PAGE_MASK; left &= ~PAGE_MASK; } unwritten = clear_user(buf, left); written += left - unwritten;out: return written ? written : -EFAULT;}static int mmap_zero(struct file * file, struct vm_area_struct * vma){ if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; return 0;}#else /* CONFIG_MMU */static ssize_t read_zero(struct file * file, char * buf, size_t count, loff_t *ppos){ size_t todo = count; while (todo) { size_t chunk = todo; if (chunk > 4096) chunk = 4096; /* Just for latency reasons */ if (clear_user(buf, chunk)) return -EFAULT; buf += chunk; todo -= chunk; cond_resched(); } return count;}static int mmap_zero(struct file * file, struct vm_area_struct * vma){ return -ENOSYS;}#endif /* CONFIG_MMU */static ssize_t write_full(struct file * file, const char __user * buf, size_t count, loff_t *ppos){ return -ENOSPC;}/* * Special lseek() function for /dev/null and /dev/zero. Most notably, you * can fopen() both devices with "a" now. This was previously impossible. * -- SRB. */static loff_t null_lseek(struct file * file, loff_t offset, int orig){ return file->f_pos = 0;}/* * The memory devices use the full 32/64 bits of the offset, and so we cannot * check against negative addresses: they are ok. The return value is weird, * though, in that case (0). * * also note that seeking relative to the "end of file" isn't supported: * it has no meaning, so it returns -EINVAL. 
*/static loff_t memory_lseek(struct file * file, loff_t offset, int orig){ loff_t ret; down(&file->f_dentry->d_inode->i_sem); switch (orig) { case 0: file->f_pos = offset; ret = file->f_pos; force_successful_syscall_return(); break; case 1: file->f_pos += offset; ret = file->f_pos; force_successful_syscall_return(); break; default: ret = -EINVAL; } up(&file->f_dentry->d_inode->i_sem); return ret;}static int open_port(struct inode * inode, struct file * filp){ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;}#define mmap_kmem mmap_mem#define zero_lseek null_lseek#define full_lseek null_lseek#define write_zero write_null#define read_full read_zero#define open_mem open_port#define open_kmem open_memstatic struct file_operations mem_fops = { .llseek = memory_lseek, .read = read_mem, .write = write_mem, .mmap = mmap_mem, .open = open_mem,};static struct file_operations kmem_fops = { .llseek = memory_lseek, .read = read_kmem, .write = write_kmem, .mmap = mmap_kmem, .open = open_kmem,};static struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, .write = write_null,};#if defined(CONFIG_ISA) || !defined(__mc68000__)static struct file_operations port_fops = { .llseek = memory_lseek, .read = read_port, .write = write_port, .open = open_port,};#endifstatic struct file_operations zero_fops = { .llseek = zero_lseek, .read = read_zero, .write = write_zero, .mmap = mmap_zero,};static struct file_operations full_fops = { .llseek = full_lseek, .read = read_full, .write = write_full,};static ssize_t kmsg_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos){ char *tmp; int ret; tmp = kmalloc(count + 1, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; ret = -EFAULT; if (!copy_from_user(tmp, buf, count)) { tmp[count] = 0; ret = printk("%s", tmp); } kfree(tmp); return ret;}static struct file_operations kmsg_fops = { .write = kmsg_write,};static int memory_open(struct inode * inode, struct file * filp){ switch (iminor(inode)) { case 1: filp->f_op 
= &mem_fops; break; case 2: filp->f_op = &kmem_fops; break; case 3: filp->f_op = &null_fops; break;#if defined(CONFIG_ISA) || !defined(__mc68000__) case 4: filp->f_op = &port_fops; break;#endif case 5: filp->f_op = &zero_fops; break; case 7: filp->f_op = &full_fops; break; case 8: filp->f_op = &random_fops; break; case 9: filp->f_op = &urandom_fops; break; case 11: filp->f_op = &kmsg_fops; break; default: return -ENXIO; } if (filp->f_op && filp->f_op->open) return filp->f_op->open(inode,filp); return 0;}static struct file_operations memory_fops = { .open = memory_open, /* just a selector for the real open */};static const struct { unsigned int minor; char *name; umode_t mode; struct file_operations *fops;} devlist[] = { /* list of minor devices */ {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, {3, "null", S_IRUGO | S_IWUGO, &null_fops},#if defined(CONFIG_ISA) || !defined(__mc68000__) {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},#endif {5, "zero", S_IRUGO | S_IWUGO, &zero_fops}, {7, "full", S_IRUGO | S_IWUGO, &full_fops}, {8, "random", S_IRUGO | S_IWUSR, &random_fops}, {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}, {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},};static struct class_simple *mem_class;static int __init chr_dev_init(void){ int i; if (register_chrdev(MEM_MAJOR,"mem",&memory_fops)) printk("unable to get major %d for memory devs\n", MEM_MAJOR); mem_class = class_simple_create(THIS_MODULE, "mem"); for (i = 0; i < ARRAY_SIZE(devlist); i++) { class_simple_device_add(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor), NULL, devlist[i].name); devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor), S_IFCHR | devlist[i].mode, devlist[i].name); } return 0;}fs_initcall(chr_dev_init);
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?