main.c
    }
    if (count > quantum - q_pos)
        count = quantum - q_pos; /* write only up to the end of this quantum */

    if (copy_from_user (dptr->data[s_pos]+q_pos, buf, count)) {
        retval = -EFAULT;
        goto nomem;
    }
    *f_pos += count;

    /* update the size */
    if (dev->size < *f_pos)
        dev->size = *f_pos;
    up (&dev->sem);
    return count;

  nomem:
    up (&dev->sem);
    return retval;
}

/*
 * The ioctl() implementation
 */

int scullv_ioctl (struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg)
{
    int err = 0, ret = 0, tmp;

    /* don't even decode wrong cmds: better returning ENOTTY than EFAULT */
    if (_IOC_TYPE(cmd) != SCULLV_IOC_MAGIC) return -ENOTTY;
    if (_IOC_NR(cmd) > SCULLV_IOC_MAXNR) return -ENOTTY;

    /*
     * the type is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. Note that the type is user-oriented, while
     * verify_area is kernel-oriented, so the concept of "read" and
     * "write" is reversed
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
        err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
        err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
    if (err) return -EFAULT;

    switch(cmd) {

    case SCULLV_IOCRESET:
        scullv_qset = SCULLV_QSET;
        scullv_order = SCULLV_ORDER;
        break;

    case SCULLV_IOCSORDER: /* Set: arg points to the value */
        ret = __GET_USER(scullv_order, (int *) arg);
        break;

    case SCULLV_IOCTORDER: /* Tell: arg is the value */
        scullv_order = arg;
        break;

    case SCULLV_IOCGORDER: /* Get: arg is pointer to result */
        ret = __PUT_USER (scullv_order, (int *) arg);
        break;

    case SCULLV_IOCQORDER: /* Query: return it (it's positive) */
        return scullv_order;

    case SCULLV_IOCXORDER: /* eXchange: use arg as pointer */
        tmp = scullv_order;
        ret = __GET_USER(scullv_order, (int *) arg);
        if (ret == 0)
            ret = __PUT_USER(tmp, (int *) arg);
        break;

    case SCULLV_IOCHORDER: /* sHift: like Tell + Query */
        tmp = scullv_order;
        scullv_order = arg;
        return tmp;

    case SCULLV_IOCSQSET:
        ret = __GET_USER(scullv_qset, (int *) arg);
        break;

    case SCULLV_IOCTQSET:
        scullv_qset = arg;
        break;

    case SCULLV_IOCGQSET:
        ret = __PUT_USER(scullv_qset, (int *)arg);
        break;

    case SCULLV_IOCQQSET:
        return scullv_qset;

    case SCULLV_IOCXQSET:
        tmp = scullv_qset;
        ret = __GET_USER(scullv_qset, (int *) arg);
        if (ret == 0)
            ret = __PUT_USER(tmp, (int *)arg);
        break;

    case SCULLV_IOCHQSET:
        tmp = scullv_qset;
        scullv_qset = arg;
        return tmp;

    default: /* redundant, as cmd was checked against MAXNR */
        return -ENOTTY;
    }
    return ret;
}

/*
 * The "extended" operations
 */

loff_t scullv_llseek (struct file *filp, loff_t off, int whence)
{
    ScullV_Dev *dev = filp->private_data;
    long newpos;

    switch(whence) {
    case 0: /* SEEK_SET */
        newpos = off;
        break;

    case 1: /* SEEK_CUR */
        newpos = filp->f_pos + off;
        break;

    case 2: /* SEEK_END */
        newpos = dev->size + off;
        break;

    default: /* can't happen */
        return -EINVAL;
    }
    if (newpos < 0) return -EINVAL;
    filp->f_pos = newpos;
    return newpos;
}

/*
 * Mmap *is* available, but confined in a different file
 */

#ifndef LINUX_20
extern int scullv_mmap(struct file *filp, struct vm_area_struct *vma);
#else
extern int scullv_mmap(struct inode *inode, struct file *filp,
                       struct vm_area_struct *vma);
#endif

/*
 * The 2.0 wrappers
 */
#ifdef LINUX_20

int scullv_lseek_20 (struct inode *ino, struct file *f,
                     off_t offset, int whence)
{
    return (int)scullv_llseek(f, offset, whence);
}

int scullv_read_20 (struct inode *ino, struct file *f, char *buf, int count)
{
    return (int)scullv_read(f, buf, count, &f->f_pos);
}

int scullv_write_20 (struct inode *ino, struct file *f, const char *b, int c)
{
    return (int)scullv_write(f, b, c, &f->f_pos);
}

void scullv_release_20 (struct inode *ino, struct file *f)
{
    scullv_release(ino, f);
}

#define scullv_llseek scullv_lseek_20
#define scullv_read scullv_read_20
#define scullv_write scullv_write_20
#define scullv_release scullv_release_20
#define llseek lseek

#endif /* LINUX_20 */

/*
 * The fops
 */

struct file_operations scullv_fops = {
    llseek: scullv_llseek,
    read: scullv_read,
    write: scullv_write,
    ioctl: scullv_ioctl,
    mmap: scullv_mmap,
    open: scullv_open,
    release: scullv_release,
};

int scullv_trim(ScullV_Dev *dev)
{
    ScullV_Dev *next, *dptr;
    int qset = dev->qset; /* "dev" is not-null */
    int i;

    if (dev->vmas) /* don't trim: there are active mappings */
        return -EBUSY;

    for (dptr = dev; dptr; dptr = next) { /* all the list items */
        if (dptr->data) {
            /* Release the quantum-set */
            for (i = 0; i < qset; i++)
                if (dptr->data[i])
                    vfree(dptr->data[i]);
            kfree(dptr->data);
            dptr->data = NULL;
        }
        next = dptr->next;
        if (dptr != dev) kfree(dptr); /* all of them but the first */
    }
    dev->size = 0;
    dev->qset = scullv_qset;
    dev->order = scullv_order;
    dev->next = NULL;
    return 0;
}

/*
 * Finally, the module stuff
 */

int scullv_init(void)
{
    int result, i;

    SET_MODULE_OWNER(&scullv_fops);

    /*
     * Register your major, and accept a dynamic number
     */
    result = register_chrdev(scullv_major, "scullv", &scullv_fops);
    if (result < 0) return result;
    if (scullv_major == 0) scullv_major = result; /* dynamic */

    /*
     * allocate the devices -- we can't have them static, as the number
     * can be specified at load time
     */
    scullv_devices = kmalloc(scullv_devs * sizeof (ScullV_Dev), GFP_KERNEL);
    if (!scullv_devices) {
        result = -ENOMEM;
        goto fail_malloc;
    }
    memset(scullv_devices, 0, scullv_devs * sizeof (ScullV_Dev));
    for (i = 0; i < scullv_devs; i++) {
        scullv_devices[i].order = scullv_order;
        scullv_devices[i].qset = scullv_qset;
        sema_init (&scullv_devices[i].sem, 1);
    }

#ifdef SCULLV_USE_PROC /* only when available */
    create_proc_read_entry("scullvmem", 0, NULL, scullv_read_procmem, NULL);
#endif
    return 0; /* succeed */

  fail_malloc:
    unregister_chrdev(scullv_major, "scullv");
    return result;
}

void scullv_cleanup(void)
{
    int i;

    unregister_chrdev(scullv_major, "scullv");
#ifdef SCULLV_USE_PROC
    remove_proc_entry("scullvmem", 0);
#endif
    for (i = 0; i < scullv_devs; i++)
        scullv_trim(scullv_devices + i);
    kfree(scullv_devices);
}

module_init(scullv_init);
module_exit(scullv_cleanup);
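For reference, here is a minimal user-space sketch of how the order/qset ioctls above could be exercised. It assumes the command macros are available from the driver's header (named "scullv.h" here) and that a device node such as /dev/scullv0 exists; both names are illustrative assumptions, not something this file defines.

/*
 * User-space sketch: drive the S/G/Q ioctl variants implemented above.
 * "scullv.h" and "/dev/scullv0" are assumed names for illustration only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "scullv.h"     /* assumed header defining the SCULLV_IOC* macros */

int main(void)
{
    int fd = open("/dev/scullv0", O_RDWR);  /* assumed device node */
    int order;

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* "S" (Set): pass the new value through a pointer argument */
    order = 4;
    if (ioctl(fd, SCULLV_IOCSORDER, &order) < 0)
        perror("SCULLV_IOCSORDER");

    /* "G" (Get): the driver writes the current value back through arg */
    if (ioctl(fd, SCULLV_IOCGORDER, &order) == 0)
        printf("order is now %d\n", order);

    /* "Q" (Query): the value comes back as the (positive) return code */
    printf("qset is %d\n", ioctl(fd, SCULLV_IOCQQSET));

    close(fd);
    return 0;
}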