main.c
    if (count > quantum - q_pos)
        count = quantum - q_pos; /* write only up to the end of this quantum */

    if (copy_from_user (dptr->data[s_pos]+q_pos, buf, count)) {
        retval = -EFAULT;
        goto nomem;
    }
    *f_pos += count;

    /* update the size */
    if (dev->size < *f_pos)
        dev->size = *f_pos;
    up (&dev->sem);
    return count;

  nomem:
    up (&dev->sem);
    return retval;
}
/*
 * The ioctl() implementation
 */
int scullp_ioctl (struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg)
{
    int err = 0, ret = 0, tmp;

    /* don't even decode wrong cmds: better returning ENOTTY than EFAULT */
    if (_IOC_TYPE(cmd) != SCULLP_IOC_MAGIC) return -ENOTTY;
    if (_IOC_NR(cmd) > SCULLP_IOC_MAXNR) return -ENOTTY;
    /*
     * The direction field is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. Note that the direction is user-oriented, while
     * access_ok is kernel-oriented, so the concept of "read" and
     * "write" is reversed.
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
        err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
        err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
    if (err) return -EFAULT;

    switch(cmd) {

      case SCULLP_IOCRESET:
        scullp_qset = SCULLP_QSET;
        scullp_order = SCULLP_ORDER;
        break;

      case SCULLP_IOCSORDER: /* Set: arg points to the value */
        ret = __GET_USER(scullp_order, (int *) arg);
        break;

      case SCULLP_IOCTORDER: /* Tell: arg is the value */
        scullp_order = arg;
        break;

      case SCULLP_IOCGORDER: /* Get: arg is pointer to result */
        ret = __PUT_USER(scullp_order, (int *) arg);
        break;

      case SCULLP_IOCQORDER: /* Query: return it (it's positive) */
        return scullp_order;

      case SCULLP_IOCXORDER: /* eXchange: use arg as pointer */
        tmp = scullp_order;
        ret = __GET_USER(scullp_order, (int *) arg);
        if (ret == 0)
            ret = __PUT_USER(tmp, (int *) arg);
        break;

      case SCULLP_IOCHORDER: /* sHift: like Tell + Query */
        tmp = scullp_order;
        scullp_order = arg;
        return tmp;

      case SCULLP_IOCSQSET:
        ret = __GET_USER(scullp_qset, (int *) arg);
        break;

      case SCULLP_IOCTQSET:
        scullp_qset = arg;
        break;

      case SCULLP_IOCGQSET:
        ret = __PUT_USER(scullp_qset, (int *) arg);
        break;

      case SCULLP_IOCQQSET:
        return scullp_qset;

      case SCULLP_IOCXQSET:
        tmp = scullp_qset;
        ret = __GET_USER(scullp_qset, (int *) arg);
        if (ret == 0)
            ret = __PUT_USER(tmp, (int *) arg);
        break;

      case SCULLP_IOCHQSET:
        tmp = scullp_qset;
        scullp_qset = arg;
        return tmp;

      default: /* redundant, as cmd was checked against MAXNR */
        return -ENOTTY;
    }
    return ret;
}
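/*
 * The S/T/G/Q/X/H command pairs above follow the usual scull convention:
 * Set and Get pass a pointer through arg, Tell and Query move the value
 * directly, eXchange and sHift combine the two. A minimal user-space sketch
 * of driving them follows. It is not part of the driver: the header name
 * "scullp.h" and the node /dev/scullp0 are assumptions, and the block is
 * compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "scullp.h"                        /* assumed to define SCULLP_IOC* */

int main(void)
{
    int fd = open("/dev/scullp0", O_RDWR); /* node name is an assumption */
    int order;

    if (fd < 0)
        return 1;

    order = ioctl(fd, SCULLP_IOCQORDER);   /* Query: value is the return code */
    printf("current order: %d\n", order);

    order = 1;
    ioctl(fd, SCULLP_IOCSORDER, &order);   /* Set: arg points to the value */
    ioctl(fd, SCULLP_IOCTORDER, 2);        /* Tell: arg is the value itself */

    close(fd);
    return 0;
}
#endif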
/*
 * The "extended" operations
 */
loff_t scullp_llseek (struct file *filp, loff_t off, int whence)
{
    ScullP_Dev *dev = filp->private_data;
    long newpos;

    switch(whence) {
      case 0: /* SEEK_SET */
        newpos = off;
        break;

      case 1: /* SEEK_CUR */
        newpos = filp->f_pos + off;
        break;

      case 2: /* SEEK_END */
        newpos = dev->size + off;
        break;

      default: /* can't happen */
        return -EINVAL;
    }
    if (newpos < 0) return -EINVAL;
    filp->f_pos = newpos;
    return newpos;
}
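/*
 * Usage note (not driver code): because case 2 above computes the position
 * from dev->size, a user-space lseek(fd, 0, SEEK_END) on an open scullp
 * descriptor returns the number of bytes the device currently holds.
 */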
/*
 * Mmap *is* available, but confined in a different file
 */
#ifndef LINUX_20
extern int scullp_mmap(struct file *filp, struct vm_area_struct *vma);
#else
extern int scullp_mmap(struct inode *inode, struct file *filp,
                       struct vm_area_struct *vma);
#endif
/*
 * The 2.0 wrappers
 */
#ifdef LINUX_20
int scullp_lseek_20 (struct inode *ino, struct file *f,
                     off_t offset, int whence)
{
    return (int)scullp_llseek(f, offset, whence);
}

int scullp_read_20 (struct inode *ino, struct file *f, char *buf, int count)
{
    return (int)scullp_read(f, buf, count, &f->f_pos);
}

int scullp_write_20 (struct inode *ino, struct file *f, const char *b, int c)
{
    return (int)scullp_write(f, b, c, &f->f_pos);
}

void scullp_release_20 (struct inode *ino, struct file *f)
{
    scullp_release(ino, f);
}

#define scullp_llseek scullp_lseek_20
#define scullp_read scullp_read_20
#define scullp_write scullp_write_20
#define scullp_release scullp_release_20
#define llseek lseek
#endif /* LINUX_20 */
/*
 * The fops
 */
struct file_operations scullp_fops = {
    llseek: scullp_llseek,
    read: scullp_read,
    write: scullp_write,
    ioctl: scullp_ioctl,
    mmap: scullp_mmap,
    open: scullp_open,
    release: scullp_release,
};
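/*
 * The "label:" initializer syntax above is the old GCC extension that
 * 2.0/2.2/2.4-era code relied on. On later compilers the same table would
 * normally be spelled with C99 designated initializers; a sketch, with the
 * field names of the struct file_operations this listing targets:
 *
 *     struct file_operations scullp_fops = {
 *         .llseek  = scullp_llseek,
 *         .read    = scullp_read,
 *         .write   = scullp_write,
 *         .ioctl   = scullp_ioctl,
 *         .mmap    = scullp_mmap,
 *         .open    = scullp_open,
 *         .release = scullp_release,
 *     };
 */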
int scullp_trim(ScullP_Dev *dev)
{
    ScullP_Dev *next, *dptr;
    int qset = dev->qset; /* "dev" is not-null */
    int i;

    if (dev->vmas) /* don't trim: there are active mappings */
        return -EBUSY;

    for (dptr = dev; dptr; dptr = next) { /* all the list items */
        if (dptr->data) {
            /* This code frees a whole quantum-set */
            for (i = 0; i < qset; i++)
                if (dptr->data[i])
                    free_pages((unsigned long)(dptr->data[i]),
                               dptr->order);
            kfree(dptr->data);
            dptr->data = NULL;
        }
        next = dptr->next;
        if (dptr != dev) kfree(dptr); /* all of them but the first */
    }
    dev->size = 0;
    dev->qset = scullp_qset;
    dev->order = scullp_order;
    dev->next = NULL;
    return 0;
}
/*
 * Finally, the module stuff
 */
int scullp_init(void)
{
    int result, i;

    SET_MODULE_OWNER(&scullp_fops);

    /*
     * Register your major, and accept a dynamic number
     */
    result = register_chrdev(scullp_major, "scullp", &scullp_fops);
    if (result < 0) return result;
    if (scullp_major == 0) scullp_major = result; /* dynamic */

    /*
     * allocate the devices -- we can't have them static, as the number
     * can be specified at load time
     */
    scullp_devices = kmalloc(scullp_devs * sizeof (ScullP_Dev), GFP_KERNEL);
    if (!scullp_devices) {
        result = -ENOMEM;
        goto fail_malloc;
    }
    memset(scullp_devices, 0, scullp_devs * sizeof (ScullP_Dev));
    for (i = 0; i < scullp_devs; i++) {
        scullp_devices[i].order = scullp_order;
        scullp_devices[i].qset = scullp_qset;
        sema_init (&scullp_devices[i].sem, 1);
    }

#ifdef SCULLP_USE_PROC /* only when available */
    create_proc_read_entry("scullpmem", 0, NULL, scullp_read_procmem, NULL);
#endif
    return 0; /* succeed */

  fail_malloc:
    unregister_chrdev(scullp_major, "scullp");
    return result;
}
void scullp_cleanup(void)
{
    int i;

    unregister_chrdev(scullp_major, "scullp");

#ifdef SCULLP_USE_PROC
    remove_proc_entry("scullpmem", 0);
#endif

    for (i = 0; i < scullp_devs; i++)
        scullp_trim(scullp_devices + i);
    kfree(scullp_devices);
}
module_init(scullp_init);
module_exit(scullp_cleanup);