ldd scull/main.c (excerpt)
ssize_t scull_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
    struct scull_dev *dev = filp->private_data;
    struct scull_qset *dptr;
    int quantum = dev->quantum, qset = dev->qset;
    int itemsize = quantum * qset;
    int item, s_pos, q_pos, rest;
    ssize_t retval = -ENOMEM; /* value used in "goto out" statements */

    if (down_interruptible(&dev->sem))
        return -ERESTARTSYS;

    /* find listitem, qset index and offset in the quantum */
    item = (long)*f_pos / itemsize;
    rest = (long)*f_pos % itemsize;
    s_pos = rest / quantum; q_pos = rest % quantum;
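    /*
     * Worked example of the arithmetic above, with illustrative numbers
     * (not taken from this listing): for quantum = 4000 and qset = 1000,
     * itemsize is 4,000,000.  A file position of 8,005,000 then yields
     * item = 2 (the third listitem), rest = 5000, s_pos = 1 (the second
     * quantum pointer in that qset) and q_pos = 1000 (the byte offset
     * inside that quantum).
     */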

    /* follow the list up to the right position */
    dptr = scull_follow(dev, item);
    if (dptr == NULL)
        goto out;
    if (!dptr->data) {
        dptr->data = kmalloc(qset * sizeof(char *), GFP_KERNEL);
        if (!dptr->data)
            goto out;
        memset(dptr->data, 0, qset * sizeof(char *));
    }
    if (!dptr->data[s_pos]) {
        dptr->data[s_pos] = kmalloc(quantum, GFP_KERNEL);
        if (!dptr->data[s_pos])
            goto out;
    }
    /* write only up to the end of this quantum */
    if (count > quantum - q_pos)
        count = quantum - q_pos;
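    /*
     * Because count is clamped here, a write that would cross a quantum
     * boundary returns a short count; the caller is expected to issue
     * another write() for the remaining bytes.
     */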

    if (copy_from_user(dptr->data[s_pos] + q_pos, buf, count)) {
        retval = -EFAULT;
        goto out;
    }
    *f_pos += count;
    retval = count;

    /* update the size */
    if (dev->size < *f_pos)
        dev->size = *f_pos;

  out:
    up(&dev->sem);
    return retval;
}

/*
 * The ioctl() implementation
 */

int scull_ioctl(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
    int err = 0, tmp;
    int retval = 0;

    /*
     * extract the type and number bitfields, and don't decode
     * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
     */
    if (_IOC_TYPE(cmd) != SCULL_IOC_MAGIC) return -ENOTTY;
    if (_IOC_NR(cmd) > SCULL_IOC_MAXNR) return -ENOTTY;
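
    /*
     * For context: SCULL_IOC_MAGIC, SCULL_IOC_MAXNR and the SCULL_IOC*
     * command codes are defined in scull.h using the standard _IO* macros.
     * The lines below only illustrate that pattern; the real magic
     * character and command numbers live in scull.h, not here:
     *
     *   #define SCULL_IOC_MAGIC   'k'
     *   #define SCULL_IOCSQUANTUM _IOW(SCULL_IOC_MAGIC, 1, int)
     *   #define SCULL_IOCGQUANTUM _IOR(SCULL_IOC_MAGIC, 4, int)
     *   #define SCULL_IOC_MAXNR   14
     */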

    /*
     * the direction is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. `Type' is user-oriented, while
     * access_ok is kernel-oriented, so the concept of "read" and
     * "write" is reversed
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
        err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
        err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
    if (err) return -EFAULT;

    switch(cmd) {

      case SCULL_IOCRESET:
        scull_quantum = SCULL_QUANTUM;
        scull_qset = SCULL_QSET;
        break;

      case SCULL_IOCSQUANTUM: /* Set: arg points to the value */
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        retval = __get_user(scull_quantum, (int __user *)arg);
        break;

      case SCULL_IOCTQUANTUM: /* Tell: arg is the value */
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        scull_quantum = arg;
        break;

      case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */
        retval = __put_user(scull_quantum, (int __user *)arg);
        break;

      case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */
        return scull_quantum;

      case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_quantum;
        retval = __get_user(scull_quantum, (int __user *)arg);
        if (retval == 0)
            retval = __put_user(tmp, (int __user *)arg);
        break;

      case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_quantum;
        scull_quantum = arg;
        return tmp;

      case SCULL_IOCSQSET:
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        retval = __get_user(scull_qset, (int __user *)arg);
        break;

      case SCULL_IOCTQSET:
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        scull_qset = arg;
        break;

      case SCULL_IOCGQSET:
        retval = __put_user(scull_qset, (int __user *)arg);
        break;

      case SCULL_IOCQQSET:
        return scull_qset;

      case SCULL_IOCXQSET:
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_qset;
        retval = __get_user(scull_qset, (int __user *)arg);
        if (retval == 0)
            retval = put_user(tmp, (int __user *)arg);
        break;

      case SCULL_IOCHQSET:
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;
        tmp = scull_qset;
        scull_qset = arg;
        return tmp;

        /*
         * The following two change the buffer size for scullpipe.
         * The scullpipe device uses this same ioctl method, just to
         * write less code. Actually, it's the same driver, isn't it?
         */

      case SCULL_P_IOCTSIZE:
        scull_p_buffer = arg;
        break;

      case SCULL_P_IOCQSIZE:
        return scull_p_buffer;

      default: /* redundant, as cmd was checked against MAXNR */
        return -ENOTTY;
    }
    return retval;
}
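
/*
 * User-space view of these commands (a sketch only: the device path,
 * fd handling and error checks are illustrative, not part of this file):
 *
 *   int fd = open("/dev/scull0", O_RDWR);
 *   int q;
 *   ioctl(fd, SCULL_IOCGQUANTUM, &q);      // Get: read via pointer
 *   q = 2000;
 *   ioctl(fd, SCULL_IOCSQUANTUM, &q);      // Set: write via pointer
 *   q = ioctl(fd, SCULL_IOCQQUANTUM);      // Query: value in return code
 */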


/*
 * The "extended" operations -- only seek
 */

loff_t scull_llseek(struct file *filp, loff_t off, int whence)
{
    struct scull_dev *dev = filp->private_data;
    loff_t newpos;

    switch(whence) {
      case 0: /* SEEK_SET */
        newpos = off;
        break;

      case 1: /* SEEK_CUR */
        newpos = filp->f_pos + off;
        break;

      case 2: /* SEEK_END */
        newpos = dev->size + off;
        break;

      default: /* can't happen */
        return -EINVAL;
    }
    if (newpos < 0) return -EINVAL;
    filp->f_pos = newpos;
    return newpos;
}


struct file_operations scull_fops = {
    .owner =   THIS_MODULE,
    .llseek =  scull_llseek,
    .read =    scull_read,
    .write =   scull_write,
    .ioctl =   scull_ioctl,
    .open =    scull_open,
    .release = scull_release,
};
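
/*
 * Note: the .ioctl slot above is the legacy (pre-2.6.36) file_operations
 * entry that this code targets; on current kernels that field no longer
 * exists and the method would be registered as .unlocked_ioctl, whose
 * prototype drops the inode argument.
 */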

/*
 * Finally, the module stuff
 */

/*
 * The cleanup function is used to handle initialization failures as well.
 * Therefore, it must be careful to work correctly even if some of the items
 * have not been initialized.
 */
void scull_cleanup_module(void)
{
    int i;
    dev_t devno = MKDEV(scull_major, scull_minor);

    /* Get rid of our char dev entries */
    if (scull_devices) {
        for (i = 0; i < scull_nr_devs; i++) {
            scull_trim(scull_devices + i);
            cdev_del(&scull_devices[i].cdev);
        }
        kfree(scull_devices);
    }

#ifdef SCULL_DEBUG /* use proc only if debugging */
    scull_remove_proc();
#endif

    /* cleanup_module is never called if registering failed */
    unregister_chrdev_region(devno, scull_nr_devs);

    /* and call the cleanup functions for friend devices */
    scull_p_cleanup();
    scull_access_cleanup();
}


/*
 * Set up the char_dev structure for this device.
 */
static void scull_setup_cdev(struct scull_dev *dev, int index)
{
    int err, devno = MKDEV(scull_major, scull_minor + index);

    cdev_init(&dev->cdev, &scull_fops);
    dev->cdev.owner = THIS_MODULE;
    dev->cdev.ops = &scull_fops;
    err = cdev_add(&dev->cdev, devno, 1);
    /* Fail gracefully if need be */
    if (err)
        printk(KERN_NOTICE "Error %d adding scull%d\n", err, index);
}


int scull_init_module(void)
{
    int result, i;
    dev_t dev = 0;

    /*
     * Get a range of minor numbers to work with, asking for a dynamic
     * major unless directed otherwise at load time.
     */
    if (scull_major) {
        dev = MKDEV(scull_major, scull_minor);
        result = register_chrdev_region(dev, scull_nr_devs, "scull");
    } else {
        result = alloc_chrdev_region(&dev, scull_minor, scull_nr_devs,
                "scull");
        scull_major = MAJOR(dev);
    }
    if (result < 0) {
        printk(KERN_WARNING "scull: can't get major %d\n", scull_major);
        return result;
    }

    /*
     * allocate the devices -- we can't have them static, as the number
     * can be specified at load time
     */
    scull_devices = kmalloc(scull_nr_devs * sizeof(struct scull_dev), GFP_KERNEL);
    if (!scull_devices) {
        result = -ENOMEM;
        goto fail; /* Make this more graceful */
    }
    memset(scull_devices, 0, scull_nr_devs * sizeof(struct scull_dev));

    /* Initialize each device. */
    for (i = 0; i < scull_nr_devs; i++) {
        scull_devices[i].quantum = scull_quantum;
        scull_devices[i].qset = scull_qset;
        init_MUTEX(&scull_devices[i].sem);
        scull_setup_cdev(&scull_devices[i], i);
    }
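    /*
     * init_MUTEX(&sem) above sets the semaphore to 1 (unlocked).  On
     * kernels where init_MUTEX() has been removed, the equivalent call
     * is sema_init(&scull_devices[i].sem, 1).
     */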

    /* At this point call the init function for any friend device */
    dev = MKDEV(scull_major, scull_minor + scull_nr_devs);
    dev += scull_p_init(dev);
    dev += scull_access_init(dev);

#ifdef SCULL_DEBUG /* only when debugging */
    scull_create_proc();
#endif

    return 0; /* succeed */

  fail:
    scull_cleanup_module();
    return result;
}

module_init(scull_init_module);
module_exit(scull_cleanup_module);
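
/*
 * A typical load sequence (manual equivalent of a scull_load-style helper
 * script, which is assumed and not shown in this file; module path, device
 * name and minor numbers are illustrative):
 *
 *   # insmod ./scull.ko
 *   # grep -w scull /proc/devices        # note the dynamic major, N
 *   # mknod /dev/scull0 c N 0            # one node per device, minor 0 upward
 *   # echo hello > /dev/scull0           # exercises scull_write() above
 */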