📄 inode.c
struct kiocb_priv {
	struct usb_request *req;
	struct ep_data *epdata;
	void *buf;
	char __user *ubuf;
	unsigned actual;
};

static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e)
{
	struct kiocb_priv *priv = iocb->private;
	struct ep_data *epdata;
	int value;

	local_irq_disable();
	epdata = priv->epdata;
	// spin_lock(&epdata->dev->lock);
	kiocbSetCancelled(iocb);
	if (likely(epdata && epdata->ep && priv->req))
		value = usb_ep_dequeue (epdata->ep, priv->req);
	else
		value = -EINVAL;
	// spin_unlock(&epdata->dev->lock);
	local_irq_enable();

	aio_put_req(iocb);
	return value;
}

static ssize_t ep_aio_read_retry(struct kiocb *iocb)
{
	struct kiocb_priv *priv = iocb->private;
	ssize_t status = priv->actual;

	/* we "retry" to get the right mm context for this: */
	status = copy_to_user(priv->ubuf, priv->buf, priv->actual);
	if (unlikely(0 != status))
		status = -EFAULT;
	else
		status = priv->actual;
	kfree(priv->buf);
	kfree(priv);
	aio_put_req(iocb);
	return status;
}

static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct kiocb *iocb = req->context;
	struct kiocb_priv *priv = iocb->private;
	struct ep_data *epdata = priv->epdata;

	/* lock against disconnect (and ideally, cancel) */
	spin_lock(&epdata->dev->lock);
	priv->req = NULL;
	priv->epdata = NULL;
	if (NULL == iocb->ki_retry
			|| unlikely(0 == req->actual)
			|| unlikely(kiocbIsCancelled(iocb))) {
		kfree(req->buf);
		kfree(priv);
		iocb->private = NULL;
		/* aio_complete() reports bytes-transferred _and_ faults */
		if (unlikely(kiocbIsCancelled(iocb)))
			aio_put_req(iocb);
		else
			aio_complete(iocb,
				req->actual ? req->actual : req->status,
				req->status);
	} else {
		/* retry() won't report both; so we hide some faults */
		if (unlikely(0 != req->status))
			DBG(epdata->dev, "%s fault %d len %d\n",
				ep->name, req->status, req->actual);

		priv->buf = req->buf;
		priv->actual = req->actual;
		kick_iocb(iocb);
	}
	spin_unlock(&epdata->dev->lock);

	usb_ep_free_request(ep, req);
	put_ep(epdata);
}

static ssize_t
ep_aio_rwtail(
	struct kiocb *iocb,
	char *buf,
	size_t len,
	struct ep_data *epdata,
	char __user *ubuf
)
{
	struct kiocb_priv *priv = (void *) &iocb->private;
	struct usb_request *req;
	ssize_t value;

	priv = kmalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		value = -ENOMEM;
fail:
		kfree(buf);
		return value;
	}
	iocb->private = priv;
	priv->ubuf = ubuf;

	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
	if (unlikely(value < 0)) {
		kfree(priv);
		goto fail;
	}

	iocb->ki_cancel = ep_aio_cancel;
	get_ep(epdata);
	priv->epdata = epdata;
	priv->actual = 0;

	/* each kiocb is coupled to one usb_request, but we can't
	 * allocate or submit those if the host disconnected.
	 */
	spin_lock_irq(&epdata->dev->lock);
	if (likely(epdata->ep)) {
		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
		if (likely(req)) {
			priv->req = req;
			req->buf = buf;
			req->length = len;
			req->complete = ep_aio_complete;
			req->context = iocb;
			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
			if (unlikely(0 != value))
				usb_ep_free_request(epdata->ep, req);
		} else
			value = -EAGAIN;
	} else
		value = -ENODEV;
	spin_unlock_irq(&epdata->dev->lock);

	up(&epdata->lock);

	if (unlikely(value)) {
		kfree(priv);
		put_ep(epdata);
	} else
		value = -EIOCBQUEUED;
	return value;
}

static ssize_t
ep_aio_read(struct kiocb *iocb, char __user *ubuf, size_t len, loff_t o)
{
	struct ep_data *epdata = iocb->ki_filp->private_data;
	char *buf;

	if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN))
		return -EINVAL;
	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf))
		return -ENOMEM;
	iocb->ki_retry = ep_aio_read_retry;
	return ep_aio_rwtail(iocb, buf, len, epdata, ubuf);
}

static ssize_t
ep_aio_write(struct kiocb *iocb, const char __user *ubuf, size_t len, loff_t o)
{
	struct ep_data *epdata = iocb->ki_filp->private_data;
	char *buf;

	if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN)))
		return -EINVAL;
	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf))
		return -ENOMEM;
	if (unlikely(copy_from_user(buf, ubuf, len) != 0)) {
		kfree(buf);
		return -EFAULT;
	}
	return ep_aio_rwtail(iocb, buf, len, epdata, NULL);
}

/*----------------------------------------------------------------------*/

/* used after endpoint configuration */
static struct file_operations ep_io_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,

	.read = ep_read,
	.write = ep_write,
	.ioctl = ep_ioctl,
	.release = ep_release,

	.aio_read = ep_aio_read,
	.aio_write = ep_aio_write,
};
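/* Userspace sketch (not part of this file): the aio_read/aio_write hooks
 * registered just above are what back io_submit() on an endpoint file.
 * A minimal illustration with libaio, assuming "fd" is an already
 * configured OUT endpoint file; the function name is ours, not a
 * gadgetfs API:
 *
 *	#include <libaio.h>
 *
 *	int read_one_packet(int fd, void *buf, size_t len)
 *	{
 *		io_context_t ctx = 0;
 *		struct iocb cb, *cbs[1] = { &cb };
 *		struct io_event ev;
 *
 *		if (io_setup(1, &ctx) < 0)
 *			return -1;
 *		io_prep_pread(&cb, fd, buf, len, 0);	// offset is ignored here
 *		if (io_submit(ctx, 1, cbs) != 1) {
 *			io_destroy(ctx);
 *			return -1;
 *		}
 *		// blocks until ep_aio_complete()/ep_aio_read_retry() finish
 *		if (io_getevents(ctx, 1, 1, &ev, NULL) != 1) {
 *			io_destroy(ctx);
 *			return -1;
 *		}
 *		io_destroy(ctx);
 *		return (int) ev.res;	// bytes transferred, or negative errno
 *	}
 */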
/* ENDPOINT INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the endpoint configuration, configuring
 * the controller to process bulk, interrupt, or isochronous transfers
 * at the right maxpacket size, and so on.
 *
 * The descriptors are message type 1, identified by a host order u32
 * at the beginning of what's written.  Descriptor order is: full/low
 * speed descriptor, then optional high speed descriptor.
 */
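/* Userspace sketch (not part of this file) of that configuration write,
 * assuming a gadgetfs mount at /dev/gadget and a bulk IN endpoint file
 * named "ep1in-bulk"; the path, endpoint name, and helper name are
 * illustrative, and the ch9.h header path varies by kernel version:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	int open_and_config_ep(void)
 *	{
 *		struct usb_endpoint_descriptor fs, hs;
 *		unsigned char msg[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *		__u32 tag = 1;			// message type 1, host byte order
 *		int fd;
 *
 *		memset(&fs, 0, sizeof fs);
 *		fs.bLength = USB_DT_ENDPOINT_SIZE;
 *		fs.bDescriptorType = USB_DT_ENDPOINT;
 *		fs.bEndpointAddress = USB_DIR_IN | 1;
 *		fs.bmAttributes = USB_ENDPOINT_XFER_BULK;
 *		fs.wMaxPacketSize = 64;		// full speed bulk (little-endian field)
 *
 *		hs = fs;			// optional high speed variant
 *		hs.wMaxPacketSize = 512;
 *
 *		fd = open("/dev/gadget/ep1in-bulk", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		memcpy(msg, &tag, 4);
 *		memcpy(msg + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *		memcpy(msg + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *		if (write(fd, msg, sizeof msg) != sizeof msg) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// now served by ep_io_operations
 *	}
 */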
static ssize_t
ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct ep_data *data = fd->private_data;
	struct usb_ep *ep;
	u32 tag;
	int value;

	if ((value = down_interruptible (&data->lock)) < 0)
		return value;

	if (data->state != STATE_EP_READY) {
		value = -EL2HLT;
		goto fail;
	}

	value = len;
	if (len < USB_DT_ENDPOINT_SIZE + 4)
		goto fail0;

	/* we might need to change message format someday */
	if (copy_from_user (&tag, buf, 4)) {
		goto fail1;
	}
	if (tag != 1) {
		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
		goto fail0;
	}
	buf += 4;
	len -= 4;

	/* NOTE:  audio endpoint extensions not accepted here;
	 * just don't include the extra bytes.
	 */

	/* full/low speed descriptor, then high speed */
	if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
		goto fail1;
	}
	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
		goto fail0;
	if (len != USB_DT_ENDPOINT_SIZE) {
		if (len != 2 * USB_DT_ENDPOINT_SIZE)
			goto fail0;
		if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
					USB_DT_ENDPOINT_SIZE)) {
			goto fail1;
		}
		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
				|| data->hs_desc.bDescriptorType
					!= USB_DT_ENDPOINT) {
			DBG(data->dev, "config %s, bad hs length or type\n",
					data->name);
			goto fail0;
		}
	}
	value = len;

	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND) {
		value = -ENOENT;
		goto gone;
	} else if ((ep = data->ep) == NULL) {
		value = -ENODEV;
		goto gone;
	}
	switch (data->dev->gadget->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		value = usb_ep_enable (ep, &data->desc);
		if (value == 0)
			data->state = STATE_EP_ENABLED;
		break;
#ifdef HIGHSPEED
	case USB_SPEED_HIGH:
		/* fails if caller didn't provide that descriptor... */
		value = usb_ep_enable (ep, &data->hs_desc);
		if (value == 0)
			data->state = STATE_EP_ENABLED;
		break;
#endif
	default:
		DBG (data->dev, "unconnected, %s init deferred\n",
				data->name);
		data->state = STATE_EP_DEFER_ENABLE;
	}
	if (value == 0)
		fd->f_op = &ep_io_operations;
gone:
	spin_unlock_irq (&data->dev->lock);
	if (value < 0) {
fail:
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
	}
	up (&data->lock);
	return value;
fail0:
	value = -EINVAL;
	goto fail;
fail1:
	value = -EFAULT;
	goto fail;
}

static int
ep_open (struct inode *inode, struct file *fd)
{
	struct ep_data *data = inode->u.generic_ip;
	int value = -EBUSY;

	if (down_interruptible (&data->lock) != 0)
		return -EINTR;
	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND)
		value = -ENOENT;
	else if (data->state == STATE_EP_DISABLED) {
		value = 0;
		data->state = STATE_EP_READY;
		get_ep (data);
		fd->private_data = data;
		VDEBUG (data->dev, "%s ready\n", data->name);
	} else
		DBG (data->dev, "%s state %d\n",
			data->name, data->state);
	spin_unlock_irq (&data->dev->lock);
	up (&data->lock);
	return value;
}

/* used before endpoint configuration */
static struct file_operations ep_config_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,

	.open = ep_open,
	.write = ep_config,
	.release = ep_release,
};

/*----------------------------------------------------------------------*/

/* EP0 IMPLEMENTATION can be partly in userspace.
 *
 * Drivers that use this facility receive various events, including
 * control requests the kernel doesn't handle.  Drivers that don't
 * use this facility may be too simple-minded for real applications.
 */
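/* Userspace sketch (not part of this file) of that event stream, assuming
 * the ep0 file has already been opened and configured; the function name
 * is illustrative, and the gadgetfs.h header path varies by kernel version:
 *
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	static void ep0_event_loop(int ep0_fd)
 *	{
 *		struct usb_gadgetfs_event event;
 *
 *		while (read(ep0_fd, &event, sizeof event) == sizeof event) {
 *			switch (event.type) {
 *			case GADGETFS_CONNECT:
 *			case GADGETFS_DISCONNECT:
 *			case GADGETFS_SUSPEND:
 *			case GADGETFS_NOP:
 *				break;		// housekeeping events
 *			case GADGETFS_SETUP:
 *				// event.u.setup is a usb_ctrlrequest the
 *				// kernel didn't handle; answer it by reading
 *				// or writing ep0_fd for the data/status stage.
 *				break;
 *			}
 *		}
 *	}
 */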
static inline void ep0_readable (struct dev_data *dev)
{
	wake_up (&dev->wait);
	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
}

static void clean_req (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data *dev = ep->driver_data;

	if (req->buf != dev->rbuf) {
		usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
		req->buf = dev->rbuf;
		req->dma = DMA_ADDR_INVALID;
	}
	req->complete = epio_complete;
	dev->setup_out_ready = 0;
}

static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data *dev = ep->driver_data;
	int free = 1;

	/* for control OUT, data must still get to userspace */
	if (!dev->setup_in) {
		dev->setup_out_error = (req->status != 0);
		if (!dev->setup_out_error)
			free = 0;
		dev->setup_out_ready = 1;
		ep0_readable (dev);
	} else if (dev->state == STATE_SETUP)
		dev->state = STATE_CONNECTED;

	/* clean up as appropriate */
	if (free && req->buf != &dev->rbuf)
		clean_req (ep, req);
	req->complete = epio_complete;
}

static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
{
	struct dev_data *dev = ep->driver_data;

	if (dev->setup_out_ready) {
		DBG (dev, "ep0 request busy!\n");
		return -EBUSY;
	}
	if (len > sizeof (dev->rbuf))
		req->buf = usb_ep_alloc_buffer (ep, len, &req->dma, GFP_ATOMIC);
	if (req->buf == 0) {
		req->buf = dev->rbuf;
		return -ENOMEM;
	}
	req->complete = ep0_complete;
	req->length = len;
	return 0;
}
static ssize_t
ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data *dev = fd->private_data;
	ssize_t retval;
	enum ep0_state state;

	spin_lock_irq (&dev->lock);

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;
		goto done;
	}

	/* control DATA stage */
	if ((state = dev->state) == STATE_SETUP) {

		if (dev->setup_in) {		/* stall IN */
			VDEBUG(dev, "ep0in stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_CONNECTED;

		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
			struct usb_ep *ep = dev->gadget->ep0;
			struct usb_request *req = dev->req;

			if ((retval = setup_req (ep, req, 0)) == 0)
				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
			dev->state = STATE_CONNECTED;

			/* assume that was SET_CONFIGURATION */
			if (dev->current_config) {
				unsigned power;
#ifdef HIGHSPEED
				if (dev->gadget->speed == USB_SPEED_HIGH)
					power = dev->hs_config->bMaxPower;
				else
#endif
					power = dev->config->bMaxPower;
				usb_gadget_vbus_draw(dev->gadget, 2 * power);
			}

		} else {			/* collect OUT data */
			if ((fd->f_flags & O_NONBLOCK) != 0
					&& !dev->setup_out_ready) {
				retval = -EAGAIN;
				goto done;
			}
			spin_unlock_irq (&dev->lock);
			retval = wait_event_interruptible (dev->wait,
					dev->setup_out_ready != 0);

			/* FIXME state could change from under us */
			spin_lock_irq (&dev->lock);
			if (retval)
				goto done;
			if (dev->setup_out_error)
				retval = -EIO;
			else {
				len = min (len, (size_t)dev->req->actual);
// FIXME don't call this with the spinlock held ...
				if (copy_to_user (buf, dev->req->buf, len))
					retval = -EFAULT;
				else
					retval = len;
				clean_req (dev->gadget->ep0, dev->req);
				/* NOTE userspace can't yet choose to stall */
			}
		}
		goto done;
	}

	/* else normal: return event data */
	if (len < sizeof dev->event [0]) {
		retval = -EINVAL;
		goto done;
	}
	len -= len % sizeof (struct usb_gadgetfs_event);
	dev->usermode_setup = 1;

scan:
	/* return queued events right away */
	if (dev->ev_next != 0) {
		unsigned i, n;
		int tmp = dev->ev_next;

		len = min (len, tmp * sizeof (struct usb_gadgetfs_event));
		n = len / sizeof (struct usb_gadgetfs_event);

		/* ep0 can't deliver events when STATE_SETUP */
		for (i = 0; i < n; i++) {
			if (dev->event [i].type == GADGETFS_SETUP) {
				len = n = i + 1;
				len *= sizeof (struct usb_gadgetfs_event);
				n = 0;
				break;
			}
		}
		spin_unlock_irq (&dev->lock);
		if (copy_to_user (buf, &dev->event, len))
			retval = -EFAULT;
		else
			retval = len;
		if (len > 0) {
			len /= sizeof (struct usb_gadgetfs_event);

			/* NOTE this doesn't guard against broken drivers;
			 * concurrent ep0 readers may lose events.
			 */