zd_usb.c
		goto error_free_urb;
	}
	intr->urb = urb;
	spin_unlock_irq(&intr->lock);

	/* TODO: make it a DMA buffer */
	r = -ENOMEM;
	transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL);
	if (!transfer_buffer) {
		dev_dbg_f(zd_usb_dev(usb),
			"couldn't allocate transfer_buffer\n");
		goto error_set_urb_null;
	}

	udev = zd_usb_to_usbdev(usb);
	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
			 transfer_buffer, USB_MAX_EP_INT_BUFFER,
			 int_urb_complete, usb, intr->interval);

	dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
	r = usb_submit_urb(urb, GFP_KERNEL);
	if (r) {
		dev_dbg_f(zd_usb_dev(usb),
			 "Couldn't submit urb. Error number %d\n", r);
		goto error;
	}

	return 0;
error:
	kfree(transfer_buffer);
error_set_urb_null:
	spin_lock_irq(&intr->lock);
	intr->urb = NULL;
	spin_unlock_irq(&intr->lock);
error_free_urb:
	usb_free_urb(urb);
out:
	return r;
}

void zd_usb_disable_int(struct zd_usb *usb)
{
	unsigned long flags;
	struct zd_usb_interrupt *intr = &usb->intr;
	struct urb *urb;

	spin_lock_irqsave(&intr->lock, flags);
	urb = intr->urb;
	if (!urb) {
		spin_unlock_irqrestore(&intr->lock, flags);
		return;
	}
	intr->urb = NULL;
	spin_unlock_irqrestore(&intr->lock, flags);

	usb_kill_urb(urb);
	dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
	usb_free_urb(urb);
}

static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
			     unsigned int length)
{
	int i;
	struct zd_mac *mac = zd_usb_to_mac(usb);
	const struct rx_length_info *length_info;

	if (length < sizeof(struct rx_length_info)) {
		/* It's not a complete packet anyhow. */
		struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
		ieee->stats.rx_errors++;
		ieee->stats.rx_length_errors++;
		return;
	}
	length_info = (struct rx_length_info *)
		(buffer + length - sizeof(struct rx_length_info));

	/* It might be that three frames are merged into a single URB
	 * transaction. We have to check for the length info tag.
	 *
	 * While testing we discovered that length_info might be unaligned,
	 * because if USB transactions are merged, the last packet will not
	 * be padded. Unaligned access might also happen if the length_info
	 * structure is not present.
	 */
	if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) {
		unsigned int l, k, n;
		for (i = 0, l = 0;; i++) {
			k = le16_to_cpu(get_unaligned(&length_info->length[i]));
			if (k == 0)
				return;
			n = l+k;
			if (n > length)
				return;
			zd_mac_rx_irq(mac, buffer+l, k);
			if (i >= 2)
				return;
			l = (n+3) & ~3;
		}
	} else {
		zd_mac_rx_irq(mac, buffer, length);
	}
}

static void rx_urb_complete(struct urb *urb)
{
	struct zd_usb *usb;
	struct zd_usb_rx *rx;
	const u8 *buffer;
	unsigned int length;

	switch (urb->status) {
	case 0:
		break;
	case -ESHUTDOWN:
	case -EINVAL:
	case -ENODEV:
	case -ENOENT:
	case -ECONNRESET:
	case -EPIPE:
		return;
	default:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		goto resubmit;
	}

	buffer = urb->transfer_buffer;
	length = urb->actual_length;
	usb = urb->context;
	rx = &usb->rx;

	if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
		/* If there is an old first fragment, we don't care.
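		 *
		 * (Added note, an interpretation of the code below: the final
		 * USB packet of this transfer is nearly full, which presumably
		 * means the device had to split the logical transfer across
		 * URBs. The data is buffered in rx->fragment and combined with
		 * the next URB's data before it is handed to
		 * handle_rx_packet().)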
		 */
		dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
		ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
		spin_lock(&rx->lock);
		memcpy(rx->fragment, buffer, length);
		rx->fragment_length = length;
		spin_unlock(&rx->lock);
		goto resubmit;
	}

	spin_lock(&rx->lock);
	if (rx->fragment_length > 0) {
		/* We are on a second fragment, we believe */
		ZD_ASSERT(length + rx->fragment_length <=
			  ARRAY_SIZE(rx->fragment));
		dev_dbg_f(urb_dev(urb), "*** second fragment ***\n");
		memcpy(rx->fragment+rx->fragment_length, buffer, length);
		handle_rx_packet(usb, rx->fragment,
				 rx->fragment_length + length);
		rx->fragment_length = 0;
		spin_unlock(&rx->lock);
	} else {
		spin_unlock(&rx->lock);
		handle_rx_packet(usb, buffer, length);
	}

resubmit:
	usb_submit_urb(urb, GFP_ATOMIC);
}

static struct urb *alloc_urb(struct zd_usb *usb)
{
	struct usb_device *udev = zd_usb_to_usbdev(usb);
	struct urb *urb;
	void *buffer;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return NULL;
	buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
				  &urb->transfer_dma);
	if (!buffer) {
		usb_free_urb(urb);
		return NULL;
	}

	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
			  buffer, USB_MAX_RX_SIZE, rx_urb_complete, usb);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	return urb;
}

static void free_urb(struct urb *urb)
{
	if (!urb)
		return;
	usb_buffer_free(urb->dev, urb->transfer_buffer_length,
			urb->transfer_buffer, urb->transfer_dma);
	usb_free_urb(urb);
}

int zd_usb_enable_rx(struct zd_usb *usb)
{
	int i, r;
	struct zd_usb_rx *rx = &usb->rx;
	struct urb **urbs;

	dev_dbg_f(zd_usb_dev(usb), "\n");

	r = -ENOMEM;
	urbs = kcalloc(URBS_COUNT, sizeof(struct urb *), GFP_KERNEL);
	if (!urbs)
		goto error;
	for (i = 0; i < URBS_COUNT; i++) {
		urbs[i] = alloc_urb(usb);
		if (!urbs[i])
			goto error;
	}

	ZD_ASSERT(!irqs_disabled());
	spin_lock_irq(&rx->lock);
	if (rx->urbs) {
		spin_unlock_irq(&rx->lock);
		r = 0;
		goto error;
	}
	rx->urbs = urbs;
	rx->urbs_count = URBS_COUNT;
	spin_unlock_irq(&rx->lock);

	for (i = 0; i < URBS_COUNT; i++) {
		r = usb_submit_urb(urbs[i], GFP_KERNEL);
		if (r)
			goto error_submit;
	}

	return 0;
error_submit:
	for (i = 0; i < URBS_COUNT; i++) {
		usb_kill_urb(urbs[i]);
	}
	spin_lock_irq(&rx->lock);
	rx->urbs = NULL;
	rx->urbs_count = 0;
	spin_unlock_irq(&rx->lock);
error:
	if (urbs) {
		for (i = 0; i < URBS_COUNT; i++)
			free_urb(urbs[i]);
	}
	return r;
}

void zd_usb_disable_rx(struct zd_usb *usb)
{
	int i;
	unsigned long flags;
	struct urb **urbs;
	unsigned int count;
	struct zd_usb_rx *rx = &usb->rx;

	spin_lock_irqsave(&rx->lock, flags);
	urbs = rx->urbs;
	count = rx->urbs_count;
	spin_unlock_irqrestore(&rx->lock, flags);
	if (!urbs)
		return;

	for (i = 0; i < count; i++) {
		usb_kill_urb(urbs[i]);
		free_urb(urbs[i]);
	}
	kfree(urbs);

	spin_lock_irqsave(&rx->lock, flags);
	rx->urbs = NULL;
	rx->urbs_count = 0;
	spin_unlock_irqrestore(&rx->lock, flags);
}

static void tx_urb_complete(struct urb *urb)
{
	int r;

	switch (urb->status) {
	case 0:
		break;
	case -ESHUTDOWN:
	case -EINVAL:
	case -ENODEV:
	case -ENOENT:
	case -ECONNRESET:
	case -EPIPE:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		break;
	default:
		dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
		goto resubmit;
	}
free_urb:
	usb_buffer_free(urb->dev, urb->transfer_buffer_length,
			urb->transfer_buffer, urb->transfer_dma);
	usb_free_urb(urb);
	return;
resubmit:
	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r) {
		dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
		goto free_urb;
	}
}

/* Puts the frame on the USB endpoint. It doesn't wait for
 * completion. The frame must contain the control set.
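 *
 * (Added note, derived from the function body: returns 0 on success, or a
 * negative error code, -ENOMEM if the URB or its DMA buffer cannot be
 * allocated, otherwise the usb_submit_urb() result.)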
 */
int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length)
{
	int r;
	struct usb_device *udev = zd_usb_to_usbdev(usb);
	struct urb *urb;
	void *buffer;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		r = -ENOMEM;
		goto out;
	}

	buffer = usb_buffer_alloc(zd_usb_to_usbdev(usb), length, GFP_ATOMIC,
				  &urb->transfer_dma);
	if (!buffer) {
		r = -ENOMEM;
		goto error_free_urb;
	}
	memcpy(buffer, frame, length);

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
			  buffer, length, tx_urb_complete, NULL);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r)
		goto error;
	return 0;
error:
	usb_buffer_free(zd_usb_to_usbdev(usb), length,
			buffer, urb->transfer_dma);
error_free_urb:
	usb_free_urb(urb);
out:
	return r;
}

static inline void init_usb_interrupt(struct zd_usb *usb)
{
	struct zd_usb_interrupt *intr = &usb->intr;

	spin_lock_init(&intr->lock);
	intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
	init_completion(&intr->read_regs.completion);
	intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT);
}

static inline void init_usb_rx(struct zd_usb *usb)
{
	struct zd_usb_rx *rx = &usb->rx;

	spin_lock_init(&rx->lock);
	if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
		rx->usb_packet_size = 512;
	} else {
		rx->usb_packet_size = 64;
	}
	ZD_ASSERT(rx->fragment_length == 0);
}

static inline void init_usb_tx(struct zd_usb *usb)
{
	/* FIXME: at this point we will allocate a fixed number of urb's for
	 * use in a cyclic scheme */
}

void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
		 struct usb_interface *intf)
{
	memset(usb, 0, sizeof(*usb));
	usb->intf = usb_get_intf(intf);

	usb_set_intfdata(usb->intf, netdev);
	init_usb_interrupt(usb);
	init_usb_tx(usb);
	init_usb_rx(usb);
}

void zd_usb_clear(struct zd_usb *usb)
{
	usb_set_intfdata(usb->intf, NULL);
	usb_put_intf(usb->intf);
	ZD_MEMCLEAR(usb, sizeof(*usb));
	/* FIXME: usb_interrupt, usb_tx, usb_rx?
	 */
}

static const char *speed(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return "low";
	case USB_SPEED_FULL:
		return "full";
	case USB_SPEED_HIGH:
		return "high";
	default:
		return "unknown speed";
	}
}

static int scnprint_id(struct usb_device *udev, char *buffer, size_t size)
{
	return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s",
		le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct),
		get_bcdDevice(udev),
		speed(udev->speed));
}

int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size)
{
	struct usb_device *udev = interface_to_usbdev(usb->intf);
	return scnprint_id(udev, buffer, size);
}

#ifdef DEBUG
static void print_id(struct usb_device *udev)
{
	char buffer[40];

	scnprint_id(udev, buffer, sizeof(buffer));
	buffer[sizeof(buffer)-1] = 0;
	dev_dbg_f(&udev->dev, "%s\n", buffer);
}
#else
#define print_id(udev) do { } while (0)
#endif

static int eject_installer(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_host_interface *iface_desc = &intf->altsetting[0];
	struct usb_endpoint_descriptor *endpoint;
	unsigned char *cmd;
	u8 bulk_out_ep;
	int r;

	/* Find bulk out endpoint */
	endpoint = &iface_desc->endpoint[1].desc;
	if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
	    (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
	    USB_ENDPOINT_XFER_BULK) {
		bulk_out_ep = endpoint->bEndpointAddress;
	} else {
		dev_err(&udev->dev,
			"zd1211rw: Could not find bulk out endpoint\n");
		return -ENODEV;
	}

	cmd = kzalloc(31, GFP_KERNEL);
	if (cmd == NULL)
		return -ENODEV;

	/* USB bulk command block */
	cmd[0] = 0x55;	/* bulk command signature */
	cmd[1] = 0x53;	/* bulk command signature */
	cmd[2] = 0x42;	/* bulk command signature */