/*
 * usbnet.c -- USB host-to-host network driver (NetChip / Prolific links).
 * NOTE: this file was recovered from a web-page scrape; the page-header
 * chrome that preceded the code has been replaced by this comment.
 */
|| skb->len > FRAMED_SIZE (dev->net.mtu)) { dev->stats.rx_frame_errors++; dbg ("rx framesize %d range %d..%d mtu %d", skb->len, (int)MIN_FRAMED, (int)FRAMED_SIZE (dev->net.mtu), dev->net.mtu); return 0; } header = (struct nc_header *) skb->data; le16_to_cpus (&header->hdr_len); le16_to_cpus (&header->packet_len); if (FRAMED_SIZE (header->packet_len) > MAX_PACKET) { dev->stats.rx_frame_errors++; dbg ("packet too big, %d", header->packet_len); return 0; } else if (header->hdr_len < MIN_HEADER) { dev->stats.rx_frame_errors++; dbg ("header too short, %d", header->hdr_len); return 0; } else if (header->hdr_len > MIN_HEADER) { // out of band data for us? dbg ("header OOB, %d bytes", header->hdr_len - MIN_HEADER); // switch (vendor/product ids) { ... } } skb_pull (skb, header->hdr_len); trailer = (struct nc_trailer *) (skb->data + skb->len - sizeof *trailer); skb_trim (skb, skb->len - sizeof *trailer); if ((header->packet_len & 0x01) == 0) { if (skb->data [header->packet_len] != PAD_BYTE) { dev->stats.rx_frame_errors++; dbg ("bad pad"); return 0; } skb_trim (skb, skb->len - 1); } if (skb->len != header->packet_len) { dev->stats.rx_frame_errors++; dbg ("bad packet len %d (expected %d)", skb->len, header->packet_len); return 0; } if (header->packet_id != get_unaligned (&trailer->packet_id)) { dev->stats.rx_fifo_errors++; dbg ("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", header->packet_id, trailer->packet_id); return 0; }#if 0 devdbg (dev, "frame <rx h %d p %d id %d", header->hdr_len, header->packet_len, header->packet_id);#endif return 1;}static struct sk_buff *net1080_tx_fixup (struct usbnet *dev, struct sk_buff *skb, int flags){ int padlen; struct sk_buff *skb2; padlen = ((skb->len + sizeof (struct nc_header) + sizeof (struct nc_trailer)) & 0x01) ? 
0 : 1; if (!skb_cloned (skb)) { int headroom = skb_headroom (skb); int tailroom = skb_tailroom (skb); if ((padlen + sizeof (struct nc_trailer)) <= tailroom && sizeof (struct nc_header) <= headroom) return skb; if ((sizeof (struct nc_header) + padlen + sizeof (struct nc_trailer)) < (headroom + tailroom)) { skb->data = memmove (skb->head + sizeof (struct nc_header), skb->data, skb->len); skb->tail = skb->data + skb->len; return skb; } } skb2 = skb_copy_expand (skb, sizeof (struct nc_header), sizeof (struct nc_trailer) + padlen, flags); dev_kfree_skb_any (skb); return skb2;}static const struct driver_info net1080_info = { description: "NetChip TurboCONNECT", flags: FLAG_FRAMING_NC, reset: net1080_reset, check_connect: net1080_check_connect, rx_fixup: net1080_rx_fixup, tx_fixup: net1080_tx_fixup, in: 1, out: 1, // direction distinguishes these epsize: 64,};#endif /* CONFIG_USB_NET1080 */#ifdef CONFIG_USB_PL2301/*------------------------------------------------------------------------- * * Prolific PL-2301/PL-2302 driver ... http://www.prolifictech.com * *-------------------------------------------------------------------------*//* * Bits 0-4 can be used for software handshaking; they're set from * one end, cleared from the other, "read" with the interrupt byte. */#define PL_S_EN (1<<7) /* (feature only) suspend enable *//* reserved bit -- rx ready (6) ? 
*/#define PL_TX_READY (1<<5) /* (interrupt only) transmit ready */#define PL_RESET_OUT (1<<4) /* reset output pipe */#define PL_RESET_IN (1<<3) /* reset input pipe */#define PL_TX_C (1<<2) /* transmission complete */#define PL_TX_REQ (1<<1) /* transmission received */#define PL_PEER_E (1<<0) /* peer exists */static inline intpl_vendor_req (struct usbnet *dev, u8 req, u8 val, u8 index){ return usb_control_msg (dev->udev, usb_rcvctrlpipe (dev->udev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, 0, 0, CONTROL_TIMEOUT_JIFFIES);}static inline intpl_clear_QuickLink_features (struct usbnet *dev, int val){ return pl_vendor_req (dev, 1, (u8) val, 0);}static inline intpl_set_QuickLink_features (struct usbnet *dev, int val){ return pl_vendor_req (dev, 3, (u8) val, 0);}/*-------------------------------------------------------------------------*/static int pl_reset (struct usbnet *dev){ return pl_set_QuickLink_features (dev, PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);}static int pl_check_connect (struct usbnet *dev){ // FIXME test interrupt data PL_PEER_E bit // plus, there's some handshake done by // the prolific win32 driver... 
dbg ("%s: assuming peer is connected", dev->net.name); return 0;}static const struct driver_info prolific_info = { description: "Prolific PL-2301/PL-2302", flags: FLAG_NO_SETINT, /* some PL-2302 versions seem to fail usb_set_interface() */ reset: pl_reset, check_connect: pl_check_connect, in: 3, out: 2, epsize: 64,};#endif /* CONFIG_USB_PL2301 *//*------------------------------------------------------------------------- * * Network Device Driver (peer link to "Host Device", from USB host) * *-------------------------------------------------------------------------*/static int usbnet_change_mtu (struct net_device *net, int new_mtu){ struct usbnet *dev = (struct usbnet *) net->priv; if (new_mtu <= MIN_PACKET || new_mtu > MAX_PACKET) return -EINVAL;#ifdef CONFIG_USB_NET1080 if (((dev->driver_info->flags) & FLAG_FRAMING_NC)) { if (FRAMED_SIZE (new_mtu) > MAX_PACKET) return -EINVAL; }#endif#ifdef CONFIG_USB_GENESYS if (((dev->driver_info->flags) & FLAG_FRAMING_GL) && new_mtu > GL_MAX_PACKET_LEN) return -EINVAL;#endif // no second zero-length packet read wanted after mtu-sized packets if (((new_mtu + sizeof (struct ethhdr)) % EP_SIZE (dev)) == 0) return -EDOM; net->mtu = new_mtu; return 0;}/*-------------------------------------------------------------------------*/static struct net_device_stats *usbnet_get_stats (struct net_device *net){ return &((struct usbnet *) net->priv)->stats;}/*-------------------------------------------------------------------------*//* urb completions are currently in_irq; avoid doing real work then. 
*/static void defer_bh (struct usbnet *dev, struct sk_buff *skb){ struct sk_buff_head *list = skb->list; unsigned long flags; spin_lock_irqsave (&list->lock, flags); __skb_unlink (skb, list); spin_unlock (&list->lock); spin_lock (&dev->done.lock); __skb_queue_tail (&dev->done, skb); if (dev->done.qlen == 1) tasklet_schedule (&dev->bh); spin_unlock_irqrestore (&dev->done.lock, flags);}/*-------------------------------------------------------------------------*/static void rx_complete (struct urb *urb);static void rx_submit (struct usbnet *dev, struct urb *urb, int flags){ struct sk_buff *skb; struct skb_data *entry; int retval = 0; unsigned long lockflags; size_t size;#ifdef CONFIG_USB_NET1080 if (dev->driver_info->flags & FLAG_FRAMING_NC) size = FRAMED_SIZE (dev->net.mtu); else#endif#ifdef CONFIG_USB_GENESYS if (dev->driver_info->flags & FLAG_FRAMING_GL) size = GL_RCV_BUF_SIZE; else#endif size = (sizeof (struct ethhdr) + dev->net.mtu); if ((skb = alloc_skb (size, flags)) == 0) { dbg ("no rx skb"); tasklet_schedule (&dev->bh); usb_free_urb (urb); return; } entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; entry->state = rx_start; entry->length = 0; FILL_BULK_URB (urb, dev->udev, usb_rcvbulkpipe (dev->udev, dev->driver_info->in), skb->data, size, rx_complete, skb); urb->transfer_flags |= USB_ASYNC_UNLINK;#ifdef REALLY_QUEUE urb->transfer_flags |= USB_QUEUE_BULK;#endif#if 0 // Idle-but-posted reads with UHCI really chew up // PCI bandwidth unless FSBR is disabled urb->transfer_flags |= USB_NO_FSBR;#endif spin_lock_irqsave (&dev->rxq.lock, lockflags); if (netif_running (&dev->net)) { if ((retval = usb_submit_urb (urb)) != 0) { dbg ("%s rx submit, %d", dev->net.name, retval); tasklet_schedule (&dev->bh); } else { __skb_queue_tail (&dev->rxq, skb); } } else { dbg ("rx: stopped"); retval = -ENOLINK; } spin_unlock_irqrestore (&dev->rxq.lock, lockflags); if (retval) { dev_kfree_skb_any (skb); usb_free_urb (urb); 
}}/*-------------------------------------------------------------------------*/static inline void rx_process (struct usbnet *dev, struct sk_buff *skb){ if (dev->driver_info->rx_fixup && !dev->driver_info->rx_fixup (dev, skb)) goto error; // else network stack removes extra byte if we forced a short packet if (skb->len) { int status;// FIXME: eth_copy_and_csum "small" packets to new SKB (small < ~200 bytes) ? skb->dev = &dev->net; skb->protocol = eth_type_trans (skb, &dev->net); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len;#ifdef VERBOSE devdbg (dev, "< rx, len %d, type 0x%x", skb->len + sizeof (struct ethhdr), skb->protocol);#endif memset (skb->cb, 0, sizeof (struct skb_data)); status = netif_rx (skb); if (status != NET_RX_SUCCESS) devdbg (dev, "netif_rx status %d", status); } else { dbg ("drop");error: dev->stats.rx_errors++; skb_queue_tail (&dev->done, skb); }}/*-------------------------------------------------------------------------*/static void rx_complete (struct urb *urb){ struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; skb_put (skb, urb->actual_length); entry->state = rx_done; entry->urb = 0; switch (urb_status) { // success case 0: if (MIN_PACKET > skb->len || skb->len > MAX_PACKET) { entry->state = rx_cleanup; dev->stats.rx_errors++; dev->stats.rx_length_errors++; dbg ("rx length %d", skb->len); } break; // software-driven interface shutdown case -ECONNRESET: // usb-ohci, usb-uhci case -ECONNABORTED: // uhci ... for usb-uhci, INTR dbg ("%s shutdown, code %d", dev->net.name, urb_status); entry->state = rx_cleanup; // do urb frees only in the tasklet entry->urb = urb; urb = 0; break; // data overrun ... flush fifo? case -EOVERFLOW: dev->stats.rx_over_errors++; // FALLTHROUGH default: // on unplug we'll get a burst of ETIMEDOUT/EILSEQ // till the khubd gets and handles its interrupt. 
entry->state = rx_cleanup; dev->stats.rx_errors++; dbg ("%s rx: status %d", dev->net.name, urb_status); break; } defer_bh (dev, skb); if (urb) { if (netif_running (&dev->net)) { rx_submit (dev, urb, GFP_ATOMIC); return; } }#ifdef VERBOSE dbg ("no read resubmitted");#endif /* VERBOSE */}/*-------------------------------------------------------------------------*/// unlink pending rx/tx; completion handlers do all other cleanupstatic int unlink_urbs (struct sk_buff_head *q){ unsigned long flags; struct sk_buff *skb, *skbnext; int count = 0; spin_lock_irqsave (&q->lock, flags); for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) { struct skb_data *entry; struct urb *urb; int retval; entry = (struct skb_data *) skb->cb; urb = entry->urb; skbnext = skb->next; // during some PM-driven resume scenarios, // these (async) unlinks complete immediately retval = usb_unlink_urb (urb); if (retval < 0) dbg ("unlink urb err, %d", retval); else count++; } spin_unlock_irqrestore (&q->lock, flags); return count;}/*-------------------------------------------------------------------------*/// precondition: never called in_interruptstatic int usbnet_stop (struct net_device *net){ struct usbnet *dev = (struct usbnet *) net->priv; int temp; DECLARE_WAIT_QUEUE_HEAD (unlink_wakeup); DECLARE_WAITQUEUE (wait, current); mutex_lock (&dev->mutex); netif_stop_queue (net); devdbg (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld", dev->stats.rx_packets, dev->stats.tx_packets, dev->stats.rx_errors, dev->stats.tx_errors ); // ensure there are no more active urbs add_wait_queue (&unlink_wakeup, &wait); dev->wait = &unlink_wakeup; temp = unlink_urbs (&dev->txq) + unlink_urbs (&dev->rxq); // maybe wait for deletions to finish. 
while (skb_queue_len (&dev->rxq) && skb_queue_len (&dev->txq) && skb_queue_len (&dev->done)) { set_current_state (TASK_UNINTERRUPTIBLE); schedule_timeout (UNLINK_TIMEOUT_JIFFIES); dbg ("waited for %d urb completions", temp); } dev->wait = 0; remove_wait_queue (&unlink_wakeup, &wait); mutex_unlock (&dev->mutex); return 0;}/*-------------------------------------------------------------------------*/// posts reads, and enables write queing// precondition: never called in_interruptstatic int usbnet_open (struct net_device *net){ struct usbnet *dev = (struct usbnet *) net->priv; int retval = 0; struct driver_info *info = dev->driver_info; mutex_lock (&dev->mutex);
/*
 * (Trailing web-page UI chrome — a keyboard-shortcut help panel from the
 * code-hosting page this file was scraped from — removed.)
 */