/*
 * ether.c -- ARM S3C2410 USB slave (gadget) Ethernet Linux driver
 * (listing: C source, page 1 of 5)
 */
					spin_unlock (&dev->lock);
					eth_start (dev, GFP_ATOMIC);
					spin_lock (&dev->lock);
				}
			} else {
				netif_stop_queue (dev->net);
				netif_carrier_off (dev->net);
			}
			value = 0;
			break;
		}
#else
		/* FIXME this is wrong, as is the assumption that
		 * all non-PXA hardware talks real CDC ...
		 */
		dev_warn (&gadget->dev, "set_interface ignored!\n");
#endif /* DEV_CONFIG_CDC */

done_set_intf:
		spin_unlock (&dev->lock);
		break;
	case USB_REQ_GET_INTERFACE:
		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)
				|| !dev->config
				|| wIndex > 1)
			break;
		if (!(cdc_active(dev) || rndis_active(dev)) && wIndex != 0)
			break;

		/* for CDC, iff carrier is on, data interface is active. */
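		/* (a note on the convention: the CDC data interface idles
		 * in altsetting 0, which has no endpoints; altsetting 1
		 * carries the bulk pair, so returning 1 here tells the
		 * host that packets can flow.)
		 */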
		if (rndis_active(dev) || wIndex != 1)
			*(u8 *)req->buf = 0;
		else
			*(u8 *)req->buf = netif_carrier_ok (dev->net) ? 1 : 0;
		value = min (wLength, (u16) 1);
		break;

#ifdef DEV_CONFIG_CDC
	case USB_CDC_SET_ETHERNET_PACKET_FILTER:
		/* see 6.2.30: no data, wIndex = interface,
		 * wValue = packet filter bitmap
		 */
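		/* for reference, the bitmap bits as defined in
		 * <linux/usb/cdc.h>:
		 *	USB_CDC_PACKET_TYPE_PROMISCUOUS		(1 << 0)
		 *	USB_CDC_PACKET_TYPE_ALL_MULTICAST	(1 << 1)
		 *	USB_CDC_PACKET_TYPE_DIRECTED		(1 << 2)
		 *	USB_CDC_PACKET_TYPE_BROADCAST		(1 << 3)
		 *	USB_CDC_PACKET_TYPE_MULTICAST		(1 << 4)
		 */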
		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
				|| !cdc_active(dev)
				|| wLength != 0
				|| wIndex > 1)
			break;
		DEBUG (dev, "packet filter %02x\n", wValue);
		dev->cdc_filter = wValue;
		value = 0;
		break;

	/* and potentially:
	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_STATISTIC:
	 */

#endif /* DEV_CONFIG_CDC */

#ifdef CONFIG_USB_ETH_RNDIS
	/* RNDIS uses the CDC command encapsulation mechanism to implement
	 * an RPC scheme, with much getting/setting of attributes by OID.
	 */
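	/* A sketch of the usual round trip: the host pushes a command in
	 * the ep0 OUT data stage (SEND_ENCAPSULATED_COMMAND, parsed below
	 * in rndis_command_complete); the device signals RESPONSE_AVAILABLE
	 * on the notification endpoint (rndis_control_ack); the host then
	 * fetches the result with GET_ENCAPSULATED_RESPONSE over ep0 IN.
	 */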
	case USB_CDC_SEND_ENCAPSULATED_COMMAND:
		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
				|| !rndis_active(dev)
				|| wLength > USB_BUFSIZ
				|| wValue
				|| rndis_control_intf.bInterfaceNumber
					!= wIndex)
			break;
		/* read the request, then process it */
		value = wLength;
		req->complete = rndis_command_complete;
		/* later, rndis_control_ack () sends a notification */
		break;

	case USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if ((USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE)
					== ctrl->bRequestType
				&& rndis_active(dev)
				// && wLength >= 0x0400
				&& !wValue
				&& rndis_control_intf.bInterfaceNumber
					== wIndex) {
			u8 *buf;

			/* return the result */
			buf = rndis_get_next_response (dev->rndis_config,
						       &value);
			if (buf) {
				memcpy (req->buf, buf, value);
				req->complete = rndis_response_complete;
				rndis_free_response(dev->rndis_config, buf);
			}
			/* else stalls ... spec says to avoid that */
		}
		break;
#endif	/* RNDIS */

	default:
		VDEBUG (dev,
			"unknown control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			wValue, wIndex, wLength);
	}

	/* respond with data transfer before status phase? */
	if (value >= 0) {
		req->length = value;
		req->zero = value < wLength
				&& (value % gadget->ep0->maxpacket) == 0;
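		/* illustrative numbers: with ep0->maxpacket == 64, a
		 * 128-byte reply to wLength == 256 ends exactly on a
		 * packet boundary, so req->zero makes a zero-length
		 * packet terminate the IN transfer; a 100-byte reply
		 * already ends with a short packet and needs no ZLP.
		 */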
		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
		if (value < 0) {
			DEBUG (dev, "ep_queue --> %d\n", value);
			req->status = 0;
			eth_setup_complete (gadget->ep0, req);
		}
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
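
/* Note: by gadget API convention, a setup() handler like eth_setup()
 * returns the nonnegative length it queued for the data stage, or a
 * negative errno; the peripheral controller driver reports a negative
 * return to the host by stalling ep0.
 */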

static void
eth_disconnect (struct usb_gadget *gadget)
{
	struct eth_dev		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	spin_lock_irqsave (&dev->lock, flags);
	netif_stop_queue (dev->net);
	netif_carrier_off (dev->net);
	eth_reset_config (dev);
	spin_unlock_irqrestore (&dev->lock, flags);

	/* FIXME RNDIS should enter RNDIS_UNINITIALIZED */

	/* next we may get setup() calls to enumerate new connections;
	 * or an unbind() during shutdown (including removing module).
	 */
}

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int eth_change_mtu (struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);

	// FIXME if rndis, don't change while link's live

	if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		return -ERANGE;
	/* no zero-length packet read wanted after mtu-sized packets */
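	/* (illustrative numbers: with a bulk maxpacket of 64, an mtu of
	 * 1010 is rejected, since 1010 + 14 == 1024 divides evenly by 64
	 * and every full-sized frame would then need a trailing
	 * zero-length packet.)
	 */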
	if (((new_mtu + sizeof (struct ethhdr)) % dev->in_ep->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;
	return 0;
}

static struct net_device_stats *eth_get_stats (struct net_device *net)
{
	return &((struct eth_dev *)netdev_priv(net))->stats;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, shortname, sizeof p->driver);
	strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev->gadget->dev.bus_id, sizeof p->bus_info);
}

static u32 eth_get_link(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	return dev->gadget->speed != USB_SPEED_UNKNOWN;
}

static struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = eth_get_link
};

static void defer_kevent (struct eth_dev *dev, int flag)
{
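	/* test_and_set_bit() latches each WORK_* flag: an already-set
	 * flag means this kevent is still pending, so it is never
	 * scheduled twice.  eth_work() clears the flag again with
	 * test_and_clear_bit() before acting on it.
	 */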
	if (test_and_set_bit (flag, &dev->todo))
		return;
	if (!schedule_work (&dev->work))
		ERROR (dev, "kevent %d may have been dropped\n", flag);
	else
		DEBUG (dev, "kevent %d scheduled\n", flag);
}

static void rx_complete (struct usb_ep *ep, struct usb_request *req);

static int
rx_submit (struct eth_dev *dev, struct usb_request *req, unsigned gfp_flags)
{
	struct sk_buff		*skb;
	int			retval = -ENOMEM;
	size_t			size;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed,
	 * but means receivers can't recover synch on their own.
	 */
	size = (sizeof (struct ethhdr) + dev->net->mtu + RX_EXTRA);
	size += dev->out_ep->maxpacket - 1;
	if (rndis_active(dev))
		size += sizeof (struct rndis_packet_msg_type);
	size -= size % dev->out_ep->maxpacket;
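	/* worked example, assuming RX_EXTRA is the 20 bytes defined
	 * earlier in this driver: with mtu 1500 and a bulk maxpacket of
	 * 64, size = 14 + 1500 + 20 = 1534, plus 63 gives 1597, and
	 * rounding down to a maxpacket multiple leaves 1536 == 24 * 64.
	 */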

	skb = alloc_skb (size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DEBUG (dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);
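	/* (NET_IP_ALIGN is typically 2: the Ethernet header then sits at
	 * offset 2, leaving the IP header at offset 16, a 4-byte
	 * boundary.)
	 */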

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue (dev->out_ep, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent (dev, WORK_RX_MEMORY);
	if (retval) {
		DEBUG (dev, "rx submit --> %d\n", retval);
		dev_kfree_skb_any (skb);
		spin_lock (&dev->lock);
		list_add (&req->list, &dev->rx_reqs);
		spin_unlock (&dev->lock);
	}
	return retval;
}

static void rx_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put (skb, req->actual);
		/* we know MaxPacketsPerTransfer == 1 here */
		if (rndis_active(dev))
			status = rndis_rm_hdr (skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			DEBUG (dev, "rx length %d\n", skb->len);
			break;
		}

		skb->dev = dev->net;
		skb->protocol = eth_type_trans (skb, dev->net);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx (skb);
		skb = NULL;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		// unlink
	case -ESHUTDOWN:		// disconnect etc
		VDEBUG (dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		// endpoint reset
		DEBUG (dev, "rx %s reset\n", ep->name);
		defer_kevent (dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any (skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		/* FALLTHROUGH */
	default:
		dev->stats.rx_errors++;
		DEBUG (dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any (skb);
	if (!netif_running (dev->net)) {
clean:
		/* nobody reading rx_reqs, so no dev->lock */
		list_add (&req->list, &dev->rx_reqs);
		req = NULL;
	}
	if (req)
		rx_submit (dev, req, GFP_ATOMIC);
}

static int prealloc (struct list_head *list, struct usb_ep *ep,
			unsigned n, unsigned gfp_flags)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests; extras get freed at "extra" */
	i = n;
	list_for_each_entry (req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	/* allocate however many more are still needed */
	while (i--) {
		req = usb_ep_alloc_request (ep, gfp_flags);
		if (!req)
			return list_empty (list) ? -ENOMEM : 0;
		list_add (&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del (&req->list);
		usb_ep_free_request (ep, req);

		if (next == list)
			break;

		req = container_of (next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags)
{
	int status;

	status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
	if (status < 0)
		goto fail;
	status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
	if (status < 0)
		goto fail;
	return 0;
fail:
	DEBUG (dev, "can't alloc requests\n");
	return status;
}

static void rx_fill (struct eth_dev *dev, unsigned gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
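	/* the lock is dropped around rx_submit(), which takes dev->lock
	 * itself when requeueing a request after a failed submit
	 */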
	spin_lock_irqsave (&dev->lock, flags);
	while (!list_empty (&dev->rx_reqs)) {
		req = container_of (dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init (&req->list);
		spin_unlock_irqrestore (&dev->lock, flags);

		if (rx_submit (dev, req, gfp_flags) < 0) {
			defer_kevent (dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave (&dev->lock, flags);
	}
	spin_unlock_irqrestore (&dev->lock, flags);
}

static void eth_work (void *_dev)
{
	struct eth_dev		*dev = _dev;

	if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running (dev->net))
			rx_fill (dev, GFP_KERNEL);
	}

	if (dev->todo)
		DEBUG (dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->stats.tx_errors++;
		VDEBUG (dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		// unlink
	case -ESHUTDOWN:		// disconnect etc
		break;
	case 0:
		dev->stats.tx_bytes += skb->len;
	}
	dev->stats.tx_packets++;

	spin_lock (&dev->lock);
	list_add (&req->list, &dev->tx_reqs);
	spin_unlock (&dev->lock);
	dev_kfree_skb_any (skb);

	atomic_dec (&dev->tx_qlen);
	if (netif_carrier_ok (dev->net))
		netif_wake_queue (dev->net);
}

static inline int eth_is_promisc (struct eth_dev *dev)
{
	/* the CDC subset defines no control requests, so no filter can
	 * ever be set; treat it as always promiscuous.  Full CDC and
	 * RNDIS honor the bitmap kept in dev->cdc_filter instead.
	 */
	if (subset_active (dev))
		return 1;
	return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;

	/* apply outgoing CDC or RNDIS filters */
	if (!eth_is_promisc (dev)) {
		u8		*dest = skb->data;

	/* ... eth_start_xmit() continues on page 2 of 5 of this listing */