ether.c

From "omap3 linux 2.6, redundant code stripped with nocc" · C source · 2,360 lines total · page 1/5

C
2,360
字号
}

/* ethtool hook: identify the driver, its version, the gadget
 * controller's name, and the controller's bus id.
 */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, shortname, sizeof p->driver);
	strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy (p->bus_info, dev->gadget->dev.bus_id, sizeof p->bus_info);
}

/* ethtool hook: "link up" means the USB link has enumerated at some
 * known speed.
 */
static u32 eth_get_link(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);

	return dev->gadget->speed != USB_SPEED_UNKNOWN;
}

static struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = eth_get_link
};

/* Schedule eth_work() to run the deferred action named by @flag.
 * The dev->todo bitmask dedupes requests, so each flag is queued at
 * most once until eth_work() clears it.
 */
static void defer_kevent (struct eth_dev *dev, int flag)
{
	if (test_and_set_bit (flag, &dev->todo))
		return;		/* already pending */
	if (!schedule_work (&dev->work))
		ERROR (dev, "kevent %d may have been dropped\n", flag);
	else
		DEBUG (dev, "kevent %d scheduled\n", flag);
}

static void rx_complete (struct usb_ep *ep, struct usb_request *req);

/* Allocate an skb sized for one OUT transfer and queue @req on the
 * OUT endpoint.  On failure the request goes back onto dev->rx_reqs,
 * and a WORK_RX_MEMORY retry is deferred when memory was the issue.
 * Returns 0 or a negative errno.
 */
static int
rx_submit (struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff		*skb;
	int			retval = -ENOMEM;
	size_t			size;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed,
	 * but means receivers can't recover synch on their own.
	 */
	size = (sizeof (struct ethhdr) + dev->net->mtu + RX_EXTRA);
	size += dev->out_ep->maxpacket - 1;
	if (rndis_active(dev))
		size += sizeof (struct rndis_packet_msg_type);
	size -= size % dev->out_ep->maxpacket;	/* round down to N*maxpacket */

	if ((skb = alloc_skb (size + NET_IP_ALIGN, gfp_flags)) == 0) {
		DEBUG (dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue (dev->out_ep, req, gfp_flags);
	if (retval == -ENOMEM)
		/* the label below is also reached from the alloc_skb
		 * failure above, where retval is still the initial -ENOMEM
		 */
enomem:
		defer_kevent (dev, WORK_RX_MEMORY);
	if (retval) {
		DEBUG (dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock(&dev->req_lock);
		list_add (&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
	}
	return retval;
}

/* OUT endpoint completion: hand a received frame to the network
 * stack (stripping the RNDIS data header first when RNDIS is
 * active), then recycle the request via rx_submit() while the
 * interface is running, or park it on dev->rx_reqs otherwise.
 */
static void rx_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put (skb, req->actual);
		/* we know MaxPacketsPerTransfer == 1 here */
		if (rndis_active(dev))
			status = rndis_rm_hdr (skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			DEBUG (dev, "rx length %d\n", skb->len);
			break;
		}

		skb->protocol = eth_type_trans (skb, dev->net);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx (skb);
		skb = NULL;	/* ownership passed to the stack */
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		// unlink
	case -ESHUTDOWN:		// disconnect etc
		VDEBUG (dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		// endpoint reset
		DEBUG (dev, "rx %s reset\n", ep->name);
		defer_kevent (dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any (skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		dev->stats.rx_errors++;
		DEBUG (dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any (skb);
	if (!netif_running (dev->net)) {
		/* park the request; don't resubmit while down */
clean:
		spin_lock(&dev->req_lock);
		list_add (&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit (dev, req, GFP_ATOMIC);
}

/* Grow or shrink @list until it holds exactly @n requests for @ep.
 * Returns 0 on success; -ENOMEM when @n is zero or when nothing at
 * all could be allocated into an empty list (a partially-filled
 * list is treated as success).
 */
static int prealloc (struct list_head *list, struct usb_ep *ep,
			unsigned n, gfp_t gfp_flags)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry (req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request (ep, gfp_flags);
		if (!req)
			return list_empty (list) ? -ENOMEM : 0;
		list_add (&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del (&req->list);
		usb_ep_free_request (ep, req);

		if (next == list)
			break;
		req = container_of (next, struct usb_request, list);
	}
	return 0;
}

/* Size both the tx and rx request pools to @n entries each, under
 * dev->req_lock.  Returns 0 or the first prealloc() error.
 */
static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
{
	int status;

	spin_lock(&dev->req_lock);
	status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
	if (status < 0)
		goto fail;
	status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DEBUG (dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

/* Submit every idle rx request.  A submit failure defers the refill
 * to the workqueue (WORK_RX_MEMORY) rather than looping here.
 */
static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty (&dev->rx_reqs)) {
		req = container_of (dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init (&req->list);

		/* drop the lock around rx_submit(): its error path and
		 * its completion handler take dev->req_lock themselves
		 */
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit (dev, req, gfp_flags) < 0) {
			defer_kevent (dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

/* Workqueue handler: services the dev->todo flags set by
 * defer_kevent().  Currently only WORK_RX_MEMORY — refill the rx
 * queue once memory pressure has eased.
 */
static void eth_work (struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running (dev->net))
			rx_fill (dev, GFP_KERNEL);
	}

	if (dev->todo)
		DEBUG (dev, "work done, flags = 0x%lx\n", dev->todo);
}

/* IN endpoint completion: account the transmit, recycle the request
 * onto dev->tx_reqs, free the skb, and wake the tx queue (it may
 * have been stopped when the last request was taken).
 */
static void tx_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->stats.tx_errors++;
		VDEBUG (dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		// unlink
	case -ESHUTDOWN:		// disconnect etc
		break;
	case 0:
		dev->stats.tx_bytes += skb->len;
	}
	dev->stats.tx_packets++;	/* counted even on error */

	spin_lock(&dev->req_lock);
	list_add (&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any (skb);

	atomic_dec (&dev->tx_qlen);
	if (netif_carrier_ok (dev->net))
		netif_wake_queue (dev->net);
}

/* True when outgoing frames need no host-side packet filtering. */
static inline int eth_is_promisc (struct eth_dev *dev)
{
	/* no filters for the CDC subset; always promisc */
	if (subset_active (dev))
		return 1;
	return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

/* hard_start_xmit: apply CDC/RNDIS multicast-broadcast filters,
 * prepend the RNDIS data header when active, and queue the skb on
 * the IN endpoint.  Always returns 0; frames that cannot be sent
 * are dropped (and counted in tx_dropped when queueing failed).
 */
static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;

	/* apply outgoing CDC or RNDIS filters */
	if (!eth_is_promisc (dev)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(dev->cdc_filter & type)) {
				dev_kfree_skb_any (skb);
				return 0;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	/* take a request from the pool; stop the queue when the last
	 * one goes out (tx_complete() restarts it)
	 */
	spin_lock_irqsave(&dev->req_lock, flags);
	req = container_of (dev->tx_reqs.next, struct usb_request, list);
	list_del (&req->list);
	if (list_empty (&dev->tx_reqs))
		netif_stop_queue (net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for any RNDIS headers we need
	 */
	if (rndis_active(dev)) {
		struct sk_buff	*skb_rndis;

		skb_rndis = skb_realloc_headroom (skb,
				sizeof (struct rndis_packet_msg_type));
		if (!skb_rndis)
			goto drop;

		dev_kfree_skb_any (skb);
		skb = skb_rndis;
		rndis_add_hdr (skb);
		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	req->zero = 1;
	if (!dev->zlp && (length % dev->in_ep->maxpacket) == 0)
		length++;	/* one pad byte instead of a zlp */

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
		? ((atomic_read (&dev->tx_qlen) % TX_DELAY) != 0)
		: 0;

	retval = usb_ep_queue (dev->in_ep, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DEBUG (dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc (&dev->tx_qlen);
	}

	if (retval) {
drop:
		dev->stats.tx_dropped++;
		dev_kfree_skb_any (skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty (&dev->tx_reqs))
			netif_start_queue (net);
		list_add (&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

/* The interrupt endpoint is used in RNDIS to notify the host when messages
 * other than data packets are available ... notably the REMOTE_NDIS_*_CMPLT
 * messages, but also REMOTE_NDIS_INDICATE_STATUS_MSG and potentially even
 * REMOTE_NDIS_KEEPALIVE_MSG.
 *
 * The RNDIS control queue is processed by GET_ENCAPSULATED_RESPONSE, and
 * normally just one notification will be queued.
 */

/* helpers for status/interrupt endpoint requests; forward
 * declarations, defined elsewhere in this file.
 */
static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, gfp_t);
static void eth_req_free (struct usb_ep *ep, struct usb_request *req);

/* Completion for a RESPONSE_AVAILABLE notification.  Marks the
 * request idle again (context = NULL); any request allocated beyond
 * the preallocated dev->stat_req is freed here.
 */
static void
rndis_control_ack_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct eth_dev          *dev = ep->driver_data;

	if (req->status || req->actual != req->length)
		DEBUG (dev,
			"rndis control ack complete --> %d, %d/%d\n",
			req->status, req->actual, req->length);
	req->context = NULL;

	if (req != dev->stat_req)
		eth_req_free(ep, req);
}

/* Queue an 8-byte RESPONSE_AVAILABLE message on the status
 * (interrupt) endpoint so the host issues GET_ENCAPSULATED_RESPONSE.
 * Returns 0, -ENODEV after disconnect, or -ENOMEM when an extra
 * request could not be allocated.
 */
static int rndis_control_ack (struct net_device *net)
{
	struct eth_dev          *dev = netdev_priv(net);
	int                     length;
	struct usb_request      *resp = dev->stat_req;

	/* in case RNDIS calls this after disconnect */
	if (!dev->status) {
		DEBUG (dev, "status ENODEV\n");
		return -ENODEV;
	}

	/* in case queue length > 1 */
	if (resp->context) {
		resp = eth_req_alloc (dev->status_ep, 8, GFP_ATOMIC);
		if (!resp)
			return -ENOMEM;
	}

	/* Send RNDIS RESPONSE_AVAILABLE notification;
	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE should work too
	 */
	resp->length = 8;
	resp->complete = rndis_control_ack_complete;
	resp->context = dev;	/* non-NULL context == request in flight */

	*((__le32 *) resp->buf) = __constant_cpu_to_le32 (1);
	*((__le32 *) resp->buf + 1) = __constant_cpu_to_le32 (0);

	length = usb_ep_queue (dev->status_ep, resp, GFP_ATOMIC);
	if (length < 0) {
		/* fake a completion so the request is recycled or freed */
		resp->status = 0;
		rndis_control_ack_complete (dev->status_ep, resp);
	}

	return 0;
}

/* Bring the link up: prime the rx queue, open the tx queue, and
 * (for RNDIS) report the medium/bitrate and signal connect to the
 * host.
 */
static void eth_start (struct eth_dev *dev, gfp_t gfp_flags)
{
	DEBUG (dev, "%s\n", __FUNCTION__);

	/* fill the rx queue */
	rx_fill (dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set (&dev->tx_qlen, 0);
	netif_wake_queue (dev->net);
	if (rndis_active(dev)) {
		rndis_set_param_medium (dev->rndis_config,
					NDIS_MEDIUM_802_3,
					BITRATE(dev->gadget)/100);
		(void) rndis_signal_connect (dev->rndis_config);
		/* NOTE(review): this extraction chunk ends mid-function;
		 * the remainder of eth_start() is on the next page.
		 */

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search in code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?