⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ibm_emac_core.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 4 页
字号:
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))		return emac_start_xmit(skb, ndev);	len -= skb->data_len;	/* Note, this is only an *estimation*, we can still run out of empty	 * slots because of the additional fragmentation into	 * MAL_MAX_TX_SIZE-sized chunks	 */	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))		goto stop_queue;	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |	    emac_tx_csum(dev, skb);	slot = dev->tx_slot;	/* skb data */	dev->tx_skb[slot] = NULL;	chunk = min(len, MAL_MAX_TX_SIZE);	dev->tx_desc[slot].data_ptr = pd =	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);	dev->tx_desc[slot].data_len = (u16) chunk;	len -= chunk;	if (unlikely(len))		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,				       ctrl);	/* skb fragments */	for (i = 0; i < nr_frags; ++i) {		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];		len = frag->size;		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))			goto undo_frame;		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,				  DMA_TO_DEVICE);		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,				       ctrl);	}	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,	     dev->tx_slot, slot);	/* Attach skb to the last slot so we don't release it too early */	dev->tx_skb[slot] = skb;	/* Send the packet out */	if (dev->tx_slot == NUM_TX_BUFF - 1)		ctrl |= MAL_TX_CTRL_WRAP;	barrier();	dev->tx_desc[dev->tx_slot].ctrl = ctrl;	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;	return emac_xmit_finish(dev, skb->len);      undo_frame:	/* Well, too bad. Our previous estimation was overly optimistic. 	 * Undo everything.	 
 */
	while (slot != dev->tx_slot) {
		/* Release every descriptor we already claimed for this
		 * frame and give back its ring slot.
		 */
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	/* Ring is full: stop the stack from feeding us more frames.
	 * Returning 1 tells the core to requeue this skb.
	 */
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
/* Without TAH there is no scatter-gather TX; alias to the simple xmit */
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */

/* Decode a bad TX descriptor status word into per-cause error counters.
 * BHs disabled.
 */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;

	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

/* Reclaim TX descriptors the hardware has finished with: free the
 * attached skbs, record descriptor errors, and wake the queue once
 * enough slots are available again.  MAL poll callback.
 */
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;

	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			/* READY clear => hardware is done with this BD */
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;
			/* Only the last slot of a frame carries the skb
			 * (see xmit); intermediate chunk slots are NULL.
			 */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}

/* Hand an RX slot (and optionally its just-used buffer) back to the
 * hardware by re-arming its descriptor with MAL_RX_CTRL_EMPTY.
 */
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL,
	     dev->def->index, slot, len);

	/* Re-map the used portion for device access; the extra 2 bytes
	 * are the padding reserved before skb->data (see the copy-break
	 * path in emac_poll_rx) -- presumably for IP header alignment.
	 */
	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* data_len must be visible before ownership passes to hardware */
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

/* Decode a bad RX descriptor status word into per-cause error counters */
static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;

	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

/* Mark the skb as checksum-verified when the TAH offload engine reported
 * no checksum error (ctrl == 0) -- only compiled in with TAH support.
 */
static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

/* Append one RX slot's data to the in-progress scatter-gather skb
 * (dev->rx_sg_skb).  Returns 0 on success; -1 if there is no sg skb or
 * the frame grew past rx_skb_size (frame dropped, counter bumped).
 * The slot is recycled back to the hardware in every case.
 */
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			/* Frame exceeds our buffer: drop the partial skb */
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(dev->rx_sg_skb->tail,
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}

/* RX poll: harvest up to 'budget' received buffer descriptors, pushing
 * completed frames into the stack.  Handles both single-descriptor
 * frames and multi-descriptor (scatter-gather) frames.  BHs disabled.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff
		    *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* EMPTY set => hardware still owns this descriptor */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after we have seen EMPTY cleared */
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		/* TAH checksum "errors" are not real errors; anything
		 * else means the frame is bad -- drop and recycle.
		 */
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			/* Copy-break: for small frames, memcpy into a
			 * fresh skb and recycle the original buffer.
			 * +2 covers the padding before skb->data.
			 */
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2,
				      GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			/* Large frame: keep this skb, arm the slot with a
			 * replacement; on allocation failure drop instead.
			 */
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		/* NOTE: rx_packets is bumped even on the dropped-error
		 * path above (len forced to 0 there).
		 */
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		/* Multi-descriptor frame handling */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			/* First chunk: this skb becomes the sg head */
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {
			/* Final chunk appended: validate and deliver */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	/* If the RX channel stalled (rxde) and we still have budget left,
	 * either keep harvesting (more BDs became ready) or restart the
	 * channel from slot 0, dropping any half-assembled sg frame.
	 */
	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}

/* Is at least one received BD waiting?  BHs disabled. */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* Scatter-gather variant of the above: only report work when a COMPLETE
 * frame (a BD with LAST set) is available.  BHs disabled.
 */
static int emac_peek_rx_sg(void *param)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}

/* RX descriptor error callback from MAL: count the stall and disable RX
 * asynchronously; emac_poll_rx restarts the channel.  Hard IRQ.
 */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}

/* EMAC interrupt handler: read and acknowledge the interrupt status
 * register, then bump a counter for each error cause flagged.  Hard IRQ.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	/* Write-one-to-clear ack of everything we just read */
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr
	    & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}

/* Fold the driver's detailed 64-bit counters down into the classic
 * net_device_stats the stack expects.  IRQs are disabled around the
 * snapshot so the counters are read consistently.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned
	    long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}

/* Device teardown: unregister from the stack, then release the PHY
 * bridge helpers (TAH/RGMII/ZMII), debug hooks, the MAL commac, the
 * register mapping, and finally the net_device itself.
 */
static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, 0);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, 0);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap((void *)dev->emacp);
	kfree(dev->ndev);
}

/* MAL callback tables: plain RX vs scatter-gather RX (peek_rx differs) */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};

/* Ethtool support */

/* Report current link settings; PHY fields are sampled with BHs
 * disabled so they are consistent with the PHY polling path.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* A negative PHY address marks the internal transceiver */
	cmd->transceiver = dev->phy.address >= 0 ?
	    XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}

/* Apply new link settings from ethtool after validating them against
 * the PHY's advertised feature mask.
 */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		/* Forced mode: the requested speed/duplex pair must be a
		 * capability the PHY actually supports.
		 */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -