
ibm_emac_core.c

	while (fep->tx_cnt &&
	       !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {
		if (fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_LAST) {
			/* Tell the system the transmit completed. */
			dma_unmap_single(&fep->ocpdev->dev,
					 fep->tx_desc[fep->ack_slot].data_ptr,
					 fep->tx_desc[fep->ack_slot].data_len,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);

			if (fep->tx_desc[fep->ack_slot].ctrl &
			    (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC))
				fep->stats.collisions++;
		}

		fep->tx_skb[fep->ack_slot] = (struct sk_buff *)NULL;
		if (++fep->ack_slot == NUM_TX_BUFF)
			fep->ack_slot = 0;

		fep->tx_cnt--;
	}

	if (fep->tx_cnt < NUM_TX_BUFF)
		netif_wake_queue(dev);

	PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt));

	spin_unlock_irqrestore(&fep->lock, flags);
}

/*
 * Fill/Re-fill the rx chain with valid ctrl/ptrs.
 * This function will fill from rx_slot up to the parm end.
 * So to completely fill the chain pre-set rx_slot to 0 and
 * pass in an end of 0.
 */
static void emac_rx_fill(struct net_device *dev, int end)
{
	int i;
	struct ocp_enet_private *fep = dev->priv;

	i = fep->rx_slot;
	do {
		/* We don't want the 16 bytes skb_reserve done by dev_alloc_skb():
		 * it breaks our cache line alignment. However, we still allocate
		 * +16 so that we end up allocating the exact same size as
		 * dev_alloc_skb() would do.
		 * Also, because of the skb_res, the max DMA size we give to EMAC
		 * is slightly wrong, causing it to potentially DMA 2 more bytes
		 * from a broken/oversized packet. These 16 bytes will take care
		 * that we don't walk on somebody else's toes with that.
		 */
		fep->rx_skb[i] =
		    alloc_skb(fep->rx_buffer_size + 16, GFP_ATOMIC);
		if (fep->rx_skb[i] == NULL) {
			/* Keep rx_slot here, the next time clean/fill is called
			 * we will try again before the MAL wraps back here.
			 * If the MAL tries to use this descriptor with
			 * the EMPTY bit off it will cause the
			 * rxde interrupt.  That is where we will
			 * try again to allocate an sk_buff.
			 */
			break;
		}

		if (skb_res)
			skb_reserve(fep->rx_skb[i], skb_res);

		/* We must NOT dma_map_single the cache line right after the
		 * buffer, so we must crop our sync size to account for the
		 * reserved space.
		 */
		fep->rx_desc[i].data_ptr =
		    (unsigned char *)dma_map_single(&fep->ocpdev->dev,
						    (void *)fep->rx_skb[i]->data,
						    fep->rx_buffer_size - skb_res,
						    DMA_FROM_DEVICE);

		/*
		 * Some 4xx implementations use the previously
		 * reserved bits in data_len to encode the MS
		 * 4-bits of a 36-bit physical address (ERPN).
		 * This must be initialized.
		 */
		fep->rx_desc[i].data_len = 0;
		fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	} while ((i = (i + 1) % NUM_RX_BUFF) != end);

	fep->rx_slot = i;
}
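
/* Set skb->ip_summed from the RX descriptor status: if the interface has no
 * TAH checksum engine, or the descriptor flags a TCP/UDP/IP checksum error,
 * the packet is left as CHECKSUM_NONE so the stack verifies it; otherwise it
 * is marked CHECKSUM_UNNECESSARY.
 */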
static void
emac_rx_csum(struct net_device *dev, unsigned short ctrl, struct sk_buff *skb)
{
	struct ocp_enet_private *fep = dev->priv;

	/* Exit if interface has no TAH engine */
	if (!fep->tah_dev) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* Check for TCP/UDP/IP csum error */
	if (ctrl & EMAC_CSUM_VER_ERROR) {
		/* Let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
/*		adapter->hw_csum_err++; */
	} else {
		/* Csum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
/*		adapter->hw_csum_good++; */
	}
}

static int emac_rx_clean(struct net_device *dev)
{
	int i, b, bnum = 0, buf[6];
	int error, frame_length;
	struct ocp_enet_private *fep = dev->priv;
	unsigned short ctrl;

	i = fep->rx_slot;

	PKT_DEBUG(("emac_rx_clean() entry, rx_slot: %d\n", fep->rx_slot));

	do {
		if (fep->rx_skb[i] == NULL)
			continue;	/* we have already handled the packet but have failed to alloc */

		/*
		 * Since rx_desc is in uncached mem we don't keep reading it
		 * directly; we pull out a local copy of ctrl and do the
		 * checks on the copy.
		 */
		ctrl = fep->rx_desc[i].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;	/* we don't have any more ready packets */

		if (EMAC_IS_BAD_RX_PACKET(ctrl)) {
			fep->stats.rx_errors++;
			fep->stats.rx_dropped++;

			if (ctrl & EMAC_RX_ST_OE)
				fep->stats.rx_fifo_errors++;
			if (ctrl & EMAC_RX_ST_AE)
				fep->stats.rx_frame_errors++;
			if (ctrl & EMAC_RX_ST_BFCS)
				fep->stats.rx_crc_errors++;
			if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
				    EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
				fep->stats.rx_length_errors++;
		} else {
			if ((ctrl & (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) ==
			    (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) {
				/* Single descriptor packet */
				emac_rx_csum(dev, ctrl, fep->rx_skb[i]);
				/* Send the skb up the chain. */
				frame_length = fep->rx_desc[i].data_len - 4;
				skb_put(fep->rx_skb[i], frame_length);
				fep->rx_skb[i]->dev = dev;
				fep->rx_skb[i]->protocol =
				    eth_type_trans(fep->rx_skb[i], dev);

				error = netif_rx(fep->rx_skb[i]);
				if ((error == NET_RX_DROP) ||
				    (error == NET_RX_BAD)) {
					fep->stats.rx_dropped++;
				} else {
					fep->stats.rx_packets++;
					fep->stats.rx_bytes += frame_length;
				}
				fep->rx_skb[i] = NULL;
			} else {
				/* Multiple descriptor packet */
				if (ctrl & MAL_RX_CTRL_FIRST) {
					if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].ctrl &
					    MAL_RX_CTRL_EMPTY)
						break;
					bnum = 0;
					buf[bnum] = i;
					++bnum;
					continue;
				}
				if (((ctrl & MAL_RX_CTRL_FIRST) !=
				     MAL_RX_CTRL_FIRST) &&
				    ((ctrl & MAL_RX_CTRL_LAST) !=
				     MAL_RX_CTRL_LAST)) {
					if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].ctrl &
					    MAL_RX_CTRL_EMPTY) {
						i = buf[0];
						break;
					}
					buf[bnum] = i;
					++bnum;
					continue;
				}
				if (ctrl & MAL_RX_CTRL_LAST) {
					buf[bnum] = i;
					++bnum;
					skb_put(fep->rx_skb[buf[0]],
						fep->rx_desc[buf[0]].data_len);
					for (b = 1; b < bnum; b++) {
						/*
						 * MAL is braindead, we need
						 * to copy the remainder
						 * of the packet from the
						 * latter descriptor buffers
						 * to the first skb. Then
						 * dispose of the source
						 * skbs.
						 *
						 * Once the stack is fixed
						 * to handle frags on most
						 * protocols we can generate
						 * a fragmented skb with
						 * no copies.
						 */
						memcpy(fep->rx_skb[buf[0]]->data +
						       fep->rx_skb[buf[0]]->len,
						       fep->rx_skb[buf[b]]->data,
						       fep->rx_desc[buf[b]].data_len);
						skb_put(fep->rx_skb[buf[0]],
							fep->rx_desc[buf[b]].data_len);
						dma_unmap_single(&fep->ocpdev->dev,
								 fep->rx_desc[buf[b]].data_ptr,
								 fep->rx_desc[buf[b]].data_len,
								 DMA_FROM_DEVICE);
						dev_kfree_skb(fep->rx_skb[buf[b]]);
					}

					emac_rx_csum(dev, ctrl,
						     fep->rx_skb[buf[0]]);

					fep->rx_skb[buf[0]]->dev = dev;
					fep->rx_skb[buf[0]]->protocol =
					    eth_type_trans(fep->rx_skb[buf[0]], dev);

					error = netif_rx(fep->rx_skb[buf[0]]);
					if ((error == NET_RX_DROP) ||
					    (error == NET_RX_BAD)) {
						fep->stats.rx_dropped++;
					} else {
						fep->stats.rx_packets++;
						fep->stats.rx_bytes +=
						    fep->rx_skb[buf[0]]->len;
					}

					for (b = 0; b < bnum; b++)
						fep->rx_skb[buf[b]] = NULL;
				}
			}
		}
	} while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot);

	PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot));

	return i;
}
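
/* MAL "RX end of buffer" callback: under the driver lock, harvest any
 * completed receive descriptors and, if the cleaner consumed any slots,
 * refill the ring from the current rx_slot up to the slot it stopped at.
 */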
static void emac_rxeob_dev(void *param, u32 chanmask)
{
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;
	int n;

	spin_lock_irqsave(&fep->lock, flags);
	if ((n = emac_rx_clean(dev)) != fep->rx_slot)
		emac_rx_fill(dev, n);
	spin_unlock_irqrestore(&fep->lock, flags);
}

/*
 * This interrupt should never occur, we don't program
 * the MAL for continuous mode.
 */
static void emac_txde_dev(void *param, u32 chanmask)
{
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;

	printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name);

	emac_mac_dump(dev);
	emac_mal_dump(dev);

	/* Reenable the transmit channel */
	mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
}

/*
 * This interrupt should be very rare at best.  This occurs when
 * the hardware has a problem with the receive descriptors.  The manual
 * states that it occurs when the hardware encounters a receive
 * descriptor whose empty bit is not set.  The recovery mechanism will be to
 * traverse through the descriptors, handle any that are marked to be
 * handled and reinitialize each along the way.  At that point the driver
 * will be restarted.
 */
static void emac_rxde_dev(void *param, u32 chanmask)
{
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;

	if (net_ratelimit()) {
		printk(KERN_WARNING "%s: receive descriptor error\n",
		       fep->ndev->name);

		emac_mac_dump(dev);
		emac_mal_dump(dev);
		emac_desc_dump(dev);
	}

	/* Disable RX channel */
	spin_lock_irqsave(&fep->lock, flags);
	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

	/* For now, charge the error against all emacs */
	fep->stats.rx_errors++;

	/* so do we have any good packets still? */
	emac_rx_clean(dev);

	/* When the interface is restarted it resets processing to the
	 * first descriptor in the table.
	 */
	fep->rx_slot = 0;
	emac_rx_fill(dev, 0);

	set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask);
	set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask);

	/* Reenable the receive channels */
	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
	spin_unlock_irqrestore(&fep->lock, flags);
}
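
/* EMAC error interrupt: read and acknowledge EM0ISR, bump the corresponding
 * error counters, and re-enable the TX channels after a hard transmit error.
 */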
static irqreturn_t
emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;
	unsigned long tmp_em0isr;

	/* EMAC interrupt */
	tmp_em0isr = in_be32(&emacp->em0isr);
	if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
		/* This error is a hard transmit error - could retransmit */
		fep->stats.tx_errors++;

		/* Reenable the transmit channel */
		mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
	} else {
		fep->stats.rx_errors++;
	}

	if (tmp_em0isr & EMAC_ISR_RP)
		fep->stats.rx_length_errors++;
	if (tmp_em0isr & EMAC_ISR_ALE)
		fep->stats.rx_frame_errors++;
	if (tmp_em0isr & EMAC_ISR_BFCS)
		fep->stats.rx_crc_errors++;
	if (tmp_em0isr & EMAC_ISR_PTLE)
		fep->stats.rx_length_errors++;
	if (tmp_em0isr & EMAC_ISR_ORE)
		fep->stats.rx_length_errors++;
	if (tmp_em0isr & EMAC_ISR_TE0)
		fep->stats.tx_aborted_errors++;

	emac_err_dump(dev, tmp_em0isr);

	out_be32(&emacp->em0isr, tmp_em0isr);

	return IRQ_HANDLED;
}
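
/* Queue a packet for transmission.  The linear part of the skb and each page
 * fragment are split into DESC_BUF_SIZE-sized descriptors; every descriptor
 * except the first is marked MAL_TX_CTRL_READY as it is built, and
 * MAL_TX_CTRL_LAST tags the final descriptor of the packet.
 */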
static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short ctrl;
	unsigned long flags;
	struct ocp_enet_private *fep = dev->priv;
	emac_t *emacp = fep->emacp;
	int len = skb->len;
	unsigned int offset = 0, size, f, tx_slot_first;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&fep->lock, flags);

	len -= skb->data_len;

	if ((fep->tx_cnt + nr_frags + len / DESC_BUF_SIZE + 1) > NUM_TX_BUFF) {
		PKT_DEBUG(("emac_start_xmit() stopping queue\n"));
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->lock, flags);
		return -EBUSY;
	}

	tx_slot_first = fep->tx_slot;

	while (len) {
		size = min(len, DESC_BUF_SIZE);

		fep->tx_desc[fep->tx_slot].data_len = (short)size;
		fep->tx_desc[fep->tx_slot].data_ptr =
		    (unsigned char *)dma_map_single(&fep->ocpdev->dev,
						    (void *)((unsigned int)skb->data + offset),
						    size, DMA_TO_DEVICE);

		ctrl = EMAC_TX_CTRL_DFLT;
		if (fep->tx_slot != tx_slot_first)
			ctrl |= MAL_TX_CTRL_READY;
		if ((NUM_TX_BUFF - 1) == fep->tx_slot)
			ctrl |= MAL_TX_CTRL_WRAP;
		if (!nr_frags && (len == size)) {
			ctrl |= MAL_TX_CTRL_LAST;
			fep->tx_skb[fep->tx_slot] = skb;
		}
		if (skb->ip_summed == CHECKSUM_HW)
			ctrl |= EMAC_TX_CTRL_TAH_CSUM;

		fep->tx_desc[fep->tx_slot].ctrl = ctrl;

		len -= size;
		offset += size;

		/* Bump tx count */
		if (++fep->tx_cnt == NUM_TX_BUFF)
			netif_stop_queue(dev);

		/* Next descriptor */
		if (++fep->tx_slot == NUM_TX_BUFF)
			fep->tx_slot = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while (len) {
			size = min(len, DESC_BUF_SIZE);

			dma_map_page(&fep->ocpdev->dev,
				     frag->page,
				     frag->page_offset + offset,
				     size, DMA_TO_DEVICE);

			ctrl = EMAC_TX_CTRL_DFLT | MAL_TX_CTRL_READY;
			if ((NUM_TX_BUFF - 1) == fep->tx_slot)
				ctrl |= MAL_TX_CTRL_WRAP;
			if ((f == (nr_frags - 1)) && (len == size)) {
				ctrl |= MAL_TX_CTRL_LAST;
				fep->tx_skb[fep->tx_slot] = skb;
			}
			if (skb->ip_summed == CHECKSUM_HW)
				ctrl |= EMAC_TX_CTRL_TAH_CSUM;

			fep->tx_desc[fep->tx_slot].data_len = (short)size;
			fep->tx_desc[fep->tx_slot].data_ptr =
			    (char *)((page_to_pfn(frag->page) << PAGE_SHIFT) +
				     frag->page_offset + offset);
			fep->tx_desc[fep->tx_slot].ctrl = ctrl;

			len -= size;
			offset += size;

			/* Bump tx count */
			if (++fep->tx_cnt == NUM_TX_BUFF)
				netif_stop_queue(dev);

			/* Next descriptor */
			if (++fep->tx_slot == NUM_TX_BUFF)
				fep->tx_slot = 0;
		}
	}

	/*
	 * Deferred set READY on first descriptor of packet to
	 * avoid TX MAL race.
	 */
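
The trailing comment refers to the driver's TX publication order: while the descriptor chain is built, READY is set on every descriptor except the first, and only after the whole packet is laid out is READY flipped on the first descriptor, so the MAL cannot begin fetching a half-built chain. A minimal sketch of that final step, assuming a write barrier and an EMAC_TMR0_XMIT kick of em0tmr0 (neither appears in the listing above):

	/* Illustrative sketch, not the continuation of the file: make all
	 * earlier descriptor writes visible, then hand the first descriptor
	 * of the packet to the MAL.
	 */
	wmb();						/* assumed ordering barrier */
	fep->tx_desc[tx_slot_first].ctrl |= MAL_TX_CTRL_READY;
	out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);	/* assumed transmit demand */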
