
📄 ibm_emac_core.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 4
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}

static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}
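/*
 * A minimal usage sketch (editorial addition, not part of the original
 * driver): emac_mdio_read()/emac_mdio_write() above use the
 * (struct net_device *, int phy_id, int reg) signature expected by the
 * generic MII helpers, and MII_PHYSID1/MII_PHYSID2 come from <linux/mii.h>,
 * so a PHY's identifier registers could be read like this.
 * emac_sketch_read_phy_id() is a hypothetical name; error returns from
 * emac_mdio_read() are ignored for brevity.
 */
static inline u32 emac_sketch_read_phy_id(struct net_device *ndev, int phy_addr)
{
	u32 id_hi = emac_mdio_read(ndev, phy_addr, MII_PHYSID1); /* bits 31..16 */
	u32 id_lo = emac_mdio_read(ndev, phy_addr, MII_PHYSID2); /* bits 15..0 */

	return (id_hi << 16) | id_lo;
}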
/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}

static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;

	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;

	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}
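/*
 * Editorial sketch (hypothetical helper, not in the original driver): the
 * ring loops above always build a slot's control word the same way, with
 * MAL_RX_CTRL_WRAP set only on the last BD so the MAL returns to slot 0
 * after consuming it. emac_sketch_rx_ctrl() is an assumed name.
 */
static inline u16 emac_sketch_rx_ctrl(int slot)
{
	return MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}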
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    int flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}

static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
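/*
 * Editorial note on the "+ 2" in emac_alloc_rx_skb() above (hedged
 * interpretation): reserving two extra bytes in front of the frame shifts
 * the 14-byte Ethernet header so the IP header behind it lands on a 32-bit
 * boundary, while mapping from skb->data - 2 and re-adding 2 keeps the BD's
 * data_ptr pointing at the frame itself.
 */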
/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}

/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}

static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_HW) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
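/*
 * Editorial sketch (hypothetical helper, not in the original driver):
 * emac_xmit_finish() above stops the queue once tx_cnt reaches NUM_TX_BUFF,
 * i.e. when every BD in the TX ring is in flight, so the free-slot count
 * can be expressed as:
 */
static inline int emac_sketch_tx_room(struct ocp_enet_private *dev)
{
	return NUM_TX_BUFF - dev->tx_cnt;	/* 0 means the queue must stop */
}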
/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
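	/*
	 * Editorial note (hedged): per emac_xmit_split() above, a contiguous
	 * region of len bytes occupies
	 * (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE descriptors, one per
	 * chunk of at most MAL_MAX_TX_SIZE bytes. The "fast" path presumably
	 * hands small, unfragmented skbs straight to emac_start_xmit(); the
	 * remainder of this function is on the next page of the listing.
	 */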
