
📄 pasemi_mac.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
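
This page (1 of 3) of drivers/net/pasemi_mac.c covers the RX/TX error-reporting helpers, the receive and transmit descriptor-ring cleanup paths, the RX/TX interrupt handlers, the phylib link-adjust callback and PHY setup, and the beginning of the device open path for the PA Semi on-chip Ethernet MAC. The listing opens mid-file, so the first `}` closes a function from the previous page, and the last line is cut off at the page boundary.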
}

static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx)
{
	unsigned int rcmdsta, ccmdsta;

	if (!netif_msg_rx_err(mac))
		return;

	rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
		macrx, *mac->rx_status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
		rcmdsta, ccmdsta);
}

static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx)
{
	unsigned int cmdsta;

	if (!netif_msg_tx_err(mac))
		return;

	cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
		"tx status 0x%016lx\n", mactx, *mac->tx_status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}

static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
{
	unsigned int n;
	int count;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int len;
	u64 macrx;
	dma_addr_t dma;
	int buf_index;
	u64 eval;

	spin_lock(&mac->rx->lock);

	n = mac->rx->next_to_clean;

	prefetch(&RX_RING(mac, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_RING(mac, n);

		if ((macrx & XCT_MACRX_E) ||
		    (*mac->rx_status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))
			break;

		info = NULL;

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_RING(mac, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;
		buf_index = eval-1;

		dma = (RX_RING(mac, n+2) & XCT_PTR_ADDR_M);
		info = &RX_RING_INFO(mac, buf_index);

		skb = info->skb;

		prefetch(skb);
		prefetch(&skb->data_len);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		pci_unmap_single(mac->dma_pdev, dma, len, PCI_DMA_FROMDEVICE);

		if (macrx & XCT_MACRX_CRC) {
			/* CRC error flagged */
			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;
			/* No need to free skb, it'll be reused */
			goto next;
		}

		if (len < 256) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(mac->netdev,
						   len + LOCAL_SKB_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, LOCAL_SKB_ALIGN);
				memcpy(new_skb->data, skb->data, len);
				/* save the skb in buffer_info as good */
				skb = new_skb;
			}
			/* else just continue with the old one */
		} else
			info->skb = NULL;

		info->dma = 0;

		/* Don't include CRC */
		skb_put(skb, len-4);

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
					   XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		mac->netdev->stats.rx_bytes += len;
		mac->netdev->stats.rx_packets++;

		skb->protocol = eth_type_trans(skb, mac->netdev);
		netif_receive_skb(skb);

next:
		RX_RING(mac, n) = 0;
		RX_RING(mac, n+1) = 0;

		/* Need to zero it out since hardware doesn't, since the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(mac, buf_index) = 0;

		n += 4;
	}

	if (n > RX_RING_SIZE) {
		/* Errata 5971 workaround: L2 target of headers */
		write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);
	}

	mac->rx->next_to_clean = n;

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	spin_unlock(&mac->rx->lock);

	return count;
}
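
/* Illustrative sizing note (added for this listing, not from the original
 * source): with 4 KiB pages, MAX_SKB_FRAGS is 18, so TX_CLEAN_BATCHSIZE
 * below works out to 128/18 = 7 descriptors per batch, and the on-stack
 * dmas[][] array in pasemi_mac_clean_tx() is 7 * 19 * sizeof(dma_addr_t)
 * = 1064 bytes on a 64-bit kernel -- comfortably within the kernel stack.
 */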
/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)

static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
{
	int i, j;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	unsigned long flags;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];

	total_count = 0;
	batch_limit = TX_CLEAN_BATCHSIZE;

restart:
	spin_lock_irqsave(&mac->tx->lock, flags);

	start = mac->tx->next_to_clean;
	ring_limit = mac->tx->next_to_fill;

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	buf_count = 0;
	descr_count = 0;

	for (i = start;
	     descr_count < batch_limit && i < ring_limit;
	     i += buf_count) {
		u64 mactx = TX_RING(mac, i);
		struct sk_buff *skb;

		if ((mactx & XCT_MACTX_E) ||
		    (*mac->tx_status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */
			break;

		skb = TX_RING_INFO(mac, i+1).skb;
		skbs[descr_count] = skb;

		buf_count = 2 + skb_shinfo(skb)->nr_frags;
		for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++)
			dmas[descr_count][j] = TX_RING_INFO(mac, i+1+j).dma;

		TX_RING(mac, i) = 0;
		TX_RING(mac, i+1) = 0;

		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */
		if (buf_count & 1)
			buf_count++;

		descr_count++;
	}
	mac->tx->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&mac->tx->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}

static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*mac->rx_status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	/* Don't reset packet count so it won't fire again but clear
	 * all others.
	 */

	reg = 0;
	if (*mac->rx_status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*mac->rx_status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
	if (*mac->rx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	netif_rx_schedule(dev, &mac->napi);

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);

	return IRQ_HANDLED;
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg, pcnt;

	if (!(*mac->tx_status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_mac_clean_tx(mac);

	pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	if (*mac->tx_status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*mac->tx_status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);

	return IRQ_HANDLED;
}
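
/* Descriptive note added for this listing: pasemi_adjust_link() below is
 * the phylib adjust_link callback that pasemi_mac_phy_init() registers
 * via phy_connect(); phylib invokes it on link, speed or duplex changes,
 * and it mirrors the PHY's reported state into the MAC's PCFG register.
 */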
static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		mac->link = 0;

		return;
	} else
		netif_carrier_on(dev);

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk(KERN_WARNING "%s: Unsupported speed %d\n",
		       dev->name, mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}

static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const phandle *ph;
	const unsigned int *prop;
	struct resource r;
	int ret;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	if (!ph)
		return -ENODEV;
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);
	if (ret)
		goto err;

	phy_id = *prop;
	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
			     PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;

err:
	of_node_put(phy_dn);
	return -ENODEV;
}
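
/* Descriptive note added for this listing, summarizing the bring-up order
 * in pasemi_mac_open() below: enable the common DMA RX/TX sections,
 * program TX pacing and interrupt-coalescing thresholds, allocate the
 * RX/TX rings, enable the RX interface and the RX/TX channels, pre-fill
 * the RX ring, enable the MAC itself, attach the PHY, and finally map
 * and request the per-channel IRQs.
 */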
static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int base_irq;
	unsigned int flags;
	int ret;

	/* enable rx section */
	write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
			   PAS_IOB_DMA_RXCH_CFG_CNTTH(0));

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
			   PAS_IOB_DMA_TXCH_CFG_CNTTH(128));

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	/* 0xffffff is max value, about 16ms */
	write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG,
			   PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	ret = pasemi_mac_setup_tx_resources(dev);
	if (ret)
		goto out_tx_resources;

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
			   PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
			   PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

	/* enable rx if */
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			   PAS_DMA_RXINT_RCMDSTA_EN |
			   PAS_DMA_RXINT_RCMDSTA_DROPS_M |
			   PAS_DMA_RXINT_RCMDSTA_BP |
			   PAS_DMA_RXINT_RCMDSTA_OO |
			   PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
			   PAS_DMA_RXCHAN_CCMDSTA_EN |
			   PAS_DMA_RXCHAN_CCMDSTA_DU |
			   PAS_DMA_RXCHAN_CCMDSTA_OD |
			   PAS_DMA_RXCHAN_CCMDSTA_FD |
			   PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
			   PAS_DMA_TXCHAN_TCMDSTA_EN |
			   PAS_DMA_TXCHAN_TCMDSTA_SZ |
			   PAS_DMA_TXCHAN_TCMDSTA_DB |
			   PAS_DMA_TXCHAN_TCMDSTA_DE |
			   PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	/* Some configs don't have PHYs (XAUI etc), so don't complain about
	 * failed init due to -ENODEV.
	 */
	if (ret && ret != -ENODEV)
		dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	/* Interrupts are a bit different for our DMA controller: while
	 * it has a regular PCI device header, the interrupt there is
	 * really the base of the range it's using. Each tx and rx
	 * channel has its own interrupt source.
	 */
	base_irq = virq_to_hw(mac->dma_pdev->irq);

	mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);
	mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_txch);

	ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
