mv643xx_eth.c

来自「linux 内核源代码」· C语言 代码 · 共 2,216 行 · 第 1/5 页

C
2,216
字号
			dev->stats.tx_errors++;
		}

		spin_unlock_irqrestore(&mp->lock, flags);

		/*
		 * First descriptor of a frame was mapped with
		 * dma_map_single(); fragment descriptors with dma_map_page().
		 * NOTE(review): NULL device argument is the old DMA API
		 * convention used by this driver — confirm against the
		 * mapping side.
		 */
		if (cmd_sts & ETH_TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		/* Only the descriptor that carried the skb has one attached */
		if (skb)
			dev_kfree_skb_irq(skb);

		released = 1;
	}

	return released;
}

/*
 * Reclaim finished TX descriptors and, if enough ring space was freed
 * for a maximally-fragmented skb, restart the transmit queue.
 */
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);

	if (mv643xx_eth_free_tx_descs(dev, 0) &&
	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}

/* Force-release every TX descriptor, completed or not (used on teardown). */
static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
{
	mv643xx_eth_free_tx_descs(dev, 1);
}

/*
 * mv643xx_eth_receive
 *
 * This function forwards packets that are received from the port's
 * queues toward the kernel core or FastRoutes them to another interface.
 *
 * Input :	dev - a pointer to the required interface
 *		max - maximum number to receive (0 means unlimited)
 *
 * Output :	number of served packets
 */
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;
	struct pkt_info pkt_info;

	while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
		/* Hand the RX buffer back to the CPU before touching it */
		dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
							DMA_FROM_DEVICE);
		mp->rx_desc_count--;
		received_packets++;

		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += pkt_info.byte_cnt;
		skb = pkt_info.return_info;

		/*
		 * In case we received a packet without first / last bits on,
		 * OR the error summary bit is on, the packet needs to be
		 * dropped.
		 */
		if (((pkt_info.cmd_sts
				& (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
					(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
				|| (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
							ETH_RX_LAST_DESC)) !=
				(ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
						"%s: Received packet spread "
						"on multiple descriptors\n",
						dev->name);
			}
			if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, pkt_info.byte_cnt - 4);

			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				/*
				 * NOTE(review): with CHECKSUM_UNNECESSARY the
				 * stack ignores skb->csum — confirm whether
				 * this assignment is still needed.
				 */
				skb->csum = htons(
					(pkt_info.cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef MV643XX_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
		dev->last_rx = jiffies;
	}
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

	return received_packets;
}

/* Set the mv643xx port configuration register for the speed/duplex mode. */
static void mv643xx_eth_update_pscr(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int port_num = mp->port_num;
	u32 o_pscr, n_pscr;
	unsigned int queues;

	o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
	n_pscr = o_pscr;

	/* clear speed, duplex and rx buffer size fields */
	n_pscr &= ~(SET_MII_SPEED_TO_100  |
		   SET_GMII_SPEED_TO_1000 |
		   SET_FULL_DUPLEX_MODE   |
		   MAX_RX_PACKET_MASK);

	if (ecmd->duplex == DUPLEX_FULL)
		n_pscr |= SET_FULL_DUPLEX_MODE;

	if (ecmd->speed == SPEED_1000)
		n_pscr |= SET_GMII_SPEED_TO_1000 |
			  MAX_RX_PACKET_9700BYTE;
	else {
		if (ecmd->speed == SPEED_100)
			n_pscr |= SET_MII_SPEED_TO_100;
		n_pscr |= MAX_RX_PACKET_1522BYTE;
	}

	if (n_pscr != o_pscr) {
		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
		else {
			/*
			 * Port is live: stop TX, disable the port, then
			 * write the new config.  NOTE(review): n_pscr is
			 * written twice back-to-back — presumably required
			 * by the hardware; confirm against the MV643xx
			 * datasheet before "simplifying".
			 */
			queues = mv643xx_eth_port_disable_tx(port_num);

			o_pscr &= ~SERIAL_PORT_ENABLE;
			mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
			if (queues)
				mv643xx_eth_port_enable_tx(port_num, queues);
		}
	}
}

/*
 * mv643xx_eth_int_handler
 *
 * Main interrupt handler for the gigabit ethernet ports
 *
 * Input :	irq	- irq number (not used)
 *		dev_id	- a pointer to the required interface's data structure
 *		regs	- not used
 * Output :	N/A
 */
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_private *mp = netdev_priv(dev);
	u32 eth_int_cause, eth_int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
						ETH_INT_UNMASK_ALL;
	if (eth_int_cause & ETH_INT_CAUSE_EXT) {
		eth_int_cause_ext = mv_read(
			INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
						ETH_INT_UNMASK_ALL_EXT;
		/* Ack only the extended-cause bits we are about to handle */
		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
							~eth_int_cause_ext);
	}

	/* PHY status changed */
	if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
		struct ethtool_cmd cmd;

		if (mii_link_ok(&mp->mii)) {
			/* Link came up: resync PSCR with PHY, restart TX */
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(dev, &cmd);
			mv643xx_eth_port_enable_tx(port_num,
						   ETH_TX_QUEUES_ENABLED);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				/* Only wake if a full worst-case skb fits */
				if (mp->tx_ring_size - mp->tx_desc_count >=
							MAX_DESCS_PER_SKB)
					netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_NAPI
	if (eth_int_cause & ETH_INT_CAUSE_RX) {
		/* schedule the NAPI poll routine to maintain port */
		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);

		/* wait for previous write to complete */
		mv_read(INTERRUPT_MASK_REG(port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (eth_int_cause & ETH_INT_CAUSE_RX)
		mv643xx_eth_receive_queue(dev, INT_MAX);
#endif
	if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
		mv643xx_eth_free_completed_tx_descs(dev);

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using the gigE interrupt coalescing mechanism.
	 */
	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}

#ifdef MV643XX_COAL

/*
 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
 *
 * DESCRIPTION:
 *	This routine sets the RX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter, that counts in 64 t_clk
 *	chunks ; that when timeout event occurs a maskable interrupt
 *	occurs.
 *	The parameter is calculated using the tClk of the MV-643xx chip
 *	, and the required delay of the interrupt in usec.
 *
 * INPUT:
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in usec
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 *
 */
static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
{
	unsigned int coal = ((t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism */
	mv_write(SDMA_CONFIG_REG(eth_port_num),
		((coal & 0x3fff) << 8) |
		(mv_read(SDMA_CONFIG_REG(eth_port_num))
			& 0xffc000ff));

	return coal;
}
#endif

/*
 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
 *
 * DESCRIPTION:
 *	This routine sets the TX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter, that counts in 64 t_clk
 *	chunks ; that when timeout event occurs a maskable interrupt
 *	occurs.
 *	The parameter is calculated using the t_cLK frequency of the
 *	MV-643xx chip and the required delay in the interrupt in uSec
 *
 * INPUT:
 *	unsigned int eth_port_num	Ethernet port number
 *	unsigned int t_clk		t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay		Delay in uSeconds
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 *
 */
static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
					unsigned int t_clk, unsigned int delay)
{
	unsigned int coal;
	coal = ((t_clk / 1000000) * delay) / 64;

	/* Set TX Coalescing mechanism */
	mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4);
	return coal;
}

/*
 * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
 *
 * DESCRIPTION:
 *	This function prepares a Rx chained list of descriptors and packet
 *	buffers in a form of a ring. The routine must be called after port
 *	initialization routine and before port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * OUTPUT:
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Rx descriptors and buffers.
 *
 * RETURN:
 *	None.
 */
static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
{
	volatile struct eth_rx_desc *p_rx_desc;
	int rx_desc_num = mp->rx_ring_size;
	int i;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	/* (the modulo makes the last descriptor point back to the first) */
	p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
			((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
	}

	/* Save Rx desc pointer to driver struct. */
	mp->rx_curr_desc_q = 0;
	mp->rx_used_desc_q = 0;

	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
}

/*
 * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory.
 *
 * DESCRIPTION:
 *	This function prepares a Tx chained list of descriptors and packet
 *	buffers in a form of a ring. The routine must be called after port
 *	initialization routine and before port start routine.
 *	The Ethernet SDMA engine uses CPU bus addresses to access the various
 *	devices in the system (i.e. DRAM). This function uses the ethernet
 *	struct 'virtual to physical' routine (set by the user) to set the ring
 *	with physical addresses.
 *
 * INPUT:
 *	struct mv643xx_private *mp	Ethernet Port Control struct.
 *
 * OUTPUT:
 *	The routine updates the Ethernet port control struct with information
 *	regarding the Tx descriptors and buffers.
 *
 * RETURN:
 *	None.
 */
static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
{
	int tx_desc_num = mp->tx_ring_size;
	struct eth_tx_desc *p_tx_desc;
	int i;

	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	/* (the modulo makes the last descriptor point back to the first) */
	p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
			((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
	}

	mp->tx_curr_desc_q = 0;
	mp->tx_used_desc_q = 0;

	mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
}

/* ethtool set-settings hook: forward to the generic MII layer under lock. */
static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}

/* ethtool get-settings hook: query the MII layer, then mask out modes
 * the MAC cannot do. */
static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

/*
 * mv643xx_eth_open
 *
 * This function is called when opening the network device. The function
 * should initialize all the hardware, initialize cyclic Rx/Tx
 * descriptors chain and buffers and allocate an IRQ to the network
 * device.
 *
 * Input :	a pointer to the network device structure
 *
 * Output :	zero on success, nonzero if it fails.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	unsigned int size;
	int err;

	/* Clear any pending ethernet port interrupts */
	mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
	mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
	/* wait for previous write to complete */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?