/*
 * mv643xx_eth.c
 *
 * Source: "Linux Kernel 2.6.9 for OMAP1710" — C code, 2,225 lines total,
 * page 1 of 5.  (Web code-viewer header translated and converted to a
 * comment so the paste remains valid C.)
 */
			if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
				/*
				 * Hardware already validated the L4 checksum:
				 * tell the stack not to verify it again.
				 * cmd_sts bits [18:3] hold the checksum value
				 * reported by the port (mask 0x0007fff8).
				 */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons((pkt_info.cmd_sts
							& 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef MV64340_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
	}
	return received_packets;
}

/*
 * mv64340_eth_int_handler
 *
 * Main interrupt handler for the gigabit ethernet ports
 *
 * Input : irq - irq number (not used)
 *         dev_id - a pointer to the required interface's data structure
 *         regs   - not used
 * Output : N/A
 */
static irqreturn_t mv64340_eth_int_handler(int irq, void *dev_id,
	struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mv64340_private *mp = netdev_priv(dev);
	u32 eth_int_cause, eth_int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	eth_int_cause = MV_READ(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num)) &
			INT_CAUSE_UNMASK_ALL;

	/* BIT1 signals that the extended cause register has pending bits */
	if (eth_int_cause & BIT1)
		eth_int_cause_ext =
		MV_READ(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
		INT_CAUSE_UNMASK_ALL_EXT;

#ifdef MV64340_NAPI
	if (!(eth_int_cause & 0x0007fffd)) {	/* Dont ack the Rx interrupt */
#endif
		/*
		 * Clear specific ethernet port interrupt registers by
		 * acknowledging relevant bits (write the complement of the
		 * cause bits we observed).
		 */
		MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num),
			 ~eth_int_cause);
		if (eth_int_cause_ext != 0x0)
			MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
				 ~eth_int_cause_ext);

		/*
		 * UDP change : We may need this.
		 * TX-done bits in the low half of the extended cause: reap
		 * finished descriptors and restart the queue if ring space
		 * has been freed.
		 */
		if ((eth_int_cause_ext & 0x0000ffff) &&
		    (mv64340_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
		    (MV64340_TX_QUEUE_SIZE > mp->tx_ring_skbs + 1))
			netif_wake_queue(dev);
#ifdef MV64340_NAPI
	} else {
		if (netif_rx_schedule_prep(dev)) {
			/*
			 * Mask all the interrupts; they are re-enabled by the
			 * NAPI poll routine once the ring has been drained.
			 */
			MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num), 0);
			MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
			__netif_rx_schedule(dev);
		}
#else
		{
		/* BIT2/BIT11 are Rx-ready indications — drain queue 0 now */
		if (eth_int_cause & (BIT2 | BIT11))
			mv64340_eth_receive_queue(dev, 0);

		/*
		 * After forwarding received packets to the upper layer, add a
		 * task in an interrupts-enabled context that refills the RX
		 * ring with skb's.
		 */
#if MV64340_RX_QUEUE_FILL_ON_TASK
		/* Unmask all interrupts on ethernet port */
		MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
			 INT_CAUSE_MASK_ALL);
		queue_task(&mp->rx_task, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
#else
		/* Refill synchronously from interrupt context */
		mp->rx_task.func(dev);
#endif
#endif
	}
	/* PHY status changed */
	if (eth_int_cause_ext & (BIT16 | BIT20)) {
		unsigned int phy_reg_data;

		/*
		 * Check Link status on ethernet port.
		 * NOTE(review): reads PHY register 1 (status); bit 0x20 is
		 * assumed to be "link up" — confirm against the PHY datasheet.
		 */
		eth_port_read_smi_reg(port_num, 1, &phy_reg_data);
		if (!(phy_reg_data & 0x20)) {
			netif_stop_queue(dev);
		} else {
			netif_wake_queue(dev);

			/*
			 * Start all TX queues on ethernet port. This is good in
			 * case previous packets were not transmitted, due
			 * to link down, and this command re-enables all TX
			 * queues.
			 * Note that it is possible to get a TX resource error
			 * interrupt after issuing this, since not all TX queues
			 * are enabled, or have anything to send.
			 */
			MV_WRITE(MV64340_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 1);
		}
	}

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using the gigE interrupt coalescing mechanism.
	 */
	if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}

#ifdef MV64340_COAL

/*
 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
 *
 * DESCRIPTION:
 *	This routine sets the RX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter, that counts in 64 t_clk
 *	chunks; when the timeout event occurs a maskable interrupt
 *	occurs.
 *	The parameter is calculated using the tClk of the MV-643xx chip
 *	and the required delay of the interrupt in usec.
 *
 * INPUT:
 *	unsigned int eth_port_num      Ethernet port number
 *	unsigned int t_clk        t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay       Delay in usec
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in MV-643xx chip.
 *
 * RETURN:
 *	The interrupt coalescing value set in the gigE port.
 *
 */
static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
	unsigned int t_clk, unsigned int delay)
{
	unsigned int coal = ((t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism: coal occupies bits [21:8] of the
	 * SDMA config register; all other bits are preserved. */
	MV_WRITE(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num),
		 ((coal & 0x3fff) << 8) |
		 (MV_READ(MV64340_ETH_SDMA_CONFIG_REG(eth_port_num))
		  & 0xffc000ff));

	return coal;
}
#endif

/*
 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
 *
 * DESCRIPTION:
 *	This routine sets the TX coalescing interrupt mechanism parameter.
 *	This parameter is a timeout counter, that counts in 64 t_clk
 *	chunks; when the timeout event occurs a maskable interrupt
 *	occurs.
 *	The parameter is calculated using the t_clk frequency of the
 *	MV-643xx chip and the required delay of the interrupt in uSec.
 *
 * INPUT:
 *	unsigned int eth_port_num      Ethernet port number
 *	unsigned int t_clk        t_clk of the MV-643xx chip in HZ units
 *	unsigned int delay       Delay in uSeconds
 *
 * OUTPUT:
 *	Interrupt coalescing mechanism value is set in MV-643xx chip.
* * RETURN: *	The interrupt coalescing value set in the gigE port. * */static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,	unsigned int t_clk, unsigned int delay){	unsigned int coal;	coal = ((t_clk / 1000000) * delay) / 64;	/* Set TX Coalescing mechanism */	MV_WRITE(MV64340_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),		 coal << 4);	return coal;}/* * mv64340_eth_open * * This function is called when openning the network device. The function * should initialize all the hardware, initialize cyclic Rx/Tx * descriptors chain and buffers and allocate an IRQ to the network * device. * * Input : a pointer to the network device structure * * Output : zero of success , nonzero if fails. */static int mv64340_eth_open(struct net_device *dev){	struct mv64340_private *mp = netdev_priv(dev);	unsigned int port_num = mp->port_num;	int err = err;	spin_lock_irq(&mp->lock);	err = request_irq(dev->irq, mv64340_eth_int_handler,	                  SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);	if (err) {		printk(KERN_ERR "Can not assign IRQ number to MV64340_eth%d\n",		       port_num);		err = -EAGAIN;		goto out;	}	if (mv64340_eth_real_open(dev)) {		printk("%s: Error opening interface\n", dev->name);		err = -EBUSY;		goto out_free;	}	spin_unlock_irq(&mp->lock);	return 0;out_free:	free_irq(dev->irq, dev);out:	spin_unlock_irq(&mp->lock);	return err;}/* * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory. * * DESCRIPTION: *       This function prepares a Rx chained list of descriptors and packet  *       buffers in a form of a ring. The routine must be called after port  *       initialization routine and before port start routine.  *       The Ethernet SDMA engine uses CPU bus addresses to access the various  *       devices in the system (i.e. DRAM). This function uses the ethernet  *       struct 'virtual to physical' routine (set by the user) to set the ring  *       with physical addresses. 
* * INPUT: *	struct mv64340_private   *mp   Ethernet Port Control srtuct.  *      int 			rx_desc_num       Number of Rx descriptors *      int 			rx_buff_size      Size of Rx buffer *      unsigned int    rx_desc_base_addr  Rx descriptors memory area base addr. *      unsigned int    rx_buff_base_addr  Rx buffer memory area base addr. * * OUTPUT: *      The routine updates the Ethernet port control struct with information  *      regarding the Rx descriptors and buffers. * * RETURN: *      false if the given descriptors memory area is not aligned according to *      Ethernet SDMA specifications. *      true otherwise. */static int ether_init_rx_desc_ring(struct mv64340_private * mp,	unsigned long rx_buff_base_addr){	unsigned long buffer_addr = rx_buff_base_addr;	volatile struct eth_rx_desc *p_rx_desc;	int rx_desc_num = mp->rx_ring_size;	unsigned long rx_desc_base_addr = (unsigned long) mp->p_rx_desc_area;	int rx_buff_size = 1536;	/* Dummy, will be replaced later */	int i;	p_rx_desc = (struct eth_rx_desc *) rx_desc_base_addr;	/* Rx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). */	if (rx_buff_base_addr & 0xf)		return 0;	/* Rx buffers are limited to 64K bytes and Minimum size is 8 bytes  */	if ((rx_buff_size < 8) || (rx_buff_size > RX_BUFFER_MAX_SIZE))		return 0;	/* Rx buffers must be 64-bit aligned.       */	if ((rx_buff_base_addr + rx_buff_size) & 0x7)		return 0;	/* initialize the Rx descriptors ring */	for (i = 0; i < rx_desc_num; i++) {		p_rx_desc[i].buf_size = rx_buff_size;		p_rx_desc[i].byte_cnt = 0x0000;		p_rx_desc[i].cmd_sts =			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +			((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);		p_rx_desc[i].buf_ptr = buffer_addr;		mp->rx_skb[i] = NULL;		buffer_addr += rx_buff_size;	}	/* Save Rx desc pointer to driver struct. 
*/	mp->rx_curr_desc_q = 0;	mp->rx_used_desc_q = 0;	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);	mp->port_rx_queue_command |= 1;	return 1;}/* * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory. * * DESCRIPTION: *       This function prepares a Tx chained list of descriptors and packet  *       buffers in a form of a ring. The routine must be called after port  *       initialization routine and before port start routine.  *       The Ethernet SDMA engine uses CPU bus addresses to access the various  *       devices in the system (i.e. DRAM). This function uses the ethernet  *       struct 'virtual to physical' routine (set by the user) to set the ring  *       with physical addresses. * * INPUT: *	struct mv64340_private   *mp   Ethernet Port Control srtuct.  *      int 		tx_desc_num        Number of Tx descriptors *      int 		tx_buff_size	   Size of Tx buffer *      unsigned int    tx_desc_base_addr  Tx descriptors memory area base addr. * * OUTPUT: *      The routine updates the Ethernet port control struct with information  *      regarding the Tx descriptors and buffers. * * RETURN: *      false if the given descriptors memory area is not aligned according to *      Ethernet SDMA specifications. *      true otherwise. */static int ether_init_tx_desc_ring(struct mv64340_private *mp){	unsigned long tx_desc_base_addr = (unsigned long) mp->p_tx_desc_area;	int tx_desc_num = mp->tx_ring_size;	struct eth_tx_desc *p_tx_desc;	int i;	/* Tx desc Must be 4LW aligned (i.e. Descriptor_Address[3:0]=0000). 
*/
	if (tx_desc_base_addr & 0xf)
		return 0;

	/* save the first desc pointer to link with the last descriptor */
	p_tx_desc = (struct eth_tx_desc *) tx_desc_base_addr;

	/* Initialize the Tx descriptors ring: all descriptors start empty
	 * and CPU-owned, chained circularly via the modulo wrap. */
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].byte_cnt	= 0x0000;
		p_tx_desc[i].l4i_chk	= 0x0000;
		p_tx_desc[i].cmd_sts	= 0x00000000;
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
			((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
		p_tx_desc[i].buf_ptr	= 0x00000000;
		mp->tx_skb[i]		= NULL;
	}

	/* Set Tx desc pointer in driver struct. */
	mp->tx_curr_desc_q = 0;
	mp->tx_used_desc_q = 0;
#ifdef MV64340_CHECKSUM_OFFLOAD_TX
	mp->tx_first_desc_q = 0;
#endif

	/* Init Tx ring base and size parameters */
	mp->tx_desc_area_size	= tx_desc_num * sizeof(struct eth_tx_desc);

	/* Add the queue to the list of Tx queues of this port */
	mp->port_tx_queue_command |= 1;

	return 1;
}

/*
 * Helper function for mv64340_eth_open: programs the port registers,
 * installs the MAC address and allocates the descriptor rings.
 * Called with mp->lock held by mv64340_eth_open.
 * (Function is truncated at the end of this chunk — continues on the
 * next page of the source.)
 */
static int mv64340_eth_real_open(struct net_device *dev)
{
	struct mv64340_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	u32 phy_reg_data;
	unsigned int size;

	/* Stop RX queues (write disable bits for all eight queues) */
	MV_WRITE(MV64340_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
		 0x0000ff00);

	/* Clear the ethernet port interrupts */
	MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
	MV_WRITE(MV64340_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);

	/* Unmask RX buffer and TX end interrupt */
	MV_WRITE(MV64340_ETH_INTERRUPT_MASK_REG(port_num),
		 INT_CAUSE_UNMASK_ALL);

	/* Unmask phy and link status changes interrupts */
	MV_WRITE(MV64340_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
		 INT_CAUSE_UNMASK_ALL_EXT);

	/* Set the MAC Address (6 octets from the net_device) */
	memcpy(mp->port_mac_addr, dev->dev_addr, 6);

	eth_port_init(mp);

	/* Rx-refill task plus a timer wrapper that can re-trigger it */
	INIT_WORK(&mp->rx_task, (void (*)(void *)) mv64340_eth_rx_task, dev);

	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv64340_eth_rx_task_timer_wrapper;
	mp->timeout.data = (unsigned long) dev;

	mp->rx_task_busy = 0;
	mp->rx_timer_flag = 0;

	/* Allocate TX ring */
	mp->tx_ring_skbs = 0;
	mp->tx_ring_size = MV64340_TX_QUEUE_SIZE;
	size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
	mp->tx_desc_area_size = size;

	/* Assumes allocated ring is 16 bytes aligned */
	mp->p_tx_desc_area = pci_alloc_consistent(NULL, size, &mp->tx_desc_dma);
	if (!mp->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		       dev->name, size);
		return -ENOMEM;

/*
 * (Web code-viewer footer, translated and converted to a comment so the
 * paste remains valid C.)
 * Keyboard shortcuts: Copy code Ctrl+C · Search code Ctrl+F ·
 * Fullscreen F11 · Increase font Ctrl+= · Decrease font Ctrl+- ·
 * Show shortcuts ?
 */