⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 acacia.c

📁 IDT RC32438 on-chip ethernet controller
💻 C
📖 第 1 页 / 共 3 页
字号:
	/* now convert TD_RING pointer to KSEG1 */	//lp->td_ring = (DMAD_t )KSEG1ADDR(lp->td_ring);	lp->rd_ring = &lp->td_ring[ACACIA_NUM_TDS];	spin_lock_init(&lp->lock);	dev->open = acacia_open;	dev->stop = acacia_close;	dev->hard_start_xmit = acacia_send_packet;#ifdef CONFIG_NET_FASTROUTE	dev->accept_fastpath = acacia_eth_accept_fastpath;#endif	dev->get_stats	= acacia_get_stats;	dev->set_multicast_list = &acacia_multicast_list;	dev->tx_timeout = acacia_tx_timeout;	dev->watchdog_timeo = ACACIA_TX_TIMEOUT;	lp->rx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);	tasklet_init(lp->rx_tasklet, acacia_rx_tasklet, (unsigned long)dev);	lp->tx_tasklet = kmalloc(sizeof(struct tasklet_struct), GFP_KERNEL);	tasklet_init(lp->tx_tasklet, acacia_tx_tasklet, (unsigned long)dev);	/* Fill in the fields of the device structure with ethernet values. */	ether_setup(dev);	return 0; probe_err_out:	acacia_cleanup_module();	ERR(__FUNCTION__ " failed.  Returns %d\n", retval);	return retval;	}/* * Open/initialize the ACACIA controller. * * This routine should set everything up anew at each open, even *  registers that "should" only need to be set once at boot, so that *  there is non-reboot way to recover if something goes wrong. 
*/static int acacia_open(struct net_device *dev){	struct acacia_local *lp = (struct acacia_local *)dev->priv;	MOD_INC_USE_COUNT;		/* Initialize */	if (acacia_init(dev)) 	{	  	ERR("Erroe: cannot open the Ethernet device\n");	 	return -EAGAIN;	}		/* Install the interrupt handler that handles the Done Finished Ovr and Und Events */		if (request_irq(lp->rx_irq, &acacia_rx_dma_interrupt,			SA_SHIRQ | SA_INTERRUPT,			"acacia ethernet Rx", dev)) 	{		ERR(__FUNCTION__ ": unable to get Rx DMA IRQ %d\n",		    lp->rx_irq);		MOD_DEC_USE_COUNT;		return -EAGAIN;	}	if (request_irq(lp->tx_irq, &acacia_tx_dma_interrupt,			SA_SHIRQ | SA_INTERRUPT,			"acacia ethernet Tx", dev)) 	{		ERR(__FUNCTION__ ": unable to get Tx DMA IRQ %d\n",		    lp->tx_irq);		free_irq(lp->rx_irq, dev);		MOD_DEC_USE_COUNT;		return -EAGAIN;	}	#ifdef	RC32438_REVISION_ZA	/* Install handler for overrun error. */	if (request_irq(lp->ovr_irq, &acacia_ovr_interrupt,			SA_SHIRQ | SA_INTERRUPT,			"Ethernet Overflow", dev)) 	{		ERR(__FUNCTION__ ": unable to get OVR IRQ %d\n",		    lp->ovr_irq);		free_irq(lp->rx_irq, dev);		free_irq(lp->tx_irq, dev);		MOD_DEC_USE_COUNT;		return -EAGAIN;	}#endif	/* Install handler for underflow error. */	if (request_irq(lp->und_irq, &acacia_und_interrupt,			SA_SHIRQ | SA_INTERRUPT,			"Ethernet Underflow", dev)) 	{		ERR(__FUNCTION__ ": unable to get UND IRQ %d\n",		    lp->und_irq);		free_irq(lp->rx_irq, dev);		free_irq(lp->tx_irq, dev);#ifdef	RC32438_REVISION_ZA				free_irq(lp->ovr_irq, dev);		#endif		MOD_DEC_USE_COUNT;		return -EAGAIN;	}	/*Start MII-PHY Timer*/	//Not enabled this feature at this time.	
/*	init_timer(&lp->mii_phy_timer);   	lp->mii_phy_timer.expires = jiffies + 10 * HZ;	   	lp->mii_phy_timer.data = (unsigned long)dev;   	lp->mii_phy_timer.function	 = acacia_mii_handler;   	add_timer(&lp->mii_phy_timer);	*/		return 0;}/* * Close the ACACIA device */static int acacia_close(struct net_device *dev){	struct acacia_local *lp = (struct acacia_local *)dev->priv;	u32 tmp;    	/* Disable interrupts */	disable_irq(lp->rx_irq);	disable_irq(lp->tx_irq);#ifdef	RC32438_REVISION_ZA	disable_irq(lp->ovr_irq);#endif	disable_irq(lp->und_irq);			tmp = local_readl(&lp->tx_dma_regs->dmasm);	tmp = tmp | DMASM_f_m | DMASM_e_m;	local_writel(tmp, &lp->tx_dma_regs->dmasm);     	tmp = local_readl(&lp->rx_dma_regs->dmasm);	tmp = tmp | DMASM_d_m | DMASM_h_m | DMASM_e_m;	local_writel(tmp, &lp->rx_dma_regs->dmasm);	free_irq(lp->rx_irq, dev);	free_irq(lp->tx_irq, dev);#ifdef	RC32438_REVISION_ZA		free_irq(lp->ovr_irq, dev);#endif	free_irq(lp->und_irq, dev);	//Not enabled this feature at this time.	//del_timer(&lp->mii_phy_timer);	MOD_DEC_USE_COUNT;	return 0;}/* transmit packet */static int acacia_send_packet(struct sk_buff *skb, struct net_device *dev){ 	struct acacia_local		*lp = (struct acacia_local *)dev->priv;  	unsigned long 			flags;  	u32					length; 	DMAD_t				td;	spin_lock_irqsave(&lp->lock, flags);	td = &lp->td_ring[lp->tx_chain_tail]; 	/* stop queue when full, drop pkts if queue already full */  	if(lp->tx_count >= (ACACIA_NUM_TDS - 2))  	{		lp->tx_full = 1;			  	if(lp->tx_count == (ACACIA_NUM_TDS - 2))	  	{			/* this pkt is about to fill the queue*/			ERR("Tx Ring now full, queue stopped.\n");				netif_stop_queue(dev);	  	}		else		{			/* this pkt cannot be added to the full queue */			ERR("Tx ring full, packet dropped\n");			lp->stats.tx_dropped++;			dev_kfree_skb_any(skb);			spin_unlock_irqrestore(&lp->lock, flags);			return 1;	  	}	     	}	   	lp->tx_count ++;	 	if (lp->tx_skb[lp->tx_chain_tail] != NULL)  		dev_kfree_skb_any(lp->tx_skb[lp->tx_chain_tail]);	
lp->tx_skb[lp->tx_chain_tail] = skb;    	length = skb->len;  	/* Setup the transmit descriptor. */  	td->ca = virt_to_phys(skb->data);	/* Using the NDPTR to handle the DMA Race Condition */   	if(local_readl(&(lp->tx_dma_regs->dmandptr)) == 0) 		  	{		if( lp->tx_chain_status == empty ) 		{			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; 			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & ACACIA_TDS_MASK;			local_writel(virt_to_phys(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr));						lp->tx_chain_head = lp->tx_chain_tail; 		}		else		{			lp->td_ring[(lp->tx_chain_tail-1)& ACACIA_TDS_MASK].control &=  ~(DMAD_cof_m); 				lp->td_ring[(lp->tx_chain_tail-1)& ACACIA_TDS_MASK].link =  virt_to_phys(td);			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & ACACIA_TDS_MASK;			lp->tx_chain_status = empty;			local_writel(virt_to_phys(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr));						lp->tx_chain_head = lp->tx_chain_tail; 		}		   	}	else	{		if( lp->tx_chain_status == empty )		{			td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m;			lp->td_ring[(lp->tx_chain_tail-1)& ACACIA_TDS_MASK].link =  virt_to_phys(td);				lp->tx_chain_tail = (lp->tx_chain_tail + 1) & ACACIA_TDS_MASK;			lp->tx_chain_status = filled;				}		else		{			lp->td_ring[(lp->tx_chain_tail-1)& ACACIA_TDS_MASK].control &=  ~(DMAD_cof_m); 				lp->td_ring[(lp->tx_chain_tail-1)& ACACIA_TDS_MASK].link =  virt_to_phys(td);				td->control = DMA_COUNT(length) |DMAD_cof_m |DMAD_iof_m; 			lp->tx_chain_tail = (lp->tx_chain_tail + 1) & ACACIA_TDS_MASK;		}			}	dev->trans_start = jiffies;				     	spin_unlock_irqrestore(&lp->lock, flags);    	return 0;}#if 0/* Ethernet MII-PHY Handler */static void acacia_mii_handler(unsigned long data){	struct net_device *dev = (struct net_device *)data;			struct acacia_local *lp = (struct acacia_local *)dev->priv;  	unsigned long 	flags;	unsigned long duplex_status;	int port_addr = (lp->rx_irq 
== 0x2c? 1:0) << 8;	spin_lock_irqsave(&lp->lock, flags);	/* Two ports are using the same MII, the difference is the PHY address */	local_writel(0, &rc32438_eth0_regs->miimcfg);  	local_writel(0, &rc32438_eth0_regs->miimcmd);  	local_writel(port_addr |0x05, &rc32438_eth0_regs->miimaddr);  	local_writel(MIIMCMD_scn_m, &rc32438_eth0_regs->miimcmd);  	while(local_readl(&rc32438_eth0_regs->miimind) & MIIMIND_nv_m);	ERR("irq:%x		port_addr:%x	RDD:%x\n", 		lp->rx_irq, port_addr, local_readl(&rc32438_eth0_regs->miimrdd));	duplex_status = (local_readl(&rc32438_eth0_regs->miimrdd) & 0x140)? ETHMAC2_fd_m: 0;	if(duplex_status != lp->duplex_mode)	{		ERR("The MII-PHY is Auto-negotiated to %s-Duplex mode for Eth-%x\n", duplex_status? "Full":"Half", lp->rx_irq == 0x2c? 1:0);				lp->duplex_mode = duplex_status;		acacia_restart(dev);			}   	lp->mii_phy_timer.expires = jiffies + 10 * HZ;	   	add_timer(&lp->mii_phy_timer);     	spin_unlock_irqrestore(&lp->lock, flags);}#endif#ifdef	RC32438_REVISION_ZA	/* Ethernet Rx Overflow interrupt */static void acacia_ovr_interrupt(int irq, void *dev_id, struct pt_regs * regs){	struct net_device *dev = (struct net_device *)dev_id;	struct acacia_local *lp;	unsigned int i;		ASSERT(dev != NULL);	netif_stop_queue(dev);			lp = (struct acacia_local *)dev->priv;	spin_lock(&lp->lock);	i = local_readl(&lp->eth_regs->ethintfc);  	i &= ~ETHINTFC_ovr_m;	local_writel(i, &lp->eth_regs->ethintfc);	/* Restart interface */	acacia_restart(dev);     	spin_unlock(&lp->lock);}#endif/* Ethernet Tx Underflow interrupt */static void acacia_und_interrupt(int irq, void *dev_id, struct pt_regs * regs){	struct net_device *dev = (struct net_device *)dev_id;	struct acacia_local *lp;	unsigned int i;	ASSERT(dev != NULL);		netif_stop_queue(dev);		lp = (struct acacia_local *)dev->priv;	ERR("Tx underflow - i/f reset\n");	spin_lock(&lp->lock);	i = local_readl(&lp->eth_regs->ethintfc);  	i &= ~ETHINTFC_und_m;	local_writel(i, &lp->eth_regs->ethintfc);	/* Restart interface */	
acacia_restart(dev);
	spin_unlock(&lp->lock);
}

/* Ethernet Rx DMA interrupt: mask further Rx DMA events and defer the
 * actual receive work to the Rx tasklet. */
static void acacia_rx_dma_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct acacia_local* lp;
	volatile u32 dmas;
	ASSERT(dev != NULL);
	lp = (struct acacia_local *)dev->priv;
	spin_lock(&lp->lock);
	/* Mask D H E bit in Rx DMA */
	local_writel(local_readl(&lp->rx_dma_regs->dmasm) | (DMASM_d_m | DMASM_h_m | DMASM_e_m), &lp->rx_dma_regs->dmasm);
	dmas = local_readl(&lp->rx_dma_regs->dmas);
#ifdef	RC32438_REVISION_ZA
	/* ZA silicon: acknowledge the raised status bits in the IRQ handler. */
	local_writel(~dmas, &lp->rx_dma_regs->dmas);
#endif
	/* Only schedule the tasklet for done / halt / error events. */
	if(dmas & (DMAS_d_m | DMAS_h_m | DMAS_e_m))
	{
		tasklet_hi_schedule(lp->rx_tasklet);
	}
	spin_unlock(&lp->lock);
}

/* Rx tasklet: drains completed receive descriptors and passes packets
 * up the stack (body continues beyond this view). */
static void acacia_rx_tasklet(unsigned long rx_data_dev)
{
  struct net_device *dev = (struct net_device *)rx_data_dev;
  struct acacia_local* lp = (struct acacia_local *)dev->priv;
  /* Descriptor accessed through KSEG1 (uncached) to see DMA writes. */
  volatile DMAD_t  rd = KSEG1ADDR(&lp->rd_ring[lp->rx_next_done]);
  struct sk_buff *skb, *skb_new;
  u8* pkt_buf;
  u32 devcs, count, pkt_len, pktuncrc_len;
  unsigned long 	flags;
  volatile u32 dmas;
#ifdef CONFIG_NET_FASTROUTE
	struct ethhdr *eth;
	struct iphdr *iph;
	struct net_device *odev;
	struct rtable *rt;
	u32 CPU_ID = smp_processor_id();
	u32 fast_routed = 0;
	u32 h;
#endif				/* #ifdef CONFIG_NET_FASTROUTE */
#ifdef	RC32438_REVISION_ZA
  struct sk_buff 	 *skb_new_1, *skb_new_2;
#endif
  spin_lock_irqsave(&lp->lock, flags);
  /* Loop while the current descriptor reports received bytes. */
  while ( (count = ACACIA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0)
    {
      /* init the var. 
used for the later operations within the while loop */#ifdef CONFIG_NET_FASTROUTE      fast_routed = 0;#endif				/* #ifdef CONFIG_NET_FASTROUTE */            skb_new = NULL;      devcs = rd->devcs;      pkt_len = RCVPKT_LENGTH(devcs);      skb = lp->rx_skb[lp->rx_next_done];            if (count < 64) 	{	  lp->stats.rx_errors++;	  lp->stats.rx_dropped++;				}      else if ((devcs & ( ETHRX_ld_m)) !=	ETHRX_ld_m)	{	  /* check that this is a whole packet */	  /* WARNING: DMA_FD bit incorrectly set in Rc32438 (errata ref #077) */	  lp->stats.rx_errors++;	  lp->stats.rx_dropped++;	}      else if ( (devcs & ETHRX_rok_m)  )	{	  #ifdef	RC32438_REVISION_ZA			  /*	   * Due to a bug in rc32438 ZA processor, the packet length	   * given by devcs field and count field sometimes differ.	   *	   * 2 packets of lenght 0x40 bytes are merged into one packet of length	   * 0x7c, with the last 4 bytes (CRC Fields) of the first packet	   * being ovewwritten by the 2nd packet. 	   *	   * To avoid packet loss we seperate the merged packest (0x7c) and pass	   * it up to the upper layer (netif_rx) as 2 different packets of	   * length 0x3c and 0x40 bytes.	   
*/	  	  if ((count == 0x7c) && ((count - pkt_len) == 60))	    {	      pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;	      dma_cache_inv((unsigned long)pkt_buf, count);	      	      skb_new_1 = dev_alloc_skb(ACACIA_RBSIZE + 2);			      if (skb_new_1 != NULL) 		{		  /* 16 bit align */		  skb_reserve(skb_new_1, 2);					  skb_new_1->dev = dev;							  skb_put(skb_new_1, 60);		  eth_copy_and_sum(skb_new_1, pkt_buf, 60, 0);		  skb_new_1->protocol = eth_type_trans(skb_new_1, dev);		  netif_rx(skb_new_1);		}	      else		{		  lp->stats.rx_errors++;		  lp->stats.rx_dropped++;		}	      	      skb_new_2 = dev_alloc_skb(ACACIA_RBSIZE + 2);					      if (skb_new_2 != NULL) 		{		  /* 16 bit align */		  skb_reserve(skb_new_2, 2);					  skb_new_2->dev = dev;							  skb_put(skb_new_2, 60);		  eth_copy_and_sum(skb_new_2, (pkt_buf + 60), 60, 0);		  skb_new_2->protocol = eth_type_trans(skb_new_2, dev);		  netif_rx(skb_new_2);		}	      else		{		  lp->stats.rx_errors++;		  lp->stats.rx_dropped++;		}	      	    }	  else#endif					    {	      /* must be the (first and) last descriptor then */	      pkt_buf = (u8*)lp->rx_skb[lp->rx_next_done]->data;	      	      pktuncrc_len = pkt_len - 4;	      /* invalidate the cache */	      dma_cache_inv((unsigned long)pkt_buf, pktuncrc_len);	      	      /* Malloc up new buffer. 
*/					  	      skb_new = dev_alloc_skb(ACACIA_RBSIZE + 2);					             		      	      if (skb_new != NULL)		{		  /* Make room */		  skb_put(skb, pktuncrc_len);		    #ifdef CONFIG_NET_FASTROUTE		  /*		   * Fast Route		   */		  #if 0		  if (pkt_len > 1024)		    goto bigpacket;		  #endif		  		  eth = (struct ethhdr *) skb->data;		  if (eth->h_proto == __constant_htons(ETH_P_IP)) {		    iph = (struct iphdr *) (skb->data + ETH_HLEN);		    h = (*(u8 *) & iph->daddr			 ^ *(u8 *) & iph->			 saddr) & NETDEV_FASTROUTE_HMASK;		    rt = (struct rtable *) (dev->fastpath[h]);		    if (rt			&& ((u16 *) & iph->daddr)[0] ==			((u16 *) & rt->key.dst)[0]			&& ((u16 *) & iph->daddr)[1] ==			((u16 *) & rt->key.dst)[1]			&& ((u16 *) & iph->saddr)[0] ==			((u16 *) & rt->key.src)[0]			&& ((u16 *) & iph->saddr)[1] ==			((u16 *) & rt->key.src)[1]			&& !rt->u.dst.obsolete) {		      odev = rt->u.dst.dev;		      netdev_rx_stat[CPU_ID].fastroute_hit++;		      		      if (*(u8 *) iph == 0x45 && !(eth->h_dest[0] & 0x80)	/* 0x80 for IP payload 0x45 for IPv4*/			  &&neigh_is_valid(rt->u.dst.					   neighbour)			  && iph->ttl > 1) {	/* Fast Route Path */						fast_routed = 1;						if ((!netif_queue_stopped(odev)))			  //if (1)			  {			    skb->pkt_type =			      PACKET_FASTROUTE;			    skb->protocol =			      __constant_htons			      (ETH_P_IP);			    			    ip_decrease_ttl(iph);			    			    

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -