
📄 acenic.c

📁 Combined Linux and 2410 development; it can be used to generate the zImage file required by the 2410
💻 C
📖 Page 1 of 5
	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->std_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}


static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	prefetchw(&ap->cur_mini_bufs);

	idx = ap->rx_mini_skbprd;
	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       ((unsigned long)skb->data & ~PAGE_MASK),
				       ACE_MINI_BUFSIZE - (2 + 16),
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_SIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);
	wmb();

 out:
	clear_bit(0, &ap->mini_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}


/*
 * Load the jumbo rx ring; this may happen at any time if the MTU
 * is changed to a value > 1500.
 */
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs *regs;
	short i, idx;

	regs = ap->regs;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       ((unsigned long)skb->data & ~PAGE_MASK),
				       ACE_JUMBO_BUFSIZE - (2 + 16),
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;

 error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
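/*
 * The event ring is a plain consumer/producer ring: evtcsm is the
 * host's consumer index and evtprd the NIC's producer index. The
 * handler consumes entries until the two indices meet and returns
 * the updated consumer index to the caller.
 */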
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = dev->priv;

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       dev->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       dev->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
				       state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
				break;
			}
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       dev->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", dev->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", dev->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       dev->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", dev->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       dev->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       dev->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       dev->name);
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       dev->name, ap->evt_ring[evtcsm].evt);
		}

		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}
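/*
 * RX completion path: walk the return ring from the consumer index
 * (rxretcsm) towards the producer index (rxretprd); for each frame,
 * look up its ring_info by descriptor type (standard, mini or
 * jumbo), unmap the DMA buffer and hand the skb to the stack with
 * netif_rx().
 */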
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = dev->priv;
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	prefetchw(&ap->cur_rx_bufs);
	prefetchw(&ap->cur_mini_bufs);

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *rxdesc, *retdesc;
		u32 skbidx;
		int bd_flags, desc_type, mapsize;
		u16 csum;

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		bd_flags = retdesc->flags;
		desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set.
			 *
			 * Mini and normal frames arrive frequently,
			 * so use a local counter to avoid doing
			 * atomic operations for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_std_ring[skbidx];
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_jumbo_ring[skbidx];
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_mini_ring[skbidx];
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_page(ap->pdev,
			       pci_unmap_addr(rip, mapping),
			       mapsize,
			       PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);

		/*
		 * Fly baby, fly!
		 */
		csum = retdesc->tcp_udp_csum;

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * Instead of forcing the poor tigon mips cpu to calculate
		 * the pseudo hdr checksum, we do this ourselves.
		 */
		if (bd_flags & BD_FLG_TCP_UDP_SUM) {
			skb->csum = htons(csum);
			skb->ip_summed = CHECKSUM_HW;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
		}

		netif_rx(skb);		/* send it up */
		dev->last_rx = jiffies;

		ap->stats.rx_packets++;
		ap->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		struct ace_regs *regs = ap->regs;
		writel(idx, &regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;
 error:
	idx = rxretprd;
	goto out;
}


static inline void ace_tx_int(struct net_device *dev,
			      u32 txcsm, u32 idx)
{
	struct ace_private *ap = dev->priv;

	do {
		struct sk_buff *skb;
		dma_addr_t mapping;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + idx;
		skb = info->skb;
		mapping = pci_unmap_addr(info, mapping);

		if (mapping) {
			pci_unmap_page(ap->pdev, mapping,
				       pci_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(info, mapping, 0);
		}

		if (skb) {
			ap->stats.tx_packets++;
			ap->stats.tx_bytes += skb->len;
			dev_kfree_skb_irq(skb);
			info->skb = NULL;
		}

		idx = (idx + 1) % TX_RING_ENTRIES;
	} while (idx != txcsm);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	wmb();
	ap->tx_ret_csm = txcsm;

	/* So... tx_ret_csm is advanced _after_ the check for device wakeup.
	 *
	 * We could try to advance it before. In that case we would get
	 * the following race condition: hard_start_xmit on another cpu
	 * enters after we advanced tx_ret_csm and fills the space which
	 * we have just freed, so that we make an illegal device wakeup.
	 * There is no good way to work around this (the check at entry
	 * to ace_start_xmit detects this condition and prevents
	 * ring corruption, but it is not a good workaround.)
	 *
	 * When tx_ret_csm is advanced after, we wake up the device _only_
	 * if we really have some space in the ring (though the core doing
	 * hard_start_xmit can see a full ring for some period and has to
	 * synchronize.) Superb.
	 * BUT! We get another subtle race condition. hard_start_xmit
	 * may think the ring is full between the wakeup and the advancing
	 * of tx_ret_csm and will stop the device instantly! It is not so
	 * bad. We are guaranteed that there is something in the ring, so
	 * that the next irq will resume transmission. To speed this up we
	 * could mark the descriptor which closes the ring with
	 * BD_FLG_COAL_NOW (see ace_start_xmit).
	 *
	 * Well, this dilemma exists in all lock-free devices.
	 * We, following the scheme used in drivers by Donald Becker,
	 * select the least dangerous.
	 *							--ANK
	 */
}
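/*
 * Top-level interrupt handler. The index pairs declared below
 * (txcsm, rxretcsm/rxretprd, evtcsm/evtprd) track the consumer and
 * producer positions of the TX reclaim, RX return and event rings
 * serviced by the routines above.
 */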
static void ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	ap = dev->priv;
	regs = ap->regs;

	/*
	 * In case of PCI shared interrupts or spurious interrupts,
	 * we want to make sure it is actually our interrupt
