
📄 acenic_np.c

📁 This software generates NetFlow records from observed network traffic. NetFlow can be used for network planning, load balancing, security monitoring, and similar tasks; a sketch of the v5 record layout follows below.
💻 C
📖 Page 1 of 5
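For context on the NetFlow records mentioned in the description: this page of the driver shows only the receive and interrupt path (each received skb is handed up to the flow engine via NP_netif_rx()), not the export path. As a reference, a NetFlow v5 export datagram has a well-known fixed layout. The sketch below is illustrative and not part of this file; the struct names nf5_header and nf5_record are invented for the example. All multi-byte fields are big-endian on the wire.

#include <stdint.h>

/* NetFlow v5 export header -- 24 bytes, all fields big-endian. */
struct nf5_header {
	uint16_t version;		/* always 5 */
	uint16_t count;			/* flow records in this datagram (1-30) */
	uint32_t sys_uptime;		/* ms since the exporter booted */
	uint32_t unix_secs;		/* export time, whole seconds */
	uint32_t unix_nsecs;		/* export time, residual nanoseconds */
	uint32_t flow_sequence;		/* running count of flows exported */
	uint8_t  engine_type;
	uint8_t  engine_id;
	uint16_t sampling_interval;
} __attribute__((packed));

/* One NetFlow v5 flow record -- 48 bytes. */
struct nf5_record {
	uint32_t srcaddr;		/* source IPv4 address */
	uint32_t dstaddr;		/* destination IPv4 address */
	uint32_t nexthop;		/* next-hop router address */
	uint16_t input, output;		/* SNMP ifIndex in/out */
	uint32_t d_pkts, d_octets;	/* packet and byte counts for the flow */
	uint32_t first, last;		/* sys_uptime at first/last packet */
	uint16_t srcport, dstport;	/* TCP/UDP ports */
	uint8_t  pad1;
	uint8_t  tcp_flags;		/* cumulative OR of TCP flags seen */
	uint8_t  prot;			/* IP protocol number */
	uint8_t  tos;			/* IP type of service */
	uint16_t src_as, dst_as;	/* origin/peer AS numbers */
	uint8_t  src_mask, dst_mask;	/* source/destination prefix lengths */
	uint16_t pad2;
} __attribute__((packed));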
		skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
		if (!skb)
			break;

		/*
		 * Make sure the IP header ends up on a fresh cache line
		 */
		skb_reserve(skb, 2 + 16);
		mapping = pci_map_single(ap->pdev, skb->data,
					 ACE_JUMBO_BUFSIZE - (2 + 16),
					 PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		ap->skb->rx_jumbo_skbuff[idx].mapping = mapping;

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "jumbo receive buffers\n");
	goto out;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = dev->priv;

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       dev->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
				printk(KERN_WARNING "%s: Optical link UP\n",
				       dev->name);
				break;
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       dev->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", dev->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", dev->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       dev->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", dev->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       dev->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       dev->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       dev->name);
			if (!ap->tx_full)
				netif_wake_queue(dev);
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       dev->name, ap->evt_ring[evtcsm].evt);
		}

		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}


static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = dev->priv;
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *rxdesc, *retdesc;
		u32 skbidx;
		int desc_type, mapsize;
		u16 csum;

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		desc_type = retdesc->flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set
			 *
			 * Mini and normal frames arrive frequently,
			 * so use a local counter to avoid doing
			 * atomic operations for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_std_ring[skbidx];
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_jumbo_ring[skbidx];
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE - (2 + 16);
			rxdesc = &ap->rx_mini_ring[skbidx];
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_single(ap->pdev, rip->mapping, mapsize,
				 PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);
#if 0
		/* unnecessary */
		rxdesc->size = 0;
#endif

		/*
		 * Fly baby, fly!
		 */
		csum = retdesc->tcp_udp_csum;

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * If the checksum is correct and this is not a
		 * fragment, tell the stack that the data is correct.
		 */
		if(!(csum ^ 0xffff) &&
		   (!(((struct iphdr *)skb->data)->frag_off &
		      __constant_htons(IP_MF|IP_OFFSET))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->dev = (void*)ap->board_idx;   /* IAP XXX NP HACK */
		NP_netif_rx(skb);		/* send it up */

		dev->last_rx = jiffies;
		ap->stats.rx_packets++;
		ap->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		struct ace_regs *regs = ap->regs;
		writel(idx, &regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;

 error:
	idx = rxretprd;
	goto out;
}


static void ace_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	ap = dev->priv;
	regs = ap->regs;

	/*
	 * In case of PCI shared interrupts or spurious interrupts,
	 * we want to make sure it is actually our interrupt before
	 * spending any time in here.
	 */
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return;

	/*
	 * Tell the card not to generate interrupts while we are in here.
	 */
	writel(1, &regs->Mb0Lo);

	/*
	 * There is no conflict between transmit handling in
	 * start_xmit and receive processing, thus there is no reason
	 * to take a spin lock for RX handling. Wait until we start
	 * working on the other stuff - hey we don't need a spin lock
	 * anymore.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		do {
			struct sk_buff *skb;

			skb = ap->skb->tx_skbuff[idx].skb;
			/*
			 * Race condition between the code cleaning
			 * the tx queue in the interrupt handler and the
			 * interface close.
			 *
			 * This is a kludge that really should be fixed
			 * by preventing the driver from generating a tx
			 * interrupt when the packet has already been
			 * removed from the tx queue.
			 *
			 * Nailed by Don Dugger and Chip Salzenberg of
			 * VA Linux.
			 */
			if (skb) {
				dma_addr_t mapping;

				mapping = ap->skb->tx_skbuff[idx].mapping;

				ap->stats.tx_packets++;
				ap->stats.tx_bytes += skb->len;
				pci_unmap_single(ap->pdev, mapping, skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				ap->skb->tx_skbuff[idx].skb = NULL;
			}

			/*
			 * Question here is whether one should not skip
			 * these writes - I have never seen any errors
			 * caused by the NIC actually trying to access
			 * these incorrectly.
			 */
#ifdef ACE_64BIT_PTR
			ap->tx_ring[idx].addr.addrhi = 0;
#endif
			ap->tx_ring[idx].addr.addrlo = 0;
			ap->tx_ring[idx].flagsize = 0;

			idx = (idx + 1) % TX_RING_ENTRIES;
		} while (idx != txcsm);

		/*
		 * Once we actually get to this point the tx ring has
		 * already been trimmed thus it cannot be full!
		 * Ie. skip the comparison of the tx producer vs. the
		 * consumer.
		 */
		if (netif_queue_stopped(dev) && xchg(&ap->tx_full, 0)) {
			/*
			 * This does not need to be atomic (and expensive),
			 * I've seen cases where it would fail otherwise ;-(
			 */
			netif_wake_queue(dev);
			ace_mark_net_bh();

			/*
			 * TX ring is no longer full, aka the
			 * transmitter is working fine - kill timer.
			 */
			del_timer(&ap->timer);
		}

		ap->tx_ret_csm = txcsm;
		wmb();
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * This has to go last in the interrupt handler and run with
	 * the spin lock released ... what lock?
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#if DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(ap,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#if DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)){
#if DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}
		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	/*
	 * Allow the card to generate interrupts again
	 */
	writel(0, &regs->Mb0Lo);
}


static int ace_open(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs *regs;
	struct cmd cmd;

	ap = dev->priv;
	regs = ap->regs;

	if (!(ap->fw_running)
