
📄 acenic.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	ap = netdev_priv(dev);

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       ap->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       ap->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
				       state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
				break;
			}
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       ap->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", ap->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", ap->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       ap->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", ap->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       ap->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       ap->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       ap->name);
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       ap->name, ap->evt_ring[evtcsm].evt);
		}

		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}


static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = netdev_priv(dev);
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	prefetchw(&ap->cur_rx_bufs);
	prefetchw(&ap->cur_mini_bufs);

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *rxdesc, *retdesc;
		u32 skbidx;
		int bd_flags, desc_type, mapsize;
		u16 csum;

		/* make sure the rx descriptor isn't read before rxretprd */
		if (idx == rxretcsm)
			rmb();

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		bd_flags = retdesc->flags;
		desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set
			 *
			 * Mini and normal frames arrive frequently,
			 * so use a local counter to avoid doing
			 * atomic operations for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE;
			rxdesc = &ap->rx_std_ring[skbidx];
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE;
			rxdesc = &ap->rx_jumbo_ring[skbidx];
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE;
			rxdesc = &ap->rx_mini_ring[skbidx];
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_page(ap->pdev,
			       pci_unmap_addr(rip, mapping),
			       mapsize,
			       PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);

		/*
		 * Fly baby, fly!
		 */
		csum = retdesc->tcp_udp_csum;

		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * Instead of forcing the poor tigon mips cpu to calculate
		 * pseudo hdr checksum, we do this ourselves.
		 */
		if (bd_flags & BD_FLG_TCP_UDP_SUM) {
			skb->csum = htons(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb->ip_summed = CHECKSUM_NONE;
		}

		/* send it up */
#if ACENIC_DO_VLAN
		if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
			vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
		} else
#endif
			netif_rx(skb);

		dev->last_rx = jiffies;
		ap->stats.rx_packets++;
		ap->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		writel(idx, &ap->regs->RxRetCsm);
	}
	ap->cur_rx = idx;
	return;

 error:
	idx = rxretprd;
	goto out;
}


static inline void ace_tx_int(struct net_device *dev,
			      u32 txcsm, u32 idx)
{
	struct ace_private *ap = netdev_priv(dev);

	do {
		struct sk_buff *skb;
		dma_addr_t mapping;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + idx;
		skb = info->skb;
		mapping = pci_unmap_addr(info, mapping);

		if (mapping) {
			pci_unmap_page(ap->pdev, mapping,
				       pci_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			pci_unmap_addr_set(info, mapping, 0);
		}

		if (skb) {
			ap->stats.tx_packets++;
			ap->stats.tx_bytes += skb->len;
			dev_kfree_skb_irq(skb);
			info->skb = NULL;
		}

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
	} while (idx != txcsm);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	wmb();
	ap->tx_ret_csm = txcsm;

	/* So... tx_ret_csm is advanced _after_ check for device wakeup.
	 *
	 * We could try to make it before. In this case we would get
	 * the following race condition: hard_start_xmit on other cpu
	 * enters after we advanced tx_ret_csm and fills space,
	 * which we have just freed, so that we make illegal device wakeup.
	 * There is no good way to workaround this (at entry
	 * to ace_start_xmit detects this condition and prevents
	 * ring corruption, but it is not a good workaround.)
	 *
	 * When tx_ret_csm is advanced after, we wake up device _only_
	 * if we really have some space in ring (though the core doing
	 * hard_start_xmit can see full ring for some period and has to
	 * synchronize.) Superb.
	 * BUT! We get another subtle race condition. hard_start_xmit
	 * may think that ring is full between wakeup and advancing
	 * tx_ret_csm and will stop device instantly! It is not so bad.
	 * We are guaranteed that there is something in ring, so that
	 * the next irq will resume transmission. To speedup this we could
	 * mark descriptor, which closes ring with BD_FLG_COAL_NOW
	 * (see ace_start_xmit).
	 *
	 * Well, this dilemma exists in all lock-free devices.
	 * We, following scheme used in drivers by Donald Becker,
	 * select the least dangerous.
	 *							--ANK
	 */
}


static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	/*
	 * In case of PCI shared interrupts or spurious interrupts,
	 * we want to make sure it is actually our interrupt before
	 * spending any time in here.
	 */
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return IRQ_NONE;

	/*
	 * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
	 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
	 * writel(0, &regs->Mb0Lo).
	 *
	 * "IRQ avoidance" recommended in docs applies to IRQs served
	 * threads and it is wrong even for that case.
	 */
	writel(0, &regs->Mb0Lo);
	readl(&regs->Mb0Lo);

	/*
	 * There is no conflict between transmit handling in
	 * start_xmit and receive processing, thus there is no reason
	 * to take a spin lock for RX handling. Wait until we start
	 * working on the other stuff - hey we don't need a spin lock
	 * anymore.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		/*
		 * If each skb takes only one descriptor this check degenerates
		 * to identity, because new space has just been opened.
		 * But if skbs are fragmented we must check that this index
		 * update releases enough of space, otherwise we just
		 * wait for device to make more work.
		 */
		if (!tx_ring_full(ap, txcsm, ap->tx_prd))
			ace_tx_int(dev, txcsm, idx);
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * This has to go last in the interrupt handler and run with
	 * the spin lock released ... what lock?
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(ap,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#ifdef DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)){
#ifdef DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}
		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	return IRQ_HANDLED;
}


#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct ace_private *ap = netdev_priv(dev);
	unsigned long flags;

	local_irq_save(flags);
	ace_mask_irq(dev);

	ap->vlgrp = grp;

	ace_unmask_irq(dev);
	local_irq_restore(flags);
}
#endif /* ACENIC_DO_VLAN */


static int ace_open(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;

	if (!(ap->fw_running)) {
		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
		return -EBUSY;
	}

	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);

	cmd.evt = C_CLEAR_STATS;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_UP;
	cmd.idx = 0;
	ace_issue_cmd
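
/*
 * Illustrative sketch only -- not part of the acenic.c listing above.
 * ace_interrupt() is an ordinary shared-IRQ handler, so the driver must
 * register it with request_irq() in its setup path; that call is not on
 * this page. The helper name, the IRQ name string and the error handling
 * below are assumptions for illustration (assumes <linux/interrupt.h>).
 */
static int example_attach_ace_irq(struct net_device *dev)
{
	/* Shared PCI interrupt: pass the net_device as the cookie so the
	 * handler can recover its private state via netdev_priv(dev). */
	if (request_irq(dev->irq, ace_interrupt, IRQF_SHARED, dev->name, dev)) {
		printk(KERN_WARNING "%s: unable to get IRQ %d\n",
		       dev->name, dev->irq);
		return -EAGAIN;
	}
	return 0;
}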
