sungem.c

From the Linux kernel source tree · C · 2,547 lines total · part 1 of 5

	/* This interrupt is just for pause frame and pause
	 * tracking.  It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
		       dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
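The "write the error bits back" step in gem_pci_interrupt() works because the error bits in the PCI_STATUS config register are write-one-to-clear (RW1C): writing a 1 acknowledges and clears a latched bit, while writing 0 leaves it untouched. A minimal user-space sketch of that semantic (mock_status and rw1c_write are made-up stand-ins, not kernel or PCI APIs):

	#include <stdio.h>
	#include <stdint.h>

	/* Mock RW1C register: writing 1 to a bit clears it, writing 0 leaves it. */
	static uint16_t mock_status = 0x8120;   /* hypothetical latched error bits */

	static void rw1c_write(uint16_t val)
	{
		mock_status &= ~val;            /* each 1 written clears that bit */
	}

	int main(void)
	{
		uint16_t latched = mock_status & 0x8100;  /* only the bits we handled */

		printf("before: %04x\n", mock_status);    /* 8120 */
		rw1c_write(latched);                      /* ack what we saw */
		printf("after:  %04x\n", mock_status);    /* 0020 remains */
		return 0;
	}

Masking with only the bits that were read back matters: blindly writing all-ones could acknowledge an error that latched between the read and the write.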
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}

static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}

	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}

static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
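gem_post_rxds() above returns descriptors to the chip only in groups of four, starting from a 4-aligned index, and issues a single RXDMA_KICK for the last complete cluster; a partially filled cluster waits for a later pass. A user-space sketch of the same walk (post_rxds() and the fresh[] array are illustrative stand-ins, and the ring size is assumed):

	#include <stdio.h>

	#define RX_RING_SIZE 128                     /* assumed power of two */
	#define NEXT_RX(N)   (((N) + 1) & (RX_RING_SIZE - 1))

	static int fresh[RX_RING_SIZE];              /* 1 = returned to the chip */

	/* Toy version of gem_post_rxds(): starting from the aligned slot at
	 * or below rx_new, hand descriptors back in clusters of four and
	 * remember the last index worth kicking to the hardware. */
	static int post_rxds(int rx_new, int limit)
	{
		int cluster_start, curr, count, kick;

		cluster_start = curr = (rx_new & ~(4 - 1));
		count = 0;
		kick = -1;
		while (curr != limit) {
			curr = NEXT_RX(curr);
			if (++count == 4) {
				while (cluster_start != curr) {
					fresh[cluster_start] = 1;
					cluster_start = NEXT_RX(cluster_start);
				}
				kick = curr;
				count = 0;
			}
		}
		return kick;                 /* driver writes this to RXDMA_KICK */
	}

	int main(void)
	{
		/* 10 slots consumed past index 4: only two full clusters
		 * (8 slots) go back; the remaining two wait. */
		printf("kick = %d\n", post_rxds(4, 14));   /* prints kick = 12 */
		return 0;
	}

Four 16-byte descriptors make 64 bytes, so the clustering presumably also keeps ownership writebacks grouped on a cache-line boundary while cutting down on doorbell writes.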
static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = cpu_to_le64(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address.  We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = cpu_to_le64(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;
		gp->dev->last_rx = jiffies;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}
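gem_rx() above is a classic copy-break receive path: frames longer than RX_COPY_THRESHOLD are handed up in their original DMA buffer and the ring slot gets a freshly allocated replacement, while short frames are copied into a small skb so the large buffer stays on the ring. A stripped-down user-space sketch of the decision (copy_break(), COPY_THRESHOLD, and BUF_SIZE are invented names; allocation-failure handling is omitted):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define COPY_THRESHOLD 256    /* stand-in for RX_COPY_THRESHOLD */
	#define BUF_SIZE       2048   /* stand-in for RX_BUF_ALLOC_SIZE */

	/* Toy copy-break: return the buffer to hand upstream, and leave a
	 * valid buffer behind in *slot either way. */
	static char *copy_break(char **slot, int len)
	{
		if (len > COPY_THRESHOLD) {
			/* Large frame: pass the original buffer up and
			 * replace the ring slot with a fresh allocation. */
			char *big = *slot;
			*slot = malloc(BUF_SIZE);
			return big;
		} else {
			/* Small frame: copy out, keep the original on the ring. */
			char *small = malloc(len);
			memcpy(small, *slot, len);
			return small;
		}
	}

	int main(void)
	{
		char *slot = malloc(BUF_SIZE);

		strcpy(slot, "hi");
		char *up = copy_break(&slot, 3);        /* small: copied */
		printf("small copy: %s (ring slot kept: %d)\n", up, up != slot);
		free(up);

		char *up2 = copy_break(&slot, 1500);    /* large: zero-copy handoff */
		printf("large handoff avoided copy: %d\n", up2 != slot);
		free(up2);
		free(slot);
		return 0;
	}

The trade-off: copying a short frame is cheaper than dedicating a 2 KB buffer to it, while the zero-copy handoff avoids touching every byte of a full-sized frame.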
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	unsigned long flags;
	int work_done;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	work_done = 0;
	do {
		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget);

		if (work_done >= budget)
			return work_done;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__netif_rx_complete(dev, napi);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = dev->priv;
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (netif_rx_schedule_prep(dev, &gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			napi_enable(&gp->napi);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__netif_rx_schedule(dev, &gp->napi);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt is safe against reentrance, so there is
	 * no need to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gem *gp = dev->priv;
	int entry;

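gem_intme() a few lines up implements the driver's TX interrupt mitigation: the transmit path requests a completion interrupt only on descriptors whose index is a multiple of half the ring, so a full trip around the ring raises at most two TX-done interrupts, with gem_tx() reaping everything in between. A runnable sketch of the predicate (TX_RING_SIZE is assumed to be 128 here; the driver's real value lives in its header):

	#include <stdio.h>

	#define TX_RING_SIZE 128   /* assumed power of two */

	/* Same test as gem_intme(): true when entry is a multiple of half
	 * the ring, i.e. at slots 0 and TX_RING_SIZE/2. */
	static int intme(int entry)
	{
		return !(entry & ((TX_RING_SIZE >> 1) - 1));
	}

	int main(void)
	{
		int irqs = 0;

		/* One full pass over the ring requests exactly two interrupts. */
		for (int entry = 0; entry < TX_RING_SIZE; entry++)
			irqs += intme(entry);
		printf("interrupts per ring pass: %d\n", irqs);   /* prints 2 */
		return 0;
	}

The mask trick only works because TX_RING_SIZE is a power of two, the same property the NEXT_TX()/NEXT_RX() wrap-around macros rely on.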