
📄 sungem.c

📁 Linux development for the S3C2410; can be used to build the zImage image that the 2410 needs
💻 C
📖 Page 1 of 5
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];
			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > 0)
		netif_wake_queue(dev);
}

static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0)
		writel(kick, gp->regs + RXDMA_KICK);
}

static void gem_rx(struct gem *gp)
{
	int entry, drops;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = cpu_to_le64(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = cpu_to_le64(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       ((unsigned long) new_skb->data &
								~PAGE_MASK),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = gp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}
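		/* The chip reports a 16-bit ones-complement sum for the frame
		 * in the descriptor status word; fold it and hand it up as a
		 * hardware-computed checksum (CHECKSUM_HW) so the stack can
		 * avoid recomputing it. */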
		skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->ip_summed = CHECKSUM_HW;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_rx(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;
		gp->dev->last_rx = jiffies;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);
}

static void gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gem *gp = dev->priv;
	u32 gem_status = readl(gp->regs + GREG_STAT);

	spin_lock(&gp->lock);

	if (gem_status & GREG_STAT_ABNORMAL) {
		if (gem_abnormal_irq(dev, gp, gem_status))
			goto out;
	}
	if (gem_status & (GREG_STAT_TXALL | GREG_STAT_TXINTME))
		gem_tx(dev, gp, gem_status);
	if (gem_status & GREG_STAT_RXDONE)
		gem_rx(gp);

out:
	spin_unlock(&gp->lock);
}

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->hw_running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}

	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);

	gp->reset_task_pending = 1;
	schedule_task(&gp->reset_task);

	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gem *gp = dev->priv;
	int entry;
	u64 ctrl;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		u64 csum_start_off, csum_stuff_off;

		csum_start_off = (u64) (skb->h.raw - skb->data);
		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	spin_lock_irq(&gp->lock);

	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&gp->lock);
		return 1;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       ((unsigned long) skb->data &
					~PAGE_MASK),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb->len - skb->data_len;
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     ((unsigned long) skb->data & ~PAGE_MASK),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

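		/* One TX descriptor per paged fragment; only the final
		 * fragment carries TXDCTRL_EOF. */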
		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= 0)
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);

	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irq(&gp->lock);

	dev->trans_start = jiffies;

	return 0;
}

/* Jumbo-grams don't seem to work :-( */
#if 1
#define MAX_MTU		1500
#else
#define MAX_MTU		9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = dev->priv;

	if (new_mtu < 0 || new_mtu > MAX_MTU)
		return -EINVAL;

	spin_lock_irq(&gp->lock);
	dev->mtu = new_mtu;
	gp->reset_task_pending = 1;
	schedule_task(&gp->reset_task);
	spin_unlock_irq(&gp->lock);

	flush_scheduled_tasks();

	return 0;
}

#define STOP_TRIES 32

static void gem_stop(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(GREG_SWRST_TXRST | GREG_SWRST_RXRST, gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit <= 0)
		printk(KERN_ERR "gem: SW reset is ghetto.\n");
}

static void gem_start_dma(struct gem *gp)
{
	unsigned long val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* Link modes of the BCM5400 PHY */
static int phy_BCM5400_link_table[8][3] = {
	{ 0, 0, 0 },	/* No link */
	{ 0, 0, 0 },	/* 10BT Half Duplex */
	{ 1, 0, 0 },	/* 10BT Full Duplex */
	{ 0, 1, 0 },	/* 100BT Half Duplex */
	{ 0, 1, 0 },	/* 100BT Half Duplex */
	{ 1, 1, 0 },	/* 100BT Full Duplex*/
	{ 1, 0, 1 },	/* 1000BT */
	{ 1, 0, 1 },	/* 1000BT */
};

static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u16 ctl;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		/* TODO: parse ep->advertising */
		gp->link_advertise |= (ADVERTISE_10HALF | ADVERTISE_10FULL);
		gp->link_advertise |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
		/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
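		/* Only record that autoneg was requested here; the BMCR is
		 * actually programmed below, at start_aneg. */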
		gp->link_cntl = BMCR_ANENABLE;
	} else {
		gp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			gp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000 && gp->gigabit_capable)
			/* Hrm... check if this is right... */
			gp->link_cntl |= BMCR_SPD2;
		if (ep->duplex == DUPLEX_FULL)
			gp->link_cntl |= BMCR_FULLDPLX;
	}

start_aneg:
	spin_lock_irq(&gp->lock);
	if (!gp->hw_running) {
		spin_unlock_irq(&gp->lock);
		return;
	}

	/* Configure PHY & start aneg */
	ctl = phy_read(gp, MII_BMCR);
	ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
	ctl |= gp->link_cntl;
	if (ctl & BMCR_ANENABLE) {
		ctl |= BMCR_ANRESTART;
		gp->lstate = link_aneg;
	} else {
		gp->lstate = link_force_ok;
	}
	phy_write(gp, MII_BMCR, ctl);

	gp->timer_ticks = 0;
	gp->link_timer.expires = jiffies + ((12 * HZ) / 10);
	add_timer(&gp->link_timer);

	spin_unlock_irq(&gp->lock);
}

static void gem_read_mii_link_mode(struct gem *gp, int *fd, int *spd, int *pause)
{
	u32 val;

	*fd = 0;
	*spd = 10;
	*pause = 0;

	if (gp->phy_mod == phymod_bcm5400 ||
	    gp->phy_mod == phymod_bcm5401 ||
	    gp->phy_mod == phymod_bcm5411) {
		int link_mode;

		val = phy_read(gp, MII_BCM5400_AUXSTATUS);
		link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
			     MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
		*fd = phy_BCM5400_link_table[link_mode][0];
		*spd = phy_BCM5400_link_table[link_mode][2] ?
			1000 :
			(phy_BCM5400_link_table[link_mode][1] ? 100 : 10);
		val = phy_read(gp, MII_LPA);
		if (val & LPA_PAUSE)
			*pause = 1;
	} else {
		val = phy_read(gp, MII_LPA);

		if (val & (LPA_10FULL | LPA_100FULL))
			*fd = 1;
		if (val & (LPA_100FULL | LPA_100HALF))
			*spd = 100;

		if (gp->phy_mod == phymod_m1011) {
			val = phy_read(gp, 0x0a);
			if (val & 0xc00)
				*spd = 1000;
			if (val & 0x800)
				*fd = 1;
		}
	}
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */
static void gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = 10;
	pause = 0;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		val = phy_read(gp, MII_BMCR);
		if (val & BMCR_ANENABLE)
			gem_read_mii_link_mode(gp, &full_duplex, &speed, &pause);
		else {
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;
			if (val & BMCR_SPEED100)
				speed = 100;
		}
	} else {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & PCS_MIIADV_FD)
			full_duplex = 1;
		speed = 1000;
