
📄 winbond-840.c

📁 Linux kernel source code, distributed as a compressed archive; this is the source code accompanying the book 《Linux内核》 (The Linux Kernel)
💻 C
📖 Page 1 of 3
		}
		np->mii_cnt = phy_idx;
		if (phy_idx == 0) {
			printk(KERN_WARNING "%s: MII PHY not found -- this device may "
				   "not operate correctly.\n", dev->name);
		}
	}

	find_cnt++;
	return 0;

#ifndef USE_IO_OPS
err_out_iomem:
	release_mem_region(pci_resource_start(pdev, 1),
			   pci_id_tbl[chip_idx].io_size);
#endif
err_out_netdev:
	unregister_netdev (dev);
	kfree (dev);
	return -ENODEV;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16-bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	readl(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

static int eeprom_read(long addr, int location)
{
	int i;
	int retval = 0;
	int ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	writel(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);

	for (i = 16; i > 0; i--) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay(mdio_addr) readl(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(long mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		writel(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long mdio_addr = dev->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writel(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		writel(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits.
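	   The read frame on the wire is: preamble, start (01), read opcode (10),
	   5-bit PHY address, 5-bit register address, turnaround, then 16 data
	   bits.  The (0xf6 << 10) constant above packs two leading one bits plus
	   the start and opcode fields into the 16 command bits shifted out.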
	*/
	for (i = 20; i > 0; i--) {
		writel(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long mdio_addr = dev->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4  &&  phy_id == np->phys[0])
		np->advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writel(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		writel(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		writel(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return;
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	writel(0x00000001, ioaddr + PCIBusCfg);		/* Reset */

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
			   dev->name, dev->irq);

	if((i=alloc_ring(dev)))
		return i;

	init_registers(dev);

	netif_start_queue(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int mii_reg5 = mdio_read(dev, np->phys[0], 5);
	int negotiated = mii_reg5 & np->advertising;
	int duplex;

	if (np->duplex_lock  ||  mii_reg5 == 0xffff)
		return;
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->full_duplex != duplex) {
		np->full_duplex = duplex;
		if (debug)
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		np->csr6 &= ~0x200;
		np->csr6 |= duplex ? 0x200 : 0;
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;
	int old_csr6 = np->csr6;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
			   "config %8.8x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus),
			   (int)readl(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	check_duplex(dev);
	if (np->csr6 != old_csr6) {
		writel(np->csr6 & ~0x0002, ioaddr + NetworkConfig);
		writel(np->csr6 | 0x2002, ioaddr + NetworkConfig);
	}
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors.
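	   Both rings live in the single pci_alloc_consistent() block obtained
	   in alloc_ring() below, with the Tx ring starting immediately after
	   the last Rx descriptor.  A descriptor is handed to the chip by
	   setting DescOwn in its status word.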
	*/
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= cpu_to_le32(DescEndRing);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		np->rx_addr[i] = pci_map_single(np->pdev,skb->tail,
					skb->len,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = cpu_to_le32(np->rx_addr[i]);
		np->rx_ring[i].status = cpu_to_le32(DescOwn);
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
	writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		dev->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private* np)
{
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pdev,
						np->rx_addr[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pdev,
						np->tx_addr[i],
						np->tx_skbuff[i]->len,
						PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = 0;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
#if defined(__powerpc__)		/* Big-endian */
	writel(0x00100080 | 0xE010, ioaddr + PCIBusCfg);
#elif defined(__alpha__)
	writel(0xE010, ioaddr + PCIBusCfg);
#elif defined(__i386__)
#if defined(MODULE)
	writel(0xE010, ioaddr + PCIBusCfg);
#else
	/* When not a module we can work around broken '486 PCI boards. */
#define x86 boot_cpu_data.x86
	writel((x86 <= 4 ? 0x4810 : 0xE010), ioaddr + PCIBusCfg);
	if (x86 <= 4)
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
			   "alignment to %x.\n", dev->name,
			   (x86 <= 4 ? 0x4810 : 0x8010));
#endif
#else
	writel(0xE010, ioaddr + PCIBusCfg);
#warning Processor architecture undefined!
#endif

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	/* Fast Ethernet; 128 byte Tx threshold;
		Transmit on; Receive on; */
	np->csr6 = 0x20022002;
	check_duplex(dev);
	set_rx_mode(dev);
	writel(0, ioaddr + RxStartDemand);

	/* Clear and Enable interrupts by setting the interrupt mask.
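	   On this Tulip-style chip, a status bit is cleared by writing it back
	   as a one, so storing the mask to IntrStatus first acknowledges
	   anything already pending before IntrEnable unmasks those sources.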
	*/
	writel(0x1A0F5, ioaddr + IntrStatus);
	writel(0x1A0F5, ioaddr + IntrEnable);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef __alpha__
	{
		int i;
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %8.8x: ", (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
				np->cur_tx, np->dirty_tx, np->tx_full,np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
#endif

	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */
	writel(1, dev->base_addr+PCIBusCfg);
	udelay(1);
	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	set_rx_mode(dev);
	spin_unlock_irq(&np->lock);

	netif_wake_queue(dev);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pdev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;
	int len1, len2;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_addr[entry] = pci_map_single(np->pdev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].buffer1 = cpu_to_le32(np->tx_addr[entry]);
	len2 = 0;
	len1 = skb->len;
	if(len1 > TX_BUFLIMIT) {
		len1 = TX_BUFLIMIT;
		len2 = skb->len-len1;
		np->tx_ring[entry].buffer2 = cpu_to_le32(np->tx_addr[entry]+TX_BUFLIMIT);
	}
	np->tx_ring[entry].length = cpu_to_le32(DescWholePkt | (len2 << 11) | len1);
	if (entry >= TX_RING_SIZE-1)		 /* Wrap ring */
		np->tx_ring[entry].length |= cpu_to_le32(DescIntr | DescEndRing);

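Usage note: the probe code that actually reads the station address from the EEPROM is on an earlier page of this listing, but given eeprom_read() above, pulling the six-byte MAC out of the 93c46 would look roughly like the sketch below. The helper name, the word offsets 0-2, and the low-byte-first ordering are illustrative assumptions, not the file's own probe code.

/* Hypothetical sketch (not from winbond-840.c): assemble a 6-byte MAC
   address from the first three 16-bit EEPROM words, assuming the low
   byte of each word comes first. */
static void sketch_read_mac(long ioaddr, unsigned char mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		int word = eeprom_read(ioaddr, i);	/* one 16-bit 93c46 word */

		mac[2*i]     = word & 0xff;
		mac[2*i + 1] = (word >> 8) & 0xff;
	}
}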