⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 440gx_enet.c.svn-base

📁 u-boot for S3c2443 processor
💻 SVN-BASE
📖 第 1 页 / 共 3 页
字号:
#if defined(CONFIG_440_GX)
#if defined(CONFIG_CIS8201_PHY)
	/*
	 * Cicada 8201 PHY needs to have an extended register whacked
	 * for RGMII mode.
	 */
	if ( ((devnum == 2) || (devnum ==3)) && (4 == ethgroup) ) {
		/* presumably reg holds the PHY address here -- TODO confirm */
		miiphy_write (reg, 23, 0x1200);
		/*
		 * Vitesse VSC8201/Cicada CIS8201 errata:
		 * Interoperability problem with Intel 82547EI phys
		 * This work around (provided by Vitesse) changes
		 * the default timer convergence from 8ms to 12ms
		 */
		miiphy_write (reg, 0x1f, 0x2a30);
		miiphy_write (reg, 0x08, 0x0200);
		miiphy_write (reg, 0x1f, 0x52b5);
		miiphy_write (reg, 0x02, 0x0004);
		miiphy_write (reg, 0x01, 0x0671);
		miiphy_write (reg, 0x00, 0x8fae);
		miiphy_write (reg, 0x1f, 0x2a30);
		miiphy_write (reg, 0x08, 0x0000);
		miiphy_write (reg, 0x1f, 0x0000);
		/* end Vitesse/Cicada errata */
	}
#endif
#endif

	/* Start/Restart autonegotiation */
	phy_setup_aneg (reg);
	udelay (1000);

	miiphy_read (reg, PHY_BMSR, &reg_short);

	/*
	 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE)
	    && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached ?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}
			/* progress dot once per 1000 polls (polling every 1 ms) */
			if ((i++ % 1000) == 0) {
				putc ('.');
			}
			udelay (1000);	/* 1 ms */
			miiphy_read (reg, PHY_BMSR, &reg_short);
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}
#endif	/* NOTE(review): closes a conditional opened before this excerpt -- verify */

	speed = miiphy_speed (reg);
	duplex = miiphy_duplex (reg);

	if (hw_p->print_speed) {
		/* only report the link once per device */
		hw_p->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	/* Set ZMII/RGMII speed according to the phy link speed */
	reg = in32 (ZMII_SSR);
	if ( (speed == 100) || (speed == 1000) )
		out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
	else
		out32 (ZMII_SSR,
		       reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));

	/* EMAC 2 and 3 sit behind the RGMII bridge */
	if ((devnum == 2) || (devnum == 3)) {
		if (speed == 1000)
			reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
		else if (speed == 100)
			reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
		else
			reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));

		out32 (RGMII_SSR, reg);
	}

	/* set the Mal configuration reg */
	/* Errata 1.12: MAL_1 -- Disable MAL bursting */
	if (get_pvr () == PVR_440GP_RB)
		mtdcr (malmcr,
		       MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
	else
		mtdcr (malmcr,
		       MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
		       MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);

	/* Free "old" buffers */
	if (hw_p->alloc_tx_buf)
		free (hw_p->alloc_tx_buf);
	if (hw_p->alloc_rx_buf)
		free (hw_p->alloc_rx_buf);

	/*
	 * Malloc MAL buffer descriptors, make sure they are
	 * aligned on cache line boundary size
	 * (401/403/IOP480 = 16, 405 = 32)
	 * and doesn't cross cache block boundaries.
	 */
	hw_p->alloc_tx_buf =
		(mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
				       ((2 * CFG_CACHELINE_SIZE) - 2));
	/* round the working pointer up to the next cache line if needed */
	if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
		hw_p->tx =
			(mal_desc_t *) ((int) hw_p->alloc_tx_buf +
					CFG_CACHELINE_SIZE -
					((int) hw_p->
					 alloc_tx_buf & CACHELINE_MASK));
	} else {
		hw_p->tx = hw_p->alloc_tx_buf;
	}

	hw_p->alloc_rx_buf =
		(mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
				       ((2 * CFG_CACHELINE_SIZE) - 2));
	if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
		hw_p->rx =
			(mal_desc_t *) ((int) hw_p->alloc_rx_buf +
					CFG_CACHELINE_SIZE -
					((int) hw_p->
					 alloc_rx_buf & CACHELINE_MASK));
	} else {
		hw_p->rx = hw_p->alloc_rx_buf;
	}

	/* initialize the MAL transmit descriptor ring */
	for (i = 0; i < NUM_TX_BUFF; i++) {
		hw_p->tx[i].ctrl = 0;
		hw_p->tx[i].data_len = 0;
		/*
		 * NOTE(review): txbuf_ptr is overwritten on every pass, so
		 * after this loop it points at the buffer of the LAST tx
		 * descriptor only -- confirm this is what the send path
		 * expects.
		 */
		if (hw_p->first_init == 0)
			hw_p->txbuf_ptr =
				(char *) malloc (ENET_MAX_MTU_ALIGNED);
		hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
		if ((NUM_TX_BUFF - 1) == i)
			hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
		hw_p->tx_run[i] = -1;
#if 0
		printf ("TX_BUFF %d @ 0x%08lx\n", i,
			(ulong) hw_p->tx[i].data_ptr);
#endif
	}

	/* initialize the MAL receive descriptor ring; RX data lands in
	 * the shared NetRxPackets[] buffers */
	for (i = 0; i < NUM_RX_BUFF; i++) {
		hw_p->rx[i].ctrl = 0;
		hw_p->rx[i].data_len = 0;
		/*       rx[i].data_ptr = (char *) &rx_buff[i]; */
		hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
		if ((NUM_RX_BUFF - 1) == i)
			hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
		hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
		hw_p->rx_ready[i] = -1;
#if 0
		printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) rx[i].data_ptr);
#endif
	}

	/* program the station address into the EMAC individual address
	 * high/low registers, two bytes high and four bytes low */
	reg = 0x00000000;
	reg |= dev->enetaddr[0];	/* set high address */
	reg = reg << 8;
	reg |= dev->enetaddr[1];
	out32 (EMAC_IAH + hw_p->hw_addr, reg);

	reg = 0x00000000;
	reg |= dev->enetaddr[2];	/* set low address  */
	reg = reg << 8;
	reg |= dev->enetaddr[3];
	reg = reg << 8;
	reg |= dev->enetaddr[4];
	reg = reg << 8;
	reg |= dev->enetaddr[5];
	out32 (EMAC_IAL + hw_p->hw_addr, reg);

	/* hook this device's descriptor rings to its MAL channel */
	switch (devnum) {
	case 1:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp1r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp1r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
		break;
#if defined (CONFIG_440_GX)
	case 2:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp2r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp2r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
		break;
	case 3:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp3r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp3r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs3, ENET_MAX_MTU_ALIGNED / 16);
		break;
#endif /*CONFIG_440_GX */
	case 0:
	default:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp0r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
		mtdcr (malrxctp0r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs0, ENET_MAX_MTU_ALIGNED / 16);
		break;
	}

	/* Enable MAL transmit and receive channels */
	mtdcr (maltxcasr, (MAL_TXRX_CASR >> hw_p->devnum));
	mtdcr (malrxcasr, (MAL_TXRX_CASR >> hw_p->devnum));

	/* set transmit enable & receive enable */
	out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_TXE | EMAC_M0_RXE);

	/* set receive fifo to 4k and tx fifo to 2k */
	mode_reg = in32 (EMAC_M1 + hw_p->hw_addr);
	mode_reg |= EMAC_M1_RFS_4K | EMAC_M1_TX_FIFO_2K;

	/* set speed */
	if (speed == _1000BASET)
		mode_reg = mode_reg | EMAC_M1_MF_1000MBPS | EMAC_M1_IST;
	else if (speed == _100BASET)
		mode_reg = mode_reg | EMAC_M1_MF_100MBPS | EMAC_M1_IST;
	else
		mode_reg = mode_reg & ~0x00C00000;	/* 10 MBPS */
	if (duplex == FULL)
		mode_reg = mode_reg | 0x80000000 | EMAC_M1_IST;

	out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);

	/* Enable broadcast and individual address */
	/* TBS: enabling runts as some misbehaved nics will send runts */
	out32 (EMAC_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);

	/* we probably need to set the tx mode1 reg?
maybe at tx time */

	/* set transmit request threshold register */
	out32 (EMAC_TRTR + hw_p->hw_addr, 0x18000000);	/* 256 byte threshold */

	/* set receive  low/high water mark register */
	/* 440GP has a 64 byte burst length */
	out32 (EMAC_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
	out32 (EMAC_TXM1 + hw_p->hw_addr, 0xf8640000);

	/* Set fifo limit entry in tx mode 0 */
	out32 (EMAC_TXM0 + hw_p->hw_addr, 0x00000003);
	/* Frame gap set */
	out32 (EMAC_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);

	/* Set EMAC IER */
	/*
	 * NOTE(review): EMAC_ISR_PTLE appears twice in this mask.  That is
	 * harmless at runtime, but one of the two was probably meant to be
	 * a different event bit -- check against the EMAC manual.
	 */
	hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS |
		EMAC_ISR_PTLE | EMAC_ISR_ORE | EMAC_ISR_IRE;
	if (speed == _100BASET)
		hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;

	out32 (EMAC_ISR + hw_p->hw_addr, 0xffffffff);	/* clear pending interrupts */
	out32 (EMAC_IER + hw_p->hw_addr, hw_p->emac_ier);

	if (hw_p->first_init == 0) {
		/*
		 * Connect interrupt service routines
		 */
		irq_install_handler (VECNUM_EWU0 + (hw_p->devnum * 2),
				     (interrupt_handler_t *) enetInt, dev);
		irq_install_handler (VECNUM_ETH0 + (hw_p->devnum * 2),
				     (interrupt_handler_t *) enetInt, dev);
	}

	mtmsr (msr);		/* enable interrupts again */

	hw_p->bis = bis;
	hw_p->first_init = 1;

	return (1);
}

/*
 * Transmit one frame on this EMAC.
 *
 * dev - U-Boot ethernet device; dev->priv holds the per-EMAC state and
 *       dev->enetaddr is copied into the frame's source address field.
 * ptr - frame to send (its source address bytes are overwritten here).
 * len - frame length in bytes; clamped to ENET_MAX_MTU.
 *
 * Returns len once the controller has accepted the frame, or -1 if it
 * is still pending after 3 seconds.
 */
static int ppc_440x_eth_send (struct eth_device *dev, volatile void *ptr,
			      int len)
{
	struct enet_frame *ef_ptr;
	ulong time_start, time_now;
	unsigned long temp_txm0;
	EMAC_440GX_HW_PST hw_p = dev->priv;

	ef_ptr = (struct enet_frame *) ptr;

	/*-----------------------------------------------------------------------+
	 *  Copy in our address into the frame.
	 *-----------------------------------------------------------------------*/
	(void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);

	/*-----------------------------------------------------------------------+
	 * If frame is too long or too short, modify length.
	 *-----------------------------------------------------------------------*/
	/* TBS: where does the fragment go???? */
	if (len > ENET_MAX_MTU)
		len = ENET_MAX_MTU;

	/* stage the frame in the driver-owned bounce buffer */
	/*   memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
	memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);

	/*-----------------------------------------------------------------------+
	 * set TX Buffer busy, and send it
	 *-----------------------------------------------------------------------*/
	hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
					EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
		~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
	if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
		hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;

	hw_p->tx[hw_p->tx_slot].data_len = (short) len;
	/* mark the descriptor ready last, then order the stores before
	 * kicking the EMAC */
	hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
	__asm__ volatile ("eieio");

	out32 (EMAC_TXM0 + hw_p->hw_addr,
	       in32 (EMAC_TXM0 + hw_p->hw_addr) | EMAC_TXM0_GNP0);
#ifdef INFO_440_ENET
	hw_p->stats.pkts_tx++;
#endif

	/*-----------------------------------------------------------------------+
	 * poll until the packet is sent and then make sure it is OK
	 *-----------------------------------------------------------------------*/
	time_start = get_timer (0);
	while (1) {
		temp_txm0 = in32 (EMAC_TXM0 + hw_p->hw_addr);
		/* loop until either TINT turns on or 3 seconds elapse */
		if ((temp_txm0 & EMAC_TXM0_GNP0) != 0) {
			/* transmit is done, so now check for errors
			 * If there is an error, an interrupt should
			 * happen when we return
			 */
			/*
			 * NOTE(review): GNP0 still set actually means the
			 * frame is still pending (it is cleared on
			 * completion, hence the else-return below); only
			 * the timeout is checked in this branch.
			 */
			time_now = get_timer (0);
			if ((time_now - time_start) > 3000) {
				return (-1);
			}
		} else {
			return (len);
		}
	}
}

/*
 * Shared MAL/EMAC interrupt handler entry point.  The definition
 * continues past the end of this excerpt.
 */
int enetInt (struct eth_device *dev)
{
	int serviced;
	int rc = -1;		/* default to not us */
	unsigned long mal_isr;
	unsigned long emac_isr = 0;
	unsigned long mal_rx_eob;
	unsigned long my_uic0msr, my_uic1msr;
#if defined(CONFIG_440_GX)
	unsigned long my_uic2msr;
#endif
	EMAC_440GX_HW_PST hw_p;

	/*
	 * Because the mal is generic, we need to get the current
	 * eth device
	 */
	dev = eth_get_dev ();

	hw_p = dev->priv;

	/* enter loop that stays in interrupt code until nothing to service */
	do {
		serviced =
0;
		/* snapshot the UIC masked-status registers once per pass */
		my_uic0msr = mfdcr (uic0msr);
		my_uic1msr = mfdcr (uic1msr);
#if defined(CONFIG_440_GX)
		my_uic2msr = mfdcr (uic2msr);
#endif
		/* nothing ethernet/MAL related pending at all? */
		if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
		    && !(my_uic1msr &
			 (UIC_ETH0 | UIC_ETH1 | UIC_MS | UIC_MTDE |
			  UIC_MRDE))) {
			/* not for us */
			return (rc);
		}
#if defined (CONFIG_440_GX)
		/*
		 * NOTE(review): this guard returns even when my_uic1msr
		 * carried an ETH0/ETH1 bit that passed the first check --
		 * on 440GX a pending ETH0/ETH1 interrupt with no ETH2/ETH3
		 * activity would be dropped here; confirm against later
		 * driver revisions.
		 */
		if (!(my_uic0msr & (UIC_MRE | UIC_MTE))
		    && !(my_uic2msr & (UIC_ETH2 | UIC_ETH3))) {
			/* not for us */
			return (rc);
		}
#endif
		/* get and clear controller status interrupts */
		/* look at Mal and EMAC interrupts */
		if ((my_uic0msr & (UIC_MRE | UIC_MTE))
		    || (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE))) {
			/* we have a MAL interrupt */
			mal_isr = mfdcr (malesr);
			/* look for mal error */
			if (my_uic1msr & (UIC_MS | UIC_MTDE | UIC_MRDE)) {
				mal_err (dev, mal_isr, my_uic0msr,
					 MAL_UIC_DEF, MAL_UIC_ERR);
				serviced = 1;
				rc = 0;
			}
		}

		/* port by port dispatch of emac interrupts */
		if (hw_p->devnum == 0) {
			if (UIC_ETH0 & my_uic1msr) {	/* look for EMAC errors */
				emac_isr = in32 (EMAC_ISR + hw_p->hw_addr);
				if ((hw_p->emac_ier & emac_isr) != 0) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -