4xx_enet.c
		hw_p->devnum, hw_p->stats.pkts_tx,
		hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
	hw_p->stats.pkts_tx = 0;
	hw_p->stats.pkts_rx = 0;
	hw_p->stats.pkts_handled = 0;
	hw_p->print_speed = 1;		/* print speed message again next time */
#endif

	hw_p->tx_err_index = 0;		/* Transmit Error Index for tx_err_log */
	hw_p->rx_err_index = 0;		/* Receive Error Index for rx_err_log */
	hw_p->rx_slot = 0;		/* MAL Receive Slot */
	hw_p->rx_i_index = 0;		/* Receive Interrupt Queue Index */
	hw_p->rx_u_index = 0;		/* Receive User Queue Index */
	hw_p->tx_slot = 0;		/* MAL Transmit Slot */
	hw_p->tx_i_index = 0;		/* Transmit Interrupt Queue Index */
	hw_p->tx_u_index = 0;		/* Transmit User Queue Index */

#if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
	/* set RMII mode */
	/* NOTE: 440GX spec states that mode is mutually exclusive */
	/* NOTE: Therefore, disable all other EMACs, since we handle */
	/* NOTE: only one emac at a time */
	reg = 0;
	out32 (ZMII_FER, 0);
	udelay (100);

#if defined(CONFIG_440EP) || defined(CONFIG_440GR)
	out32 (ZMII_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
#elif defined(CONFIG_440GX) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
	ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
#elif defined(CONFIG_440GP)
	/* set RMII mode */
	out32 (ZMII_FER, ZMII_RMII | ZMII_MDI0);
#else
	if ((devnum == 0) || (devnum == 1)) {
		out32 (ZMII_FER,
		       (ZMII_FER_SMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
	} else {		/* ((devnum == 2) || (devnum == 3)) */
		out32 (ZMII_FER, ZMII_FER_MDI << ZMII_FER_V (devnum));
		out32 (RGMII_FER, ((RGMII_FER_RGMII << RGMII_FER_V (2)) |
				   (RGMII_FER_RGMII << RGMII_FER_V (3))));
	}
#endif

	out32 (ZMII_SSR, ZMII_SSR_SP << ZMII_SSR_V(devnum));
#endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */

	__asm__ volatile ("eieio");

	/* reset emac so we have access to the phy */
#if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
	/* provide clocks for EMAC internal loopback */
	mfsdr (sdr_mfr, mfr);
	mfr |= SDR0_MFR_ETH_CLK_SEL_V(devnum);
	mtsdr(sdr_mfr, mfr);
#endif

	out32 (EMAC_M0 + hw_p->hw_addr, EMAC_M0_SRST);
	__asm__ volatile ("eieio");

	failsafe = 1000;
	while ((in32 (EMAC_M0 + hw_p->hw_addr) & (EMAC_M0_SRST)) && failsafe) {
		udelay (1000);
		failsafe--;
	}
	if (failsafe <= 0)
		printf("\nProblem resetting EMAC!\n");

#if defined(CONFIG_440SPE) || defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
	/* remove clocks for EMAC internal loopback */
	mfsdr (sdr_mfr, mfr);
	mfr &= ~SDR0_MFR_ETH_CLK_SEL_V(devnum);
	mtsdr(sdr_mfr, mfr);
#endif

#if defined(CONFIG_440GX) || \
    defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
    defined(CONFIG_440SP) || defined(CONFIG_440SPE)
	/* Whack the M1 register */
	mode_reg = 0x0;
	mode_reg &= ~0x00000038;
	if (sysinfo.freqOPB <= 50000000)
		;	/* <= 50 MHz: leave the OBCI bits cleared */
	else if (sysinfo.freqOPB <= 66666667)
		mode_reg |= EMAC_M1_OBCI_66;
	else if (sysinfo.freqOPB <= 83333333)
		mode_reg |= EMAC_M1_OBCI_83;
	else if (sysinfo.freqOPB <= 100000000)
		mode_reg |= EMAC_M1_OBCI_100;
	else
		mode_reg |= EMAC_M1_OBCI_GT100;

	out32 (EMAC_M1 + hw_p->hw_addr, mode_reg);
#endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */

	/* wait for PHY to complete auto negotiation */
	reg_short = 0;
#ifndef CONFIG_CS8952_PHY
	switch (devnum) {
	case 0:
		reg = CONFIG_PHY_ADDR;
		break;
#if defined (CONFIG_PHY1_ADDR)
	case 1:
		reg = CONFIG_PHY1_ADDR;
		break;
#endif
#if defined (CONFIG_440GX)
	case 2:
		reg = CONFIG_PHY2_ADDR;
		break;
	case 3:
		reg = CONFIG_PHY3_ADDR;
		break;
#endif
	default:
		reg = CONFIG_PHY_ADDR;
		break;
	}

	bis->bi_phynum[devnum] = reg;

#if defined(CONFIG_PHY_RESET)
	/*
	 * Reset the phy, only if it's the first time through;
	 * otherwise, just check the speeds & feeds
	 */
	if (hw_p->first_init == 0) {
#if defined(CONFIG_88E1111_CLK_DELAY)
		/*
		 * On some boards (e.g. ALPR) the Marvell 88E1111 PHY needs
		 * the "RGMII transmit timing control" and "RGMII receive
		 * timing control" bits set, so that Gbit communication works
		 * without problems.
		 * Also set the "Transmitter disable" to 1 to enable the
		 * transmitter.
		 * After setting these bits a soft-reset must occur for this
		 * change to become active.
		 */
		miiphy_read (dev->name, reg, 0x14, &reg_short);
		reg_short |= (1 << 7) | (1 << 1) | (1 << 0);
		miiphy_write (dev->name, reg, 0x14, reg_short);
#endif
#if defined(CONFIG_M88E1111_PHY)	/* test-only: merge with CONFIG_88E1111_CLK_DELAY !!! */
		miiphy_write (dev->name, reg, 0x14, 0x0ce3);
		miiphy_write (dev->name, reg, 0x18, 0x4101);
		miiphy_write (dev->name, reg, 0x09, 0x0e00);
		miiphy_write (dev->name, reg, 0x04, 0x01e1);
#endif
		miiphy_reset (dev->name, reg);

#if defined(CONFIG_440GX) || \
    defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
    defined(CONFIG_440SP) || defined(CONFIG_440SPE)

#if defined(CONFIG_CIS8201_PHY)
		/*
		 * Cicada 8201 PHY needs to have an extended register whacked
		 * for RGMII mode.
		 */
		if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
#if defined(CONFIG_CIS8201_SHORT_ETCH)
			miiphy_write (dev->name, reg, 23, 0x1300);
#else
			miiphy_write (dev->name, reg, 23, 0x1000);
#endif
			/*
			 * Vitesse VSC8201/Cicada CIS8201 errata:
			 * Interoperability problem with Intel 82547EI phys.
			 * This work around (provided by Vitesse) changes
			 * the default timer convergence from 8ms to 12ms
			 */
			miiphy_write (dev->name, reg, 0x1f, 0x2a30);
			miiphy_write (dev->name, reg, 0x08, 0x0200);
			miiphy_write (dev->name, reg, 0x1f, 0x52b5);
			miiphy_write (dev->name, reg, 0x02, 0x0004);
			miiphy_write (dev->name, reg, 0x01, 0x0671);
			miiphy_write (dev->name, reg, 0x00, 0x8fae);
			miiphy_write (dev->name, reg, 0x1f, 0x2a30);
			miiphy_write (dev->name, reg, 0x08, 0x0000);
			miiphy_write (dev->name, reg, 0x1f, 0x0000);
			/* end Vitesse/Cicada errata */
		}
#endif
#endif
		/* Start/Restart autonegotiation */
		phy_setup_aneg (dev->name, reg);
		udelay (1000);
	}
#endif /* defined(CONFIG_PHY_RESET) */

	miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);

	/*
	 * Wait if PHY is capable of autonegotiation and
	 * autonegotiation is not complete.
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE)
	    && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached ?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}

			if ((i++ % 1000) == 0) {
				putc ('.');
			}
			udelay (1000);	/* 1 ms */
			miiphy_read (dev->name, reg, PHY_BMSR, &reg_short);
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}
#endif /* #ifndef CONFIG_CS8952_PHY */

	speed = miiphy_speed (dev->name, reg);
	duplex = miiphy_duplex (dev->name, reg);

	if (hw_p->print_speed) {
		hw_p->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

#if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
    !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX)
#if defined(CONFIG_440EP) || defined(CONFIG_440GR)
	mfsdr(sdr_mfr, reg);
	if (speed == 100) {
		reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
	} else {
		reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
	}
	mtsdr(sdr_mfr, reg);
#endif

	/* Set ZMII/RGMII speed according to the phy link speed */
	reg = in32 (ZMII_SSR);
	if ((speed == 100) || (speed == 1000))
		out32 (ZMII_SSR, reg | (ZMII_SSR_SP << ZMII_SSR_V (devnum)));
	else
		out32 (ZMII_SSR, reg & (~(ZMII_SSR_SP << ZMII_SSR_V (devnum))));

	if ((devnum == 2) || (devnum == 3)) {
		if (speed == 1000)
			reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
		else if (speed == 100)
			reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
		else if (speed == 10)
			reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
		else {
			printf("Error in RGMII Speed\n");
			return -1;
		}
		out32 (RGMII_SSR, reg);
	}
#endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */

#if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
	if (speed == 1000)
		reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
	else if (speed == 100)
		reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
	else if (speed == 10)
		reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
	else {
		printf("Error in RGMII Speed\n");
		return -1;
	}
	out32 (RGMII_SSR, reg);
#endif

	/* set the Mal configuration reg */
#if defined(CONFIG_440GX) || \
    defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
    defined(CONFIG_440SP) || defined(CONFIG_440SPE)
	mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
	       MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
#else
	mtdcr (malmcr, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
	/* Errata 1.12: MAL_1 -- Disable MAL bursting */
	if (get_pvr() == PVR_440GP_RB) {
		mtdcr (malmcr, mfdcr(malmcr) & ~MAL_CR_PLBB);
	}
#endif

	/* Free "old" buffers */
	if (hw_p->alloc_tx_buf)
		free (hw_p->alloc_tx_buf);
	if (hw_p->alloc_rx_buf)
		free (hw_p->alloc_rx_buf);

	/*
	 * Malloc MAL buffer descriptors, make sure they are
	 * aligned on cache line boundary size
	 * (401/403/IOP480 = 16, 405 = 32)
	 * and don't cross cache block boundaries.
	 */
	hw_p->alloc_tx_buf =
		(mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_TX_BUFF) +
				       ((2 * CFG_CACHELINE_SIZE) - 2));
	if (NULL == hw_p->alloc_tx_buf)
		return -1;
	if (((int) hw_p->alloc_tx_buf & CACHELINE_MASK) != 0) {
		hw_p->tx =
			(mal_desc_t *) ((int) hw_p->alloc_tx_buf +
					CFG_CACHELINE_SIZE -
					((int) hw_p->alloc_tx_buf & CACHELINE_MASK));
	} else {
		hw_p->tx = hw_p->alloc_tx_buf;
	}

	hw_p->alloc_rx_buf =
		(mal_desc_t *) malloc ((sizeof (mal_desc_t) * NUM_RX_BUFF) +
				       ((2 * CFG_CACHELINE_SIZE) - 2));
	if (NULL == hw_p->alloc_rx_buf) {
		free(hw_p->alloc_tx_buf);
		hw_p->alloc_tx_buf = NULL;
		return -1;
	}

	if (((int) hw_p->alloc_rx_buf & CACHELINE_MASK) != 0) {
		hw_p->rx =
			(mal_desc_t *) ((int) hw_p->alloc_rx_buf +
					CFG_CACHELINE_SIZE -
					((int) hw_p->alloc_rx_buf & CACHELINE_MASK));
	} else {
		hw_p->rx = hw_p->alloc_rx_buf;
	}

	for (i = 0; i < NUM_TX_BUFF; i++) {
		hw_p->tx[i].ctrl = 0;
		hw_p->tx[i].data_len = 0;
		if (hw_p->first_init == 0) {
			hw_p->txbuf_ptr = (char *) malloc (ENET_MAX_MTU_ALIGNED);
			if (NULL == hw_p->txbuf_ptr) {
				free(hw_p->alloc_rx_buf);
				free(hw_p->alloc_tx_buf);
				hw_p->alloc_rx_buf = NULL;
				hw_p->alloc_tx_buf = NULL;
				/* free the tx buffers allocated so far (index j, not i) */
				for (j = 0; j < i; j++) {
					free(hw_p->tx[j].data_ptr);
					hw_p->tx[j].data_ptr = NULL;
				}
				return -1;	/* don't continue with a NULL buffer */
			}
		}
		hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
		if ((NUM_TX_BUFF - 1) == i)
			hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
		hw_p->tx_run[i] = -1;
#if 0
		printf ("TX_BUFF %d @ 0x%08lx\n", i, (ulong) hw_p->tx[i].data_ptr);
#endif
	}

	for (i = 0; i < NUM_RX_BUFF; i++) {
		hw_p->rx[i].ctrl = 0;
		hw_p->rx[i].data_len = 0;
		/* rx[i].data_ptr = (char *) &rx_buff[i]; */
		hw_p->rx[i].data_ptr = (char *) NetRxPackets[i];
		if ((NUM_RX_BUFF - 1) == i)
			hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
		hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
		hw_p->rx_ready[i] = -1;
#if 0
		printf ("RX_BUFF %d @ 0x%08lx\n", i, (ulong) rx[i].data_ptr);
#endif
	}

	reg = 0x00000000;

	reg |= dev->enetaddr[0];	/* set high address */
	reg = reg << 8;
	reg |= dev->enetaddr[1];

	out32 (EMAC_IAH + hw_p->hw_addr, reg);

	reg = 0x00000000;
	reg |= dev->enetaddr[2];	/* set low address */
	reg = reg << 8;
	reg |= dev->enetaddr[3];
	reg = reg << 8;
	reg |= dev->enetaddr[4];
	reg = reg << 8;
	reg |= dev->enetaddr[5];

	out32 (EMAC_IAL + hw_p->hw_addr, reg);

	switch (devnum) {
	case 1:
		/* setup MAL tx & rx channel pointers */
#if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
		mtdcr (maltxctp2r, hw_p->tx);
#else
		mtdcr (maltxctp1r, hw_p->tx);
#endif
#if defined(CONFIG_440)
		mtdcr (maltxbattr, 0x0);
		mtdcr (malrxbattr, 0x0);
#endif
		mtdcr (malrxctp1r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs1, ENET_MAX_MTU_ALIGNED / 16);
		break;
#if defined (CONFIG_440GX)
	case 2:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (malrxbattr, 0x0);
		mtdcr (maltxctp2r, hw_p->tx);
		mtdcr (malrxctp2r, hw_p->rx);
		/* set RX buffer size */
		mtdcr (malrcbs2, ENET_MAX_MTU_ALIGNED / 16);
		break;
	case 3:
		/* setup MAL tx & rx channel pointers */
		mtdcr (maltxbattr, 0x0);
		mtdcr (maltxctp3r, hw_p->tx);
		mtdcr (malrxbattr, 0x0);
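/*
 * Illustrative sketch (not part of the driver): the cache-line alignment
 * technique used above for the MAL descriptor rings, shown in isolation.
 * The driver over-allocates and then rounds the pointer up to the next
 * cache-line boundary so descriptors never straddle a cache block.
 * CACHE_LINE_SIZE and the helper name below are assumptions made for this
 * example only; they are not symbols from 4xx_enet.c.
 */
#include <stdint.h>
#include <stdlib.h>

#define CACHE_LINE_SIZE	32			/* e.g. 405 core: 32-byte lines */
#define CACHE_LINE_MASK	(CACHE_LINE_SIZE - 1)

static void *alloc_cacheline_aligned(size_t size, void **raw)
{
	/* reserve enough slack that an aligned block of 'size' bytes always fits */
	uintptr_t p = (uintptr_t) malloc(size + CACHE_LINE_SIZE - 1);

	*raw = (void *) p;			/* keep the original pointer for free() */
	if (p == 0)
		return NULL;

	if (p & CACHE_LINE_MASK)		/* round up only when misaligned */
		p += CACHE_LINE_SIZE - (p & CACHE_LINE_MASK);

	return (void *) p;
}

/*
 * Usage sketch, mirroring the tx ring allocation above:
 *
 *	void *raw_tx;
 *	mal_desc_t *tx = alloc_cacheline_aligned(sizeof(mal_desc_t) * NUM_TX_BUFF,
 *						 &raw_tx);
 *	if (tx == NULL)
 *		return -1;
 *	...
 *	free(raw_tx);	(release via the unaligned pointer, as the driver does)
 */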