📄 if_wx.c
字号:
tmp = READ_CSR(sc, WXREG_EXCT); tmp |= WXPHY_RESET4; WRITE_CSR(sc, WXREG_EXCT, tmp); DELAY(20*1000); printf(": address %s\n", ether_sprintf(sc->wx_enaddr)); if (wx_attach_phy(sc)) { goto fail; } } else { ifmedia_init(&sc->wx_media, IFM_IMASK, wx_ifmedia_upd, wx_ifmedia_sts); ifmedia_add(&sc->wx_media, IFM_ETHER|IFM_1000_SX, 0, NULL); ifmedia_add(&sc->wx_media, IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); ifmedia_set(&sc->wx_media, IFM_ETHER|IFM_1000_SX|IFM_FDX); sc->wx_media.ifm_media = sc->wx_media.ifm_cur->ifm_media; } /* * Sixth, establish a default device control register word. */ ll += 1; if (sc->wx_cfg1 & WX_EEPROM_CTLR1_FD) sc->wx_dcr |= WXDCR_FD; if (sc->wx_cfg1 & WX_EEPROM_CTLR1_ILOS) sc->wx_dcr |= WXDCR_ILOS; tmp = (sc->wx_cfg1 >> WX_EEPROM_CTLR1_SWDPIO_SHIFT) & WXDCR_SWDPIO_MASK; sc->wx_dcr |= (tmp << WXDCR_SWDPIO_SHIFT); if (sc->wx_no_ilos) sc->wx_dcr &= ~WXDCR_ILOS; if (sc->wx_ilos) sc->wx_dcr |= WXDCR_ILOS; if (sc->wx_no_flow == 0) sc->wx_dcr |= WXDCR_RFCE | WXDCR_TFCE; /* * Seventh, allocate various sw structures... */ len = sizeof (rxpkt_t) * WX_MAX_RDESC; sc->rbase = (rxpkt_t *) WXMALLOC(len); if (sc->rbase == NULL) { goto fail; } bzero(sc->rbase, len); ll += 1; len = sizeof (txpkt_t) * WX_MAX_TDESC; sc->tbase = (txpkt_t *) WXMALLOC(len); if (sc->tbase == NULL) { goto fail; } bzero(sc->tbase, len); ll += 1; /* * Eighth, allocate and dma map (platform dependent) descriptor rings. * They have to be aligned on a 4KB boundary. */ if (wx_dring_setup(sc) == 0) { return (0); }fail: printf("%s: failed to do common attach (%d)\n", sc->wx_name, ll); wx_dring_teardown(sc); if (sc->rbase) { WXFREE(sc->rbase); sc->rbase = NULL; } if (sc->tbase) { WXFREE(sc->tbase); sc->tbase = NULL; } return (ENOMEM);}/* * EEPROM functions. 
*/

/*
 * Raise the EEPROM serial clock (SK) while keeping the other control
 * bits in regval unchanged, then wait for the part to latch.
 */
static INLINE void
wx_eeprom_raise_clk(wx_softc_t *sc, u_int32_t regval)
{
	WRITE_CSR(sc, WXREG_EECDR, regval | WXEECD_SK);
	DELAY(50);
}

/*
 * Lower the EEPROM serial clock (SK), completing one clock cycle.
 */
static INLINE void
wx_eeprom_lower_clk(wx_softc_t *sc, u_int32_t regval)
{
	WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_SK);
	DELAY(50);
}

/*
 * Shift 'count' bits of 'data' out to the EEPROM, MSB first.
 * Each bit is presented on DI and clocked with one raise/lower cycle.
 * DI is driven low again when the transfer is done.
 */
static INLINE void
wx_eeprom_sobits(wx_softc_t *sc, u_int16_t data, u_int16_t count)
{
	u_int32_t regval, mask;

	mask = 1 << (count - 1);
	regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
	do {
		if (data & mask)
			regval |= WXEECD_DI;
		else
			regval &= ~WXEECD_DI;
		WRITE_CSR(sc, WXREG_EECDR, regval);
		DELAY(50);
		wx_eeprom_raise_clk(sc, regval);
		wx_eeprom_lower_clk(sc, regval);
		mask >>= 1;
	} while (mask != 0);
	WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_DI);
}

/*
 * Shift 16 bits in from the EEPROM, MSB first: for each bit, raise the
 * clock, sample DO, then lower the clock. Returns the raw 16-bit word.
 */
static INLINE u_int16_t
wx_eeprom_sibits(wx_softc_t *sc)
{
	unsigned int regval, i;
	u_int16_t data;

	data = 0;
	regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
	for (i = 0; i != 16; i++) {
		data <<= 1;
		wx_eeprom_raise_clk(sc, regval);
		regval = READ_CSR(sc, WXREG_EECDR) & ~WXEECD_DI;
		if (regval & WXEECD_DO) {
			data |= 1;
		}
		wx_eeprom_lower_clk(sc, regval);
	}
	return (data);
}

/*
 * Terminate an EEPROM transaction: drop chip select (CS) and DI,
 * then issue one final clock cycle.
 */
static INLINE void
wx_eeprom_cleanup(wx_softc_t *sc)
{
	u_int32_t regval;

	regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_CS);
	WRITE_CSR(sc, WXREG_EECDR, regval);
	DELAY(50);
	wx_eeprom_raise_clk(sc, regval);
	wx_eeprom_lower_clk(sc, regval);
}

/*
 * Read one 16-bit word from the EEPROM at the given word offset:
 * select the part, clock out the 3-bit READ opcode and 6-bit address,
 * clock in the data, and deselect. The result is converted from
 * little-endian EEPROM byte order to host order.
 */
static u_int16_t INLINE 
wx_read_eeprom_word(wx_softc_t *sc, int offset)
{
	u_int16_t data;
	WRITE_CSR(sc, WXREG_EECDR, WXEECD_CS);
	wx_eeprom_sobits(sc, EEPROM_READ_OPCODE, 3);
	wx_eeprom_sobits(sc, offset, 6);
	data = wx_eeprom_sibits(sc);
	wx_eeprom_cleanup(sc);
	return (letoh16(data));
}

/*
 * Read 'words' consecutive EEPROM words starting at 'offset' into the
 * caller's buffer, then (as a side effect) refresh the cached copy of
 * controller-config word 1 in sc->wx_cfg1.
 */
static void
wx_read_eeprom(wx_softc_t *sc, u_int16_t *data, int offset, int words)
{
	int i;
	for (i = 0; i < words; i++) {
		*data++ = wx_read_eeprom_word(sc, offset++);
	}
	sc->wx_cfg1 = wx_read_eeprom_word(sc, WX_EEPROM_CTLR1_OFF);
}

/*
 * Start packet transmission on the interface.
*/
static void
wx_start(struct ifnet *ifp)
{
	wx_softc_t *sc = SOFTC_IFP(ifp);
	/* widx stays == WX_MAX_TDESC until at least one packet is queued */
	u_int16_t widx = WX_MAX_TDESC, cidx, nactv;

	WX_LOCK(sc);
	DPRINTF(sc, ("%s: wx_start\n", sc->wx_name));
	nactv = sc->tactive;
	while (nactv < WX_MAX_TDESC - 1) {
		int ndesc, plen;
		int gctried = 0;
		struct mbuf *m, *mb_head;

		IF_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL) {
			break;
		}
		sc->wx_xmitwanted++;
		/*
		 * If we have a packet less than ethermin, pad it out.
		 * A single mbuf is simply extended in place (the mbuf is
		 * assumed large enough — see the "hole" note below); a
		 * chain is copied into one new padded mbuf.
		 */
		if (mb_head->m_pkthdr.len < WX_MIN_RPKT_SIZE) {
			if (mb_head->m_next == NULL) {
				mb_head->m_len = WX_MIN_RPKT_SIZE;
			} else {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					m_freem(mb_head);
					break;
				}
				m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
				    mtod(m, caddr_t));
				m->m_pkthdr.len = m->m_len = WX_MIN_RPKT_SIZE;
				bzero(mtod(m, char *) + mb_head->m_pkthdr.len,
				    WX_MIN_RPKT_SIZE - mb_head->m_pkthdr.len);
				sc->wx_xmitpullup++;
				m_freem(mb_head);
				mb_head = m;
			}
		}
	again:
		cidx = sc->tnxtfree;
		nactv = sc->tactive;
		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of that mbuf. If we have a length less than our
		 * minimum transmit size, we bail (to do a pullup). If we run
		 * out of descriptors, we also bail and try and do a pullup.
		 */
		for (plen = ndesc = 0, m = mb_head; m != NULL; m = m->m_next) {
			vm_offset_t vptr;
			wxtd_t *td;

			/*
			 * If this mbuf has no data, skip it.
			 */
			if (m->m_len == 0) {
				continue;
			}

			/*
			 * This appears to be a bogus check for the PRO1000T.
			 * I think they meant that the minimum packet size
			 * is in fact WX_MIN_XPKT_SIZE (all data loaded).
			 */
#if 0
			/*
			 * If this mbuf is too small for the chip's minimum,
			 * break out to cluster it.
			 */
			if (m->m_len < WX_MIN_XPKT_SIZE) {
				sc->wx_xmitrunt++;
				break;
			}
#endif
			/*
			 * Do we have a descriptor available for this mbuf?
			 * If not, try one round of garbage collection before
			 * giving up and falling back to clustering below.
			 */
			if (++nactv == WX_MAX_TDESC) {
				if (gctried++ == 0) {
					sc->wx_xmitgc++;
					wx_gc(sc);
					goto again;
				}
				break;
			}
			sc->tbase[cidx].dptr = m;
			td = &sc->tdescriptors[cidx];
#if defined(__mips__)
			/* write descriptors through an uncached alias */
			td = (wxtd_t *)PHYS_TO_UNCACHED(vtophys(td));
#endif
			td->length = htole16(m->m_len);
			plen += m->m_len;
			vptr = mtod(m, vm_offset_t);
			td->address.highpart = htole32(0);
			td->address.lowpart = htole32(vtophys(vptr));
			CACHESYNC(vptr, m->m_len, SYNC_W);
			td->cso = 0;
			td->status = 0;
			td->special = htole16(0);
			td->cmd = 0;
			td->css = 0;
#if !defined(__mips__)
			CACHESYNC(td, sizeof(*td), SYNC_W);
#endif
			DPRINTF(sc, ("%s: XMIT[%d](%p) %p vptr %lx (length %d "
			    "DMA addr %x) idx %d\n", sc->wx_name, ndesc, td,
			    m, (long) vptr, letoh16(td->length),
			    letoh32(td->address.lowpart), cidx));
			ndesc++;
			cidx = T_NXT_IDX(cidx);
		}

		/*
		 * If we get here and m is NULL, we can send
		 * the packet chain described by mb_head.
		 */
		if (m == NULL) {
			/*
			 * Mark the last descriptor with EOP and tell the
			 * chip to insert a final checksum.
			 */
			wxtd_t *td = &sc->tdescriptors[T_PREV_IDX(cidx)];
#if defined(__mips__)
			td = (wxtd_t *)PHYS_TO_UNCACHED(vtophys(td));
#endif
			td->cmd = TXCMD_EOP|TXCMD_IFCS;
			/*
			 * Set up a delayed interrupt when this packet
			 * is sent and the descriptor written back.
			 * Additional packets completing will cause
			 * interrupt to be delayed further. Therefore,
			 * after the *last* packet is sent, after the delay
			 * period in TIDV, an interrupt will be generated
			 * which will cause us to garbage collect.
			 */
			td->cmd |= TXCMD_IDE|TXCMD_RPS;
#if !defined(__mips__)
			CACHESYNC(td, sizeof(*td), SYNC_W);
#endif
			/*
			 * Don't xmit odd length packets.
			 * We're okay with bumping things
			 * up as long as our mbuf allocation
			 * is always larger than our MTU
			 * by a comfortable amount.
			 *
			 * Yes, it's a hole to run past the end
			 * of a packet.
			 */
			if (plen & 0x1) {
				sc->wx_oddpkt++;
				td->length = htole16(letoh16(td->length) + 1);
			}
			/* link this packet onto the busy list for wx_gc() */
			sc->tbase[sc->tnxtfree].sidx = sc->tnxtfree;
			sc->tbase[sc->tnxtfree].eidx = cidx;
			sc->tbase[sc->tnxtfree].next = NULL;
			if (sc->tbsyf) {
				sc->tbsyl->next = &sc->tbase[sc->tnxtfree];
			} else {
				sc->tbsyf = &sc->tbase[sc->tnxtfree];
			}
			sc->tbsyl = &sc->tbase[sc->tnxtfree];
			sc->tnxtfree = cidx;
			sc->tactive = nactv;
			ifp->if_timer = 10;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(WX_BPFTAP_ARG(ifp), mb_head);
#endif
			/* defer xmit until we've got them all */
			widx = cidx;
			continue;
		}
		/*
		 * Otherwise, we couldn't send this packet for some reason.
		 *
		 * If we don't have a descriptor available, and this is a
		 * single mbuf packet, freeze output so that later we
		 * can restart when we have more room. Otherwise, we'll
		 * try and cluster the request. We've already tried to
		 * garbage collect completed descriptors.
		 */
		if (nactv == WX_MAX_TDESC && mb_head->m_next == NULL) {
			sc->wx_xmitputback++;
			ifp->if_flags |= IFF_OACTIVE;
#ifdef ALTQ
			/*
			 * XXX when altq is enabled, we can't put the
			 * packet back to the queue.
			 * just give up this packet for now.
			 */
			if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
				m_freem(mb_head);
				break;
			}
#endif
			IF_PREPEND(&ifp->if_snd, mb_head);
			break;
		}
		/*
		 * Otherwise, it's either a fragment length somewhere in the
		 * chain that isn't at least WX_MIN_XPKT_SIZE in length or
		 * the number of fragments exceeds the number of descriptors
		 * available.
		 *
		 * We could try a variety of strategies here- if this is
		 * a length problem for single mbuf packet or a length problem
		 * for the last mbuf in a chain (we could just try and adjust
		 * it), but it's just simpler to try and cluster it.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(mb_head);
			break;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m_freem(mb_head);
			break;
		}
		m_copydata(mb_head, 0, mb_head->m_pkthdr.len, mtod(m, caddr_t));
		m->m_pkthdr.len = m->m_len = mb_head->m_pkthdr.len;
		m_freem(mb_head);
		mb_head = m;
		sc->wx_xmitcluster++;
		goto again;
	}

	/* kick the chip's tail pointer once for everything we queued */
	if (widx < WX_MAX_TDESC) {
		if (IS_WISEMAN(sc)) {
			WRITE_CSR(sc, WXREG_TDT, widx);
		} else {
			WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, widx);
		}
	}

	/* ring full: garbage collect; if still full, block output */
	if (sc->tactive == WX_MAX_TDESC - 1) {
		sc->wx_xmitgc++;
		wx_gc(sc);
		if (sc->tactive >= WX_MAX_TDESC - 1) {
			sc->wx_xmitblocked++;
			ifp->if_flags |= IFF_OACTIVE;
		}
	}

	/* used SW LED to indicate transmission active */
	if (sc->tactive > 0 && sc->wx_mii) {
		WRITE_CSR(sc, WXREG_DCR,
		    READ_CSR(sc, WXREG_DCR) | (WXDCR_SWDPIO0|WXDCR_SWDPIN0));
	}
	WX_UNLOCK(sc);
}

/*
 * Process interface interrupts.
 */
static int
wx_intr(void *arg)
{
	wx_softc_t *sc = arg;
	int claimed = 0;

	WX_ILOCK(sc);
	/*
	 * Read interrupt cause register. Reading it clears bits.
	 */
	sc->wx_icr = READ_CSR(sc, WXREG_ICR);
	if (sc->wx_icr) {
		claimed++;
		WX_DISABLE_INT(sc);
		sc->wx_intr++;
		if (sc->wx_icr & (WXISR_LSC|WXISR_RXSEQ|WXISR_GPI_EN1)) {
			sc->wx_linkintr++;
			wx_handle_link_intr(sc);
		}
		/* always poll the receive ring, even without an RX cause bit */
		wx_handle_rxint(sc);
		/*
		 * NOTE(review): wx_txqe is bumped here for TXDW (descriptor
		 * written back) — the TXQE case below is compiled out.
		 */
		if (sc->wx_icr & WXISR_TXDW) {
			sc->wx_txqe++;
			wx_gc(sc);
		}
#if 0
		if (sc->wx_icr & WXISR_TXQE) {
			sc->wx_txqe++;
			wx_gc(sc);
		}
#endif
		if (sc->wx_if.if_snd.ifq_head != NULL) {
			wx_start(&sc->wx_if);
		}
		WX_ENABLE_INT(sc);
	}
	WX_IUNLK(sc);
	return (claimed);
}

/*
 * Handle a link-status interrupt. With an MII PHY attached, poll the
 * PHY and update the cached link state. Otherwise (fiber/internal
 * SERDES), examine the transmit/receive config words and device status
 * to restore autonegotiation and track link up/down, mirroring the
 * result into sc->wx_dcr (including the SW LED pins).
 */
static void
wx_handle_link_intr(wx_softc_t *sc)
{
	u_int32_t txcw, rxcw, dcr, dsr;

	sc->wx_linkintr++;
	dcr = READ_CSR(sc, WXREG_DCR);
	DPRINTF(sc, ("%s: handle_link_intr: icr=%#x dcr=%#x\n",
	    sc->wx_name, sc->wx_icr, dcr));
	if (sc->wx_mii) {
		mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE) {
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
				IPRINTF(sc, (ldn, sc->wx_name));
				sc->linkup = 0;
			} else {
				IPRINTF(sc, (lup, sc->wx_name));
				sc->linkup = 1;
			}
			WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
		} else if (sc->wx_icr & WXISR_RXSEQ) {
			DPRINTF(sc, (sqe, sc->wx_name));
		}
		return;
	}
	txcw = READ_CSR(sc, WXREG_XMIT_CFGW);
	rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
	/*
	 * NOTE(review): dsr is read here but never used — the LSC path
	 * below re-reads WXREG_DSR directly.
	 */
	dsr = READ_CSR(sc, WXREG_DSR);
	/*
	 * If we have LOS or are now receiving Ordered Sets and are not
	 * doing auto-negotiation, restore autonegotiation.
	 */
	if (((dcr & WXDCR_SWDPIN1) || (rxcw & WXRXCW_C)) &&
	    ((txcw & WXTXCW_ANE) == 0)) {
		DPRINTF(sc, (ane, sc->wx_name));
		WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
		sc->wx_dcr &= ~WXDCR_SLU;
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
		sc->ane_failed = 0;
	}
	if (sc->wx_icr & WXISR_LSC) {
		if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
			IPRINTF(sc, (lup, sc->wx_name));
			sc->linkup = 1;
			sc->wx_dcr |= (WXDCR_SWDPIO0|WXDCR_SWDPIN0);
		} else {
			IPRINTF(sc, (ldn, sc->wx_name));
			sc->linkup = 0;
			sc->wx_dcr &= ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0);
		}
		WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
	} else {
		DPRINTF(sc, (sqe, sc->wx_name));
	}
}

static void
wx_check_link(wx_softc_t *sc)
{
	u_int32_t rxcw, dcr, dsr;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -