📄 if_xna.c
字号:
/*
 * NOTE(review): this chunk opens mid-way through the init routine's
 * command-completion switch; the head of the function is not visible here.
 */
			break;
		case CMD_INVAL:
			m_freem(m);
			/* FALLTHROUGH -- invalidated command is treated as a failed init */
		case CMD_NOP:
		default:
			/*
			 * Don't free m if adapter appears to be hung
			 */
			printf ("xna%d: port init failed\n", unit);
			goto done;
		}
	} else {
		printf ("xna%d: port init failed\n", unit);
		goto done;
	}

	/*
	 * Issue a "USTART" command for the Ethernet user. (Will initialize
	 * all multicast addresses here.)
	 */
	if (m = xnamkuser(sc, XNA_ETHERU, CMD_USTART)) {
		xcmd = mtod(m, struct xnacmd_buf *);
		xnacmd(sc, m);
		/* Drop the softc lock and IPL while we wait for completion */
		smp_unlock(&sc->lk_xna_softc);
		splx(t);
		xcmd = mtod(m, struct xnacmd_buf *);
		XNATIMEOUT(xcmd, CMD_USTART);		/* Wait */
		t = splimp();
		smp_lock(&sc->lk_xna_softc, LK_RETRY);
		switch (xcmd->opcode) {
		case CMD_COMPLETE:
			m_freem(m);
			break;
		case CMD_INVAL:
			m_freem(m);
			/* FALLTHROUGH */
		case CMD_NOP:
		default:
			/*
			 * Don't free m if adapter appears to be hung
			 */
			printf ("xna%d: port init failed\n", unit);
			goto done;
		}
	} else {
		printf ("xna%d: port init failed\n", unit);
		goto done;
	}

	/*
	 * Clear reset, mark interface up and running; start output
	 * on device.
	 */
	sc->flags &= ~XNA_RFLAG;
	sc->is_if.if_flags |= IFF_UP|IFF_RUNNING;
	sc->is_if.if_flags &= ~IFF_OACTIVE;
	sc->ztime = time.tv_sec;
	if (sc->is_if.if_snd.ifq_head)
		xnastart(unit);			/* queue output packets */
done:
	/*
	 * Relinquish softc lock, drop IPL.
	 */
	smp_unlock(&sc->lk_xna_softc);
	splx(t);
	splx(s);
	return;
}

/*
 * Perform a node halt/reset (SOFT reset) due to state change interrupt.
 * Issue a timeout() on xnainit, but give up if we're being reset
 * constantly. xnainit() will set IFF_RUNNING if the reset succeeds.
 */
xnareset(unit)
	int unit;
{
	register struct xna_softc *sc = &xna_softc[unit];
	register struct xnadevice *addr = &sc->xregs;
	register struct xnarecv_ring *rp;
	register struct xnacmd_ring *tp;
	struct xnapdb *xpdb = sc->xpdb;
	struct ifnet *ifp = &sc->is_if;
	struct mbuf *mp;
	int i, delay;

	/*
	 * Start receive and command queues from scratch. Clean off the
	 * receive and transmit rings, then initialize the first nNXNAACTV
	 * receive descriptors at the head of the receive ring.
	 */
	for (i = 0, rp = &sc->rring[0]; i < nNXNARCV; i++, rp++) {
		mp = rp->mbuf_recv;
		if (mp) {
			m_freem(mp);
			rp->mbuf_recv = 0;
		}
		rp->status = ST_ROWN;
	}
	for (i = 0, tp = &sc->tring[0]; i < nNXNACMD; i++, tp++) {
		if (mp = tp->mbuf_tofree) {
			if (tp->status & ST_CMD) {
				/*
				 * Counter-read commands borrowed the mbuf data
				 * area; restore m_off/m_len before freeing.
				 */
				switch ((mtod(mp, struct xnacmd_buf *))->opcode) {
				case CMD_RDCNTR:
				case CMD_RCCNTR:
					mp->m_off = MMINOFF;
					mp->m_len = MLEN;
					break;
				default:
					break;
				}
			}
			m_freem(mp);
			tp->mbuf_tofree = 0;
		}
		tp->status = ST_TOWN;
	}
	sc->nactv = nNXNAACTV;
	for (i = 0, rp = &sc->rring[0]; (i < sc->nactv); i++, rp++) {
		XNAMCLGET(mp);
		if (mp)
			xnainitdesc(rp, mp);
		else
			break;
	}
	/* i may be short of nactv if clusters ran out; nrecv records reality */
	sc->nrecv = i;
	sc->tindex = sc->nxmit = 0;
	sc->rindex = sc->nrecv;
	sc->rlast = sc->tlast = sc->nproc = -1;

	/*
	 * Re-init the device; wait for XPST_INIT state to appear
	 */
	addr->xnapd1 = svtophy(xpdb);
	addr->xnapd2 = 0;
	addr->xnapci = XPCI_INIT;
	/*
	 * NOTE(review): the busy-wait compares xnapst WITHOUT XPST_MASK while
	 * the failure check below masks it first -- confirm which is intended.
	 */
	for (delay = 100; (delay && (addr->xnapst != XPST_INIT)); delay--)
		DELAY(10000);
	if ((addr->xnapst & XPST_MASK) != XPST_INIT) {
		printf ("xna%d: port reset failed: xpst=0x%x, xpd1=0x%x, xpd2=0x%x, xpud=0x%x\n",
		    unit, addr->xnapst, addr->xnapd1,
		    addr->xnapd2, addr->xnapud);
		return;
	}

	/*
	 * Get the initial value of the "potential system buffer unavailable"
	 * counter. Schedule an "xnainit" at SPLNET if device was running.
	 */
	sc->xna_sbuacnt = xpdb->p_sbua;
	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		timeout(xnainit, unit, 1);
	}
}

/*
 * XNA start routine. Strings output packets and commands onto the command
 * ring. Start routine is called at splimp() WITH the softc locked.
 */
xnastart(unit)
	int unit;
{
	register struct mbuf *m, *mprev;
	register struct xnacmd_ring *tp;
	register int index, i;
	struct xna_softc *sc = &xna_softc[unit];
	struct xnadevice *addr = &sc->xregs;
	struct mbuf *m0;

	/*
	 * Process the transmit and command queues, giving priority to
	 * commands. Done when we either run out of requests, or command
	 * ring entries to string them on.
*/
	for (index = sc->tindex, tp = &sc->tring[index]; sc->nxmit < nNXNACMD;
	    index = ++index % nNXNACMD, tp = &sc->tring[index]) {
		/*
		 * NOTE(review): "index = ++index % nNXNACMD" relies on the
		 * historic left-to-right evaluation of this era's compilers;
		 * it is undefined behavior under modern C rules.
		 */
next_m:
		/* Commands drain first, then normal output; quit when idle */
		if (sc->if_cmd.ifq_head) {
			IF_DEQUEUE(&sc->if_cmd, m0);
			tp->status |= ST_CMD;
		} else {
			if (sc->is_if.if_snd.ifq_head) {
				IF_DEQUEUE(&sc->is_if.if_snd, m0);
				tp->status &= ~ST_CMD;
			} else {
				sc->tindex = index;
				return;
			}
		}
		/*
		 * String the mbuf chain onto the command ring entry's
		 * buffer segment addresses.
		 */
		m = m0;
		mprev = 0;
#ifdef vax
		/*
		 * Architectures which allow VAX-virtual addressing can
		 * string up individual buffer segments.
		 */
		for(i = 0; m; i++) {
			/*
			 * This code is somewhat obscure. If we have only 2
			 * buffer segment addresses left, but there are more
			 * than 2 mbuf's left in the chain, we need to shove
			 * all but the LAST mbuf into a new cluster-sized
			 * mbuf and place it on the next-to-the-last buffer
			 * segment address. (This leaves the LAST buffer
			 * segment address open for the LAST mbuf). This
			 * technique is ESSENTIAL to prevent problems with
			 * trailers, in which the trailing header is kept in
			 * the last mbuf.
			 */
			if ((i == (XNA_XMIT_NBUFS - 2)) &&
			    (m->m_next && m->m_next->m_next)) {
				register int off = 0;
				register struct mbuf *n;

				XNAMCLGET(n);
				if (!n) {
					/* No cluster: drop packet, try next request */
					m_freem(m0);
					goto next_m;
				}
				while (m->m_next) {
					bcopy(mtod(m, caddr_t),
					    mtod(n, caddr_t)+off,
					    (unsigned)m->m_len);
					off += m->m_len;
					m = m_free(m);
				}
				n->m_len = off;
				n->m_next = m;
				if (mprev)
					mprev->m_next = n;
				m = n;
			}
			tp->bseg[i].xaddr_lo = mtod(m, u_long);
			tp->bseg[i].xaddr_hi = 0;
			tp->bseg[i].xlen = m->m_len;
			mprev = m;
			m = m->m_next;
		}
#endif vax
#ifdef mips
		/*
		 * Architectures which place the adapter in physical
		 * addressing mode must data copy mbuf chains into a
		 * cluster. (Which is known to be physically contig.)
		 */
		i = 1;
		{
			register int off = 0;
			register struct mbuf *n;

			if (!(tp->status & ST_CMD)) {
				XNAMCLGET(n);
				if (!n) {
					/* No cluster: drop packet, try next request */
					m_freem(m0);
					goto next_m;
				}
				while (m) {
					bcopy(mtod(m, caddr_t),
					    mtod(n, caddr_t)+off,
					    (unsigned)m->m_len);
					off += m->m_len;
					mprev = m;
					m = m->m_next;
				}
				n->m_len = off;
				n->m_next = 0;
				/*
				 * Chain the copy onto the original so one
				 * m_freem of mbuf_tofree appears to release
				 * both -- TODO confirm against completion path.
				 */
				mprev->m_next = n;
			} else
				n = m;
			tp->bseg[0].xaddr_lo = svtophy(mtod(n, u_long));
			tp->bseg[0].xaddr_hi = 0;
			tp->bseg[0].xlen = n->m_len;
		}
#endif mips
		tp->mbuf_tofree = m0;
		tp->usr_index = XNA_ETHERU;
		tp->nbufs = i;
		tp->status &= ~ST_TOWN;
		sc->nxmit++;
		sc->is_if.if_flags |= IFF_OACTIVE;
		/*
		 * Advise the port that a new transmit packet is pending
		 */
		addr->xnapcp = XPCP_POLL;
	}
	sc->tindex = index;
}

/*
 * XNA command start routine. Wraps a command buffer in an mbuf (for queuing
 * purposes ONLY), then enqueues the request to xnastart(). Command start
 * routine is called at splimp() WITH the softc locked.
 */
xnacmd(sc, m)
	register struct xna_softc *sc;
	register struct mbuf *m;
{
	register struct xnacmd_buf *xcmd = mtod(m, struct xnacmd_buf *);

	if (IF_QFULL(&sc->if_cmd)) {
		/*
		 * Couldn't enqueue command request;
		 * invalidate opcode to signal error.
		 */
		xcmd->opcode = CMD_INVAL;
		return;
	}
	/*
	 * Place command request on command queue
	 */
	IF_ENQUEUE(&sc->if_cmd, m);
	xnastart(sc->is_if.if_unit);
}

/*
 * XNA device interrupt handler
 */
xnaintr(unit)
	int unit;
{
	register struct xna_softc *sc = &xna_softc[unit];
	register struct xnadevice *addr = &sc->xregs;
	int s = splimp();

	/*
	 * Lock softc, since we will be updating the per-unit ring pointers
	 * and active ring entry counts frequently.
	 */
	smp_lock(&sc->lk_xna_softc, LK_RETRY);
	/*
	 * See if we got here due to a port state change interrupt. If so,
	 * need to log error, reset interface, and re-init.
*/
	if ((addr->xnapst & XPST_MASK) != XPST_INIT) {
		struct el_rec *elrp;

		/* Log the fatal port error; record format depends on adapter type */
		if((elrp = ealloc(sizeof(struct el_xna), EL_PRILOW))) {
			struct el_xna *elbod = &elrp->el_body.elxna;

			switch (addr->xna_dtype & XMIDTYPE_TYPE) {
			case XNADEXNA:
				bcopy (sc->xpdb->port_err,
				    &elbod->xnatype.xnaxmi.xnaxmi_fatal,
				    sizeof(struct xna_xmi_fatal));
				LSUBID(elrp,ELCT_DCNTL,ELXMI_XNA,
				    0,sc->xpdb->ivec.nid_mask,unit,
				    XNA_FATAL);
				break;
			case XNADEBNI:
				bcopy (sc->xpdb->port_err,
				    &elbod->xnatype.xnabi.xnabi_fatal,
				    sizeof(struct xna_bi_fatal));
				LSUBID(elrp,ELCT_DCNTL,ELBI_XNA,
				    0,sc->xpdb->ivec.nid_mask,unit,
				    XNA_FATAL);
				break;
			}
			EVALID(elrp);
		}
		if (!(sc->flags & XNA_RFLAG)) {
			/*
			 * NOTE(review): "&=" cannot SET XNA_RFLAG here (the
			 * test above just proved the flag is clear, so this
			 * clears sc->flags entirely); "|=" was almost
			 * certainly intended -- confirm before fixing.
			 */
			sc->flags &= XNA_RFLAG;
			xnareset(unit);
		} else {
			printf ("xna%d: port reset failed: xpst=0x%x, xpd1=0x%x, xpd2=0x%x, xpud=0x%x\n",
			    unit, addr->xnapst, addr->xnapd1,
			    addr->xnapd2, addr->xnapud);
		}
		smp_unlock(&sc->lk_xna_softc);
		splx(s);
		return;
	}

	/*
	 * Process receive ring
	 */
	{
		register int index;
		register struct xnarecv_ring *rp;
		int orindex = sc->rindex;
		register struct xnarecv_ring *nrp = &sc->rring[orindex];
		struct mbuf *mp;

		/*
		 * Process all incoming packets on the receive ring. Stop if
		 * we get to the current receive index to avoid locking out
		 * the system, but give back one descriptor for each one we
		 * process to keep the device busy.
		 */
		for (index = ((sc->rlast+1)%nNXNARCV), rp = &sc->rring[index];
		    ((index != orindex) && (rp->status & ST_ROWN));
		    index = ++index % nNXNARCV, rp = &sc->rring[index]) {
			/*
			 * Init the next descriptor(s) in line right away to make
			 * sure we always have buffers to string up. If we DON'T
			 * have mbuf's, we drop the current receive right here by
			 * re-posting its mbuf. (This is to save our necks during
			 * burst-mode reception)
			 */
			sc->nrecv--;
			while (sc->nrecv < sc->nactv) {
				XNAMCLGET(mp);
				if (mp) {
					xnainitdesc(nrp, mp);
					sc->nrecv++;
					sc->rindex = ++sc->rindex % nNXNARCV;
					nrp = &sc->rring[sc->rindex];
				} else {
					/*
					 * The following line is a patch to avoid
					 * bumping up m_off a second time!
					 */
					rp->mbuf_recv->m_off -= 2;
					xnainitdesc(nrp, rp->mbuf_recv);
					sc->nrecv++;
					sc->rindex = ++sc->rindex % nNXNARCV;
					nrp = &sc->rring[sc->rindex];
					rp->mbuf_recv = 0;
					goto drop;
				}
			}
			/*
			 * Process current receive
			 */
			if (rp->status & ST_RERR) {
				sc->is_if.if_ierrors++;
				m_freem(rp->mbuf_recv);
				rp->mbuf_recv = 0;
			} else {
				/*
				 * Hand recv to upper levels
				 */
				sc->is_if.if_ipackets++;
				xnaread (sc, rp->mbuf_recv, rp->len,
				    (struct mbuf *)0);
				rp->mbuf_recv = 0;
			}
drop:
			sc->rlast = (++sc->rlast)%nNXNARCV;
			sc->nproc++;
		}
	}

	/*
	 * Process transmit/command ring
	 */
	{
		register struct xnacmd_ring *tp;
		register struct mbuf *mp;
		register int index;

		/*
		 * Process all outstanding transmits and commands completed by
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -