📄 hnddma.c
            goto fail;
        bzero((char *)di->txp_dmah, size);
    } else
        di->txp_dmah = NULL;

    /* allocate rx packet pointer vector and DMA mapping vectors */
    if (nrxd) {
        size = nrxd * sizeof(osldma_t **);
        if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
            goto fail;
        bzero((char *)di->rxp_dmah, size);
    } else
        di->rxp_dmah = NULL;

    /* initialize opsvec of function pointers */
    di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;

    return ((hnddma_t *)di);

fail:
    _dma_detach(di);
    return (NULL);
}

/* init the tx or rx descriptor */
static INLINE void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx,
             uint32 *flags, uint32 bufcount)
{
    uint32 offset = di->dataoffsetlow;

    /* dma32 uses a 32-bit control word to fit both the flags and the buffer count */
    *flags = *flags | (bufcount & CTRL_BC_MASK);

    if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
        W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + offset));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    } else {
        /* address extension */
        uint32 ae;
        ASSERT(di->addrext);
        ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
        *flags |= (ae << CTRL_AE_SHIFT);
        W_SM(&ddring[outidx].addr, BUS_SWAP32((pa & ~PCI32ADDR_HIGH) + offset));
        W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
    }
}

/* init the tx or rx descriptor */
static INLINE void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx,
             uint32 *flags, uint32 bufcount)
{
    uint32 bufaddr_low = pa + di->dataoffsetlow;
    uint32 bufaddr_high = 0 + di->dataoffsethigh;
    uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

    W_SM(&ddring[outidx].addrlow, BUS_SWAP32(bufaddr_low));
    W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(bufaddr_high));
    W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
    W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
}

static bool
_dma_alloc(dma_info_t *di, uint direction)
{
    if (DMA64_ENAB(di)) {
        return dma64_alloc(di, direction);
    } else {
        return dma32_alloc(di, direction);
    }
}
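/*
 * Illustration (not part of the original file): a standalone sketch of
 * the dma32 address-extension split performed by dma32_dd_upd() above.
 * The EX_* constants are placeholders assumed for the sketch; the real
 * PCI32ADDR_HIGH / CTRL_AE values come from the chip headers.
 */
#include <stdint.h>

#define EX_PCI32ADDR_HIGH        0xc0000000u  /* assumed: top two address bits */
#define EX_PCI32ADDR_HIGH_SHIFT  30           /* assumed shift of those bits */
#define EX_CTRL_AE_SHIFT         16           /* assumed AE field position */

/* Split a bus address into descriptor address bits and AE control bits:
 * the low address bits go into the descriptor address field, while the
 * top bits ride along in the AE field of the control word.
 */
static void
ex_split_addr(uint32_t pa, uint32_t *dd_addr, uint32_t *ae_flags)
{
    uint32_t ae = (pa & EX_PCI32ADDR_HIGH) >> EX_PCI32ADDR_HIGH_SHIFT;

    *dd_addr = pa & ~EX_PCI32ADDR_HIGH;
    *ae_flags = ae << EX_CTRL_AE_SHIFT;
}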
/* !! may be called with core in reset */
static void
_dma_detach(dma_info_t *di)
{
    if (di == NULL)
        return;

    DMA_TRACE(("%s: dma_detach\n", di->name));

    /* shouldn't be here if descriptors are unreclaimed */
    ASSERT(di->txin == di->txout);
    ASSERT(di->rxin == di->rxout);

    /* free dma descriptor rings */
    if (DMA64_ENAB(di)) {
        if (di->txd64)
            DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->txd64 - di->txdalign),
                                di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
        if (di->rxd64)
            DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->rxd64 - di->rxdalign),
                                di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
    } else {
        if (di->txd32)
            DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->txd32 - di->txdalign),
                                di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
        if (di->rxd32)
            DMA_FREE_CONSISTENT(di->osh, ((int8 *)di->rxd32 - di->rxdalign),
                                di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
    }

    /* free packet pointer vectors */
    if (di->txp)
        MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
    if (di->rxp)
        MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

    /* free tx packet DMA handles */
    if (di->txp_dmah)
        MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(osldma_t **));

    /* free rx packet DMA handles */
    if (di->rxp_dmah)
        MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(osldma_t **));

    /* free our private info structure */
    MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}

static bool
_dma32_addrext(dma32regs_t *dma32regs)
{
    uint32 w;

    OR_REG(&dma32regs->control, XC_AE);
    w = R_REG(&dma32regs->control);
    AND_REG(&dma32regs->control, ~XC_AE);
    return ((w & XC_AE) == XC_AE);
}

/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
static bool
_dma_isaddrext(dma_info_t *di)
{
    if (DMA64_ENAB(di)) {
        return FALSE;
    } else if (di->d32txregs)
        return (_dma32_addrext(di->d32txregs));
    return FALSE;
}

/* initialize descriptor table base address */
static void
_dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
{
    if (DMA64_ENAB(di)) {
        if (direction == DMA_TX) {
            W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
            W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
        } else {
            W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
            W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
        }
    } else {
        uint32 offset = di->ddoffsetlow;

        if ((offset != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
            if (direction == DMA_TX)
                W_REG(&di->d32txregs->addr, (pa + offset));
            else
                W_REG(&di->d32rxregs->addr, (pa + offset));
        } else {
            /* dma32 address extension */
            uint32 ae;
            ASSERT(di->addrext);
            ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;

            if (direction == DMA_TX) {
                W_REG(&di->d32txregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
                SET_REG(&di->d32txregs->control, XC_AE, (ae << XC_AE_SHIFT));
            } else {
                W_REG(&di->d32rxregs->addr, ((pa & ~PCI32ADDR_HIGH) + offset));
                SET_REG(&di->d32rxregs->control, RC_AE, (ae << RC_AE_SHIFT));
            }
        }
    }
}

static void
_dma_fifoloopbackenable(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
    if (DMA64_ENAB(di))
        OR_REG(&di->d64txregs->control, D64_XC_LE);
    else
        OR_REG(&di->d32txregs->control, XC_LE);
}

static void
_dma_rxinit(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxinit\n", di->name));

    if (di->nrxd == 0)
        return;

    di->rxin = di->rxout = 0;

    /* clear rx descriptor ring */
    if (DMA64_ENAB(di)) {
        BZERO_SM((void *)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));
        _dma_rxenable(di);
        _dma_ddtable_init(di, DMA_RX, di->rxdpa);
    } else {
        BZERO_SM((void *)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
        _dma_rxenable(di);
        _dma_ddtable_init(di, DMA_RX, di->rxdpa);
    }
}
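/*
 * Illustration (not part of the original file): _dma32_addrext() above
 * uses a common MMIO probe idiom: set a bit, read it back, then clear
 * it; if the bit stuck, the hardware implements the feature. A minimal
 * sketch against a plain volatile register, shown for clarity only.
 */
#include <stdint.h>

static int
ex_probe_reg_bit(volatile uint32_t *reg, uint32_t bit)
{
    uint32_t w;

    *reg |= bit;     /* try to set the feature bit */
    w = *reg;        /* read back what the hardware kept */
    *reg &= ~bit;    /* restore the register */
    return ((w & bit) == bit);
}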
static void
_dma_rxenable(dma_info_t *di)
{
    DMA_TRACE(("%s: dma_rxenable\n", di->name));

    if (DMA64_ENAB(di))
        W_REG(&di->d64rxregs->control, ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
    else
        W_REG(&di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}

/* !! rx entry routine, returns a pointer to the next frame received,
 * or NULL if there are no more
 */
static void *
_dma_rx(dma_info_t *di)
{
    void *p;
    uint len;
    int skiplen = 0;

    while ((p = _dma_getnextrxp(di, FALSE))) {
        /* skip giant packets which span multiple rx descriptors */
        if (skiplen > 0) {
            skiplen -= di->rxbufsize;
            if (skiplen < 0)
                skiplen = 0;
            PKTFREE(di->osh, p, FALSE);
            continue;
        }

        len = ltoh16(*(uint16 *)(PKTDATA(di->osh, p)));
        DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

        /* bad frame length check */
        if (len > (di->rxbufsize - di->rxoffset)) {
            DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
            if (len > 0)
                skiplen = len - (di->rxbufsize - di->rxoffset);
            PKTFREE(di->osh, p, FALSE);
            di->hnddma.rxgiants++;
            continue;
        }

        /* set actual length */
        PKTSETLEN(di->osh, p, (di->rxoffset + len));
        break;
    }

    return (p);
}
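/*
 * Illustration (not part of the original file): a hypothetical caller
 * of the rx path. dma_rx()/dma_rxfill() stand in for the exported
 * opsvec wrappers around _dma_rx()/_dma_rxfill(); those names and the
 * ex_process_frame() handler are assumptions made for the sketch.
 */
#if 0   /* sketch only, not compiled */
static void
ex_rx_poll(hnddma_t *dmah)
{
    void *p;

    /* drain completed receive frames... */
    while ((p = dma_rx(dmah)) != NULL)
        ex_process_frame(p);    /* hypothetical per-frame handler */

    /* ...then replenish the ring with fresh buffers */
    dma_rxfill(dmah);
}
#endif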
/* post receive buffers */
static void
_dma_rxfill(dma_info_t *di)
{
    void *p;
    uint rxin, rxout;
    uint32 flags = 0;
    uint n;
    uint i;
    uint32 pa;
    uint extra_offset = 0;

    /*
     * Determine how many receive buffers we're lacking
     * from the full complement, allocate, initialize,
     * and post them, then update the chip rx lastdscr.
     */
    rxin = di->rxin;
    rxout = di->rxout;

    n = di->nrxpost - NRXDACTIVE(rxin, rxout);

    DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

    if (di->rxbufsize > BCMEXTRAHDROOM)
        extra_offset = BCMEXTRAHDROOM;

    for (i = 0; i < n; i++) {
        /* di->rxbufsize doesn't include the extra headroom, so
         * add it to the size to be allocated
         */
        if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset, FALSE)) == NULL) {
            DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
            di->hnddma.rxnobuf++;
            break;
        }

        /* reserve the extra headroom, if applicable */
        if (extra_offset)
            PKTPULL(di->osh, p, extra_offset);

        /* Do a cached write instead of uncached write since DMA_MAP
         * will flush the cache.
         */
        *(uint32 *)(PKTDATA(di->osh, p)) = 0;

        pa = (uint32)DMA_MAP(di->osh, PKTDATA(di->osh, p), di->rxbufsize,
                             DMA_RX, p, &di->rxp_dmah[rxout]);
        ASSERT(ISALIGNED(pa, 4));

        /* save the free packet pointer */
        ASSERT(di->rxp[rxout] == NULL);
        di->rxp[rxout] = p;

        /* reset flags for each descriptor */
        flags = 0;
        if (DMA64_ENAB(di)) {
            if (rxout == (di->nrxd - 1))
                flags = D64_CTRL1_EOT;
            dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
        } else {
            if (rxout == (di->nrxd - 1))
                flags = CTRL_EOT;
            dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
        }
        rxout = NEXTRXD(rxout);
    }

    di->rxout = rxout;

    /* update the chip lastdscr pointer */
    if (DMA64_ENAB(di)) {
        W_REG(&di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
    } else {
        W_REG(&di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
    }
}

/* like getnexttxp but no reclaim */
static void *
_dma_peeknexttxp(dma_info_t *di)
{
    uint end, i;

    if (di->ntxd == 0)
        return (NULL);

    if (DMA64_ENAB(di)) {
        end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
    } else {
        end = B2I(R_REG(&di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
    }

    for (i = di->txin; i != end; i = NEXTTXD(i))
        if (di->txp[i])
            return (di->txp[i]);

    return (NULL);
}

static void
_dma_rxreclaim(dma_info_t *di)
{
    void *p;

    /* "unused local" warning suppression for OSLs that
     * define PKTFREE() without using the di->osh arg
     */
    di = di;

    DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

    while ((p = _dma_getnextrxp(di, TRUE)))
        PKTFREE(di->osh, p, FALSE);
}

static void *
_dma_getnextrxp(dma_info_t *di, bool forceall)
{
    if (di->nrxd == 0)
        return (NULL);

    if (DMA64_ENAB(di)) {
        return dma64_getnextrxp(di, forceall);
    } else {
        return dma32_getnextrxp(di, forceall);
    }
}

static void
_dma_txblock(dma_info_t *di)
{
    di->hnddma.txavail = 0;
}

static void
_dma_txunblock(dma_info_t *di)
{
    di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint
_dma_txactive(dma_info_t *di)
{
    return (NTXDACTIVE(di->txin, di->txout));
}

static void
_dma_counterreset(dma_info_t *di)
{
    /* reset all software counters */
    di->hnddma.rxgiants = 0;
    di->hnddma.rxnobuf = 0;
    di->hnddma.txnobuf = 0;
}

/* get the address of the var in order to change it later */
static uintptr
_dma_getvar(dma_info_t *di, char *name)
{
    if (!strcmp(name, "&txavail"))
        return ((uintptr)&(di->hnddma.txavail));
    else {
        ASSERT(0);
    }
    return (0);
}

void
dma_rxpiomode(dma32regs_t *regs)
{
    W_REG(&regs->control, RC_FM);
}

void
dma_txpioloopback(dma32regs_t *regs)
{
    OR_REG(&regs->control, XC_LE);
}
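/*
 * Illustration (not part of the original file): the ring-occupancy math
 * behind the NRXDACTIVE()/NTXDACTIVE() macros used above. Assuming the
 * descriptor count is a power of two, (tail - head) masked by
 * (count - 1) yields the number of in-flight descriptors even after
 * the indices wrap around zero.
 */
#include <assert.h>

static unsigned
ex_ring_active(unsigned head, unsigned tail, unsigned nd)
{
    assert((nd & (nd - 1)) == 0);   /* ring size assumed power of two */
    return ((tail - head) & (nd - 1));
}
/* e.g. nd = 256, head = 250, tail = 4: (4 - 250) & 255 = 10 in flight */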