hnddma.c
static bool
dma64_txenabled(dma_info_t *di)
{
	uint32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(&di->d64txregs->control);
	return ((xc != 0xffffffff) && (xc & D64_XC_XE));
}

static void
dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}

static void
dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}

static bool
dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	        ((R_REG(&di->d64txregs->control) & D64_XC_SE) == D64_XC_SE);
}

static void
dma64_txreclaim(dma_info_t *di, bool forceall)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	while ((p = dma64_getnexttxp(di, forceall)))
		PKTFREE(di->osh, p, TRUE);
}

static bool
dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
}

static bool
dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
}

static bool
dma64_alloc(dma_info_t *di, uint direction)
{
	uint size;
	uint ddlen;
	uint32 alignbytes;
	void *va;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	alignbytes = di->dma64align;

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
		size += alignbytes;

	if (direction == DMA_TX) {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
			           di->name));
			return FALSE;
		}

		di->txd64 = (dma64dd_t *)ROUNDUP((uintptr)va, alignbytes);
		di->txdalign = (uint)((int8 *)di->txd64 - (int8 *)va);
		di->txdpa += di->txdalign;
		di->txdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
	} else {
		if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
			DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
			           di->name));
			return FALSE;
		}

		di->rxd64 = (dma64dd_t *)ROUNDUP((uintptr)va, alignbytes);
		di->rxdalign = (uint)((int8 *)di->rxd64 - (int8 *)va);
		di->rxdpa += di->rxdalign;
		di->rxdalloc = size;
		ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
	}

	return TRUE;
}

static bool
dma64_txreset(dma_info_t *di)
{
	uint32 status;

	if (di->ntxd == 0)
		return TRUE;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED) &&
	         (status != D64_XS0_XS_IDLE) &&
	         (status != D64_XS0_XS_STOPPED),
	         10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status = (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
	          D64_XS0_XS_DISABLED),
	         10000);

	/* wait for the last transaction to complete */
	OSL_DELAY(300);

	return (status == D64_XS0_XS_DISABLED);
}

static bool
dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return TRUE;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
	        R_REG(&di->d64rxregs->ptr));
}

static bool
dma64_rxreset(dma_info_t *di)
{
	uint32 status;

	if (di->nrxd == 0)
		return TRUE;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status = (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
	          D64_RS0_RS_DISABLED),
	         10000);

	return (status == D64_RS0_RS_DISABLED);
}

static bool
dma64_rxenabled(dma_info_t *di)
{
	uint32 rc;

	rc = R_REG(&di->d64rxregs->control);
	return ((rc != 0xffffffff) && (rc & D64_RC_RE));
}
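/*
 * Illustrative sketch, not part of the original driver: a worked example of
 * the over-allocate-and-round-up scheme used by dma64_alloc above. ROUNDUP
 * is assumed to be the usual (((x) + (align) - 1) & ~((align) - 1)) for a
 * power-of-2 alignment; the names below are hypothetical.
 */
#if 0	/* example only */
static void
dma64_align_example(void)
{
	uintptr va = 0x1004;			/* raw DMA_ALLOC_CONSISTENT address */
	uint32 align = 0x1000;			/* di->dma64align */
	uintptr dd = ROUNDUP(va, align);	/* 0x2000: first aligned address */
	uint dalign = (uint)(dd - va);		/* 0xffc: offset also added to txdpa/rxdpa */

	/* Because dma64_alloc pads 'size' by 'alignbytes' when the allocator's
	 * own alignment is insufficient, the ring starting at 'dd' still fits
	 * entirely inside the allocation.
	 */
}
#endif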
static bool
dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return TRUE;

	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
		return FALSE;

	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
		return TRUE;

	return FALSE;
}

/* !! tx entry routine */
static int
dma64_txfast(dma_info_t *di, void *p0, bool commit)
{
	void *p, *next;
	uchar *data;
	uint len;
	uint txout;
	uint32 flags = 0;
	uint32 pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->osh, p);
		len = PKTLEN(di->osh, p);
		next = PKTNEXT(di->osh, p);

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		pa = (uint32)DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]);

		flags = 0;
		if (p == p0)
			flags |= D64_CTRL1_SOF;
		if (next == NULL)
			flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
		if (txout == (di->ntxd - 1))
			flags |= D64_CTRL1_EOT;

		dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
		ASSERT(di->txp[txout] == NULL);

		txout = NEXTTXD(txout);
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr, I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
	PKTFREE(di->osh, p0, TRUE);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}
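/*
 * Illustrative sketch, not part of the original driver: the intended flow
 * around the tx fast path. Callers normally reach these static routines
 * through the hnddma function table; the direct calls and the 'di'/'p'
 * names here are hypothetical and for illustration only.
 */
#if 0	/* example only */
static void
dma64_tx_example(dma_info_t *di, void *p)
{
	/* post a packet chain and kick the chip immediately */
	if (dma64_txfast(di, p, TRUE) < 0)
		return;		/* out of descriptors; dma64_txfast already freed p */

	/* later, from the tx-completion path: reclaim finished packets */
	while ((p = dma64_getnexttxp(di, FALSE)) != NULL)
		PKTFREE(di->osh, p, TRUE);
}
#endif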
"all" : "")); if (di->ntxd == 0) return (NULL); txp = NULL; start = di->txin; if (forceall) end = di->txout; else end = B2I(R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t); if ((start == 0) && (end > di->txout)) goto bogus; for (i = start; i != end && !txp; i = NEXTTXD(i)) { DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow), (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK), DMA_TX, di->txp[i], &di->txp_dmah[txout]); W_SM(&di->txd64[i].addrlow, 0xdeadbeef); W_SM(&di->txd64[i].addrhigh, 0xdeadbeef); txp = di->txp[i]; di->txp[i] = NULL; } di->txin = i; /* tx flow control */ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; return (txp);bogus:/* DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));*/ return (NULL);}static void *dma64_getnextrxp(dma_info_t *di, bool forceall){ uint i; void *rxp; /* if forcing, dma engine must be disabled */ ASSERT(!forceall || !dma64_rxenabled(di)); i = di->rxin; /* return if no packets posted */ if (i == di->rxout) return (NULL); /* ignore curr if forceall */ if (!forceall && (i == B2I(R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t))) return (NULL); /* get the packet pointer that corresponds to the rx descriptor */ rxp = di->rxp[i]; ASSERT(rxp); di->rxp[i] = NULL; /* clear this packet from the descriptor ring */ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow), di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[rxout]); W_SM(&di->rxd64[i].addrlow, 0xdeadbeef); W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef); di->rxin = NEXTRXD(i); return (rxp);}/* * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin). */static voiddma64_txrotate(dma_info_t *di){ uint ad; uint nactive; uint rot; uint old, new; uint32 w; uint first, last; ASSERT(dma64_txsuspendedidle(di)); nactive = _dma_txactive(di); ad = B2I((R_REG(&di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t); rot = TXD(ad - di->txin); ASSERT(rot < di->ntxd); /* full-ring case is a lot harder - don't worry about this */ if (rot >= (di->ntxd - nactive)) { DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name)); return; } first = di->txin; last = PREVTXD(di->txout); /* move entries starting at last and moving backwards to first */ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) { new = TXD(old + rot); /* * Move the tx dma descriptor. * EOT is set only in the last entry in the ring. 
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void
dma64_txrotate(dma_info_t *di)
{
	uint ad;
	uint nactive;
	uint rot;
	uint old, new;
	uint32 w;
	uint first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = B2I((R_REG(&di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];
		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(&di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
}
#endif	/* BCMDMA64 */

uint
dma_addrwidth(sb_t *sbh, void *dmaregs)
{
	dma32regs_t *dma32regs;

	if (DMA64_CAP) {
		/* DMA engine and backplane are 64-bit capable */
		if (((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64) &&
		    sb_backplane64(sbh)) {
			/* If bus is System Backplane or PCIE then we can access 64-bits */
			if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
			    ((BUSTYPE(sbh->bustype) == PCI_BUS) &&
			     sbh->buscoretype == SB_PCIE))
				return (DMADDRWIDTH_64);
		}
	}

	/* Start checking for 32-bit / 30-bit addressing */
	dma32regs = (dma32regs_t *)dmaregs;

	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
	if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
	    ((BUSTYPE(sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
	    (_dma32_addrext(dma32regs)))
		return (DMADDRWIDTH_32);

	/* Fallthru */
	return (DMADDRWIDTH_30);
}
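/*
 * Illustrative sketch, not part of the original driver: dma_addrwidth is
 * typically consulted once at attach time to decide which descriptor format
 * and buffer-address restrictions apply. The attach-side context below is
 * hypothetical.
 */
#if 0	/* example only */
static void
dma_attach_width_example(sb_t *sbh, void *dmaregs)
{
	switch (dma_addrwidth(sbh, dmaregs)) {
	case DMADDRWIDTH_64:	/* 64-bit descriptors, full address range */
		break;
	case DMADDRWIDTH_32:	/* 32-bit descriptors; bus or addrext covers it */
		break;
	case DMADDRWIDTH_30:	/* buffers must sit in the low 1 GB */
		break;
	}
}
#endif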