hnddma.c
/*
 * Generic Broadcom Home Networking Division (HND) DMA module.
 * This supports the following chips: BCM42xx, 44xx, 47xx.
 *
 * Copyright 2005-2006, Broadcom Corporation
 * All Rights Reserved.
 *
 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
 *
 * $Id$
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmendian.h>
#include <sbconfig.h>
#include <bcmutils.h>
#include <bcmdevs.h>
#include <sbutils.h>

#include <sbhnddma.h>
#include <hnddma.h>

/* debug/trace */
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level = 0;

#define	MAXNAMEL	8		/* 8 char names */

#define	DI_INFO(dmah)	(dma_info_t *)dmah

/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint		*msg_level;	/* message level pointer */
	char		name[MAXNAMEL];	/* callers name for diag msgs */

	void		*osh;		/* os handle */
	sb_t		*sbh;		/* sb handle */

	bool		dma64;		/* dma64 enabled */
	bool		addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	dma32regs_t	*d32txregs;	/* 32 bits dma tx engine registers */
	dma32regs_t	*d32rxregs;	/* 32 bits dma rx engine registers */
	dma64regs_t	*d64txregs;	/* 64 bits dma tx engine registers */
	dma64regs_t	*d64rxregs;	/* 64 bits dma rx engine registers */

	uint32		dma64align;	/* either 8k or 4k depends on number of dd */
	dma32dd_t	*txd32;		/* pointer to dma32 tx descriptor ring */
	dma64dd_t	*txd64;		/* pointer to dma64 tx descriptor ring */
	uint		ntxd;		/* # tx descriptors tunable */
	uint		txin;		/* index of next descriptor to reclaim */
	uint		txout;		/* index of next descriptor to post */
	void		**txp;		/* pointer to parallel array of pointers to packets */
	osldma_t	*tx_dmah;	/* DMA TX descriptor ring handle */
	osldma_t	**txp_dmah;	/* DMA TX packet data handle */
	ulong		txdpa;		/* physical address of descriptor ring */
	uint		txdalign;	/* #bytes added to alloc'd mem to align txd */
	uint		txdalloc;	/* #bytes allocated for the ring */

	dma32dd_t	*rxd32;		/* pointer to dma32 rx descriptor ring */
	dma64dd_t	*rxd64;		/* pointer to dma64 rx descriptor ring */
	uint		nrxd;		/* # rx descriptors tunable */
	uint		rxin;		/* index of next descriptor to reclaim */
	uint		rxout;		/* index of next descriptor to post */
	void		**rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t	*rx_dmah;	/* DMA RX descriptor ring handle */
	osldma_t	**rxp_dmah;	/* DMA RX packet data handle */
	ulong		rxdpa;		/* physical address of descriptor ring */
	uint		rxdalign;	/* #bytes added to alloc'd mem to align rxd */
	uint		rxdalloc;	/* #bytes allocated for the ring */

	/* tunables */
	uint		rxbufsize;	/* rx buffer size in bytes, not including the extra headroom */
	uint		nrxpost;	/* # rx buffers to keep posted */
	uint		rxoffset;	/* rxcontrol offset */
	uint		ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint		ddoffsethigh;	/* high 32 bits */
	uint		dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint		dataoffsethigh;	/* high 32 bits */
} dma_info_t;

#ifdef BCMDMA64
#define	DMA64_ENAB(di)	((di)->dma64)
#define	DMA64_CAP	TRUE
#else
#define	DMA64_ENAB(di)	(0)
#define	DMA64_CAP	FALSE
#endif
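/*
 * Note on the descriptor bumping macros below: dma_attach() asserts that
 * ntxd and nrxd are powers of 2, so XXD() can mask with (n - 1) instead of
 * using the slower modulo operator, and NTXDACTIVE()/NRXDACTIVE() remain
 * correct when the "out" index has wrapped past the "in" index.  For
 * example, with ntxd == 64, txin == 62 and txout == 3,
 * NTXDACTIVE(62, 3) == (3 - 62) & 63 == 5 descriptors still outstanding.
 */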
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD(i + 1)
#define	PREVTXD(i)	TXD(i - 1)
#define	NEXTRXD(i)	RXD(i + 1)
#define	NTXDACTIVE(h, t)	TXD(t - h)
#define	NRXDACTIVE(h, t)	RXD(t - h)

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))

#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30		/* address[31:30] */

/* common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, ulong pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static void _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static uintptr _dma_getvar(dma_info_t *di, char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);

/*
** 32 bit DMA prototypes
*/
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, bool forceall);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, bool forceall);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);
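/*
 * In a build without BCMDMA64, the dma64_* entry points below compile to
 * trivial stubs: this keeps the dma64proc function table well formed and
 * the common code linking unchanged, while DMA64_ENAB(di) evaluates to 0
 * so the stubs should never be reached at run time.
 */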
/*
** 64 bit DMA prototypes and stubs
*/
#ifdef BCMDMA64
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
static void *dma64_getnexttxp(dma_info_t *di, bool forceall);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);
static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, bool forceall);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
#else
static bool dma64_alloc(dma_info_t *di, uint direction) { return FALSE; }
static bool dma64_txreset(dma_info_t *di) { return FALSE; }
static bool dma64_rxreset(dma_info_t *di) { return FALSE; }
static bool dma64_txsuspendedidle(dma_info_t *di) { return FALSE; }
static int dma64_txfast(dma_info_t *di, void *p0, bool commit) { return 0; }
static void *dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
static void *dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
static void dma64_txrotate(dma_info_t *di) { return; }
static bool dma64_rxidle(dma_info_t *di) { return FALSE; }
static void dma64_txinit(dma_info_t *di) { return; }
static bool dma64_txenabled(dma_info_t *di) { return FALSE; }
static void dma64_txsuspend(dma_info_t *di) { return; }
static void dma64_txresume(dma_info_t *di) { return; }
static bool dma64_txsuspended(dma_info_t *di) { return FALSE; }
static void dma64_txreclaim(dma_info_t *di, bool forceall) { return; }
static bool dma64_txstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxstopped(dma_info_t *di) { return FALSE; }
static bool dma64_rxenabled(dma_info_t *di) { return FALSE; }
#endif	/* BCMDMA64 */

static di_fcn_t dma64proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma64_txinit,
	(di_txreset_t)dma64_txreset,
	(di_txenabled_t)dma64_txenabled,
	(di_txsuspend_t)dma64_txsuspend,
	(di_txresume_t)dma64_txresume,
	(di_txsuspended_t)dma64_txsuspended,
	(di_txsuspendedidle_t)dma64_txsuspendedidle,
	(di_txfast_t)dma64_txfast,
	(di_txstopped_t)dma64_txstopped,
	(di_txreclaim_t)dma64_txreclaim,
	(di_getnexttxp_t)dma64_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma64_txrotate,
	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma64_rxreset,
	(di_rxidle_t)dma64_rxidle,
	(di_rxstopped_t)dma64_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma64_rxenabled,
	(di_rx_t)_dma_rx,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	NULL,
	NULL,
	NULL,
	34
};

static di_fcn_t dma32proc = {
	(di_detach_t)_dma_detach,
	(di_txinit_t)dma32_txinit,
	(di_txreset_t)dma32_txreset,
	(di_txenabled_t)dma32_txenabled,
	(di_txsuspend_t)dma32_txsuspend,
	(di_txresume_t)dma32_txresume,
	(di_txsuspended_t)dma32_txsuspended,
	(di_txsuspendedidle_t)dma32_txsuspendedidle,
	(di_txfast_t)dma32_txfast,
	(di_txstopped_t)dma32_txstopped,
	(di_txreclaim_t)dma32_txreclaim,
	(di_getnexttxp_t)dma32_getnexttxp,
	(di_peeknexttxp_t)_dma_peeknexttxp,
	(di_txblock_t)_dma_txblock,
	(di_txunblock_t)_dma_txunblock,
	(di_txactive_t)_dma_txactive,
	(di_txrotate_t)dma32_txrotate,
	(di_rxinit_t)_dma_rxinit,
	(di_rxreset_t)dma32_rxreset,
	(di_rxidle_t)dma32_rxidle,
	(di_rxstopped_t)dma32_rxstopped,
	(di_rxenable_t)_dma_rxenable,
	(di_rxenabled_t)dma32_rxenabled,
	(di_rx_t)_dma_rx,
	(di_rxfill_t)_dma_rxfill,
	(di_rxreclaim_t)_dma_rxreclaim,
	(di_getnextrxp_t)_dma_getnextrxp,
	(di_fifoloopbackenable_t)_dma_fifoloopbackenable,
	(di_getvar_t)_dma_getvar,
	(di_counterreset_t)_dma_counterreset,
	NULL,
	NULL,
	NULL,
	34
};
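/*
 * dma_attach() builds the per-engine software state and returns an opaque
 * hnddma_t handle; later operations go through the di_fcn_t table
 * (dma32proc or dma64proc) chosen for the attached core.  A rough usage
 * sketch (the register pointers, ring sizes and message-level variable
 * below are illustrative caller-side values, not defined in this file):
 *
 *	hnddma_t *dmah;
 *
 *	dmah = dma_attach(osh, "wl0", sbh, dmaregstx, dmaregsrx,
 *	                  256, 256, 2048, 64, 0, &wl_msg_level);
 *	if (dmah == NULL)
 *		goto attach_fail;
 */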
hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
           uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
		return (NULL);
	}
	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	if (sbh != NULL)
		di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
	if (di->dma64) {
		DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
		           "64 bits DMA\n"));
		goto fail;
	}
#endif

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));
	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (di->dma64) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;

		di->dma64align = D64RINGALIGN;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relaxes the alignment requirement */
			di->dma64align = D64RINGALIGN / 2;
		}
	} else {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;
	}

	DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
	           "rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize,
	           nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sbh = sbh;

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;

	/* the actual dma size doesn't include the extra headroom */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     for old chips w/o sb, use zero
	 *     for new chips w sb,
	 *         PCI/PCIE: they map silicon backplane address to zero based memory, need offset
	 *         Other bus: use zero
	 *         SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;

	if (sbh != NULL) {
		if (sbh->bustype == PCI_BUS) {
			/* for pci bus, add offset */
			if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
				di->ddoffsetlow = 0;
				di->ddoffsethigh = SB_PCIE_DMA_H32;
			} else {
				di->ddoffsetlow = SB_PCI_DMA;
				di->ddoffsethigh = 0;
			}
			di->dataoffsetlow = di->ddoffsetlow;
			di->dataoffsethigh = di->ddoffsethigh;
		}
#if defined(__mips__) && defined(IL_BIGENDIAN)
		di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif
	}

	di->addrext = _dma_isaddrext(di);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
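	/*
	 * Sanity check: with the 32-bit PCI DMA offset (SB_PCI_DMA) in use, a
	 * descriptor ring that landed above SB_PCI_DMA_SZ in physical memory is
	 * only reachable when the engine supports DmaExtendedAddrChanges
	 * (addrext), so fail the attach early instead of programming an
	 * unreachable ring address.
	 */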
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		           di->name, di->txdpa));
		goto fail;
	}
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		           di->name, di->rxdpa));
		goto fail;
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n",
	           di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
	           di->addrext));

	/* allocate tx packet pointer vector and DMA mapping vectors */
	if (ntxd) {
		size = ntxd * sizeof(osldma_t **);
		if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)