bcm43xx_dma.c
/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <asm/semaphore.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots. */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}

static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc *desc,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					 &(ring->dmabase), GFP_KERNEL);
	if (!ring->vbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
				    "(0x%08x, len: %lu)\n",
		       ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
				  ring->vbase, ring->dmabase);
		return -ENOMEM;
	}
	assert(!(ring->dmabase & 0x000003FF));
	memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->vbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
{
	int i;
	u32 value;

	bcm43xx_write32(bcm,
			mmio_base + BCM43xx_DMA_RX_CONTROL,
			0x00000000);
	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_RX_STATUS);
		value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
			i = -1;
			break;
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base)
{
	int i;
	u32 value;

	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_TX_STATUS);
		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
		    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
		    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
			break;
		udelay(10);
	}
	bcm43xx_write32(bcm,
			mmio_base + BCM43xx_DMA_TX_CONTROL,
			0x00000000);
	for (i = 0; i < 1000; i++) {
		value = bcm43xx_read32(bcm,
				       mmio_base + BCM43xx_DMA_TX_STATUS);
		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
			i = -1;
			break;
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}

	/* ensure the reset is completed. */
	udelay(300);

	return 0;
}

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	dma_addr_t dmaaddr;
	u32 desc_addr;
	u32 desc_ctl;
	const int slot = (int)(desc - ring->vbase);
	struct sk_buff *skb;

	assert(slot >= 0 && slot < ring->nr_slots);
	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb_any(skb);
		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
				    "(0x%08x, len: %u)\n",
		       dmaaddr, ring->rx_buffersize);
		return -ENOMEM;
	}
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;
	desc_addr = (u32)(dmaaddr + ring->memoffset);
	desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
		    (u32)(ring->rx_buffersize - ring->frameoffset));
	if (slot == ring->nr_slots - 1)
		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
	set_desc_addr(desc, desc_addr);
	set_desc_ctl(desc, desc_ctl);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	rxhdr->flags1 = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;

	if (ring->tx) {
		/* Set Transmit Control register to "transmit enable" */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_CONTROL,
				BCM43xx_DMA_TXCTRL_ENABLE);
		/* Set Transmit Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
				ring->dmabase + ring->memoffset);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		/* Set Receive Control "receive enable" and frame offset */
		value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
		value |= BCM43xx_DMA_RXCTRL_ENABLE;
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_CONTROL,
				value);
		/* Set Receive Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
				ring->dmabase + ring->memoffset);
		/* Init the descriptor pointer. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
				200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Transmit Descriptor ring address.
		 */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
				0x00000000);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
		/* Zero out Receive Descriptor ring address. */
		bcm43xx_write32(ring->bcm,
				ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
				0x00000000);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->vbase + i;
		meta = ring->meta + i;

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, desc, meta, 0);
	}
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					       u16 dma_controller_base,
					       int nr_descriptor_slots,
					       int tx)
{
	struct bcm43xx_dmaring *ring;
	int err;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
#ifdef CONFIG_BCM947XX
	if (bcm->pci_dev->bus->number == 0)
		ring->memoffset = 0;
#endif

	ring->bcm = bcm;
	ring->nr_slots = nr_descriptor_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);