⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 dma.c

📁 linux内核源码
💻 C
📖 第 1 页 / 共 3 页
字号:
			/* Engine has reached a quiescent TX state? */
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	/* Now actually disable the controller and wait for it to
	 * report the DISABLED state. */
	offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (dma64) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;	/* i == -1 flags success below */
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;	/* i == -1 flags success below */
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		/* Loop ran to completion without seeing DISABLED. */
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Allocate and DMA-map one RX buffer for the given slot.
 * On success, meta->skb / meta->dmaaddr are set and the hardware
 * descriptor is filled in. Returns 0, -ENOMEM or -EIO. */
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	/* This helper is for RX rings only. */
	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (dma_mapping_error(dmaaddr)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}
	if (dma_mapping_error(dmaaddr)) {
		/* Even the ZONE_DMA retry failed to map. */
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	/* Clear the poll fields at the start of the buffer so stale
	 * data is never mistaken for a received frame or a TX status.
	 * (Both headers alias the same buffer start on purpose.) */
	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	/* Make sure all buffer writes are visible before the
	 * device is told the ring is fully populated. */
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	/* Unmap and free every buffer allocated before the failure. */
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	/* Backplane address translation bits for this bus. */
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64) (ring->dmabase);

			/* Address-extension bits live in the high dword. */
			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		/* RX ring: buffers must exist before the engine is enabled. */
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			/* Point the RX index one past the last descriptor:
			 * all slots are available to the hardware. */
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->dma64);
		/* Clear the ring base address register(s). */
		if (ring->dma64) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->dma64);
		if (ring->dma64) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

/* Unmap and free every buffer still attached to a ring slot. */
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			/* Only TX rings may legitimately have empty slots. */
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

/* Probe the widest DMA address mask the core supports:
 * 64-bit if the core flags say so, else 32-bit if the address
 * extension bits stick when written, else 30-bit. */
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

/* Main initialization
function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx, int dma64)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43_txhdr_fw4),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  sizeof(struct b43_txhdr_fw4),
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dma_test)) {
			/* ugh realloc: retry the allocation in ZONE_DMA */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    sizeof(struct
							   b43_txhdr_fw4),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache,
						  sizeof(struct b43_txhdr_fw4),
						  DMA_TO_DEVICE);

			if (dma_mapping_error(dma_test))
				goto err_kfree_txhdr_cache;
		}

		/* The mapping was only a probe; drop it again. */
		dma_unmap_single(dev->dev->dev,
				 dma_test, sizeof(struct b43_txhdr_fw4),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (dma64)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		/* Only RX controllers 0 and 3 are ever set up. */
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	/* Returns the ring on success, NULL after the unwind below. */
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring)
{
	if (!ring)
		return;

	b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
	       (ring->dma64) ? "64" : "32",
	       ring->mmio_base,
	       (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

/* Destroy all DMA rings of the device (reverse order of creation). */
void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio(dev))
		return;
	dma = &dev->dma;

	b43_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

/* Set the device's DMA mask and bring up all TX and RX rings.
 * Returns 0, -EAGAIN (fell back to PIO), or a negative error. */
int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring;
	int err;
	u64 dmamask;
	int dma64 = 0;

	dmamask = supported_dma_mask(dev);
	if (dmamask == DMA_64BIT_MASK)
		dma64 = 1;

	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
#ifdef B43_PIO
		b43warn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		b43err(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43_setup_dmaring(dev, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43_setup_dmaring(dev, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43_setup_dmaring(dev, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43_setup_dmaring(dev, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43_setup_dmaring(dev, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43_setup_dmaring(dev, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	/* RX ring 3 exists only on older core revisions. */
	if (dev->dev->id.revision < 5) {
		ring = b43_setup_dmaring(dev, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43dbg(dev->wl, "%d-bit DMA initialized\n",
	       (dmamask == DMA_64BIT_MASK) ? 64 :
	       (dmamask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43_destroy_dmaring(dma->rx_ring0);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -