
dma.c — Linux kernel source (C)
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
				       sizeof(struct b43legacy_txhdr_fw3), 1);
	if (dma_mapping_error(meta_hdr->dmaaddr))
		return -EIO;
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
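	/*
	 * Note: each packet consumes two ring slots (SLOTS_PER_PACKET):
	 * the first slot carries the firmware TX header mapped above,
	 * the second carries the frame payload that follows.
	 */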
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (dma_mapping_error(meta->dmaaddr)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (dma_mapping_error(meta->dmaaddr)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb,
		     struct ieee80211_tx_control *ctl)
{
	struct b43legacy_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, ctl->queue);
	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43legacywarn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43legacy_BUG_ON(ring->stopped);

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
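		/*
		 * Stop the mac80211 queue for this ring now; it is woken
		 * again in b43legacy_dma_handle_txstatus() once enough
		 * slots have been reclaimed.
		 */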
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	const struct b43legacy_dma_ops *ops;
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43legacy_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			B43legacy_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			if (status->acked) {
				meta->txstat.flags |= IEEE80211_TX_STATUS_ACK;
			} else {
				if (!(meta->txstat.control.flags
				      & IEEE80211_TXCTL_NO_ACK))
					 meta->txstat.excessive_retries = 1;
			}
			if (status->frame_count == 0) {
				/* The frame was not transmitted at all. */
				meta->txstat.retry_count = 0;
			} else
				meta->txstat.retry_count = status->frame_count
							   - 1;
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
			       ring->index);
	}

	spin_unlock(&ring->lock);
}

void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
			      struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43legacy_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = priority_to_txring(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
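		/*
		 * On this hardware, RX ring 3 carries hardware TX status
		 * reports rather than received frames; poll briefly until
		 * the DMA write of the cookie field has landed.
		 */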
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}
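/*
 * Note: resume below walks the rings in the reverse order of the
 * suspend above, and drops the power-saving bit only after all six
 * TX rings are running again.
 */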
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
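The cookie scheme used by generate_cookie()/parse_cookie() above is easy to verify in isolation: ring indices 0..5 map to the upper nibble 0xA..0xF (so a cookie is never 0, the special RX-path value), and the slot number occupies the lower 12 bits. Below is a minimal standalone sketch of that round trip; it is not part of the driver, and the cookie_encode/cookie_decode names are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a ring index (0..5) and a 12-bit slot number into one cookie. */
static uint16_t cookie_encode(int ring_index, int slot)
{
	assert(ring_index >= 0 && ring_index <= 5);
	assert((slot & 0xF000) == 0);	/* slot must fit in 12 bits */
	return (uint16_t)(((0xA + ring_index) << 12) | slot);
}

/* Recover the ring index and slot number from a cookie. */
static void cookie_decode(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 0xA;	/* upper nibble: 0xA..0xF */
	*slot = cookie & 0x0FFF;		/* lower 12 bits */
}

int main(void)
{
	int ring, slot;
	uint16_t c = cookie_encode(4, 123);

	cookie_decode(c, &ring, &slot);
	/* prints: cookie 0xE07B -> ring 4, slot 123 */
	printf("cookie 0x%04X -> ring %d, slot %d\n", c, ring, slot);
	return 0;
}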
