
📄 ioat_dma.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 3
/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
                return ioat_chan->desccount;

        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        ioat_chan->desccount = i;
        list_splice(&tmp_list, &ioat_chan->free_desc);
        if (ioat_chan->device->version != IOAT_VER_1_2)
                ioat2_dma_massage_chan_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        tasklet_enable(&ioat_chan->cleanup_task);
        ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
        return ioat_chan->desccount;
}

/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        tasklet_disable(&ioat_chan->cleanup_task);
        ioat_dma_memcpy_cleanup(ioat_chan);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
        mdelay(100);

        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {
                        in_use_descs++;
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->free_desc, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                break;
        case IOAT_VER_2_0:
                list_for_each_entry_safe(desc, _desc,
                                         ioat_chan->free_desc.next, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                desc = to_ioat_desc(ioat_chan->free_desc.next);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->async_tx.phys);
                kfree(desc);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      ioat_chan->completion_virt,
                      ioat_chan->completion_addr);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(&ioat_chan->device->pdev->dev,
                        "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
        ioat_chan->pending = 0;
        ioat_chan->dmacount = 0;
}

/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new;

        if (!list_empty(&ioat_chan->free_desc)) {
                new = to_ioat_desc(ioat_chan->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
                new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                if (!new) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "alloc failed\n");
                        return NULL;
                }
        }

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new;

        /*
         * used.prev points to where to start processing
         * used.next points to next free descriptor
         * if used.prev == NULL, there are none waiting to be processed
         * if used.next == used.prev.prev, there is only one free descriptor,
         *      and we need to use it as a noop descriptor before
         *      linking in a new set of descriptors, since the device
         *      has probably already read the pointer to it
         */
        if (ioat_chan->used_desc.prev &&
            ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

                struct ioat_desc_sw *desc;
                struct ioat_desc_sw *noop_desc;
                int i;

                /* set up the noop descriptor */
                noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
                noop_desc->hw->size = 0;
                noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
                noop_desc->hw->src_addr = 0;
                noop_desc->hw->dst_addr = 0;

                ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
                ioat_chan->pending++;
                ioat_chan->dmacount++;

                /* try to get a few more descriptors */
                for (i = 16; i; i--) {
                        desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        if (!desc) {
                                dev_err(&ioat_chan->device->pdev->dev,
                                        "alloc failed\n");
                                break;
                        }
                        list_add_tail(&desc->node, ioat_chan->used_desc.next);

                        desc->hw->next
                                = to_ioat_desc(desc->node.next)->async_tx.phys;
                        to_ioat_desc(desc->node.prev)->hw->next
                                = desc->async_tx.phys;
                        ioat_chan->desccount++;
                }

                ioat_chan->used_desc.next = noop_desc->node.next;
        }
        new = to_ioat_desc(ioat_chan->used_desc.next);
        prefetch(new);
        ioat_chan->used_desc.next = new->node.next;
        if (ioat_chan->used_desc.prev == NULL)
                ioat_chan->used_desc.prev = &new->node;

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
                                                struct ioat_dma_chan *ioat_chan)
{
        if (!ioat_chan)
                return NULL;

        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                return ioat1_dma_get_next_descriptor(ioat_chan);
                break;
        case IOAT_VER_2_0:
                return ioat2_dma_get_next_descriptor(ioat_chan);
                break;
        }
        return NULL;
}

static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                size_t len,
                                                int int_en)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat_dma_get_next_descriptor(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        if (new) {
                new->len = len;
                return &new->async_tx;
        } else
                return NULL;
}

static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                size_t len,
                                                int int_en)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat2_dma_get_next_descriptor(ioat_chan);

        /*
         * leave ioat_chan->desc_lock set in ioat 2 path
         * it will get unlocked at end of tx_submit
         */

        if (new) {
                new->len = len;
                return &new->async_tx;
        } else
                return NULL;
}

static void ioat_dma_cleanup_tasklet(unsigned long data)
{
        struct ioat_dma_chan *chan = (void *)data;
        ioat_dma_memcpy_cleanup(chan);
        writew(IOAT_CHANCTRL_INT_DISABLE,
               chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
        unsigned long phys_complete;
        struct ioat_desc_sw *desc, *_desc;
        dma_cookie_t cookie = 0;
        unsigned long desc_phys;
        struct ioat_desc_sw *latest_desc;

        prefetch(ioat_chan->completion_virt);

        if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
                return;

        /* The completion writeback can happen at any time,
           so reads by the driver need to be atomic operations
           The descriptor physical addresses are limited to 32-bits
           when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
        phys_complete =
                ioat_chan->completion_virt->full
                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
        phys_complete =
                ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

        if ((ioat_chan->completion_virt->full
                & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                                IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "Channel halted, chanerr = %x\n",
                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

                /* TODO do something to salvage the situation */
        }

        if (phys_complete == ioat_chan->last_completion) {
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                return;
        }

        cookie = 0;
        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {

                        /*
                         * Incoming DMA requests may use multiple descriptors,
                         * due to exceeding xfercap, perhaps. If so, only the
                         * last one will have a cookie, and require unmapping.
                         */
                        if (desc->async_tx.cookie) {
                                cookie = desc->async_tx.cookie;

                                /*
                                 * yes we are unmapping both _page and _single
                                 * alloc'd regions with unmap_page. Is this
                                 * *really* that bad?
                                 */
                                pci_unmap_page(ioat_chan->device->pdev,
                                                pci_unmap_addr(desc, dst),
                                                pci_unmap_len(desc, len),
                                                PCI_DMA_FROMDEVICE);
                                pci_unmap_page(ioat_chan->device->pdev,
                                                pci_unmap_addr(desc, src),
                                                pci_unmap_len(desc, len),
                                                PCI_DMA_TODEVICE);

                                if (desc->async_tx.callback) {
                                        desc->async_tx.callback(desc->async_tx.callback_param);
                                        desc->async_tx.callback = NULL;
                                }
                        }

                        if (desc->async_tx.phys != phys_complete) {
                                /*
                                 * a completed entry, but not the last, so clean
                                 * up if the client is done with the descriptor
                                 */
                                if (desc->async_tx.ack) {
                                        list_del(&desc->node);
                                        list_add_tail(&desc->node,
                                                      &ioat_chan->free_desc);
                                } else
                                        desc->async_tx.cookie = 0;
                        } else {
                                /*
                                 * last used desc. Do not remove, so we can
                                 * append from it, but don't look at it next
                                 * time, either
                                 */
                                desc->async_tx.cookie = 0;

                                /* TODO check status bits? */
                                break;
                        }
                }
                break;
        case IOAT_VER_2_0:
                /* has some other thread already cleaned up? */
                if (ioat_chan->used_desc.prev == NULL)
                        break;

                /* work backwards to find latest finished desc */
                desc = to_ioat_desc(ioat_chan->used_desc.next);
                latest_desc = NULL;
                do {
                        desc = to_ioat_desc(desc->node.prev);
                        desc_phys = (unsigned long)desc->async_tx.phys
                                       & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
                        if (desc_phys == phys_complete) {
                                latest_desc = desc;
                                break;
                        }
                } while (&desc->node != ioat_chan->used_desc.prev);

                if (latest_desc != NULL) {

                        /* work forwards to clear finished descriptors */
                        for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
                             &desc->node != latest_desc->node.next &&
                             &desc->node != ioat_chan->used_desc.next;
                             desc = to_ioat_desc(desc->node.next)) {
                                if (desc->async_tx.cookie) {
                                        cookie = desc->async_tx.cookie;
                                        desc->async_tx.cookie = 0;

                                        pci_unmap_page(ioat_chan->device->pdev,
                                                      pci_unmap_addr(desc, dst),
                                                      pci_unmap_len(desc, len),
                                                      PCI_DMA_FROMDEVICE);
                                        pci_unmap_page(ioat_chan->device->pdev,
                                                      pci_unmap_addr(desc, src),
                                                      pci_unmap_len(desc, len),
                                                      PCI_DMA_TODEVICE);

                                        if (desc->async_tx.callback) {
                                                desc->async_tx.callback(desc->async_tx.callback_param);
                                                desc->async_tx.callback = NULL;
                                        }
                                }
                        }

                        /* move used.prev up beyond those that are finished */
                        if (&desc->node == ioat_chan->used_desc.next)
                                ioat_chan->used_desc.prev = NULL;
                        else
                                ioat_chan->used_desc.prev = &desc->node;
                }
                break;
        }

        spin_unlock_bh(&ioat_chan->desc_lock);

        ioat_chan->last_completion = phys_complete;
        if (cookie != 0)
                ioat_chan->completed_cookie = cookie;

        spin_unlock_bh(&ioat_chan->cleanup_lock);
}
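For context on how the prep and cleanup routines above are reached: a dmaengine client of this kernel generation queues a copy through the generic async-memcpy helpers rather than calling the ioat functions directly. The sketch below is not part of ioat_dma.c; it assumes the caller has already obtained a DMA-capable struct dma_chan through the dmaengine client-registration machinery, and the helper name example_offload_copy is hypothetical.

#include <linux/dmaengine.h>

/* Illustration only: offload one buffer-to-buffer copy and busy-wait for
 * completion.  The generic calls used here (dma_async_memcpy_buf_to_buf,
 * dma_async_memcpy_issue_pending, dma_async_memcpy_complete) are the entry
 * points that eventually reach the ioat*_dma_prep_memcpy and
 * ioat_dma_memcpy_cleanup paths shown above.
 */
static int example_offload_copy(struct dma_chan *chan,
                                void *dst, void *src, size_t len)
{
        dma_cookie_t cookie;

        cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
        if (cookie < 0)
                return -ENOMEM; /* no descriptor could be allocated */

        /* flush the pending descriptor(s) to the hardware */
        dma_async_memcpy_issue_pending(chan);

        /* poll until the engine reports the cookie as completed */
        while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) ==
               DMA_IN_PROGRESS)
                cpu_relax();

        return 0;
}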
