via_dmablit.c

From the Linux 2.6.17.4 source tree · C · 806 lines · page 1 of 2

/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */

/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>

#define VIA_PGDN(x)             (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)            (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)              ((unsigned long)(x) >> PAGE_SHIFT)

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while(num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode = 0, count how many descriptors are needed.
 * If mode = 1, map the DMA pages for the device, and also build and map the descriptors.
 * Descriptors are run in reverse order by the hardware because we are not allowed to update the
 * 'next' field without syncing calls when the descriptor is already mapped.
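 * The chain is therefore built back to front: each descriptor's 'next' field holds the
 * bus address of the previously mapped descriptor, and vsg->chain_start ends up pointing
 * at the descriptor mapped last, which is the one the engine fetches first.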
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;
				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is consistent
 * with the actual status of the used resources.
 */

static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch(vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
	case dr_via_desc_pages_alloc:
		for (i=0; i<vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
			  free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
	case dr_via_pages_locked:
		for (i=0; i<vsg->num_pages; ++i) {
			if ( NULL != (page = vsg->pages[i])) {
				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
	case dr_via_pages_alloc:
		vfree(vsg->pages);
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
}

/*
 * Obtain a page pointer array and lock all pages into system memory.
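 * The pages are pinned with get_user_pages() so they stay resident while the
 * blit engine reads or writes them.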
 * A segmentation violation will occur here if the calling user does not have
 * access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return DRM_ERR(ENOMEM);
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
			     vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return DRM_ERR(EINVAL);
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
		return DRM_ERR(ENOMEM);

	memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
	vsg->state = dr_via_desc_pages_alloc;
	for (i=0; i<vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return DRM_ERR(ENOMEM);
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(drm_device_t *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(drm_device_t *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}



/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave=0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	done_transfer = blitq->is_active &&
	  (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer)) {
				blitq->poll_timer.expires = jiffies+1;
				add_timer(&blitq->poll_timer);
			}
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
