via_dmablit.c

来自「LINUX 2.6.17.4的源码」· C语言 代码 · 共 806 行 · 第 1/2 页

C
806
字号
	int active;

	/* Protect the handle window checks against concurrent updates
	 * from the IRQ handler / polling timer. */
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		/* Map the handle back to its wait-queue slot, wrapping
		 * around the fixed-size slot ring. */
		slot = handle - blitq->done_blit_handle + blitq->cur -1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}

/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 * Returns 0 on completion, otherwise the error from DRM_WAIT_ON
 * (e.g. interrupted by a signal or timed out).
 */

static int
via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	/* Only sleep if the blit is still pending; the condition is
	 * re-evaluated inside DRM_WAIT_ON so a completion between the
	 * check and the sleep is not missed. */
	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQ's can and do work in parallel. If the hardware has
 * irqs, it will shorten the latency somewhat.
 */
static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	/* Recover the engine index from the queue's position in the
	 * per-device blit_queues array. */
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		blitq->poll_timer.expires = jiffies+1;
		add_timer(&blitq->poll_timer);
	}
	/*
	 * NOTE(review): the handler runs a second time after re-arming the
	 * timer — presumably to catch a blit that completed between the
	 * first call and add_timer(); confirm before removing.
	 */
	via_dmablit_handler(dev, engine, 0);
}

/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(void *data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/* Drain every slot the IRQ/timer path has marked as serviced. */
	while(blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		/* Slot indices wrap around the fixed-size ring. */
		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		/* Drop the IRQ-disabling spinlock around the wakeup and
		 * the freeing of the scatter/gather info. */
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		/* Re-take the lock before re-testing the loop condition. */
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Init all blit engines. Currently we use two, but some hardware have 4.
*/voidvia_init_dmablit(drm_device_t *dev){	int i,j;	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;	drm_via_blitq_t *blitq;	pci_set_master(dev->pdev);			for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {		blitq = dev_priv->blit_queues + i;		blitq->dev = dev;		blitq->cur_blit_handle = 0;		blitq->done_blit_handle = 0;		blitq->head = 0;		blitq->cur = 0;		blitq->serviced = 0;		blitq->num_free = VIA_NUM_BLIT_SLOTS;		blitq->num_outstanding = 0;		blitq->is_active = 0;		blitq->aborting = 0;		blitq->blit_lock = SPIN_LOCK_UNLOCKED;		for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);		}		DRM_INIT_WAITQUEUE(&blitq->busy_queue);		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);		init_timer(&blitq->poll_timer);		blitq->poll_timer.function = &via_dmablit_timer;		blitq->poll_timer.data = (unsigned long) blitq;	}	}/* * Build all info and do all mappings required for a blit. */		static intvia_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer){	int draw = xfer->to_fb;	int ret = 0;		vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;	vsg->bounce_buffer = NULL;	vsg->state = dr_via_sg_init;	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {		DRM_ERROR("Zero size bitblt.\n");		return DRM_ERR(EINVAL);	}	/*	 * Below check is a driver limitation, not a hardware one. We	 * don't want to lock unused pages, and don't want to incoporate the	 * extra logic of avoiding them. Make sure there are no. 	 * (Not a big limitation anyway.)	 */	if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||	    (xfer->mem_stride > 2048*4)) {		DRM_ERROR("Too large system memory stride. Stride: %d, "			  "Length: %d\n", xfer->mem_stride, xfer->line_length);		return DRM_ERR(EINVAL);	}	if (xfer->num_lines > 2048) {		DRM_ERROR("Too many PCI DMA bitblt lines.\n");		return DRM_ERR(EINVAL);	}			/* 	 * we allow a negative fb stride to allow flipping of images in	 * transfer. 	 
*/	if (xfer->mem_stride < xfer->line_length ||		abs(xfer->fb_stride) < xfer->line_length) {		DRM_ERROR("Invalid frame-buffer / memory stride.\n");		return DRM_ERR(EINVAL);	}	/*	 * A hardware bug seems to be worked around if system memory addresses start on	 * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted	 * about this. Meanwhile, impose the following restrictions:	 */#ifdef VIA_BUGFREE	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||	    ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {		DRM_ERROR("Invalid DRM bitblt alignment.\n");	        return DRM_ERR(EINVAL);	}#else	if ((((unsigned long)xfer->mem_addr & 15) ||	    ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||	    (xfer->fb_stride & 3)) {		DRM_ERROR("Invalid DRM bitblt alignment.\n");	        return DRM_ERR(EINVAL);	}	#endif	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {		DRM_ERROR("Could not lock DMA pages.\n");		via_free_sg_info(dev->pdev, vsg);		return ret;	}	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);	if (0 != (ret = via_alloc_desc_pages(vsg))) {		DRM_ERROR("Could not allocate DMA descriptor pages.\n");		via_free_sg_info(dev->pdev, vsg);		return ret;	}	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);		return 0;}	/* * Reserve one free slot in the blit queue. Will wait for one second for one * to become available. Otherwise -EBUSY is returned. */static int via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine){	int ret=0;	unsigned long irqsave;	DRM_DEBUG("Num free is %d\n", blitq->num_free);	spin_lock_irqsave(&blitq->blit_lock, irqsave);	while(blitq->num_free == 0) {		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);		if (ret) {			return (DRM_ERR(EINTR) == ret) ? 
DRM_ERR(EAGAIN) : ret;		}				spin_lock_irqsave(&blitq->blit_lock, irqsave);	}		blitq->num_free--;	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);	return 0;}/* * Hand back a free slot if we changed our mind. */static void via_dmablit_release_slot(drm_via_blitq_t *blitq){	unsigned long irqsave;	spin_lock_irqsave(&blitq->blit_lock, irqsave);	blitq->num_free++;	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);	DRM_WAKEUP( &blitq->busy_queue );}/* * Grab a free slot. Build blit info and queue a blit. */static int via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)	 {	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;	drm_via_sg_info_t *vsg;	drm_via_blitq_t *blitq;        int ret;	int engine;	unsigned long irqsave;	if (dev_priv == NULL) {		DRM_ERROR("Called without initialization.\n");		return DRM_ERR(EINVAL);	}	engine = (xfer->to_fb) ? 0 : 1;	blitq = dev_priv->blit_queues + engine;	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {		return ret;	}	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {		via_dmablit_release_slot(blitq);		return DRM_ERR(ENOMEM);	}	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {		via_dmablit_release_slot(blitq);		kfree(vsg);		return ret;	}	spin_lock_irqsave(&blitq->blit_lock, irqsave);	blitq->blits[blitq->head++] = vsg;	if (blitq->head >= VIA_NUM_BLIT_SLOTS) 		blitq->head = 0;	blitq->num_outstanding++;	xfer->sync.sync_handle = ++blitq->cur_blit_handle; 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);	xfer->sync.engine = engine;       	via_dmablit_handler(dev, engine, 0);	return 0;}/* * Sync on a previously submitted blit. Note that the X server use signals extensively, and * that there is a very big proability that this IOCTL will be interrupted by a signal. In that * case it returns with -EAGAIN for the signal to be delivered.  * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock(). 
*/intvia_dma_blit_sync( DRM_IOCTL_ARGS ){	drm_via_blitsync_t sync;	int err;	DRM_DEVICE;	DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));		if (sync.engine >= VIA_NUM_BLIT_ENGINES) 		return DRM_ERR(EINVAL);	err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);	if (DRM_ERR(EINTR) == err)		err = DRM_ERR(EAGAIN);	return err;}	/* * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should  * be reissued. See the above IOCTL code. */int via_dma_blit( DRM_IOCTL_ARGS ){	drm_via_dmablit_t xfer;	int err;	DRM_DEVICE;	DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));	err = via_dmablit(dev, &xfer);	DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));	return err;}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?