via_dmablit.c

来自「linux 内核源代码」· C语言 代码 · 共 817 行 · 第 1/2 页

C
817
字号
	/*
	 * Allow for handle wraparounds.
	 */
	/*
	 * NOTE(review): this is the tail of via_dmablit_active(); its opening
	 * lines are outside this view.  Handles are compared with unsigned
	 * wraparound arithmetic: the blit is considered active when @handle
	 * lies after the last completed handle and at or before the most
	 * recently issued one, within a (1 << 23) window.
	 */
	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		/*
		 * Translate the handle into its slot in the ring of per-slot
		 * wait queues, wrapping modulo VIA_NUM_BLIT_SLOTS.
		 */
		slot = handle - blitq->done_blit_handle + blitq->cur -1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}

/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

/*
 * Block until the blit identified by @handle on @engine has completed, or
 * until the 3 * DRM_HZ timeout expires.  Returns 0 on completion, or the
 * error code produced by DRM_WAIT_ON (e.g. -EINTR on signal delivery).
 */
static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		/*
		 * Re-evaluate activity inside the wait condition so a
		 * completion racing with us does not leave the waiter
		 * sleeping on an already-finished blit.
		 */
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}

/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQ's can and do work in parallel. If the hardware has
 * irqs, it will shorten the latency somewhat.
 */

static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	/* Recover the engine index from this queue's offset in the array. */
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		/* Re-arm for the next jiffy unless the handler already did. */
		mod_timer(&blitq->poll_timer, jiffies + 1);

	       /*
		* Rerun handler to delete timer if engines are off, and
		* to shorten abort latency. This is a little nasty.
		*/

	       via_dmablit_handler(dev, engine, 0);

	}
}

/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */

static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/* Release every slot completed since the last run of this task. */
	while(blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		/* "serviced" is an index into a ring; wrap it. */
		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		/*
		 * Drop the lock around the wakeup and the freeing of the
		 * scatter/gather info; via_free_sg_info()/kfree() must not
		 * run under the irq-safe spinlock.
		 */
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Init all blit engines. Currently we use two, but some hardware have 4.
 */
/*
 * via_init_dmablit - initialise all blit queues at driver load time.
 * Enables PCI bus mastering and resets the per-engine queue bookkeeping,
 * per-slot wait queues, the release work item and the polling timer.
 */
void
via_init_dmablit(struct drm_device *dev)
{
	int i,j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	/* DMA blits require the device to act as a PCI bus master. */
	pci_set_master(dev->pdev);

	for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		/*
		 * NOTE(review): one slot fewer than VIA_NUM_BLIT_SLOTS is
		 * handed out — presumably to keep "full" distinguishable
		 * from "empty" in the ring; confirm against the queue code.
		 */
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

/*
 * Validates the user-supplied transfer description in @xfer, then locks
 * the user pages, allocates DMA descriptor pages and builds the device
 * mapping in @vsg.  Returns 0 on success or a negative errno; on failure
 * any partially built state in @vsg is torn down via via_free_sg_info().
 */
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incoporate the
	 * extra logic of avoiding them. Make sure there are no.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	/*
	 * A fully packed transfer (stride == length on both sides) can be
	 * collapsed into a single long line.
	 */
	if ((xfer->mem_stride == xfer->line_length) &&
	   (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrary large number of pages, since that causes a
	 * DOS security hole.
	 */

	/*
	 * NOTE(review): num_lines*mem_stride could conceivably wrap for very
	 * large mem_stride values; the stride check above only bounds
	 * mem_stride relative to line_length — verify this cannot overflow.
	 */
	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * we allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
		abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	      ((unsigned long)xfer->fb_addr & 3)) ||
	   ((xfer->num_lines > 1) &&
	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	/* First pass (mode 0) sizes the mapping; second pass (mode 1) fills
	 * the descriptors after the descriptor pages are allocated. */
	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
*/static int via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine){	int ret=0;	unsigned long irqsave;	DRM_DEBUG("Num free is %d\n", blitq->num_free);	spin_lock_irqsave(&blitq->blit_lock, irqsave);	while(blitq->num_free == 0) {		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);		if (ret) {			return (-EINTR == ret) ? -EAGAIN : ret;		}				spin_lock_irqsave(&blitq->blit_lock, irqsave);	}		blitq->num_free--;	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);	return 0;}/* * Hand back a free slot if we changed our mind. */static void via_dmablit_release_slot(drm_via_blitq_t *blitq){	unsigned long irqsave;	spin_lock_irqsave(&blitq->blit_lock, irqsave);	blitq->num_free++;	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);	DRM_WAKEUP( &blitq->busy_queue );}/* * Grab a free slot. Build blit info and queue a blit. */static int via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)	 {	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;	drm_via_sg_info_t *vsg;	drm_via_blitq_t *blitq;	int ret;	int engine;	unsigned long irqsave;	if (dev_priv == NULL) {		DRM_ERROR("Called without initialization.\n");		return -EINVAL;	}	engine = (xfer->to_fb) ? 0 : 1;	blitq = dev_priv->blit_queues + engine;	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {		return ret;	}	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {		via_dmablit_release_slot(blitq);		return -ENOMEM;	}	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {		via_dmablit_release_slot(blitq);		kfree(vsg);		return ret;	}	spin_lock_irqsave(&blitq->blit_lock, irqsave);	blitq->blits[blitq->head++] = vsg;	if (blitq->head >= VIA_NUM_BLIT_SLOTS) 		blitq->head = 0;	blitq->num_outstanding++;	xfer->sync.sync_handle = ++blitq->cur_blit_handle; 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);	xfer->sync.engine = engine;       	via_dmablit_handler(dev, engine, 0);	return 0;}/* * Sync on a previously submitted blit. 
Note that the X server use signals extensively, and * that there is a very big probability that this IOCTL will be interrupted by a signal. In that * case it returns with -EAGAIN for the signal to be delivered.  * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock(). */intvia_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ){	drm_via_blitsync_t *sync = data;	int err;	if (sync->engine >= VIA_NUM_BLIT_ENGINES) 		return -EINVAL;	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);	if (-EINTR == err)		err = -EAGAIN;	return err;}	/* * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should  * be reissued. See the above IOCTL code. */int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ){	drm_via_dmablit_t *xfer = data;	int err;	err = via_dmablit(dev, xfer);	return err;}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?