📄 drm_dma.h
字号:
/*
 * NOTE(review): this chunk begins mid-function -- the opening of the
 * enclosing function (presumably DRM(dma_enqueue); verify against the full
 * file) is not visible here.  What follows is the per-buffer body of its
 * submission loop: validate each index the caller passed in d->send_indices,
 * then either free the buffer or put it on the queue's waitlist.
 * Invariant visible in every error path below: q->use_count was taken by the
 * unseen prologue, so each early return first drops it with atomic_dec().
 */
		idx = d->send_indices[i];
		/* Reject out-of-range buffer indices from userspace. */
		if (idx < 0 || idx >= dma->buf_count) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[ idx ];
		/* Only the owning process may submit its buffers. */
		if (buf->pid != current->pid) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		/* Buffer already on some list: log it, but (unlike the checks
		   above) do NOT bail out -- note there is no return here. */
		if (buf->list != DRM_LIST_NONE) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer %d on list %d\n",
				  current->pid, buf->idx, buf->list);
		}
		buf->used	  = d->send_sizes[i];
		buf->while_locked = while_locked;
		buf->context	  = d->context;
		/* Zero-length submissions are logged but still queued. */
		if (!buf->used) {
			DRM_ERROR("Queueing 0 length buffer\n");
		}
		/* A buffer may not be queued twice. */
		if (buf->pending) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		if (buf->waiting) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		buf->waiting = 1;
		/* use_count == 1 means we hold the only reference (queue is
		   being torn down), likewise if finalization is in progress:
		   free the buffer instead of queueing it. */
		if (atomic_read(&q->use_count) == 1
		    || atomic_read(&q->finalization)) {
			DRM(free_buffer)(dev, buf);
		} else {
			DRM(waitlist_put)(&q->waitlist, buf);
			atomic_inc(&q->total_queued);
		}
	}
	atomic_dec(&q->use_count);
	return 0;
}

/*
 * Grab up to (d->request_count - d->granted_count) free buffers of the given
 * size order off that order's freelist and report their indices and sizes
 * back to userspace.
 *
 * Returns 0 on success (including a partial grab -- running out of free
 * buffers just breaks the loop), or -EFAULT if the copy back to userspace
 * fails.  Progress is recorded in d->granted_count either way.
 */
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
					 int order)
{
	int		  i;
	drm_buf_t	  *buf;
	drm_device_dma_t  *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		/* _DRM_DMA_WAIT makes freelist_get block for a buffer;
		   otherwise a NULL return ends the grab early. */
		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
					d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		/* A buffer coming off the freelist should be idle; log if
		   not (diagnostic only, the buffer is handed out anyway). */
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
				  buf->idx,
				  buf->pid,
				  buf->waiting,
				  buf->pending);
		}
		buf->pid = current->pid;
		if (copy_to_user(&d->request_indices[i],
				 &buf->idx,
				 sizeof(buf->idx)))
			return -EFAULT;
		if (copy_to_user(&d->request_sizes[i],
				 &buf->total,
				 sizeof(buf->total)))
			return -EFAULT;
		++d->granted_count;
	}
	return 0;
}

/*
 * Satisfy a userspace buffer request: try the exact size order first, then
 * (if the flags allow) fall back to smaller and/or larger orders until the
 * request is filled or the order range is exhausted.
 *
 * NOTE(review): retcode from the fallback loops only gates further attempts
 * (the "!retcode" loop conditions); the function unconditionally returns 0,
 * so a -EFAULT from dma_get_buffers_of_order() is swallowed here.  Callers
 * apparently rely on granted_count instead -- confirm before "fixing".
 */
int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
{
	int order;
	int retcode = 0;
	int tmp_order;

	order = DRM(order)(dma->request_size);

	dma->granted_count = 0;
	retcode		   = DRM(dma_get_buffers_of_order)(dev, dma, order);

	/* Not enough buffers at the requested order: walk down through the
	   smaller orders if the caller allows it. */
	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
		for (tmp_order = order - 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {
			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
								tmp_order);
		}
	}

	/* Still short: walk up through the larger orders if allowed. */
	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
		for (tmp_order = order + 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {
			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
								tmp_order);
		}
	}
	return 0;
}

#endif /* __HAVE_OLD_DMA */

#if __HAVE_DMA_IRQ

/*
 * Claim the given IRQ line for this device and install the DMA interrupt
 * handler, bracketing request_irq() with the driver's pre/post-install
 * hooks.
 *
 * Returns -EINVAL for irq 0, -EBUSY if an IRQ is already installed, or the
 * request_irq() failure code (dev->irq is rolled back to 0 in that case).
 * dev->irq is only read/written under struct_sem.
 */
int DRM(irq_install)( drm_device_t *dev, int irq )
{
	int ret;

	if ( !irq )
		return -EINVAL;

	down( &dev->struct_sem );
	if ( dev->irq ) {
		up( &dev->struct_sem );
		return -EBUSY;
	}
	dev->irq = irq;
	up( &dev->struct_sem );

	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );

	/* Reset the per-device DMA bookkeeping before the handler can run. */
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;

#if __HAVE_DMA_IRQ_BH
	/* Bottom-half task queue entry run after the hard IRQ handler. */
	INIT_LIST_HEAD( &dev->tq.list );
	dev->tq.sync = 0;
	dev->tq.routine = DRM(dma_immediate_bh);
	dev->tq.data = dev;
#endif

#if __HAVE_VBL_IRQ
	/* Vblank wait-queue / signal-list state used by DRM(wait_vblank). */
	init_waitqueue_head(&dev->vbl_queue);
	spin_lock_init( &dev->vbl_lock );
	INIT_LIST_HEAD( &dev->vbl_sigs.head );
	dev->vbl_pending = 0;
#endif

				/* Before installing handler */
	DRM(driver_irq_preinstall)(dev);

				/* Install handler */
	ret = request_irq( dev->irq, DRM(dma_service),
			   DRM_IRQ_TYPE, dev->devname, dev );
	if ( ret < 0 ) {
		down( &dev->struct_sem );
		dev->irq = 0;	/* roll back the claim taken above */
		up( &dev->struct_sem );
		return ret;
	}

				/* After installing handler */
	DRM(driver_irq_postinstall)(dev);

	return 0;
}

/*
 * Tear down the interrupt handler installed by DRM(irq_install).
 * dev->irq is cleared under struct_sem first, so concurrent callers see the
 * IRQ as gone before the line is actually released.
 * Returns -EINVAL if no IRQ was installed.
 */
int DRM(irq_uninstall)( drm_device_t *dev )
{
	int irq;

	down( &dev->struct_sem );
	irq = dev->irq;
	dev->irq = 0;
	up( &dev->struct_sem );

	if ( !irq )
		return -EINVAL;

	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );

	DRM(driver_irq_uninstall)( dev );

	free_irq( irq, dev );

	return 0;
}

/*
 * DRM_IOCTL_CONTROL handler (IRQ-capable build): dispatch install/uninstall
 * of the interrupt handler on behalf of userspace.
 */
int DRM(control)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_control_t ctl;

	if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
		return -EFAULT;

	switch ( ctl.func ) {
	case DRM_INST_HANDLER:
		return DRM(irq_install)( dev, ctl.irq );
	case DRM_UNINST_HANDLER:
		return DRM(irq_uninstall)( dev );
	default:
		return -EINVAL;
	}
}

#if __HAVE_VBL_IRQ

/*
 * DRM_IOCTL_WAIT_VBLANK handler.  Either blocks the caller until the
 * requested vblank sequence (default path), or -- with _DRM_VBLANK_SIGNAL --
 * queues an entry so DRM(vbl_send_signals) delivers a signal from the
 * interrupt path instead, returning immediately.
 */
int DRM(wait_vblank)(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long data )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_wait_vblank_t vblwait;
	struct timeval now;
	int ret = 0;
	unsigned int flags;

	if (!dev->irq)
		return -EINVAL;

	DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
				  sizeof(vblwait) );

	switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
	case _DRM_VBLANK_RELATIVE:
		/* Convert a relative request to absolute, then fall through
		   to the absolute case (intentional: no break). */
		vblwait.request.sequence += atomic_read( &dev->vbl_received );
		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;

	if ( flags & _DRM_VBLANK_SIGNAL ) {
		unsigned long irqflags;
		drm_vbl_sig_t *vbl_sig;

		vblwait.reply.sequence = atomic_read( &dev->vbl_received );

		spin_lock_irqsave( &dev->vbl_lock, irqflags );

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case.
		 * NOTE(review): vbl_sig is used directly as the list cursor
		 * via the cast -- this presumably relies on the list_head
		 * being the first member of drm_vbl_sig_t; verify against
		 * the struct definition.
		 */
		list_for_each( ( (struct list_head *) vbl_sig ),
			       &dev->vbl_sigs.head ) {
			if (vbl_sig->sequence == vblwait.request.sequence
			    && vbl_sig->info.si_signo == vblwait.request.signal
			    && vbl_sig->task == current) {
				spin_unlock_irqrestore( &dev->vbl_lock,
							irqflags );
				goto done;
			}
		}

		/* Hard cap on outstanding signal requests per device. */
		if ( dev->vbl_pending >= 100 ) {
			spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
			return -EBUSY;
		}

		dev->vbl_pending++;

		/* Allocate outside the spinlock: kmalloc(GFP_KERNEL) may
		   sleep.  The entry is linked in under the lock below. */
		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );

		if ( !( vbl_sig = kmalloc(sizeof(drm_vbl_sig_t),
					  GFP_KERNEL) ) )
			return -ENOMEM;

		memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );

		vbl_sig->sequence = vblwait.request.sequence;
		vbl_sig->info.si_signo = vblwait.request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave( &dev->vbl_lock, irqflags );

		list_add_tail( (struct list_head *) vbl_sig,
			       &dev->vbl_sigs.head );

		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
	} else {
		/* Synchronous path: block until the sequence is reached,
		   then timestamp the reply. */
		ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );

		do_gettimeofday( &now );
		vblwait.reply.tval_sec = now.tv_sec;
		vblwait.reply.tval_usec = now.tv_usec;
	}

done:
	DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
				sizeof(vblwait) );

	return ret;
}

/*
 * Deliver every queued vblank signal whose sequence has been reached.
 * Called with the current vblank counter; the unsigned subtraction with the
 * (1<<23) window handles counter wrap-around.  Entries are unlinked and
 * freed as they fire (hence the _safe iterator).
 */
void DRM(vbl_send_signals)( drm_device_t *dev )
{
	struct list_head *tmp;
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
	unsigned long flags;

	spin_lock_irqsave( &dev->vbl_lock, flags );

	/* Same cast-as-cursor idiom as in DRM(wait_vblank) above. */
	list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp,
			    &dev->vbl_sigs.head ) {
		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
			vbl_sig->info.si_code = vbl_seq;
			send_sig_info( vbl_sig->info.si_signo,
				       &vbl_sig->info, vbl_sig->task );

			list_del( (struct list_head *) vbl_sig );

			kfree( vbl_sig );

			dev->vbl_pending--;
		}
	}

	spin_unlock_irqrestore( &dev->vbl_lock, flags );
}

#endif /* __HAVE_VBL_IRQ */

#else

/*
 * DRM_IOCTL_CONTROL handler for drivers built without IRQ support:
 * install/uninstall become harmless no-ops so userspace still succeeds.
 */
int DRM(control)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_control_t ctl;

	if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
		return -EFAULT;

	switch ( ctl.func ) {
	case DRM_INST_HANDLER:
	case DRM_UNINST_HANDLER:
		return 0;
	default:
		return -EINVAL;
	}
}

#endif /* __HAVE_DMA_IRQ */

#endif /* __HAVE_DMA */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -