📄 mga_dma.c
	return 0;
}

/* Kick the primary DMA stream: take the hardware lock if we do not
 * already hold it, fire the next primary buffer if one is ready, and
 * wake up anyone waiting on a flush or on a free client buffer.
 */
int mga_dma_schedule(drm_device_t *dev, int locked)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	int retval = 0;

	if (!dev_priv)
		return -EBUSY;

	if (test_and_set_bit(0, &dev->dma_flag)) {
		retval = -EBUSY;
		goto sch_out_wakeup;
	}

	if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) ||
	    test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) ||
	    test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
		locked = 1;
	}

	if (!locked &&
	    !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
		clear_bit(0, &dev->dma_flag);
		retval = -EBUSY;
		goto sch_out_wakeup;
	}

	if (!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) {
		/* Fire dma buffer */
		if (mga_decide_to_fire(dev)) {
			clear_bit(MGA_BUF_FORCE_FIRE,
				  &dev_priv->next_prim->buffer_status);
			if (dev_priv->current_prim == dev_priv->next_prim) {
				/* Schedule overflow for a later time */
				set_bit(MGA_BUF_NEEDS_OVERFLOW,
					&dev_priv->next_prim->buffer_status);
			}
			mga_fire_primary(dev, dev_priv->next_prim);
		} else {
			clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
		}
	}

	if (!locked) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

	clear_bit(0, &dev->dma_flag);

sch_out_wakeup:
	if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
	    atomic_read(&dev_priv->pending_bufs) == 0) {
		/* Everything has been processed by the hardware */
		clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
		wake_up_interruptible(&dev_priv->flush_queue);
	}

	if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
	    dev_priv->tail->age < dev_priv->last_prim_age)
		wake_up_interruptible(&dev_priv->buf_queue);

	return retval;
}

/* Interrupt handler: acknowledge the interrupt, retire the primary
 * buffer that just completed, and reschedule DMA from the immediate
 * task queue.
 */
static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t *dev = (drm_device_t *)device;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_mga_prim_buf_t *last_prim_buffer;

	atomic_inc(&dev->total_irq);
	if ((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001)
		return;
	MGA_WRITE(MGAREG_ICLEAR, 0x00000001);

	last_prim_buffer = dev_priv->last_prim;
	last_prim_buffer->num_dwords = 0;
	last_prim_buffer->sec_used = 0;
	dev_priv->sarea_priv->last_dispatch =
		dev_priv->last_prim_age = last_prim_buffer->prim_age;
	clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status);
	clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status);
	clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
	atomic_dec(&dev_priv->pending_bufs);
	queue_task(&dev->tq, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
	wake_up_interruptible(&dev_priv->wait_queue);
}

static void mga_dma_task_queue(void *device)
{
	mga_dma_schedule((drm_device_t *)device, 0);
}

/* Release all DMA state: the ioremapped primary space, the status
 * page, the primary buffers and the client-buffer freelist.
 */
int mga_dma_cleanup(drm_device_t *dev)
{
	if (dev->dev_private) {
		drm_mga_private_t *dev_priv =
			(drm_mga_private_t *)dev->dev_private;

		if (dev->irq)
			mga_flush_queue(dev);
		mga_dma_quiescent(dev);

		if (dev_priv->ioremap) {
			int temp = (dev_priv->warp_ucode_size +
				    dev_priv->primary_size +
				    PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;

			drm_ioremapfree((void *)dev_priv->ioremap, temp);
		}
		if (dev_priv->status_page != NULL) {
			iounmap(dev_priv->status_page);
		}
		if (dev_priv->real_status_page != 0UL) {
			mga_free_page(dev, dev_priv->real_status_page);
		}
		if (dev_priv->prim_bufs != NULL) {
			int i;

			for (i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
				if (dev_priv->prim_bufs[i] != NULL) {
					drm_free(dev_priv->prim_bufs[i],
						 sizeof(drm_mga_prim_buf_t),
						 DRM_MEM_DRIVER);
				}
			}
			drm_free(dev_priv->prim_bufs,
				 sizeof(void *) * (MGA_NUM_PRIM_BUFS + 1),
				 DRM_MEM_DRIVER);
		}
		if (dev_priv->head != NULL) {
			mga_freelist_cleanup(dev);
		}
		drm_free(dev->dev_private, sizeof(drm_mga_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	return 0;
}

/* Fill in dev_priv from the userspace init block, set up the status
 * page and primary buffers, and prime the hardware with an initial
 * DWGSYNC/SOFTRAP sequence.
 */
static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init)
{
	drm_mga_private_t *dev_priv;
	drm_map_t *sarea_map = NULL;

	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;
	dev->dev_private = (void *)dev_priv;

	memset(dev_priv, 0, sizeof(drm_mga_private_t));

	if ((init->reserved_map_idx >= dev->map_count) ||
	    (init->buffer_map_idx >= dev->map_count)) {
		mga_dma_cleanup(dev);
		return -EINVAL;
	}

	dev_priv->reserved_map_idx = init->reserved_map_idx;
	dev_priv->buffer_map_idx = init->buffer_map_idx;
	sarea_map = dev->maplist[0];
	dev_priv->sarea_priv = (drm_mga_sarea_t *)
		((u8 *)sarea_map->handle + init->sarea_priv_offset);

	/* Scale primary size to the next page */
	dev_priv->chipset = init->chipset;
	dev_priv->frontOffset = init->frontOffset;
	dev_priv->backOffset = init->backOffset;
	dev_priv->depthOffset = init->depthOffset;
	dev_priv->textureOffset = init->textureOffset;
	dev_priv->textureSize = init->textureSize;
	dev_priv->cpp = init->cpp;
	dev_priv->sgram = init->sgram;
	dev_priv->stride = init->stride;
	dev_priv->mAccess = init->mAccess;

	init_waitqueue_head(&dev_priv->flush_queue);
	init_waitqueue_head(&dev_priv->buf_queue);
	dev_priv->WarpPipe = 0xff000000;
	dev_priv->vertexsize = 0;

	DRM_DEBUG("chipset=%d ucode_size=%d backOffset=%x depthOffset=%x\n",
		  dev_priv->chipset, dev_priv->warp_ucode_size,
		  dev_priv->backOffset, dev_priv->depthOffset);
	DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n",
		  dev_priv->cpp, dev_priv->sgram,
		  dev_priv->stride, dev_priv->mAccess);

	memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
	       sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES);

	if (mga_init_primary_bufs(dev, init) != 0) {
		DRM_ERROR("Can not initialize primary buffers\n");
		mga_dma_cleanup(dev);
		return -ENOMEM;
	}

	dev_priv->real_status_page = mga_alloc_page(dev);
	if (dev_priv->real_status_page == 0UL) {
		mga_dma_cleanup(dev);
		DRM_ERROR("Can not allocate status page\n");
		return -ENOMEM;
	}

	dev_priv->status_page =
		ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page),
				PAGE_SIZE);
	if (dev_priv->status_page == NULL) {
		mga_dma_cleanup(dev);
		DRM_ERROR("Can not remap status page\n");
		return -ENOMEM;
	}

	/* Write the status page when a SECEND or SOFTRAP occurs */
	MGA_WRITE(MGAREG_PRIMPTR,
		  virt_to_bus((void *)dev_priv->real_status_page) |
		  0x00000003);

	/* Private is now filled in, initialize the hardware */
	{
		PRIMLOCALS;
		PRIMGETPTR(dev_priv);

		PRIMOUTREG(MGAREG_DMAPAD, 0);
		PRIMOUTREG(MGAREG_DMAPAD, 0);
		PRIMOUTREG(MGAREG_DWGSYNC, 0x0100);
		PRIMOUTREG(MGAREG_SOFTRAP, 0);

		/* Poll for the first buffer to ensure that
		 * the status register will be correct
		 */
		mga_flush_write_combine();
		MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
		MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
					   PDEA_pagpxfer_enable));

		while (MGA_READ(MGAREG_DWGSYNC) != 0x0100)
			;
	}

	if (mga_freelist_init(dev) != 0) {
		DRM_ERROR("Could not initialize freelist\n");
		mga_dma_cleanup(dev);
		return -ENOMEM;
	}

	return 0;
}

int mga_dma_init(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_mga_init_t init;

	if (copy_from_user(&init, (drm_mga_init_t *)arg, sizeof(init)))
		return -EFAULT;

	switch (init.func) {
	case MGA_INIT_DMA:
		return mga_dma_initialize(dev, &init);
	case MGA_CLEANUP_DMA:
		return mga_dma_cleanup(dev);
	}

	return -EINVAL;
}

int mga_irq_install(drm_device_t *dev, int irq)
{
	int retcode;

	if (!irq)
		return -EINVAL;

	down(&dev->struct_sem);
	if (dev->irq) {
		up(&dev->struct_sem);
		return -EBUSY;
	}
	dev->irq = irq;
	up(&dev->struct_sem);

	DRM_DEBUG("install irq handler %d\n", irq);

	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;
	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;
	INIT_LIST_HEAD(&dev->tq.list);
	dev->tq.sync = 0;
	dev->tq.routine = mga_dma_task_queue;
	dev->tq.data = dev;

	/* Before installing handler */
	MGA_WRITE(MGAREG_IEN, 0);

	/* Install handler */
	if ((retcode = request_irq(dev->irq,
				   mga_dma_service,
				   SA_SHIRQ,
				   dev->devname,
				   dev))) {
		down(&dev->struct_sem);
		dev->irq = 0;
		up(&dev->struct_sem);
		return retcode;
	}

	/* After installing handler */
	MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
	MGA_WRITE(MGAREG_IEN, 0x00000001);
	return 0;
}

int mga_irq_uninstall(drm_device_t *dev)
{
	int irq;

	down(&dev->struct_sem);
	irq = dev->irq;
	dev->irq = 0;
	up(&dev->struct_sem);

	if (!irq)
		return -EINVAL;

	DRM_DEBUG("remove irq handler %d\n", irq);
	MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
	MGA_WRITE(MGAREG_IEN, 0);
	free_irq(irq, dev);
	return 0;
}

int mga_control(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_control_t ctl;

	if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
		return -EFAULT;

	switch (ctl.func) {
	case DRM_INST_HANDLER:
		return mga_irq_install(dev, ctl.irq);
	case DRM_UNINST_HANDLER:
		return mga_irq_uninstall(dev);
	default:
		return -EINVAL;
	}
}

/* Sleep (interruptibly) until the hardware has processed every
 * pending primary buffer.
 */
static int mga_flush_queue(drm_device_t *dev)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	int ret = 0;

	if (!dev_priv)
		return 0;

	if (dev_priv->next_prim->num_dwords != 0) {
		add_wait_queue(&dev_priv->flush_queue, &entry);
		if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status))
			DRM_ERROR("Incorrect mga_flush_queue logic\n");
		set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
		mga_dma_schedule(dev, 0);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!test_bit(MGA_IN_FLUSH,
				      &dev_priv->dispatch_status))
				break;
			atomic_inc(&dev->total_sleeps);
			schedule();
			if (signal_pending(current)) {
				ret = -EINTR; /* Can't restart */
				clear_bit(MGA_IN_FLUSH,
					  &dev_priv->dispatch_status);
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev_priv->flush_queue, &entry);
	}

	return ret;
}

/* Must be called with the lock held */
void mga_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int i;

	if (!dma)
		return;
	if (dev->dev_private == NULL)
		return;
	if (dma->buflist == NULL)
		return;

	DRM_DEBUG("buf_count=%d\n", dma->buf_count);

	mga_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[i];
		drm_mga_buf_priv_t *buf_priv = buf->dev_private;

		/* Only buffers that need to get reclaimed ever
		 * get set to free
		 */
		if (buf->pid == pid && buf_priv) {
			if (buf_priv->my_freelist->age == MGA_BUF_USED)
				buf_priv->my_freelist->age = MGA_BUF_FREE;
		}
	}
}

/* DRM lock ioctl: wait for the hardware lock, block job-control
 * signals while it is held, and optionally quiesce the engine.
 */
int mga_lock(struct inode *inode, struct file *filp,
	     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	int ret = 0;
	drm_lock_t lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	if (lock.context < 0)
		return -EINVAL;

	/* Only one queue: */

	if (!ret) {
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}

			/* Contention */
			atomic_inc(&dev->total_sleeps);
			schedule();
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	}

	if (!ret) {
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock = dev->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

		if (lock.flags & _DRM_LOCK_QUIESCENT) {
			DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
			mga_flush_queue(dev);
			mga_dma_quiescent(dev);
		}
	}

	if (ret)
		DRM_DEBUG("%d %s\n", lock.context,
			  ret ? "interrupted" : "has lock");
	return ret;
}

/* Flush ioctl: force-fire the current primary buffer and optionally
 * wait for the engine to go idle. The caller must hold the lock.
 */
int mga_flush_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("lock not held\n");
		return -EINVAL;
	}

	if (lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) {
		drm_mga_prim_buf_t *temp_buf;

		temp_buf = dev_priv->current_prim;

		if (temp_buf && temp_buf->num_dwords) {
			set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status);
			mga_advance_primary(dev);
		}
		mga_dma_schedule(dev, 1);
	}
	if (lock.flags & _DRM_LOCK_QUIESCENT) {
		mga_flush_queue(dev);
		mga_dma_quiescent(dev);
	}

	return 0;
}