radeon_cp.c
			drm_core_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{
		if (dev_priv->gart_info.bus_addr)
			if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
				DRM_ERROR("failed to cleanup PCI GART!\n");

		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
			drm_ioremapfree((void *)dev_priv->gart_info.addr,
					RADEON_PCIGART_TABLE_SIZE, dev);
			dev_priv->gart_info.addr = 0;
		}
	}

	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}

/* This code will reinit the Radeon CP hardware after a resume from disc.
 * AFAIK, it would be very difficult to pickle the state at suspend time, so
 * here we make sure that all Radeon hardware initialisation is re-done without
 * affecting running applications.
 *
 * Charl P. Botha <http://cpbotha.net>
 */
static int radeon_do_resume_cp(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return DRM_ERR(EINVAL);
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (!dev_priv->is_pci) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	radeon_do_engine_reset(dev);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}

int radeon_cp_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_init_t init;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data,
				 sizeof(init));

	if (init.func == RADEON_INIT_R300_CP)
		r300_init_reg_flags();

	switch (init.func) {
	case RADEON_INIT_CP:
	case RADEON_INIT_R200_CP:
	case RADEON_INIT_R300_CP:
		return radeon_do_init_cp(dev, &init);
	case RADEON_CLEANUP_CP:
		return radeon_do_cleanup_cp(dev);
	}

	return DRM_ERR(EINVAL);
}

int radeon_cp_start(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (dev_priv->cp_running) {
		DRM_DEBUG("%s while CP running\n", __FUNCTION__);
		return 0;
	}
	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
		DRM_DEBUG("%s called with bogus CP mode (%d)\n",
			  __FUNCTION__, dev_priv->cp_mode);
		return 0;
	}

	radeon_do_cp_start(dev_priv);

	return 0;
}

/* Stop the CP.  The engine must have been idled before calling this
 * routine.
 */
int radeon_cp_stop(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_cp_stop_t stop;
	int ret;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data,
				 sizeof(stop));

	if (!dev_priv->cp_running)
		return 0;

	/* Flush any pending CP commands.  This ensures any outstanding
	 * commands are executed by the engine before we turn it off.
	 */
	if (stop.flush) {
		radeon_do_cp_flush(dev_priv);
	}

	/* If we fail to make the engine go idle, we return an error
	 * code so that the DRM ioctl wrapper can try again.
	 */
	if (stop.idle) {
		ret = radeon_do_cp_idle(dev_priv);
		if (ret)
			return ret;
	}

	/* Finally, we can turn off the CP.  If the engine isn't idle,
	 * we will get some dropped triangles as they won't be fully
	 * rendered before the CP is shut down.
	 */
	radeon_do_cp_stop(dev_priv);

	/* Reset the engine */
	radeon_do_engine_reset(dev);

	return 0;
}
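/* A hedged, illustrative sketch (not part of this driver) of how a userspace
 * client might drive the stop protocol above through libdrm: request a flush
 * so queued commands are executed, ask for an idle wait, and retry while the
 * ioctl reports EBUSY, as the comment in radeon_cp_stop() suggests.  The
 * drmCommandWrite() entry point and the DRM_RADEON_CP_STOP command index come
 * from libdrm's xf86drm.h / radeon_drm.h; the negative-errno return convention
 * and the unbounded retry loop are assumptions of this sketch.
 */
#if 0
#include <errno.h>
#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_stop_cp_example(int fd)
{
	drm_radeon_cp_stop_t stop;
	int ret;

	stop.flush = 1;		/* execute outstanding CP commands first */
	stop.idle = 1;		/* then wait for the engine to go idle */

	do {
		ret = drmCommandWrite(fd, DRM_RADEON_CP_STOP,
				      &stop, sizeof(stop));
	} while (ret == -EBUSY);	/* idle wait timed out: try again */

	return ret;
}
#endif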
void radeon_do_release(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (dev_priv) {
		if (dev_priv->cp_running) {
			/* Stop the cp */
			while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
				DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
				schedule();
#else
				tsleep(&ret, PZERO, "rdnrel", 1);
#endif
			}
			radeon_do_cp_stop(dev_priv);
			radeon_do_engine_reset(dev);
		}

		/* Disable *all* interrupts */
		if (dev_priv->mmio)	/* remove this after permanent addmaps */
			RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

		if (dev_priv->mmio) {	/* remove all surfaces */
			for (i = 0; i < RADEON_MAX_SURFACES; i++) {
				RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
					     16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
					     16 * i, 0);
			}
		}

		/* Free memory heap structures */
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));

		/* deallocate kernel resources */
		radeon_do_cleanup_cp(dev);
	}
}

/* Just reset the CP ring.  Called as part of an X Server engine reset.
 */
int radeon_cp_reset(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_DEBUG("%s called before init done\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	return 0;
}

int radeon_cp_idle(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	return radeon_do_cp_idle(dev_priv);
}

/* Added by Charl P. Botha to call radeon_do_resume_cp().
 */
int radeon_cp_resume(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	return radeon_do_resume_cp(dev);
}

int radeon_engine_reset(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	return radeon_do_engine_reset(dev);
}

/* ================================================================
 * Fullscreen mode
 */

/* KW: Deprecated to say the least:
 */
int radeon_fullscreen(DRM_IOCTL_ARGS)
{
	return 0;
}

/* ================================================================
 * Freelist management
 */

/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
 * bufs until freelist code is used.  Note this hides a problem with
 * the scratch register (used to keep track of last buffer completed)
 * being written to before the last buffer has actually completed
 * rendering.
 *
 * KW: It's also a good way to find free buffers quickly.
 *
 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
 * sleep.  However, bugs in older versions of radeon_accel.c mean that
 * we essentially have to do this, else old clients will break.
 *
 * However, it does leave open a potential deadlock where all the
 * buffers are held by other clients, which can't release them because
 * they can't get the lock.
 */
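/* For context, a hedged sketch of the ageing scheme the comment above relies
 * on.  When a buffer is handed to the CP, the submission path stamps it with
 * a monotonically increasing age and emits a register write that lands in
 * scratch register 1 once the buffer's commands have drained; the function
 * below then treats a pending buffer whose age is <= GET_SCRATCH(1) as
 * complete and hands it out again.  Only buf_priv->age and GET_SCRATCH(1)
 * are taken from this file; the ring macros, RADEON_LAST_DISPATCH_REG and
 * sarea_priv->last_dispatch are illustrative assumptions about the rest of
 * the driver, and this helper is not a real driver function.
 */
#if 0
static void radeon_emit_buffer_age_sketch(drm_radeon_private_t * dev_priv,
					  drm_radeon_buf_priv_t * buf_priv)
{
	RING_LOCALS;

	/* Tag the buffer with the next dispatch age. */
	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Ask the CP to write that age back once it reaches this point. */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_LAST_DISPATCH_REG, 0));
	OUT_RING(buf_priv->age);
	ADVANCE_RING();
}
#endif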
drm_buf_t *radeon_freelist_get(drm_device_t * dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	drm_buf_t *buf;
	int i, t;
	int start;

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;

	for (t = 0; t < dev_priv->usec_timeout; t++) {
		u32 done_age = GET_SCRATCH(1);
		DRM_DEBUG("done_age = %d\n", done_age);
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->filp == 0 || (buf->pending &&
					       buf_priv->age <= done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
			start = 0;
		}

		if (t) {
			DRM_UDELAY(1);
			dev_priv->stats.freelist_loops++;
		}
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}

#if 0
drm_buf_t *radeon_freelist_get(drm_device_t * dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	drm_buf_t *buf;
	int i, t;
	int start;
	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;
	dev_priv->stats.freelist_loops++;

	for (t = 0; t < 2; t++) {
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->filp == 0 || (buf->pending &&
					       buf_priv->age <= done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
		}
		start = 0;
	}

	return NULL;
}
#endif

void radeon_freelist_reset(drm_device_t * dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->last_buf = 0;
	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[i];
		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
		buf_priv->age = 0;
	}
}

/* ================================================================
 * CP command submission
 */

int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
	int i;
	u32 last_head = GET_RING_HEAD(dev_priv);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		u32 head = GET_RING_HEAD(dev_priv);

		ring->space = (head - ring->tail) * sizeof(u32);
		if (ring->space <= 0)
			ring->space += ring->size;
		if (ring->space > n)
			return 0;

		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

		if (head != last_head)
			i = 0;
		last_head = head;

		DRM_UDELAY(1);
	}

	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
#if RADEON_FIFO_DEBUG
	radeon_status(dev_priv);
	DRM_ERROR("failed!\n");
#endif
	return DRM_ERR(EBUSY);
}
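/* Callers do not normally invoke radeon_wait_ring() directly: the BEGIN_RING()
 * macro in radeon_drv.h keeps a byte count of free ring space and only falls
 * back to the poll loop above when the requested number of dwords does not
 * fit, ignoring the EBUSY return (see the FIXME above).  The function below is
 * a hedged sketch of that relationship, not a real driver function; it makes
 * the dword-to-byte conversion explicit and, unlike BEGIN_RING(), propagates
 * the timeout instead of ignoring it.
 */
#if 0
static int radeon_reserve_ring_space_sketch(drm_radeon_private_t * dev_priv,
					    int ndwords)
{
	int nbytes = ndwords * sizeof(u32);

	/* Fast path: enough space is already accounted for. */
	if (dev_priv->ring.space > nbytes) {
		dev_priv->ring.space -= nbytes;
		return 0;
	}

	/* Slow path: poll the hardware read pointer until the gap between
	 * head and tail is large enough, or the usec timeout expires.
	 */
	if (radeon_wait_ring(dev_priv, nbytes) != 0)
		return DRM_ERR(EBUSY);

	dev_priv->ring.space -= nbytes;
	return 0;
}
#endif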
static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev,
				 drm_dma_t * d)
{
	int i;
	drm_buf_t *buf;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = radeon_freelist_get(dev);
		if (!buf)
			return DRM_ERR(EBUSY);	/* NOTE: broken client */

		buf->filp = filp;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return DRM_ERR(EFAULT);
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return DRM_ERR(EFAULT);

		d->granted_count++;
	}
	return 0;
}

int radeon_cp_buffers(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	int ret = 0;
	drm_dma_t __user *argp = (void __user *)data;
	drm_dma_t d;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));

	/* Please don't send us buffers.
	 */
	if (d.send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d.send_count);
		return DRM_ERR(EINVAL);
	}

	/* We'll send you buffers.
	 */
	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d.request_count, dma->buf_count);
		return DRM_ERR(EINVAL);
	}

	d.granted_count = 0;

	if (d.request_count) {
		ret = radeon_cp_get_buffers(filp, dev, &d);
	}

	DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));

	return ret;
}

int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
{
	drm_radeon_private_t *dev_priv;
	int ret = 0;

	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return DRM_ERR(ENOMEM);

	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
	dev->dev_private = (void *)dev_priv;
	dev_priv->flags = flags;

	switch (flags & CHIP_FAMILY_MASK) {
	case CHIP_R100:
	case CHIP_RV200:
	case CHIP_R200:
	case CHIP_R300:
	case CHIP_R420:
		dev_priv->flags |= CHIP_HAS_HIERZ;
		break;
	default:
		/* all other chips have no hierarchical z buffer */
		break;
	}

	if (drm_device_is_agp(dev))
		dev_priv->flags |= CHIP_IS_AGP;

	if (drm_device_is_pcie(dev))
		dev_priv->flags |= CHIP_IS_PCIE;

	DRM_DEBUG("%s card detected\n",
		  ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
	return ret;
}

int radeon_presetup(struct drm_device *dev)
{
	int ret;
	drm_local_map_t *map;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret != 0)
		return ret;

	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &map);
	if (ret != 0)
		return ret;

	return 0;
}

int radeon_driver_postcleanup(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);

	dev->dev_private = NULL;
	return 0;
}
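/* The buffer-request ioctl above (radeon_cp_buffers()) is what libdrm's
 * drmDMA() reaches: userspace only ever asks for buffer indices and sizes
 * (send_count must be 0), and the granted indices refer to entries in the
 * mapped DMA buffer pool.  A hedged sketch of that userspace side follows.
 * The drmDMAReq field names and DRM_DMA_WAIT come from libdrm's xf86drm.h and
 * RADEON_BUFFER_SIZE from radeon_drm.h; the single-buffer request policy and
 * this helper's name are assumptions made only for illustration.
 */
#if 0
#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_request_buf_sketch(int fd, drm_context_t ctx)
{
	int idx, size;
	drmDMAReq dma;

	dma.context = ctx;
	dma.send_count = 0;		/* "Please don't send us buffers." */
	dma.send_list = NULL;
	dma.send_sizes = NULL;
	dma.flags = DRM_DMA_WAIT;
	dma.request_count = 1;		/* ask for a single buffer */
	dma.request_size = RADEON_BUFFER_SIZE;
	dma.request_list = &idx;
	dma.request_sizes = &size;
	dma.granted_count = 0;

	if (drmDMA(fd, &dma) != 0 || dma.granted_count != 1)
		return -1;

	return idx;			/* index into the DMA buffer pool */
}
#endif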