📄 radeon_state.c
字号:
}

/* Program the three per-slot surface registers (flags, lower bound,
 * upper bound) for hardware surface slot surf_index from the copy
 * cached in dev_priv->surfaces[].  The register triplets are spaced
 * 16 bytes apart.  The CP is idled first so in-flight commands cannot
 * observe a half-updated surface.  No-op until MMIO is mapped.
 */
static void radeon_apply_surface_regs(int surf_index,
				      drm_radeon_private_t * dev_priv)
{
	if (!dev_priv->mmio)
		return;

	radeon_do_cp_idle(dev_priv);

	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
		     dev_priv->surfaces[surf_index].flags);
	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
		     dev_priv->surfaces[surf_index].lower);
	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
		     dev_priv->surfaces[surf_index].upper);
}

/* Allocates a virtual surface
 * doesn't always allocate a real surface, will stretch an existing
 * surface when possible.
 *
 * Note that refcount can be at most 2, since during a free refcount=3
 * might mean we have to allocate a new surface which might not always
 * be available.
 * For example : we allocate three contigous surfaces ABC. If B is
 * freed, we suddenly need two surfaces to store A and C, which might
 * not always be available.
 *
 * Returns the virt_surfaces[] index handed back to the client, or -1
 * on failure (bad range/flags/alignment, overlap, or no free slot).
 */
static int alloc_surface(drm_radeon_surface_alloc_t * new,
			 drm_radeon_private_t * dev_priv, DRMFILE filp)
{
	struct radeon_virt_surface *s;
	int i;
	int virt_surface_index;
	uint32_t new_upper, new_lower;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	/* sanity check: non-empty range, flags present, and both bounds
	 * aligned per RADEON_SURF_ADDRESS_FIXED_MASK (upper must end on,
	 * lower must start on, an alignment boundary) */
	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
	     RADEON_SURF_ADDRESS_FIXED_MASK) ||
	    ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
		return -1;

	/* make sure there is no overlap with existing surfaces */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {
			return -1;
		}
	}

	/* find a virtual surface: a slot with no owning filp is free */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
		if (dev_priv->virt_surfaces[i].filp == 0)
			break;
	if (i == 2 * RADEON_MAX_SURFACES) {
		return -1;
	}
	virt_surface_index = i;

	/* try to reuse an existing surface */
	for (i = 0; i <
RADEON_MAX_SURFACES; i++) {
		/* extend before: the new range ends exactly where real
		 * surface i begins and flags match, so grow i downward */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->filp = filp;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].lower = s->lower;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}

		/* extend after: the new range starts right past real
		 * surface i's end, so grow i upward */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->filp = filp;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].upper = s->upper;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* okay, we need a new one: claim the first unused real surface */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if (dev_priv->surfaces[i].refcount == 0) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->filp = filp;
			dev_priv->surfaces[i].refcount = 1;
			dev_priv->surfaces[i].lower = s->lower;
			dev_priv->surfaces[i].upper = s->upper;
			dev_priv->surfaces[i].flags = s->flags;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* we didn't find anything */
	return -1;
}

/* Release the virtual surface owned by filp whose range starts at
 * 'lower', shrinking the underlying real surface accordingly and
 * clearing its flags once the refcount reaches zero.  Returns 0 on
 * success, 1 if no matching virtual surface exists.
 */
static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
			int lower)
{
	struct radeon_virt_surface *s;
	int i;
	/* find the virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		s = &(dev_priv->virt_surfaces[i]);
		if (s->filp) {
			if ((lower == s->lower) && (filp == s->filp)) {
				/* if this virt surface formed an edge of
				 * the real surface, pull that edge in */
				if (dev_priv->surfaces[s->surface_index].
				    lower == s->lower)
					dev_priv->surfaces[s->surface_index].
					    lower = s->upper;
				if (dev_priv->surfaces[s->surface_index].
upper == s->upper) dev_priv->surfaces[s->surface_index]. upper = s->lower; dev_priv->surfaces[s->surface_index].refcount--; if (dev_priv->surfaces[s->surface_index]. refcount == 0) dev_priv->surfaces[s->surface_index]. flags = 0; s->filp = NULL; radeon_apply_surface_regs(s->surface_index, dev_priv); return 0; } } } return 1;}static void radeon_surfaces_release(DRMFILE filp, drm_radeon_private_t * dev_priv){ int i; for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { if (dev_priv->virt_surfaces[i].filp == filp) free_surface(filp, dev_priv, dev_priv->virt_surfaces[i].lower); }}/* ================================================================ * IOCTL functions */static int radeon_surface_alloc(DRM_IOCTL_ARGS){ DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_surface_alloc_t alloc; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_surface_alloc_t __user *) data, sizeof(alloc)); if (alloc_surface(&alloc, dev_priv, filp) == -1) return DRM_ERR(EINVAL); else return 0;}static int radeon_surface_free(DRM_IOCTL_ARGS){ DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_surface_free_t memfree; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, sizeof(memfree)); if (free_surface(filp, dev_priv, memfree.address)) return DRM_ERR(EINVAL); else return 0;}static int radeon_cp_clear(DRM_IOCTL_ARGS){ DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_radeon_clear_t clear; drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, filp); DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data, sizeof(clear)); RING_SPACE_TEST_WITH_RETURN(dev_priv); if (sarea_priv->nbox > 
RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	/* copy the per-box depth clear values; nbox was clamped above */
	if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes,
			       sarea_priv->nbox * sizeof(depth_boxes[0])))
		return DRM_ERR(EFAULT);

	radeon_cp_dispatch_clear(dev, &clear, depth_boxes);

	COMMIT_RING();
	return 0;
}

/* Not sure why this isn't set all the time:
 */

/* Enable page flipping: set the FLIP_CNTL bit on both CRTC offset
 * control registers via the ring, then record that flipping is active
 * and that page 0 is current (mirrored into the SAREA for clients).
 */
static int radeon_do_init_pageflip(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(6);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	ADVANCE_RING();

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

/* Called whenever a client dies, from drm_release.
 * NOTE: Lock isn't necessarily held when this is called!
 *
 * Flips back to page 0 if needed and marks page flipping disabled.
 */
static int radeon_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	if (dev_priv->current_page != 0)
		radeon_cp_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */

/* Ioctl: flip to the other display page, lazily enabling page
 * flipping on first use. */
static int radeon_cp_flip(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip(dev);

	radeon_cp_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

/* Ioctl: blit the back buffer to the front within the SAREA
 * cliprects; also clears ctx_owner so 3D state is re-emitted. */
static int radeon_cp_swap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* clamp the shared box count before dispatch uses it */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap(dev);
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}

/* Ioctl: emit a vertex buffer.  Validates the buffer index, primitive
 * type, and buffer ownership, emits any dirty state, then dispatches
 * the primitive (and optionally discards the buffer afterwards). */
static int radeon_cp_vertex(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex_t vertex;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
				 sizeof(vertex));

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);

	/* range-check the client-supplied buffer index */
	if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", vertex.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex.idx];

	/* a client may only submit buffers it owns */
	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex.idx);
		return DRM_ERR(EINVAL);
	}

	/* Build up a prim_t record:
	 */
	if
(vertex.count) { buf->used = vertex.count; /* not used? */ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { if (radeon_emit_state(dev_priv, filp_priv, &sarea_priv->context_state, sarea_priv->tex_state, sarea_priv->dirty)) { DRM_ERROR("radeon_emit_state failed\n"); return DRM_ERR(EINVAL); } sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | RADEON_UPLOAD_TEX1IMAGES | RADEON_UPLOAD_TEX2IMAGES | RADEON_REQUIRE_QUIESCENCE); } prim.start = 0; prim.finish = vertex.count; /* unused */ prim.prim = vertex.prim; prim.numverts = vertex.count; prim.vc_format = dev_priv->sarea_priv->vc_format; radeon_cp_dispatch_vertex(dev, buf, &prim); } if (vertex.discard) { radeon_cp_discard_buffer(dev, buf); } COMMIT_RING(); return 0;}static int radeon_cp_indices(DRM_IOCTL_ARGS){ DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_file_t *filp_priv; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; drm_radeon_indices_t elts; drm_radeon_tcl_prim_t prim; int count; LOCK_TEST_WITH_RETURN(dev, filp); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data, sizeof(elts)); DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard); if (elts.idx < 0 || elts.idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", elts.idx, dma->buf_count - 1); return DRM_ERR(EINVAL); } if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { DRM_ERROR("buffer prim %d\n", elts.prim); return DRM_ERR(EINVAL); } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); buf = dma->buflist[elts.idx]; if (buf->fi
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -