📄 radeon_state.c
字号:
/* NOTE(review): this excerpt opens inside radeon_cp_dispatch_vertex() -- the
 * function signature is above the visible span.  Code below is unchanged;
 * only formatting was restored and comments added.
 */
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	/* GART-relative address of the vertex data within this DMA buffer. */
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", prim->prim, prim->vc_format, prim->start, prim->finish, prim->numverts);

	/* Reject primitive-type / vertex-count combinations the checker deems
	 * invalid before touching the ring. */
	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
		DRM_ERROR("bad prim %x numverts %d\n", prim->prim, prim->numverts);
		return;
	}

	/* Re-emit the same draw once per cliprect; the do/while guarantees at
	 * least one pass even when nbox == 0 (no cliprect emitted then). */
	do {
		/* Emit the next cliprect */
		if (i < nbox) {
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
		}

		/* Emit the vertex buffer rendering commands */
		BEGIN_RING(5);

		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
		OUT_RING(offset);
		OUT_RING(numverts);
		OUT_RING(prim->vc_format);
		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
			 RADEON_COLOR_ORDER_RGBA |
			 RADEON_VTX_FMT_RADEON_MODE |
			 (numverts << RADEON_NUM_VERTICES_SHIFT));

		ADVANCE_RING();

		i++;
	} while (i < nbox);
}

/*
 * Return a DMA buffer to the "pending" state: stamp it with a fresh age
 * (a monotonically increasing dispatch counter kept in the shared area),
 * emit that age into the ring so completion can be detected later, and
 * mark the buffer pending/unused.
 */
static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	BEGIN_RING(2);
	RADEON_DISPATCH_AGE(buf_priv->age);
	ADVANCE_RING();

	buf->pending = 1;
	buf->used = 0;
}

/*
 * Kick off execution of the [start, end) byte range of @buf as an
 * indirect (IB) command buffer.  An empty range is a no-op.
 */
static void radeon_cp_dispatch_indirect(drm_device_t * dev, drm_buf_t * buf, int start, int end)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = (dev_priv->gart_buffers_offset + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}

		/* Fire off the indirect buffer */
		BEGIN_RING(3);
		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
		OUT_RING(offset);
		OUT_RING(dwords);
		ADVANCE_RING();
	}
}

/*
 * Dispatch an indexed primitive: patch a RNDR_GEN_INDX_PRIM header into
 * the element buffer in place, then fire it as an indirect buffer once
 * per cliprect.  The u16 indices start RADEON_INDEX_PRIM_OFFSET bytes
 * past prim->start to leave room for the 5-dword header written below.
 */
static void radeon_cp_dispatch_indices(drm_device_t * dev, drm_buf_t * elt_buf, drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", prim->prim, prim->vc_format, prim->start, prim->finish, prim->offset, prim->numverts);

	if (bad_prim_vertex_nr(prim->prim, count)) {
		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
		return;
	}

	/* Range must be non-empty and prim->start 8-byte aligned. */
	if (start >= prim->finish || (prim->start & 0x7)) {
		DRM_ERROR("buffer prim %d\n", prim->prim);
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	/* Write the packet header directly into the mapped element buffer. */
	data = (u32 *) ((char *)dev->agp_buffer_map->handle + elt_buf->offset + prim->start);

	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT));

	do {
		if (i < nbox)
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

		radeon_cp_dispatch_indirect(dev, elt_buf, prim->start, prim->finish);

		i++;
	} while (i < nbox);
}

#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

/*
 * Upload a texture image from user space into video memory via host
 * blits.  Images larger than one DMA buffer are split into multiple
 * passes; @image is updated in place (y/height/data advance) so the
 * caller can retry after EAGAIN.  Returns 0 or a negative DRM_ERR code.
 */
static int radeon_cp_dispatch_texture(DRMFILE filp, drm_device_t * dev, drm_radeon_texture_t * tex, drm_radeon_tex_image_t * image)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_buf_t *buf;
	u32 format;
	u32 *buffer;
	const u8 __user *data;
	int size, dwords, tex_width, blit_width, spitch;
	u32 height;
	int i;
	u32 texpitch, microtile;
	u32 offset;
	RING_LOCALS;

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	/* Validate/relocate the user-supplied destination offset. */
	if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (tex->format) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;	/* 4 bytes per texel */
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;	/* 2 bytes per texel */
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;	/* 1 byte per texel */
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR("invalid texture format %d\n", tex->format);
		return DRM_ERR(EINVAL);
	}
	/* Source pitch in 64-byte units; zero is only legal for a
	 * single-scanline image. */
	spitch = blit_width >> 6;
	if (spitch == 0 && image->height > 1)
		return DRM_ERR(EINVAL);

	texpitch = tex->pitch;
	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
		microtile = 1;
		if (tex_width < 64) {
			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
			/* we got tiled coordinates, untile them */
			image->x *= 2;
		}
	} else
		microtile = 0;

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);

	do {
		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", tex->offset >> 10, tex->pitch, tex->format, image->x, image->y, image->width, image->height);

		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 __user *)image->data;

		size = height * blit_width;

		if (size > RADEON_MAX_TEXTURE_SIZE) {
			/* Clamp this pass to what fits in one DMA buffer;
			 * the loop handles the remainder. */
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if (size < 4 && size > 0) {
			size = 4;	/* minimum transfer is one dword */
		} else if (size == 0) {
			return 0;
		}

		buf = radeon_freelist_get(dev);
		/* NOTE(review): retry-after-idle path is deliberately
		 * compiled out (if (0 && ...)); EAGAIN is returned to the
		 * caller instead -- confirm before re-enabling. */
		if (0 && !buf) {
			radeon_do_cp_idle(dev_priv);
			buf = radeon_freelist_get(dev);
		}
		if (!buf) {
			DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
			/* Hand the (partially updated) image back so user
			 * space can resume where we stopped. */
			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
				return DRM_ERR(EFAULT);
			return DRM_ERR(EAGAIN);
		}

		/* Dispatch the indirect buffer.
		 */
		buffer = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
		dwords = size / 4;

		if (microtile) {
			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
			   however, we cannot use blitter directly for texture width < 64 bytes,
			   since minimum tex pitch is 64 bytes and we need this to match the texture
			   width, otherwise the blitter will tile it wrong.
			   Thus, tiling manually in this case. Additionally, need to special case
			   tex height = 1, since our actual image will have height 2 and we need to
			   ensure we don't read beyond the texture size from user space. */
			if (tex->height == 1) {
				if (tex_width >= 64 || tex_width <= 16) {
					/* NOTE(review): copy length is tex_width * 4
					 * bytes, not tex_width -- presumably to pull in
					 * the full micro-tile rows; verify against the
					 * tiling layout docs. */
					if (DRM_COPY_FROM_USER(buffer, data, tex_width * sizeof(u32))) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
				} else if (tex_width == 32) {
					if (DRM_COPY_FROM_USER(buffer, data, 16)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					if (DRM_COPY_FROM_USER(buffer + 8, data + 16, 16)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
				}
			} else if (tex_width >= 64 || tex_width == 16) {
				if (DRM_COPY_FROM_USER(buffer, data, dwords * sizeof(u32))) {
					DRM_ERROR("EFAULT on data, %d dwords\n", dwords);
					return DRM_ERR(EFAULT);
				}
			} else if (tex_width < 16) {
				/* One scanline per 16-byte (4-dword) slot. */
				for (i = 0; i < tex->height; i++) {
					if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					buffer += 4;
					data += tex_width;
				}
			} else if (tex_width == 32) {
				/* TODO: make sure this works when not fitting in one buffer
				   (i.e. 32bytes x 2048...) */
				/* Interleave two scanlines into the tile layout:
				 * destination dword offsets 0, 8, 4, 12 per pair. */
				for (i = 0; i < tex->height; i += 2) {
					if (DRM_COPY_FROM_USER(buffer, data, 16)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					data += 16;
					if (DRM_COPY_FROM_USER(buffer + 8, data, 16)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					data += 16;
					if (DRM_COPY_FROM_USER(buffer + 4, data, 16)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					data += 16;
					if (DRM_COPY_FROM_USER(buffer + 12, data, 16)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					data += 16;
					buffer += 16;
				}
			}
		} else {
			if (tex_width >= 32) {
				/* Texture image width is larger than the minimum, so we
				 * can upload it directly.
				 */
				if (DRM_COPY_FROM_USER(buffer, data, dwords * sizeof(u32))) {
					DRM_ERROR("EFAULT on data, %d dwords\n", dwords);
					return DRM_ERR(EFAULT);
				}
			} else {
				/* Texture image width is less than the minimum, so we
				 * need to pad out each image scanline to the minimum
				 * width.
				 */
				for (i = 0; i < tex->height; i++) {
					if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
						DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
						return DRM_ERR(EFAULT);
					}
					buffer += 8;
					data += tex_width;
				}
			}
		}

		buf->filp = filp;
		buf->used = size;
		offset = dev_priv->gart_buffers_offset + buf->offset;
		/* Host-data blit from the staged DMA buffer into the texture. */
		BEGIN_RING(9);
		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		OUT_RING((spitch << 22) | (offset >> 10));
		OUT_RING((texpitch << 22) | (tex->offset >> 10));
		OUT_RING(0);
		OUT_RING((image->x << 16) | image->y);
		OUT_RING((image->width << 16) | height);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();

		radeon_cp_discard_buffer(dev, buf);

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;

		image->data = (const u8 __user *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
	return 0;
}

/*
 * Program the 32x32 polygon stipple pattern: one address write followed
 * by a 32-dword table write of @stipple into RE_STIPPLE_DATA.
 */
static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(35);

	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
	OUT_RING(0x00000000);

	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
	for (i = 0; i < 32; i++) {
		OUT_RING(stipple[i]);
	}

	ADVANCE_RING();
	/* NOTE(review): function continues past this excerpt (closing brace
	 * not visible here). */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -