radeon_state.c
 */
	switch ( tex->format ) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR( "invalid texture format %d\n", tex->format );
		return DRM_ERR(EINVAL);
	}

	DRM_DEBUG( "tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );

	do {
		DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			   tex->offset >> 10, tex->pitch, tex->format,
			   image->x, image->y, image->width, image->height );

		/* Make a copy of the parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		y = image->y;
		height = image->height;
		data = (const u8 *)image->data;

		size = height * blit_width;

		if ( size > RADEON_MAX_TEXTURE_SIZE ) {
			/* Too big for a single pass: upload as many
			 * scanlines as fit and leave the rest for the
			 * next iteration.
			 */
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if ( size < 4 && size > 0 ) {
			size = 4;
		} else if ( size == 0 ) {
			return 0;
		}

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data += size;

		buf = radeon_freelist_get( dev );
		if ( 0 && !buf ) {
			/* Deliberately disabled: fall through to the
			 * EAGAIN path below instead of idling the CP here.
			 */
			radeon_do_cp_idle( dev_priv );
			buf = radeon_freelist_get( dev );
		}
		if ( !buf ) {
			DRM_DEBUG( "radeon_cp_dispatch_texture: EAGAIN\n" );
			/* Hand the updated parameters back so the client
			 * can retry the remainder of the upload.
			 */
			DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
			return DRM_ERR(EAGAIN);
		}

		/* Dispatch the indirect buffer.
		 */
		buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
		dwords = size / 4;
		buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
		buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			     RADEON_GMC_BRUSH_NONE |
			     (format << 8) |
			     RADEON_GMC_SRC_DATATYPE_COLOR |
			     RADEON_ROP3_S |
			     RADEON_DP_SRC_SOURCE_HOST_DATA |
			     RADEON_GMC_CLR_CMP_CNTL_DIS |
			     RADEON_GMC_WR_MSK_DIS);

		buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
		buffer[3] = 0xffffffff;
		buffer[4] = 0xffffffff;
		buffer[5] = (y << 16) | image->x;
		buffer[6] = (height << 16) | image->width;
		buffer[7] = dwords;

		buffer += 8;

		if ( tex_width >= 32 ) {
			/* Texture image width is larger than the minimum, so we
			 * can upload it directly.
			 */
			if ( DRM_COPY_FROM_USER( buffer, data,
						 dwords * sizeof(u32) ) ) {
				DRM_ERROR( "EFAULT on data, %d dwords\n",
					   dwords );
				return DRM_ERR(EFAULT);
			}
		} else {
			/* Texture image width is less than the minimum, so we
			 * need to pad out each image scanline to the minimum
			 * width.
			 */
			for ( i = 0 ; i < tex->height ; i++ ) {
				if ( DRM_COPY_FROM_USER( buffer, data,
							 tex_width ) ) {
					DRM_ERROR( "EFAULT on pad, %d bytes\n",
						   tex_width );
					return DRM_ERR(EFAULT);
				}
				buffer += 8;	/* 8-dword (32-byte) scanline pitch */
				data += tex_width;
			}
		}

		buf->pid = DRM_CURRENTPID;
		buf->used = (dwords + 8) * sizeof(u32);

		radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
		radeon_cp_discard_buffer( dev, buf );

	} while ( image->height > 0 );

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 4 );

	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();

	return 0;
}
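/* A minimal userspace-side sketch (not part of this file) of the
 * multi-pass contract above: on EAGAIN the kernel has already copied the
 * updated drm_radeon_tex_image_t back through tex->image, so the client
 * can simply re-issue the same ioctl until the remaining height reaches
 * zero.  The fd argument, the function name, and the use of a raw
 * ioctl() rather than a libdrm wrapper are assumptions; the types and
 * the DRM_IOCTL_RADEON_TEXTURE request come from radeon_drm.h.
 */
#include <errno.h>
#include <sys/ioctl.h>

static int upload_texture( int fd, drm_radeon_texture_t *tex )
{
	int ret;

	do {
		ret = ioctl( fd, DRM_IOCTL_RADEON_TEXTURE, tex );
	} while ( ret == -1 && errno == EAGAIN );

	return ret;
}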
static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	BEGIN_RING( 35 );

	OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
	OUT_RING( 0x00000000 );

	OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}


/* ================================================================
 * IOCTL functions
 */

int radeon_cp_clear( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t clear;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev );

	DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
				  sizeof(clear) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ( DRM_COPY_FROM_USER( &depth_boxes, clear.depth_boxes,
				 sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
		return DRM_ERR(EFAULT);

	radeon_cp_dispatch_clear( dev, &clear, depth_boxes );

	COMMIT_RING();
	return 0;
}


/* Not sure why this isn't set all the time:
 */
static int radeon_do_init_pageflip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	BEGIN_RING( 6 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
	OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) |
		  RADEON_CRTC_OFFSET_FLIP_CNTL );
	OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
	OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) |
		  RADEON_CRTC_OFFSET_FLIP_CNTL );
	ADVANCE_RING();

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

/* Called whenever a client dies, from DRM(release).
 * NOTE: Lock isn't necessarily held when this is called!
 */
int radeon_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	if (dev_priv->current_page != 0)
		radeon_cp_dispatch_flip( dev );

	dev_priv->page_flipping = 0;
	return 0;
}
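/* For reference (an illustrative sketch, not code from this driver): the
 * BEGIN_RING()/OUT_RING() sequences above emit CP "type-0" packets.  A
 * plain CP_PACKET0( reg, n ) header makes the CP write the following n+1
 * dwords to n+1 consecutive registers starting at reg; CP_PACKET0_TABLE()
 * additionally sets the one-register-write bit, so all n+1 dwords land in
 * the same register, which is how the 32 stipple rows are streamed into
 * RADEON_RE_STIPPLE_DATA (1+1 + 1+32 = 35 dwords, matching BEGIN_RING(35)).
 * The helper names below are hypothetical; the field layout follows the
 * r100 CP packet format.
 */
#include <stdint.h>

#define CP_TYPE0_ONE_REG_WR	(1u << 15)	/* write all dwords to one register */

static inline uint32_t cp_type0( uint32_t reg, uint32_t n )
{
	/* [31:30] = 00 (type 0), [29:16] = n, [12:0] = reg >> 2 */
	return ( n << 16 ) | ( ( reg >> 2 ) & 0x1fff );
}

static inline uint32_t cp_type0_table( uint32_t reg, uint32_t n )
{
	return cp_type0( reg, n ) | CP_TYPE0_ONE_REG_WR;
}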
/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
int radeon_cp_flip( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip( dev );

	radeon_cp_dispatch_flip( dev );

	COMMIT_RING();
	return 0;
}

int radeon_cp_swap( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap( dev );
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}
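/* Hypothetical client-side view of the two ioctls above (a sketch, with
 * request names as defined in radeon_drm.h and error handling elided):
 * a swap blits the back buffer through the SAREA cliprects, while a flip
 * only changes the CRTC base address, so a flipping client renders to the
 * page that sarea->pfCurrentPage says is currently hidden, then flips.
 * Neither request carries a payload.
 */
#include <sys/ioctl.h>

static void present_frame( int fd, int use_page_flip )
{
	if ( use_page_flip )
		ioctl( fd, DRM_IOCTL_RADEON_FLIP, NULL );	/* toggle CRTC base */
	else
		ioctl( fd, DRM_IOCTL_RADEON_SWAP, NULL );	/* back->front blit */
}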
int radeon_cp_vertex( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex_t vertex;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t *)data,
				  sizeof(vertex) );

	DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
		   DRM_CURRENTPID,
		   vertex.idx, vertex.count, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];

	if ( buf->pid != DRM_CURRENTPID ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   DRM_CURRENTPID, buf->pid );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return DRM_ERR(EINVAL);
	}

	/* Build up a prim_t record:
	 */
	if (vertex.count) {
		buf->used = vertex.count;	/* not used? */

		if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
			radeon_emit_state( dev_priv,
					   &sarea_priv->context_state,
					   sarea_priv->tex_state,
					   sarea_priv->dirty );

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		prim.start = 0;
		prim.finish = vertex.count;	/* unused */
		prim.prim = vertex.prim;
		prim.numverts = vertex.count;
		prim.vc_format = dev_priv->sarea_priv->vc_format;

		radeon_cp_dispatch_vertex( dev, buf, &prim,
					   dev_priv->sarea_priv->boxes,
					   dev_priv->sarea_priv->nbox );
	}

	if (vertex.discard) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}

int radeon_cp_indices( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indices_t elts;
	drm_radeon_tcl_prim_t prim;
	int count;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t *)data,
				  sizeof(elts) );

	DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
		   DRM_CURRENTPID,
		   elts.idx, elts.start, elts.end, elts.discard );

	if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   elts.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( elts.prim < 0 ||
	     elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", elts.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[elts.idx];

	if ( buf->pid != DRM_CURRENTPID ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   DRM_CURRENTPID, buf->pid );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", elts.idx );
		return DRM_ERR(EINVAL);
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= RADEON_INDEX_PRIM_OFFSET;

	if ( elts.start & 0x7 ) {
		DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
		return DRM_ERR(EINVAL);
	}
	if ( elts.start < buf->used ) {
		DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
		return DRM_ERR(EINVAL);
	}

	buf->used = elts.end;

	if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
		radeon_emit_state( dev_priv,
				   &sarea_priv->context_state,
				   sarea_priv->tex_state,
				   sarea_priv->dirty );

		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
				       RADEON_UPLOAD_TEX1IMAGES |
				       RADEON_UPLOAD_TEX2IMAGES |
				       RADEON_REQUIRE_QUIESCENCE);
	}

	/* Build up a prim_t record:
	 */
	prim.start = elts.start;
	prim.finish = elts.end;
	prim.prim = elts.prim;
	prim.offset = 0;	/* offset from start of dma buffers */
	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
	prim.vc_format = dev_priv->sarea_priv->vc_format;

	radeon_cp_dispatch_indices( dev, buf, &prim,
				    dev_priv->sarea_priv->boxes,
				    dev_priv->sarea_priv->nbox );
	if (elts.discard) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}
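/* A hedged client-side sketch of the vertex path above (not part of this
 * file): the buffer index names a DMA buffer previously handed out to
 * this process, e.g. via drmDMA(), and discard=1 returns it to the kernel
 * freelist once dispatched.  The field names match the vertex.idx/count/
 * discard/prim uses in radeon_cp_vertex(); the function name and the
 * DRM_IOCTL_RADEON_VERTEX request are assumptions from radeon_drm.h.
 * Buffer filling and error handling elided.
 */
#include <sys/ioctl.h>

static int emit_vertex_buffer( int fd, int buf_idx, int nr_verts )
{
	drm_radeon_vertex_t v;

	v.prim = RADEON_PRIM_TYPE_TRI_LIST;	/* any prim <= 3VRT_LINE_LIST */
	v.idx = buf_idx;			/* index into dma->buflist[] */
	v.count = nr_verts;
	v.discard = 1;				/* kernel may recycle the buffer */

	return ioctl( fd, DRM_IOCTL_RADEON_VERTEX, &v );
}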
int radeon_cp_texture( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t tex;
	drm_radeon_tex_image_t image;
	int ret;

	LOCK_TEST_WITH_RETURN( dev );

	DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data,
				  sizeof(tex) );

	if ( tex.image == NULL ) {
		DRM_ERROR( "null texture image!\n" );
		return DRM_ERR(EINVAL);
	}

	if ( DRM_COPY_FROM_USER( &image,
				 (drm_radeon_tex_image_t *)tex.image,
				 sizeof(image) ) )
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	ret = radeon_cp_dispatch_texture( dev, &tex, &image );

	COMMIT_RING();
	return ret;
}

int radeon_cp_stipple( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN( dev );

	DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
				  sizeof(stipple) );

	if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	radeon_cp_dispatch_stipple( dev, mask );

	COMMIT_RING();
	return 0;
}

int radeon_cp_indirect( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indirect_t indirect;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t *)data,
				  sizeof(indirect) );

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];

	if ( buf->pid != DRM_CURRENTPID ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   DRM_CURRENTPID, buf->pid );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
		return DRM_ERR(EINVAL);
	}
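/* Client-side sketch for radeon_cp_stipple() above (not part of this
 * file): the ioctl takes a single user pointer to a 32x32 polygon stipple
 * pattern, one 32-bit row per entry, as the kernel's 32 * sizeof(u32)
 * copy implies.  The checkerboard values, the function name, and the use
 * of a raw ioctl() are illustrative only.
 */
#include <sys/ioctl.h>

static int set_checkerboard_stipple( int fd )
{
	drm_radeon_stipple_t s;
	unsigned int mask[32];
	int i;

	for ( i = 0 ; i < 32 ; i++ )
		mask[i] = ( i & 1 ) ? 0xaaaaaaaa : 0x55555555;

	s.mask = mask;
	return ioctl( fd, DRM_IOCTL_RADEON_STIPPLE, &s );
}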