radeon_state.c
        return DRM_ERR(EINVAL);
    }

    if ( indirect.start < buf->used ) {
        DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
                   indirect.start, buf->used );
        return DRM_ERR(EINVAL);
    }

    RING_SPACE_TEST_WITH_RETURN( dev_priv );
    VB_AGE_TEST_WITH_RETURN( dev_priv );

    buf->used = indirect.end;

    /* Wait for the 3D stream to idle before the indirect buffer
     * containing 2D acceleration commands is processed.
     */
    BEGIN_RING( 2 );
    RADEON_WAIT_UNTIL_3D_IDLE();
    ADVANCE_RING();

    /* Dispatch the indirect buffer full of commands from the
     * X server. This is insecure and is thus only available to
     * privileged clients.
     */
    radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
    if (indirect.discard) {
        radeon_cp_discard_buffer( dev, buf );
    }

    COMMIT_RING();
    return 0;
}

int radeon_cp_vertex2( DRM_IOCTL_ARGS )
{
    DRM_DEVICE;
    drm_radeon_private_t *dev_priv = dev->dev_private;
    drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
    drm_device_dma_t *dma = dev->dma;
    drm_buf_t *buf;
    drm_radeon_vertex2_t vertex;
    int i;
    unsigned char laststate;

    LOCK_TEST_WITH_RETURN( dev );

    if ( !dev_priv ) {
        DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
        return DRM_ERR(EINVAL);
    }

    DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data,
                              sizeof(vertex) );

    DRM_DEBUG( "pid=%d index=%d discard=%d\n",
               DRM_CURRENTPID, vertex.idx, vertex.discard );

    if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
        DRM_ERROR( "buffer index %d (of %d max)\n",
                   vertex.idx, dma->buf_count - 1 );
        return DRM_ERR(EINVAL);
    }

    RING_SPACE_TEST_WITH_RETURN( dev_priv );
    VB_AGE_TEST_WITH_RETURN( dev_priv );

    buf = dma->buflist[vertex.idx];

    if ( buf->pid != DRM_CURRENTPID ) {
        DRM_ERROR( "process %d using buffer owned by %d\n",
                   DRM_CURRENTPID, buf->pid );
        return DRM_ERR(EINVAL);
    }

    if ( buf->pending ) {
        DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
        return DRM_ERR(EINVAL);
    }

    if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
        return DRM_ERR(EINVAL);

    for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
        drm_radeon_prim_t prim;
        drm_radeon_tcl_prim_t tclprim;

        if ( DRM_COPY_FROM_USER( &prim, &vertex.prim[i], sizeof(prim) ) )
            return DRM_ERR(EFAULT);

        if ( prim.stateidx != laststate ) {
            drm_radeon_state_t state;

            if ( DRM_COPY_FROM_USER( &state,
                                     &vertex.state[prim.stateidx],
                                     sizeof(state) ) )
                return DRM_ERR(EFAULT);

            radeon_emit_state2( dev_priv, &state );

            laststate = prim.stateidx;
        }

        tclprim.start = prim.start;
        tclprim.finish = prim.finish;
        tclprim.prim = prim.prim;
        tclprim.vc_format = prim.vc_format;

        if ( prim.prim & RADEON_PRIM_WALK_IND ) {
            tclprim.offset = prim.numverts * 64;
            tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */

            radeon_cp_dispatch_indices( dev, buf, &tclprim,
                                        sarea_priv->boxes,
                                        sarea_priv->nbox );
        } else {
            tclprim.numverts = prim.numverts;
            tclprim.offset = 0; /* not used */

            radeon_cp_dispatch_vertex( dev, buf, &tclprim,
                                       sarea_priv->boxes,
                                       sarea_priv->nbox );
        }

        if (sarea_priv->nbox == 1)
            sarea_priv->nbox = 0;
    }

    if ( vertex.discard ) {
        radeon_cp_discard_buffer( dev, buf );
    }

    COMMIT_RING();
    return 0;
}

static int radeon_emit_packets( drm_radeon_private_t *dev_priv,
                                drm_radeon_cmd_header_t header,
                                drm_radeon_cmd_buffer_t *cmdbuf )
{
    int id = (int)header.packet.packet_id;
    int sz, reg;
    int *data = (int *)cmdbuf->buf;
    RING_LOCALS;

    if (id >= RADEON_MAX_STATE_PACKETS)
        return DRM_ERR(EINVAL);

    sz = packet[id].len;
    reg = packet[id].start;

    if (sz * sizeof(int) > cmdbuf->bufsz)
        return DRM_ERR(EINVAL);

    BEGIN_RING(sz+1);
    OUT_RING( CP_PACKET0( reg, (sz-1) ) );
    OUT_RING_USER_TABLE( data, sz );
    ADVANCE_RING();

    cmdbuf->buf += sz * sizeof(int);
    cmdbuf->bufsz -= sz * sizeof(int);
    return 0;
}

static __inline__ int radeon_emit_scalars( drm_radeon_private_t *dev_priv,
                                           drm_radeon_cmd_header_t header,
                                           drm_radeon_cmd_buffer_t *cmdbuf )
{
    int sz = header.scalars.count;
    int *data = (int *)cmdbuf->buf;
    int start = header.scalars.offset;
    int stride = header.scalars.stride;
    RING_LOCALS;

    BEGIN_RING( 3+sz );
    OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
    OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT) );
    OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
    OUT_RING_USER_TABLE( data, sz );
    ADVANCE_RING();

    cmdbuf->buf += sz * sizeof(int);
    cmdbuf->bufsz -= sz * sizeof(int);
    return 0;
}

/* God this is ugly */
static __inline__ int radeon_emit_scalars2( drm_radeon_private_t *dev_priv,
                                            drm_radeon_cmd_header_t header,
                                            drm_radeon_cmd_buffer_t *cmdbuf )
{
    int sz = header.scalars.count;
    int *data = (int *)cmdbuf->buf;
    int start = ((unsigned int)header.scalars.offset) + 0x100;
    int stride = header.scalars.stride;
    RING_LOCALS;

    BEGIN_RING( 3+sz );
    OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
    OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT) );
    OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
    OUT_RING_USER_TABLE( data, sz );
    ADVANCE_RING();

    cmdbuf->buf += sz * sizeof(int);
    cmdbuf->bufsz -= sz * sizeof(int);
    return 0;
}

static __inline__ int radeon_emit_vectors( drm_radeon_private_t *dev_priv,
                                           drm_radeon_cmd_header_t header,
                                           drm_radeon_cmd_buffer_t *cmdbuf )
{
    int sz = header.vectors.count;
    int *data = (int *)cmdbuf->buf;
    int start = header.vectors.offset;
    int stride = header.vectors.stride;
    RING_LOCALS;

    BEGIN_RING( 3+sz );
    OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
    OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT) );
    OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
    OUT_RING_USER_TABLE( data, sz );
    ADVANCE_RING();

    cmdbuf->buf += sz * sizeof(int);
    cmdbuf->bufsz -= sz * sizeof(int);
    return 0;
}

static int radeon_emit_packet3( drm_device_t *dev,
                                drm_radeon_cmd_buffer_t *cmdbuf )
{
    drm_radeon_private_t *dev_priv = dev->dev_private;
    int cmdsz, tmp;
    int *cmd = (int *)cmdbuf->buf;
    RING_LOCALS;

    DRM_DEBUG("\n");

    if (DRM_GET_USER_UNCHECKED( tmp, &cmd[0] ))
        return DRM_ERR(EFAULT);

    cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16);

    if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 ||
        cmdsz * 4 > cmdbuf->bufsz)
        return DRM_ERR(EINVAL);

    BEGIN_RING( cmdsz );
    OUT_RING_USER_TABLE( cmd, cmdsz );
    ADVANCE_RING();

    cmdbuf->buf += cmdsz * 4;
    cmdbuf->bufsz -= cmdsz * 4;
    return 0;
}

static int radeon_emit_packet3_cliprect( drm_device_t *dev,
                                         drm_radeon_cmd_buffer_t *cmdbuf,
                                         int orig_nbox )
{
    drm_radeon_private_t *dev_priv = dev->dev_private;
    drm_clip_rect_t box;
    int cmdsz, tmp;
    int *cmd = (int *)cmdbuf->buf;
    drm_clip_rect_t *boxes = cmdbuf->boxes;
    int i = 0;
    RING_LOCALS;

    DRM_DEBUG("\n");

    if (DRM_GET_USER_UNCHECKED( tmp, &cmd[0] ))
        return DRM_ERR(EFAULT);

    cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16);

    if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 ||
        cmdsz * 4 > cmdbuf->bufsz)
        return DRM_ERR(EINVAL);

    if (!orig_nbox)
        goto out;

    do {
        if ( i < cmdbuf->nbox ) {
            if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
                return DRM_ERR(EFAULT);
            /* FIXME The second and subsequent times round
             * this loop, send a WAIT_UNTIL_3D_IDLE before
             * calling emit_clip_rect(). This fixes a
             * lockup on fast machines when sending
             * several cliprects with a cmdbuf, as when
             * waving a 2D window over a 3D
             * window. Something in the commands from user
             * space seems to hang the card when they're
             * sent several times in a row. That would be
             * the correct place to fix it but this works
             * around it until I can figure that out - Tim
             * Smith */
            if ( i ) {
                BEGIN_RING( 2 );
                RADEON_WAIT_UNTIL_3D_IDLE();
                ADVANCE_RING();
            }
            radeon_emit_clip_rect( dev_priv, &box );
        }

        BEGIN_RING( cmdsz );
        OUT_RING_USER_TABLE( cmd, cmdsz );
        ADVANCE_RING();

    } while ( ++i < cmdbuf->nbox );

    if (cmdbuf->nbox == 1)
        cmdbuf->nbox = 0;

 out:
    cmdbuf->buf += cmdsz * 4;
    cmdbuf->bufsz -= cmdsz * 4;
    return 0;
}

static int radeon_emit_wait( drm_device_t *dev, int flags )
{
    drm_radeon_private_t *dev_priv = dev->dev_private;
    RING_LOCALS;

    DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
    switch (flags) {
    case RADEON_WAIT_2D:
        BEGIN_RING( 2 );
        RADEON_WAIT_UNTIL_2D_IDLE();
        ADVANCE_RING();
        break;
    case RADEON_WAIT_3D:
        BEGIN_RING( 2 );
        RADEON_WAIT_UNTIL_3D_IDLE();
        ADVANCE_RING();
        break;
    case RADEON_WAIT_2D|RADEON_WAIT_3D:
        BEGIN_RING( 2 );
        RADEON_WAIT_UNTIL_IDLE();
        ADVANCE_RING();
        break;
    default:
        return DRM_ERR(EINVAL);
    }

    return 0;
}

int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
{
    DRM_DEVICE;
    drm_radeon_private_t *dev_priv = dev->dev_private;
    drm_device_dma_t *dma = dev->dma;
    drm_buf_t *buf = 0;
    int idx;
    drm_radeon_cmd_buffer_t cmdbuf;
    drm_radeon_cmd_header_t header;
    int orig_nbox;

    LOCK_TEST_WITH_RETURN( dev );

    if ( !dev_priv ) {
        DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
        return DRM_ERR(EINVAL);
    }

    DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
                              sizeof(cmdbuf) );

    RING_SPACE_TEST_WITH_RETURN( dev_priv );
    VB_AGE_TEST_WITH_RETURN( dev_priv );

    if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
        return DRM_ERR(EFAULT);

    if (cmdbuf.nbox &&
        DRM_VERIFYAREA_READ(cmdbuf.boxes,
                            cmdbuf.nbox * sizeof(drm_clip_rect_t)))
        return DRM_ERR(EFAULT);

    orig_nbox = cmdbuf.nbox;

    while ( cmdbuf.bufsz >= sizeof(header) ) {

        if (DRM_GET_USER_UNCHECKED( header.i, (int *)cmdbuf.buf )) {
            DRM_ERROR("__get_user %p\n", cmdbuf.buf);
            return DRM_ERR(EFAULT);
        }

        cmdbuf.buf += sizeof(header);
        cmdbuf.bufsz -= sizeof(header);

        switch (header.header.cmd_type) {
        case RADEON_CMD_PACKET:
            DRM_DEBUG("RADEON_CMD_PACKET\n");
            if (radeon_emit_packets( dev_priv, header, &cmdbuf )) {
                DRM_ERROR("radeon_emit_packets failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        case RADEON_CMD_SCALARS:
            DRM_DEBUG("RADEON_CMD_SCALARS\n");
            if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
                DRM_ERROR("radeon_emit_scalars failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        case RADEON_CMD_VECTORS:
            DRM_DEBUG("RADEON_CMD_VECTORS\n");
            if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
                DRM_ERROR("radeon_emit_vectors failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        case RADEON_CMD_DMA_DISCARD:
            DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
            idx = header.dma.buf_idx;
            if ( idx < 0 || idx >= dma->buf_count ) {
                DRM_ERROR( "buffer index %d (of %d max)\n",
                           idx, dma->buf_count - 1 );
                return DRM_ERR(EINVAL);
            }

            buf = dma->buflist[idx];
            if ( buf->pid != DRM_CURRENTPID || buf->pending ) {
                DRM_ERROR( "bad buffer\n" );
                return DRM_ERR(EINVAL);
            }

            radeon_cp_discard_buffer( dev, buf );
            break;

        case RADEON_CMD_PACKET3:
            DRM_DEBUG("RADEON_CMD_PACKET3\n");
            if (radeon_emit_packet3( dev, &cmdbuf )) {
                DRM_ERROR("radeon_emit_packet3 failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        case RADEON_CMD_PACKET3_CLIP:
            DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
            if (radeon_emit_packet3_cliprect( dev, &cmdbuf, orig_nbox )) {
                DRM_ERROR("radeon_emit_packet3_clip failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        case RADEON_CMD_SCALARS2:
            DRM_DEBUG("RADEON_CMD_SCALARS2\n");
            if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
                DRM_ERROR("radeon_emit_scalars2 failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        case RADEON_CMD_WAIT:
            DRM_DEBUG("RADEON_CMD_WAIT\n");
            if (radeon_emit_wait( dev, header.wait.flags )) {
                DRM_ERROR("radeon_emit_wait failed\n");
                return DRM_ERR(EINVAL);
            }
            break;

        default:
            DRM_ERROR("bad cmd_type %d at %p\n",
                      header.header.cmd_type,
                      cmdbuf.buf - sizeof(header));
            return DRM_ERR(EINVAL);
        }
    }

    DRM_DEBUG("DONE\n");
    COMMIT_RING();
    return 0;
}

int radeon_cp_getparam( DRM_IOCTL_ARGS )
{
    DRM_DEVICE;
    drm_radeon_private_t *dev_priv = dev->dev_private;
    drm_radeon_getparam_t param;
    int value;

    if ( !dev_priv ) {
        DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
        return DRM_ERR(EINVAL);
    }

    DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data,
                              sizeof(param) );

    DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );

    switch( param.param ) {
    case RADEON_PARAM_AGP_BUFFER_OFFSET:
        value = dev_priv->agp_buffers_offset;
        break;
    case RADEON_PARAM_LAST_FRAME:
        dev_priv->stats.last_frame_reads++;
        value = GET_SCRATCH( 0 );
        break;
    case RADEON_PARAM_LAST_DISPATCH:
        value = GET_SCRATCH( 1 );
        break;
    case RADEON_PARAM_LAST_CLEAR:
        dev_priv->stats.last_clear_reads++;
        value = GET_SCRATCH( 2 );
        break;
    case RADEON_PARAM_IRQ_NR:
        value = dev->irq;
        break;
    case RADEON_PARAM_AGP_BASE:
        value = dev_priv->agp_vm_start;
        break;
    default:
        return DRM_ERR(EINVAL);
    }

    if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
        DRM_ERROR( "copy_to_user\n" );
        return DRM_ERR(EFAULT);
    }

    return 0;
}
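For context, radeon_cp_getparam() above copies a drm_radeon_getparam_t in from user space, looks up the requested parameter, and writes the result back through the user pointer in param.value. The following user-space sketch shows one way a client might drive that path; it assumes the DRM_IOCTL_RADEON_GETPARAM request code and struct definition from the shared radeon_drm.h header and an already-open DRM file descriptor, so treat it as an illustration rather than part of this file.

/* Hypothetical user-space sketch: query RADEON_PARAM_AGP_BUFFER_OFFSET
 * through the getparam ioctl handled by radeon_cp_getparam() above.
 * DRM_IOCTL_RADEON_GETPARAM and drm_radeon_getparam_t are assumed to
 * come from the radeon_drm.h header installed with the DRM.
 */
#include <sys/ioctl.h>
#include "radeon_drm.h"   /* drm_radeon_getparam_t, RADEON_PARAM_*, ioctl numbers */

int query_agp_buffer_offset( int drm_fd, int *offset_out )
{
    drm_radeon_getparam_t gp;
    int value = 0;

    gp.param = RADEON_PARAM_AGP_BUFFER_OFFSET;  /* handled in the switch above */
    gp.value = &value;                          /* kernel fills this via DRM_COPY_TO_USER */

    if ( ioctl( drm_fd, DRM_IOCTL_RADEON_GETPARAM, &gp ) < 0 )
        return -1;   /* EINVAL for an unknown param, EFAULT on a bad pointer */

    *offset_out = value;
    return 0;
}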