📄 radeon_state.c
	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

static int radeon_cp_dispatch_blit( drm_device_t *dev,
				     drm_radeon_blit_t *blit )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_buf_priv_t *buf_priv;
	u32 format;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG( "blit: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
		   blit->offset >> 10, blit->pitch, blit->format,
		   blit->x, blit->y, blit->width, blit->height );

	radeon_update_ring_snapshot( dev_priv );

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch ( blit->format ) {
	case RADEON_TXF_32BPP_ARGB8888:
	case RADEON_TXF_32BPP_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		dword_shift = 0;
		break;
	case RADEON_TXF_16BPP_AI88:
	case RADEON_TXF_16BPP_ARGB1555:
	case RADEON_TXF_16BPP_RGB565:
	case RADEON_TXF_16BPP_ARGB4444:
		format = RADEON_COLOR_FORMAT_RGB565;
		dword_shift = 1;
		break;
	case RADEON_TXF_8BPP_I:
	case RADEON_TXF_8BPP_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		dword_shift = 2;
		break;
	default:
		DRM_ERROR( "invalid blit format %d\n", blit->format );
		return -EINVAL;
	}

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING( 4 );

	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", blit->idx );
		return -EINVAL;
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;
	if ( !dwords ) dwords = 1;

	data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);

	data[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
	data[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		   RADEON_GMC_BRUSH_NONE |
		   (format << 8) |
		   RADEON_GMC_SRC_DATATYPE_COLOR |
		   RADEON_ROP3_S |
		   RADEON_DP_SRC_SOURCE_HOST_DATA |
		   RADEON_GMC_CLR_CMP_CNTL_DIS |
		   RADEON_GMC_WR_MSK_DIS);

	data[2] = (blit->pitch << 22) | (blit->offset >> 10);
	data[3] = 0xffffffff;
	data[4] = 0xffffffff;
	data[5] = (blit->y << 16) | blit->x;
	data[6] = (blit->height << 16) | blit->width;
	data[7] = dwords;

	buf->used = (dwords + 8) * sizeof(u32);

	radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 4 );

	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();

	return 0;
}
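/* Worked example of the dword_shift arithmetic above (values assumed,
 * not taken from the driver): for a 16bpp RGB565 blit of width 64 and
 * height 32, dword_shift is 1, so
 *
 *     dwords    = (64 * 32) >> 1       = 1024
 *     buf->used = (1024 + 8) * 4 bytes = 4128 bytes
 *
 * i.e. the indirect buffer carries the CP_PACKET3 header, the seven
 * setup words data[1]..data[7], and 1024 dwords of pixel data.  The
 * shift stands in for a divide by the number of texels per dword,
 * which is a power of two for every format accepted by the switch.
 */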
static void radeon_cp_dispatch_stipple( drm_device_t *dev,
					 u32 *stipple )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	radeon_update_ring_snapshot( dev_priv );

	BEGIN_RING( 35 );

	OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
	OUT_RING( 0x00000000 );

	OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}


/* ================================================================
 * IOCTL functions
 */

int radeon_cp_clear( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t clear;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
	     dev->lock.pid != current->pid ) {
		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &clear, (drm_radeon_clear_t *) arg,
			     sizeof(clear) ) )
		return -EFAULT;

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_clear( dev, &clear );

	return 0;
}

int radeon_cp_swap( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
	     dev->lock.pid != current->pid ) {
		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ( !dev_priv->page_flipping ) {
		radeon_cp_dispatch_swap( dev );
		dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
						RADEON_UPLOAD_MASKS);
	} else {
		radeon_cp_dispatch_flip( dev );
	}

	return 0;
}

int radeon_cp_vertex( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_buf_priv_t *buf_priv;
	drm_radeon_vertex_t vertex;

	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
	     dev->lock.pid != current->pid ) {
		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
		return -EINVAL;
	}
	if ( !dev_priv || dev_priv->is_pci ) {
		DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &vertex, (drm_radeon_vertex_t *)arg,
			     sizeof(vertex) ) )
		return -EFAULT;

	DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n",
		   __FUNCTION__, current->pid,
		   vertex.idx, vertex.count, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return -EINVAL;
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return -EINVAL;
	}

	VB_AGE_CHECK_WITH_RET( dev_priv );

	buf = dma->buflist[vertex.idx];
	buf_priv = buf->dev_private;

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return -EINVAL;
	}

	buf->used = vertex.count;
	buf_priv->prim = vertex.prim;
	buf_priv->discard = vertex.discard;

	radeon_cp_dispatch_vertex( dev, buf );

	return 0;
}
buffer %d\n", vertex.idx ); return -EINVAL; } buf->used = vertex.count; buf_priv->prim = vertex.prim; buf_priv->discard = vertex.discard; radeon_cp_dispatch_vertex( dev, buf ); return 0;}int radeon_cp_indices( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; drm_radeon_buf_priv_t *buf_priv; drm_radeon_indices_t elts; int count; if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || dev->lock.pid != current->pid ) { DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); return -EINVAL; } if ( !dev_priv || dev_priv->is_pci ) { DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ ); return -EINVAL; } if ( copy_from_user( &elts, (drm_radeon_indices_t *)arg, sizeof(elts) ) ) return -EFAULT; DRM_DEBUG( "%s: pid=%d index=%d start=%d end=%d discard=%d\n", __FUNCTION__, current->pid, elts.idx, elts.start, elts.end, elts.discard ); if ( elts.idx < 0 || elts.idx >= dma->buf_count ) { DRM_ERROR( "buffer index %d (of %d max)\n", elts.idx, dma->buf_count - 1 ); return -EINVAL; } if ( elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) { DRM_ERROR( "buffer prim %d\n", elts.prim ); return -EINVAL; } VB_AGE_CHECK_WITH_RET( dev_priv ); buf = dma->buflist[elts.idx]; buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", current->pid, buf->pid ); return -EINVAL; } if ( buf->pending ) { DRM_ERROR( "sending pending buffer %d\n", elts.idx ); return -EINVAL; } count = (elts.end - elts.start) / sizeof(u16); elts.start -= RADEON_INDEX_PRIM_OFFSET; if ( elts.start & 0x7 ) { DRM_ERROR( "misaligned buffer 0x%x\n", elts.start ); return -EINVAL; } if ( elts.start < buf->used ) { DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used ); return -EINVAL; } buf->used = elts.end; buf_priv->prim = elts.prim; buf_priv->discard = elts.discard; radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count ); return 0;}int radeon_cp_blit( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; drm_radeon_blit_t blit; if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || dev->lock.pid != current->pid ) { DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); return -EINVAL; } if ( copy_from_user( &blit, (drm_radeon_blit_t *)arg, sizeof(blit) ) ) return -EFAULT; DRM_DEBUG( "%s: pid=%d index=%d\n", __FUNCTION__, current->pid, blit.idx ); if ( blit.idx < 0 || blit.idx > dma->buf_count ) { DRM_ERROR( "sending %d buffers (of %d max)\n", blit.idx, dma->buf_count ); return -EINVAL; } VB_AGE_CHECK_WITH_RET( dev_priv ); return radeon_cp_dispatch_blit( dev, &blit );}int radeon_cp_stipple( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ){ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_stipple_t stipple; u32 mask[32]; if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || dev->lock.pid != current->pid ) { DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); return -EINVAL; } if ( copy_from_user( &stipple, (drm_radeon_stipple_t *)arg, sizeof(stipple) ) ) return -EFAULT; if ( copy_from_user( &mask, stipple.mask, 32 * sizeof(u32) ) ) return -EFAULT; radeon_cp_dispatch_stipple( dev, mask ); return 0;}int 
int radeon_cp_indirect( struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_buf_priv_t *buf_priv;
	drm_radeon_indirect_t indirect;
	RING_LOCALS;

	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
	     dev->lock.pid != current->pid ) {
		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
		return -EINVAL;
	}
	if ( !dev_priv || dev_priv->is_pci ) {
		DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &indirect, (drm_radeon_indirect_t *)arg,
			     sizeof(indirect) ) )
		return -EFAULT;

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return -EINVAL;
	}

	buf = dma->buflist[indirect.idx];
	buf_priv = buf->dev_private;

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
		return -EINVAL;
	}
	if ( indirect.start < buf->used ) {
		DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
			   indirect.start, buf->used );
		return -EINVAL;
	}

	VB_AGE_CHECK_WITH_RET( dev_priv );

	buf->used = indirect.end;
	buf_priv->discard = indirect.discard;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING( 2 );

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );

	return 0;
}
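/* Worked example of the reuse check above (values assumed): a client
 * fills the first 0x200 bytes of a buffer and submits it with
 * indirect.start = 0 and indirect.end = 0x200, after which buf->used
 * becomes 0x200.  If indirect.discard was 0, the same buffer may be
 * topped up and submitted again, but only with indirect.start >= 0x200;
 * any lower start trips the "reusing indirect" error, since that byte
 * range has already been dispatched to the CP.
 */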