
radeon_state.c

Linux 2.4.20 kernel source; it can be patched with RTLinux 3.2 to build a real-time Linux system, after which the kernel is compiled.
Language: C
	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch ( tex->format ) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR( "invalid texture format %d\n", tex->format );
		return -EINVAL;
	}

	DRM_DEBUG( "   tex=%dx%d  blit=%d\n",
		   tex_width, tex->height, blit_width );

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING( 4 );

	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();

	/* Make a copy of the parameters in case we have to update them
	 * for a multi-pass texture blit.
	 */
	y = image->y;
	height = image->height;
	data = (u8 *)image->data;

	size = height * blit_width;

	if ( size > RADEON_MAX_TEXTURE_SIZE ) {
		/* Texture image is too large, do a multipass upload */
		ret = -EAGAIN;

		/* Adjust the blit size to fit the indirect buffer */
		height = RADEON_MAX_TEXTURE_SIZE / blit_width;
		size = height * blit_width;

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (char *)image->data + size;

		if ( copy_to_user( tex->image, image, sizeof(*image) ) ) {
			DRM_ERROR( "EFAULT on tex->image\n" );
			return -EFAULT;
		}
	} else if ( size < 4 ) {
		size = 4;
	}

	dwords = size / 4;

	/* Dispatch the indirect buffer.
	 */
	buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);

	buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
	buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		     RADEON_GMC_BRUSH_NONE |
		     (format << 8) |
		     RADEON_GMC_SRC_DATATYPE_COLOR |
		     RADEON_ROP3_S |
		     RADEON_DP_SRC_SOURCE_HOST_DATA |
		     RADEON_GMC_CLR_CMP_CNTL_DIS |
		     RADEON_GMC_WR_MSK_DIS);

	buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
	buffer[3] = 0xffffffff;
	buffer[4] = 0xffffffff;
	buffer[5] = (y << 16) | image->x;
	buffer[6] = (height << 16) | image->width;
	buffer[7] = dwords;

	buffer += 8;

	if ( tex_width >= 32 ) {
		/* Texture image width is larger than the minimum, so we
		 * can upload it directly.
		 */
		if ( copy_from_user( buffer, data, dwords * sizeof(u32) ) ) {
			DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
			return -EFAULT;
		}
	} else {
		/* Texture image width is less than the minimum, so we
		 * need to pad out each image scanline to the minimum
		 * width.
		 */
		for ( i = 0 ; i < tex->height ; i++ ) {
			if ( copy_from_user( buffer, data, tex_width ) ) {
				DRM_ERROR( "EFAULT on pad, %d bytes\n",
					   tex_width );
				return -EFAULT;
			}
			buffer += 8;
			data += tex_width;
		}
	}

	buf->pid = current->pid;
	buf->used = (dwords + 8) * sizeof(u32);
	buf_priv->discard = 1;

	radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 4 );

	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();

	ADVANCE_RING();

	return ret;
}

static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	BEGIN_RING( 35 );

	OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
	OUT_RING( 0x00000000 );

	OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}


/* ================================================================
 * IOCTL functions
 */

int radeon_cp_clear( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t clear;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg,
			     sizeof(clear) ) )
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ( copy_from_user( &depth_boxes, clear.depth_boxes,
			     sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
		return -EFAULT;

	radeon_cp_dispatch_clear( dev, &clear, depth_boxes );

	return 0;
}

int radeon_cp_swap( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	LOCK_TEST_WITH_RETURN( dev );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ( !dev_priv->page_flipping ) {
		radeon_cp_dispatch_swap( dev );
		dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT |
						RADEON_UPLOAD_MASKS);
	} else {
		radeon_cp_dispatch_flip( dev );
	}

	return 0;
}

int radeon_cp_vertex( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_buf_priv_t *buf_priv;
	drm_radeon_vertex_t vertex;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &vertex, (drm_radeon_vertex_t *)arg,
			     sizeof(vertex) ) )
		return -EFAULT;

	DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n",
		   __FUNCTION__, current->pid,
		   vertex.idx, vertex.count, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return -EINVAL;
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];
	buf_priv = buf->dev_private;

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return -EINVAL;
	}

	buf->used = vertex.count;
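	/* Record the primitive type and discard flag, then queue the
	 * buffer for the CP; a set discard flag lets the driver reclaim
	 * the buffer once the engine has consumed it.
	 */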
	buf_priv->prim = vertex.prim;
	buf_priv->discard = vertex.discard;

	radeon_cp_dispatch_vertex( dev, buf );

	return 0;
}

int radeon_cp_indices( struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_buf_priv_t *buf_priv;
	drm_radeon_indices_t elts;
	int count;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &elts, (drm_radeon_indices_t *)arg,
			     sizeof(elts) ) )
		return -EFAULT;

	DRM_DEBUG( "%s: pid=%d index=%d start=%d end=%d discard=%d\n",
		   __FUNCTION__, current->pid,
		   elts.idx, elts.start, elts.end, elts.discard );

	if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   elts.idx, dma->buf_count - 1 );
		return -EINVAL;
	}
	if ( elts.prim < 0 ||
	     elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", elts.prim );
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[elts.idx];
	buf_priv = buf->dev_private;

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", elts.idx );
		return -EINVAL;
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= RADEON_INDEX_PRIM_OFFSET;

	if ( elts.start & 0x7 ) {
		DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
		return -EINVAL;
	}
	if ( elts.start < buf->used ) {
		DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
		return -EINVAL;
	}

	buf->used = elts.end;
	buf_priv->prim = elts.prim;
	buf_priv->discard = elts.discard;

	radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count );

	return 0;
}

int radeon_cp_texture( struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t tex;
	drm_radeon_tex_image_t image;

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &tex, (drm_radeon_texture_t *)arg, sizeof(tex) ) )
		return -EFAULT;

	if ( tex.image == NULL ) {
		DRM_ERROR( "null texture image!\n" );
		return -EINVAL;
	}

	if ( copy_from_user( &image,
			     (drm_radeon_tex_image_t *)tex.image,
			     sizeof(image) ) )
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	return radeon_cp_dispatch_texture( dev, &tex, &image );
}

int radeon_cp_stipple( struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &stipple, (drm_radeon_stipple_t *)arg,
			     sizeof(stipple) ) )
		return -EFAULT;

	if ( copy_from_user( &mask, stipple.mask, 32 * sizeof(u32) ) )
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	radeon_cp_dispatch_stipple( dev, mask );

	return 0;
}

int radeon_cp_indirect( struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_buf_priv_t *buf_priv;
	drm_radeon_indirect_t indirect;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &indirect, (drm_radeon_indirect_t *)arg,
			     sizeof(indirect) ) )
		return -EFAULT;

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return -EINVAL;
	}

	buf = dma->buflist[indirect.idx];
	buf_priv = buf->dev_private;

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
		return -EINVAL;
	}

	if ( indirect.start < buf->used ) {
		DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
			   indirect.start, buf->used );
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf->used = indirect.end;
	buf_priv->discard = indirect.discard;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING( 2 );

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );

	return 0;
}
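For context, these handlers sit behind the DRM ioctl interface: a 3D client fills in the small parameter structures from radeon_drm.h and issues an ioctl on its open DRM file descriptor. The fragment below is a minimal userspace sketch of driving radeon_cp_stipple() that way. It assumes the installed radeon_drm.h provides drm_radeon_stipple_t and the DRM_IOCTL_RADEON_STIPPLE request code, and that the caller has already opened the device, been authenticated, and acquired the hardware lock (otherwise LOCK_TEST_WITH_RETURN rejects the call); it is an illustration, not part of the driver.

/* Hypothetical userspace example (not from radeon_state.c): upload a
 * 32x32 polygon stipple pattern through the stipple ioctl handled by
 * radeon_cp_stipple() above.  Assumes radeon_drm.h defines
 * drm_radeon_stipple_t and DRM_IOCTL_RADEON_STIPPLE, and that drm_fd
 * is an authenticated DRM file descriptor whose owner holds the
 * hardware lock.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include "radeon_drm.h"

int upload_stipple( int drm_fd )
{
	unsigned int mask[32];
	drm_radeon_stipple_t stipple;
	int i;

	/* 50% checkerboard: one 32-bit word per stipple scanline. */
	for ( i = 0 ; i < 32 ; i++ )
		mask[i] = ( i & 1 ) ? 0xaaaaaaaa : 0x55555555;

	stipple.mask = mask;

	/* The kernel side copies the parameter block and then the
	 * 32-word mask with copy_from_user() before emitting the
	 * RADEON_RE_STIPPLE_DATA packet.
	 */
	if ( ioctl( drm_fd, DRM_IOCTL_RADEON_STIPPLE, &stipple ) < 0 ) {
		perror( "DRM_IOCTL_RADEON_STIPPLE" );
		return -1;
	}
	return 0;
}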
