⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 radeon_state.c

📁 这份 Linux 源代码很全面,基本完整,使用 C 编译。由于时间原因我没有亲自测试,但即使仅用作参考资料也非常有价值。
💻 C
📖 第 1 页 / 共 4 页
字号:
		break;
	default:
		/* Reject any format not handled by the cases above. */
		DRM_ERROR( "invalid texture format %d\n", tex->format );
		return -EINVAL;
	}

	DRM_DEBUG( "   tex=%dx%d  blit=%d\n",
		   tex_width, tex->height, blit_width );

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

#ifdef __BIG_ENDIAN
	/* The Mesa texture functions provide the data in little endian as the
	 * chip wants it, but we need to compensate for the fact that the CP
	 * ring gets byte-swapped
	 */
	BEGIN_RING( 2 );
	OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
	ADVANCE_RING();
#endif

	/* Make a copy of the parameters in case we have to update them
	 * for a multi-pass texture blit.
	 */
	y = image->y;
	height = image->height;
	data = (const u8 *)image->data;
	size = height * blit_width;

	if ( size > RADEON_MAX_TEXTURE_SIZE ) {
		/* Texture image is too large, do a multipass upload.
		 * -EAGAIN tells the caller to invoke this ioctl again
		 * with the (updated) parameters for the remaining rows.
		 */
		ret = -EAGAIN;

		/* Adjust the blit size to fit the indirect buffer */
		height = RADEON_MAX_TEXTURE_SIZE / blit_width;
		size = height * blit_width;

		/* Update the input parameters for next time: advance the
		 * destination y and the source pointer past the rows this
		 * pass will upload, and write the new values back to the
		 * userspace drm_radeon_tex_image_t so the retry resumes
		 * where this pass stopped.
		 */
		image->y += height;
		image->height -= height;
		image->data = (const char *)image->data + size;

		if ( copy_to_user( tex->image, image, sizeof(*image) ) ) {
			DRM_ERROR( "EFAULT on tex->image\n" );
			return -EFAULT;
		}
	} else if ( size < 4 && size > 0 ) {
		/* Round tiny uploads up to one dword so the blit packet
		 * always carries at least one data word.
		 */
		size = 4;
	}

	dwords = size / 4;

	/* Dispatch the indirect buffer: build a CNTL_HOSTDATA_BLT packet
	 * directly into the indirect buffer's mapping, followed by the
	 * texel data copied from userspace.
	 */
	buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);

	/* Packet header: HOSTDATA_BLT with (dwords + 6) payload words. */
	buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
	buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		     RADEON_GMC_BRUSH_NONE |
		     (format << 8) |
		     RADEON_GMC_SRC_DATATYPE_COLOR |
		     RADEON_ROP3_S |
		     RADEON_DP_SRC_SOURCE_HOST_DATA |
		     RADEON_GMC_CLR_CMP_CNTL_DIS |
		     RADEON_GMC_WR_MSK_DIS);

	/* Destination pitch/offset, full write masks, then the blit
	 * rectangle (x,y / width,height) and the data dword count.
	 */
	buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
	buffer[3] = 0xffffffff;
	buffer[4] = 0xffffffff;
	buffer[5] = (y << 16) | image->x;
	buffer[6] = (height << 16) | image->width;
	buffer[7] = dwords;

	buffer += 8;

	if ( tex_width >= 32 ) {
		/* Texture image width is larger than the minimum, so we
		 * can upload it directly.
		 */
		if ( copy_from_user( buffer, data, dwords * sizeof(u32) ) ) {
			DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
			return -EFAULT;
		}
	} else {
		/* Texture image width is less than the minimum, so we
		 * need to pad out each image scanline to the minimum
		 * width: copy tex_width bytes per row but advance the
		 * destination by 8 dwords (32 bytes) per row.
		 */
		for ( i = 0 ; i < tex->height ; i++ ) {
			if ( copy_from_user( buffer, data, tex_width ) ) {
				DRM_ERROR( "EFAULT on pad, %d bytes\n",
					   tex_width );
				return -EFAULT;
			}
			buffer += 8;
			data += tex_width;
		}
	}

	/* Hand the filled buffer to the CP and mark it used/discardable. */
	buf->pid = current->pid;
	buf->used = (dwords + 8) * sizeof(u32);

	radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
	radeon_cp_discard_buffer( dev, buf );

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
 */
	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();

	return ret;
}

/* Write a 32x32 stipple pattern into the RE_STIPPLE table via the CP
 * ring: one register write to reset the table address, then 32 data
 * words (3 header/address words + 32 data words = 35 ring entries).
 */
static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	BEGIN_RING( 35 );

	/* Reset the stipple table write pointer to entry 0. */
	OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
	OUT_RING( 0x00000000 );

	/* Stream all 32 rows of the pattern into RE_STIPPLE_DATA. */
	OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */

/* DRM_IOCTL_RADEON_CLEAR: copy the clear parameters and the per-cliprect
 * depth boxes in from userspace, then dispatch the clear.
 * NOTE(review): dereferences dev_priv without the "!dev_priv" guard that
 * radeon_cp_vertex/indices/indirect perform — presumably relies on the
 * lock test implying initialization; confirm.
 */
int radeon_cp_clear( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t clear;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg,
			     sizeof(clear) ) )
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	/* Clamp the shared-area box count before using it to size the
	 * copy below, so userspace cannot overflow depth_boxes[].
	 */
	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if ( copy_from_user( &depth_boxes, clear.depth_boxes,
			     sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
		return -EFAULT;

	radeon_cp_dispatch_clear( dev, &clear, depth_boxes );

	COMMIT_RING();
	return 0;
}

/* Not sure why this isn't set all the time:
 */
/* Enable page flipping: set the FLIP_CNTL bit in both CRTC offset
 * control registers (read-modify-write through the ring) and mark the
 * driver state as flipping, starting on page 0.
 */
static int radeon_do_init_pageflip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	BEGIN_RING( 6 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
	OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
	OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
	OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
	ADVANCE_RING();

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	/* Publish the current page to the shared area so clients agree
	 * on which buffer is front.
	 */
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

/* Called whenever a client dies, from DRM(release).
 * NOTE:  Lock isn't necessarily held when this is called!
 */
/* If the dying client left page 1 displayed, flip back to page 0 before
 * disabling page flipping.
 */
int radeon_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	if (dev_priv->current_page != 0)
		radeon_cp_dispatch_flip( dev );

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
/* DRM_IOCTL_RADEON_FLIP: lazily enable page flipping on first use, then
 * dispatch a flip.
 * NOTE(review): no "!dev_priv" guard here, unlike the vertex/indices/
 * indirect ioctls — confirm initialization is guaranteed by this point.
 */
int radeon_cp_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip( dev );

	radeon_cp_dispatch_flip( dev );

	COMMIT_RING();
	return 0;
}

/* DRM_IOCTL_RADEON_SWAP: blit-based back-to-front copy (as opposed to a
 * CRTC flip).  Clamps the shared-area cliprect count, dispatches the
 * swap, and clears the context owner so state is re-emitted.
 */
int radeon_cp_swap( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap( dev );
	/* Force full state re-emission by the next context that renders. */
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}

/* DRM_IOCTL_RADEON_VERTEX: validate and dispatch a client vertex buffer
 * (validation and dispatch continue on the following lines).
 */
int radeon_cp_vertex( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex_t vertex;
	drm_radeon_tcl_prim_t prim;
	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &vertex, (drm_radeon_vertex_t *)arg,
			     sizeof(vertex) ) )
		return -EFAULT;

	DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
		   current->pid,
		   vertex.idx, vertex.count, vertex.discard );

	/* Validate the user-supplied buffer index and primitive type
	 * before touching the buffer list.
	 */
	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return -EINVAL;
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];

	/* Only the owning process may submit a buffer, and never one
	 * that is already pending on the hardware.
	 */
	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return -EINVAL;
	}

	/* Build up a prim_t record:
	 */
	if (vertex.count) {
		buf->used = vertex.count; /* not used?
 */
		/* Emit any dirty state (except cliprects, handled at
		 * dispatch time), then drop the one-shot upload bits.
		 */
		if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
			radeon_emit_state( dev_priv,
					   &sarea_priv->context_state,
					   sarea_priv->tex_state,
					   sarea_priv->dirty );

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		/* NOTE(review): unlike radeon_cp_indices, prim.offset is
		 * never assigned here — presumably ignored by
		 * radeon_cp_dispatch_vertex; confirm.
		 */
		prim.start = 0;
		prim.finish = vertex.count; /* unused */
		prim.prim = vertex.prim;
		prim.numverts = vertex.count;
		prim.vc_format = dev_priv->sarea_priv->vc_format;

		radeon_cp_dispatch_vertex( dev, buf, &prim,
					   dev_priv->sarea_priv->boxes,
					   dev_priv->sarea_priv->nbox );
	}

	if (vertex.discard) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}

/* DRM_IOCTL_RADEON_INDICES: validate and dispatch an indexed-primitive
 * buffer (validation and dispatch continue on the following lines).
 */
int radeon_cp_indices( struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indices_t elts;
	drm_radeon_tcl_prim_t prim;
	int count;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &elts, (drm_radeon_indices_t *)arg,
			     sizeof(elts) ) )
		return -EFAULT;

	DRM_DEBUG( "%s: pid=%d index=%d start=%d end=%d discard=%d\n",
		   __FUNCTION__, current->pid,
		   elts.idx, elts.start, elts.end, elts.discard );

	/* Validate the user-supplied buffer index and primitive type. */
	if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   elts.idx, dma->buf_count - 1 );
		return -EINVAL;
	}
	if ( elts.prim < 0 ||
	     elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
		DRM_ERROR( "buffer prim %d\n", elts.prim );
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[elts.idx];

	if ( buf->pid != current->pid ) {
		DRM_ERROR( "process %d using buffer owned by %d\n",
			   current->pid, buf->pid );
		return -EINVAL;
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", elts.idx );
		return -EINVAL;
	}

	/* NOTE(review): count is computed but not referenced anywhere in
	 * the visible code below.
	 */
	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= RADEON_INDEX_PRIM_OFFSET;

	/* The index data must start 8-byte aligned and leave room for
	 * the packet header already placed at the front of the buffer.
	 */
	if ( elts.start & 0x7 ) {
		DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
		return -EINVAL;
	}
	if ( elts.start < buf->used ) {
		DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
		return -EINVAL;
	}

	buf->used = elts.end;

	/* Emit any dirty state (except cliprects, handled at dispatch
	 * time), then drop the one-shot upload bits.
	 */
	if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
		radeon_emit_state( dev_priv,
				   &sarea_priv->context_state,
				   sarea_priv->tex_state,
				   sarea_priv->dirty );

		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
				       RADEON_UPLOAD_TEX1IMAGES |
				       RADEON_UPLOAD_TEX2IMAGES |
				       RADEON_REQUIRE_QUIESCENCE);
	}

	/* Build up a prim_t record:
	 */
	prim.start = elts.start;
	prim.finish = elts.end;
	prim.prim = elts.prim;
	prim.offset = 0;	/* offset from start of dma buffers */
	prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
	prim.vc_format = dev_priv->sarea_priv->vc_format;

	radeon_cp_dispatch_indices( dev, buf, &prim,
				   dev_priv->sarea_priv->boxes,
				   dev_priv->sarea_priv->nbox );
	if (elts.discard) {
		radeon_cp_discard_buffer( dev, buf );
	}

	COMMIT_RING();
	return 0;
}

/* DRM_IOCTL_RADEON_TEXTURE: copy the texture descriptor and its image
 * descriptor in from userspace, then hand off to
 * radeon_cp_dispatch_texture (whose return value — including -EAGAIN
 * for multipass uploads — is passed straight back to the caller).
 * NOTE(review): no "!dev_priv" guard here, unlike the vertex/indices/
 * indirect ioctls — confirm initialization is guaranteed by this point.
 */
int radeon_cp_texture( struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t tex;
	drm_radeon_tex_image_t image;
	int ret;

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &tex, (drm_radeon_texture_t *)arg, sizeof(tex) ) )
		return -EFAULT;

	if ( tex.image == NULL ) {
		DRM_ERROR( "null texture image!\n" );
		return -EINVAL;
	}

	if ( copy_from_user( &image,
			     (drm_radeon_tex_image_t *)tex.image,
			     sizeof(image) ) )
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	ret = radeon_cp_dispatch_texture(
 dev, &tex, &image );

	COMMIT_RING();
	return ret;
}

/* DRM_IOCTL_RADEON_STIPPLE: copy the 32-dword stipple mask in from
 * userspace and write it to the hardware stipple table.
 * NOTE(review): no "!dev_priv" guard here, unlike the vertex/indices/
 * indirect ioctls — confirm initialization is guaranteed by this point.
 */
int radeon_cp_stipple( struct inode *inode, struct file *filp,
		       unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &stipple, (drm_radeon_stipple_t *)arg,
			     sizeof(stipple) ) )
		return -EFAULT;

	if ( copy_from_user( &mask, stipple.mask, 32 * sizeof(u32) ) )
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	radeon_cp_dispatch_stipple( dev, mask );

	COMMIT_RING();
	return 0;
}

/* DRM_IOCTL_RADEON_INDIRECT: validate and submit a raw indirect buffer
 * (the remainder of this function continues beyond the visible text).
 */
int radeon_cp_indirect( struct inode *inode, struct file *filp,
			unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indirect_t indirect;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN( dev );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return -EINVAL;
	}

	if ( copy_from_user( &indirect, (drm_radeon_indirect_t *)arg,
			     sizeof(indirect) ) )
		return -EFAULT;

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	/* Validate the user-supplied buffer index before use. */
	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return -EINVAL;
	}

	buf = dma->buflist[indirect.idx];

	if ( buf->pid != current->pid ) {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -