
radeon_state.c

Category: kernel · Language: C · Page 1 of 5
	unsigned int vc_format;
} drm_radeon_tcl_prim_t;

static void radeon_cp_dispatch_vertex( drm_device_t *dev,
				       drm_buf_t *buf,
				       drm_radeon_tcl_prim_t *prim )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start,
		  prim->finish,
		  prim->numverts);

	if (bad_prim_vertex_nr( prim->prim, prim->numverts )) {
		DRM_ERROR( "bad prim %x numverts %d\n",
			   prim->prim, prim->numverts );
		return;
	}

	do {
		/* Emit the next cliprect */
		if ( i < nbox ) {
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );
		}

		/* Emit the vertex buffer rendering commands */
		BEGIN_RING( 5 );

		OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
		OUT_RING( offset );
		OUT_RING( numverts );
		OUT_RING( prim->vc_format );
		OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
			  RADEON_COLOR_ORDER_RGBA |
			  RADEON_VTX_FMT_RADEON_MODE |
			  (numverts << RADEON_NUM_VERTICES_SHIFT) );

		ADVANCE_RING();

		i++;
	} while ( i < nbox );
}

static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	BEGIN_RING( 2 );
	RADEON_DISPATCH_AGE( buf_priv->age );
	ADVANCE_RING();

	buf->pending = 1;
	buf->used = 0;
}

static void radeon_cp_dispatch_indirect( drm_device_t *dev,
					 drm_buf_t *buf,
					 int start, int end )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
		   buf->idx, start, end );

	if ( start != end ) {
		int offset = (dev_priv->gart_buffers_offset
			      + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if ( dwords & 1 ) {
			u32 *data = (u32 *)
				((char *)dev->agp_buffer_map->handle
				 + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}

		/* Fire off the indirect buffer */
		BEGIN_RING( 3 );

		OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
		OUT_RING( offset );
		OUT_RING( dwords );

		ADVANCE_RING();
	}
}
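The even-dword rule in radeon_cp_dispatch_indirect() above is easy to get wrong, so here is a minimal host-side sketch of the same round-up-then-pad logic, runnable outside the kernel. RADEON_CP_PACKET2 is assumed here to be the Type-2 no-op packet header (0x80000000, with the CP packet type in bits 31:30); in the driver it comes from the shared Radeon headers.

#include <stdint.h>
#include <stdio.h>

#define RADEON_CP_PACKET2 0x80000000u	/* assumed Type-2 no-op header */

/* Round a byte range up to dwords, then pad to an even dword count
 * by appending one Type-2 packet, exactly as the dispatcher does.
 * Returns the number of dwords that would be submitted to the ring.
 */
static int pad_indirect_buffer(uint32_t *data, int start, int end)
{
        int dwords = (end - start + 3) / (int)sizeof(uint32_t);

        if (dwords & 1)
                data[dwords++] = RADEON_CP_PACKET2;
        return dwords;
}

int main(void)
{
        uint32_t buf[8] = { 0 };

        /* 12 bytes -> 3 dwords -> padded to 4. */
        printf("%d\n", pad_indirect_buffer(buf, 0, 12));
        return 0;
}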
static void radeon_cp_dispatch_indices( drm_device_t *dev,
					drm_buf_t *elt_buf,
					drm_radeon_tcl_prim_t *prim )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
		  prim->prim,
		  prim->vc_format,
		  prim->start,
		  prim->finish,
		  prim->offset,
		  prim->numverts);

	if (bad_prim_vertex_nr( prim->prim, count )) {
		DRM_ERROR( "bad prim %x count %d\n",
			   prim->prim, count );
		return;
	}

	if ( start >= prim->finish ||
	     (prim->start & 0x7) ) {
		DRM_ERROR( "buffer prim %d\n", prim->prim );
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	data = (u32 *)((char *)dev->agp_buffer_map->handle +
		       elt_buf->offset + prim->start);

	data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT) );

	do {
		if ( i < nbox )
			radeon_emit_clip_rect( dev_priv,
					       &sarea_priv->boxes[i] );

		radeon_cp_dispatch_indirect( dev, elt_buf,
					     prim->start,
					     prim->finish );

		i++;
	} while ( i < nbox );
}

#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
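radeon_cp_dispatch_indices() builds the GEN_INDX_PRIM packet in place in the element buffer: five setup dwords followed by packed 16-bit indices. Below is a standalone sketch of its range checks and index-count arithmetic, assuming RADEON_INDEX_PRIM_OFFSET is 20 bytes (the five header dwords written into data[0..4]); that value is an assumption taken from the setup above, not quoted from the header.

#include <stdint.h>
#include <stdio.h>

#define RADEON_INDEX_PRIM_OFFSET 20	/* assumed: five header dwords */

/* Returns the number of 16-bit indices in [start, finish), or -1 if
 * the range fails the same checks the dispatcher makes: the index
 * data must begin before the end of the range, and the buffer start
 * must be 8-byte aligned.
 */
static int index_count(int start, int finish)
{
        int first = start + RADEON_INDEX_PRIM_OFFSET;

        if (first >= finish || (start & 0x7))
                return -1;
        return (finish - first) / (int)sizeof(uint16_t);
}

int main(void)
{
        /* 64-byte range: 20-byte header + 44 bytes = 22 indices. */
        printf("%d\n", index_count(0, 64));
        return 0;
}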
static int radeon_cp_dispatch_texture( DRMFILE filp,
				       drm_device_t *dev,
				       drm_radeon_texture_t *tex,
				       drm_radeon_tex_image_t *image )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_buf_t *buf;
	u32 format;
	u32 *buffer;
	const u8 __user *data;
	int size, dwords, tex_width, blit_width;
	u32 height;
	int i;
	RING_LOCALS;

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );

	if ( radeon_check_and_fixup_offset( dev_priv, filp_priv, &tex->offset ) ) {
		DRM_ERROR( "Invalid destination offset\n" );
		return DRM_ERR( EINVAL );
	}

	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

#ifdef __BIG_ENDIAN
	/* The Mesa texture functions provide the data in little endian as the
	 * chip wants it, but we need to compensate for the fact that the CP
	 * ring gets byte-swapped
	 */
	BEGIN_RING( 2 );
	OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
	ADVANCE_RING();
#endif

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch ( tex->format ) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR( "invalid texture format %d\n", tex->format );
		return DRM_ERR(EINVAL);
	}

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );

	do {
		DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			   tex->offset >> 10, tex->pitch, tex->format,
			   image->x, image->y, image->width, image->height );

		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 __user *)image->data;

		size = height * blit_width;

		if ( size > RADEON_MAX_TEXTURE_SIZE ) {
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if ( size < 4 && size > 0 ) {
			size = 4;
		} else if ( size == 0 ) {
			return 0;
		}

		buf = radeon_freelist_get( dev );
		if ( 0 && !buf ) {
			radeon_do_cp_idle( dev_priv );
			buf = radeon_freelist_get( dev );
		}
		if ( !buf ) {
			DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
			if (DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ))
				return DRM_ERR(EFAULT);
			return DRM_ERR(EAGAIN);
		}

		/* Dispatch the indirect buffer.
		 */
		buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
		dwords = size / 4;
		buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
		buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			     RADEON_GMC_BRUSH_NONE |
			     (format << 8) |
			     RADEON_GMC_SRC_DATATYPE_COLOR |
			     RADEON_ROP3_S |
			     RADEON_DP_SRC_SOURCE_HOST_DATA |
			     RADEON_GMC_CLR_CMP_CNTL_DIS |
			     RADEON_GMC_WR_MSK_DIS);

		buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
		buffer[3] = 0xffffffff;
		buffer[4] = 0xffffffff;
		buffer[5] = (image->y << 16) | image->x;
		buffer[6] = (height << 16) | image->width;
		buffer[7] = dwords;
		buffer += 8;

		if ( tex_width >= 32 ) {
			/* Texture image width is larger than the minimum, so we
			 * can upload it directly.
			 */
			if ( DRM_COPY_FROM_USER( buffer, data,
						 dwords * sizeof(u32) ) ) {
				DRM_ERROR( "EFAULT on data, %d dwords\n",
					   dwords );
				return DRM_ERR(EFAULT);
			}
		} else {
			/* Texture image width is less than the minimum, so we
			 * need to pad out each image scanline to the minimum
			 * width.
			 */
			for ( i = 0 ; i < tex->height ; i++ ) {
				if ( DRM_COPY_FROM_USER( buffer, data,
							 tex_width ) ) {
					DRM_ERROR( "EFAULT on pad, %d bytes\n",
						   tex_width );
					return DRM_ERR(EFAULT);
				}
				buffer += 8;
				data += tex_width;
			}
		}

		buf->filp = filp;
		buf->used = (dwords + 8) * sizeof(u32);
		radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
		radeon_cp_discard_buffer( dev, buf );

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (const u8 __user *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();

	return 0;
}
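The do/while loop above splits an upload larger than one DMA buffer into passes of whole scanlines, advancing image->y, image->height, and the data pointer each time. Here is a sketch of just that chunking arithmetic; RADEON_BUFFER_SIZE is assumed to be 64 KiB purely for illustration, since the real limit depends on how the driver's DMA buffers are configured.

#include <stdio.h>

#define RADEON_BUFFER_SIZE 65536	/* assumed DMA buffer size */
#define RADEON_MAX_TEXTURE_SIZE \
        (RADEON_BUFFER_SIZE - 8 * (int)sizeof(unsigned int))

int main(void)
{
        int blit_width = 1024 * 4;	/* a 1024-texel-wide ARGB8888 image */
        int height = 64, y = 0, pass = 0;

        while (height > 0) {
                int h = height;

                /* Clamp each pass to whole scanlines that fit in one
                 * buffer, as the driver does before building the blit.
                 */
                if ((long)h * blit_width > (long)RADEON_MAX_TEXTURE_SIZE)
                        h = RADEON_MAX_TEXTURE_SIZE / blit_width;
                printf("pass %d: y=%d rows=%d\n", pass++, y, h);
                y += h;
                height -= h;
        }
        return 0;	/* five passes: 15+15+15+15+4 rows */
}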
static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	BEGIN_RING( 35 );

	OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
	OUT_RING( 0x00000000 );

	OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}

static void radeon_apply_surface_regs(int surf_index, drm_radeon_private_t *dev_priv)
{
	if (!dev_priv->mmio)
		return;

	radeon_do_cp_idle(dev_priv);

	RADEON_WRITE(RADEON_SURFACE0_INFO + 16*surf_index,
		dev_priv->surfaces[surf_index].flags);
	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*surf_index,
		dev_priv->surfaces[surf_index].lower);
	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*surf_index,
		dev_priv->surfaces[surf_index].upper);
}

/* Allocates a virtual surface.
 * Doesn't always allocate a real surface; will stretch an existing
 * surface when possible.
 *
 * Note that refcount can be at most 2, since during a free refcount=3
 * might mean we have to allocate a new surface which might not always
 * be available.
 * For example: we allocate three contiguous surfaces ABC. If B is
 * freed, we suddenly need two surfaces to store A and C, which might
 * not always be available.
 */
static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *dev_priv, DRMFILE filp)
{
	struct radeon_virt_surface *s;
	int i;
	int virt_surface_index;
	uint32_t new_upper, new_lower;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	/* sanity check */
	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != RADEON_SURF_ADDRESS_FIXED_MASK) ||
	    ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
		return -1;

	/* make sure there is no overlap with existing surfaces */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {
			return -1;
		}
	}

	/* find a virtual surface */
	for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
		if (dev_priv->virt_surfaces[i].filp == 0)
			break;
	if (i == 2*RADEON_MAX_SURFACES) {
		return -1;
	}
	virt_surface_index = i;
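The listing breaks off inside alloc_surface() here; the remainder continues on the next page. Its overlap test against live surfaces can be exercised on its own. The sketch below mirrors the driver's comparison verbatim (inclusive lower bound, upper computed as address + size - 1), with ranges chosen to model the A/C-after-freeing-B scenario from the comment above.

#include <stdint.h>
#include <stdio.h>

/* Two address ranges conflict if the new range starts inside an
 * existing one, or starts below it but extends past its lower bound,
 * exactly as alloc_surface() tests.
 */
static int ranges_overlap(uint32_t new_lower, uint32_t new_upper,
                          uint32_t lower, uint32_t upper)
{
        return (new_lower >= lower && new_lower < upper) ||
               (new_lower < lower && new_upper > lower);
}

int main(void)
{
        /* A new surface placed just after an existing one: no overlap. */
        printf("%d\n", ranges_overlap(0x10000, 0x1ffff, 0x00000, 0x0ffff));
        /* A new surface straddling the existing one's upper half: overlap. */
        printf("%d\n", ranges_overlap(0x08000, 0x17fff, 0x00000, 0x0ffff));
        return 0;
}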
