📄 radeon_cp.c
static void radeon_do_cp_flush( drm_radeon_private_t *dev_priv )
{
#if 0
        u32 tmp;

        tmp = RADEON_READ( RADEON_CP_RB_WPTR ) | (1 << 31);
        RADEON_WRITE( RADEON_CP_RB_WPTR, tmp );
#endif
}

/* Wait for the CP to go idle.
 */
int radeon_do_cp_idle( drm_radeon_private_t *dev_priv )
{
        RING_LOCALS;

        BEGIN_RING( 6 );

        RADEON_PURGE_CACHE();
        RADEON_PURGE_ZCACHE();
        RADEON_WAIT_UNTIL_IDLE();

        ADVANCE_RING();

        return radeon_do_wait_for_idle( dev_priv );
}

/* Start the Command Processor.
 */
static void radeon_do_cp_start( drm_radeon_private_t *dev_priv )
{
        RING_LOCALS;

        radeon_do_wait_for_idle( dev_priv );

        RADEON_WRITE( RADEON_CP_CSQ_CNTL, dev_priv->cp_mode );

        dev_priv->cp_running = 1;

        BEGIN_RING( 6 );

        RADEON_PURGE_CACHE();
        RADEON_PURGE_ZCACHE();
        RADEON_WAIT_UNTIL_IDLE();

        ADVANCE_RING();
}

/* Reset the Command Processor.  This will not flush any pending
 * commands, so you must wait for the CP command stream to complete
 * before calling this routine.
 */
static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv )
{
        u32 cur_read_ptr;

        cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
        RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
        *dev_priv->ring.head = cur_read_ptr;
        dev_priv->ring.tail = cur_read_ptr;
}

/* Stop the Command Processor.  This will not flush any pending
 * commands, so you must flush the command stream and wait for the CP
 * to go idle before calling this routine.
 */
static void radeon_do_cp_stop( drm_radeon_private_t *dev_priv )
{
        RADEON_WRITE( RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS );

        dev_priv->cp_running = 0;
}

/* Reset the engine.  This will stop the CP if it is running.
 */
static int radeon_do_engine_reset( drm_device_t *dev )
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
        DRM_DEBUG( "%s\n", __FUNCTION__ );

        radeon_do_pixcache_flush( dev_priv );

        clock_cntl_index = RADEON_READ( RADEON_CLOCK_CNTL_INDEX );
        mclk_cntl = RADEON_READ_PLL( dev, RADEON_MCLK_CNTL );

        /* FIXME: remove magic number here and in radeon ddx driver!!! */
        RADEON_WRITE_PLL( RADEON_MCLK_CNTL, mclk_cntl | 0x003f00000 );

        rbbm_soft_reset = RADEON_READ( RADEON_RBBM_SOFT_RESET );
        RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset |
                                                RADEON_SOFT_RESET_CP |
                                                RADEON_SOFT_RESET_HI |
                                                RADEON_SOFT_RESET_SE |
                                                RADEON_SOFT_RESET_RE |
                                                RADEON_SOFT_RESET_PP |
                                                RADEON_SOFT_RESET_E2 |
                                                RADEON_SOFT_RESET_RB |
                                                RADEON_SOFT_RESET_HDP ) );
        RADEON_READ( RADEON_RBBM_SOFT_RESET );
        RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset &
                                                ~( RADEON_SOFT_RESET_CP |
                                                   RADEON_SOFT_RESET_HI |
                                                   RADEON_SOFT_RESET_SE |
                                                   RADEON_SOFT_RESET_RE |
                                                   RADEON_SOFT_RESET_PP |
                                                   RADEON_SOFT_RESET_E2 |
                                                   RADEON_SOFT_RESET_RB |
                                                   RADEON_SOFT_RESET_HDP ) ) );
        RADEON_READ( RADEON_RBBM_SOFT_RESET );

        RADEON_WRITE_PLL( RADEON_MCLK_CNTL, mclk_cntl );
        RADEON_WRITE( RADEON_CLOCK_CNTL_INDEX, clock_cntl_index );
        RADEON_WRITE( RADEON_RBBM_SOFT_RESET, rbbm_soft_reset );

        /* Reset the CP ring */
        radeon_do_cp_reset( dev_priv );

        /* The CP is no longer running after an engine reset */
        dev_priv->cp_running = 0;

        /* Reset any pending vertex, indirect buffers */
        radeon_freelist_reset( dev );

        return 0;
}
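/* The CP ring is managed with a classic head/tail scheme: the chip advances
 * the read index (the memory that RADEON_CP_RB_RPTR_ADDR points it at is the
 * same memory *dev_priv->ring.head maps), while the driver advances
 * ring.tail as it emits packets, so radeon_do_cp_reset() above simply brings
 * the two back in sync.  The block below is an illustrative sketch only --
 * radeon_ring_space_sketch() is not a helper in this driver -- showing how
 * free space falls out of the two indices for a power-of-two ring.
 */
#if 0
static int radeon_ring_space_sketch( drm_radeon_private_t *dev_priv )
{
        u32 head = *dev_priv->ring.head;        /* read index, advanced by the CP */
        u32 tail = dev_priv->ring.tail;         /* write index, advanced by the driver */
        u32 mask = dev_priv->ring.tail_mask;    /* ring entries - 1 */

        /* Keep one entry unused so a full ring is distinguishable from an
         * empty one (head == tail means empty here).
         */
        return (head - tail - 1) & mask;
}
#endif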
static void radeon_cp_init_ring_buffer( drm_device_t *dev )
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        u32 ring_start, cur_read_ptr;
        u32 tmp;

        /* Initialize the memory controller */
        RADEON_WRITE( RADEON_MC_FB_LOCATION,
                      (dev_priv->agp_vm_start - 1) & 0xffff0000 );
        RADEON_WRITE( RADEON_MC_AGP_LOCATION,
                      (((dev_priv->agp_vm_start - 1 +
                         dev_priv->agp_size) & 0xffff0000) |
                       (dev_priv->agp_vm_start >> 16)) );

        ring_start = (dev_priv->cp_ring->offset
                      - dev->agp->base
                      + dev_priv->agp_vm_start);

        RADEON_WRITE( RADEON_CP_RB_BASE, ring_start );

        /* Set the write pointer delay */
        RADEON_WRITE( RADEON_CP_RB_WPTR_DELAY, 0 );

        /* Initialize the ring buffer's read and write pointers */
        cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
        RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
        *dev_priv->ring.head = cur_read_ptr;
        dev_priv->ring.tail = cur_read_ptr;

        RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset );

        /* Set ring buffer size */
        RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw );

        radeon_do_wait_for_idle( dev_priv );

        /* Turn off PCI GART */
        tmp = RADEON_READ( RADEON_AIC_CNTL ) & ~RADEON_PCIGART_TRANSLATE_EN;
        RADEON_WRITE( RADEON_AIC_CNTL, tmp );

        /* Turn on bus mastering */
        tmp = RADEON_READ( RADEON_BUS_CNTL ) & ~RADEON_BUS_MASTER_DIS;
        RADEON_WRITE( RADEON_BUS_CNTL, tmp );

        /* Sync everything up */
        RADEON_WRITE( RADEON_ISYNC_CNTL,
                      (RADEON_ISYNC_ANY2D_IDLE3D |
                       RADEON_ISYNC_ANY3D_IDLE2D |
                       RADEON_ISYNC_WAIT_IDLEGUI |
                       RADEON_ISYNC_CPSCRATCH_IDLEGUI) );
}
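/* The two MC_*_LOCATION writes above pack the 16-bit "top" and "bottom"
 * halves of an address range into one register.  Purely as a worked example
 * (the real values depend on the card's aperture), with a 64 MB aperture
 * reported in CONFIG_APER_SIZE (agp_vm_start = 0x04000000) and
 * agp_size = 0x02000000:
 *
 *   MC_FB_LOCATION  = (0x04000000 - 1) & 0xffff0000
 *                   = 0x03ff0000        (framebuffer below the AGP window)
 *   MC_AGP_LOCATION = ((0x04000000 - 1 + 0x02000000) & 0xffff0000)
 *                       | (0x04000000 >> 16)
 *                   = 0x05ff0000 | 0x0400 = 0x05ff0400
 *
 * i.e. the AGP window spans 0x04000000..0x05ffffff in the card's address
 * space, which is why ring_start rebases cp_ring->offset from the CPU-side
 * AGP base onto agp_vm_start.
 */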
static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
{
        drm_radeon_private_t *dev_priv;
        int i;

        dev_priv = drm_alloc( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );
        if ( dev_priv == NULL )
                return -ENOMEM;
        dev->dev_private = (void *)dev_priv;

        memset( dev_priv, 0, sizeof(drm_radeon_private_t) );

        dev_priv->is_pci = init->is_pci;

        /* We don't support PCI cards until PCI GART is implemented.
         * Fail here so we can remove all checks for PCI cards around
         * the CP ring code.
         */
        if ( dev_priv->is_pci ) {
                drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
                dev->dev_private = NULL;
                return -EINVAL;
        }

        dev_priv->usec_timeout = init->usec_timeout;
        if ( dev_priv->usec_timeout < 1 ||
             dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT ) {
                drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
                dev->dev_private = NULL;
                return -EINVAL;
        }

        dev_priv->cp_mode = init->cp_mode;

        /* Simple idle check.
         */
        atomic_set( &dev_priv->idle_count, 0 );

        /* We don't support anything other than bus-mastering ring mode,
         * but the ring can be in either AGP or PCI space for the ring
         * read pointer.
         */
        if ( ( init->cp_mode != RADEON_CSQ_PRIBM_INDDIS ) &&
             ( init->cp_mode != RADEON_CSQ_PRIBM_INDBM ) ) {
                drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
                dev->dev_private = NULL;
                return -EINVAL;
        }

        switch ( init->fb_bpp ) {
        case 16:
                dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
                break;
        case 32:
        default:
                dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
                break;
        }
        dev_priv->front_offset = init->front_offset;
        dev_priv->front_pitch = init->front_pitch;
        dev_priv->back_offset = init->back_offset;
        dev_priv->back_pitch = init->back_pitch;

        switch ( init->depth_bpp ) {
        case 16:
                dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
                break;
        case 32:
        default:
                dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
                break;
        }
        dev_priv->depth_offset = init->depth_offset;
        dev_priv->depth_pitch = init->depth_pitch;

        dev_priv->front_pitch_offset = (((dev_priv->front_pitch/64) << 22) |
                                        (dev_priv->front_offset >> 10));
        dev_priv->back_pitch_offset = (((dev_priv->back_pitch/64) << 22) |
                                       (dev_priv->back_offset >> 10));
        dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch/64) << 22) |
                                        (dev_priv->depth_offset >> 10));
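        /* Each *_pitch_offset above packs the surface pitch (in 64-byte
         * units, shifted up to bit 22) with the surface offset (in 1 KB
         * units, in the low bits), so offsets must be 1 KB aligned and
         * pitches a multiple of 64 bytes.  As a hypothetical example, a
         * 1024-pixel-wide, 16 bpp back buffer at offset 0x00200000 with a
         * 2048-byte pitch packs to:
         *
         *   ((2048 / 64) << 22) | (0x00200000 >> 10)
         *       = 0x08000000 | 0x800 = 0x08000800
         */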
        /* Hardware state for depth clears.  Remove this if/when we no
         * longer clear the depth buffer with a 3D rectangle.  Hard-code
         * all values to prevent unwanted 3D state from slipping through
         * and screwing with the clear operation.
         */
        dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
                                           RADEON_Z_ENABLE |
                                           (dev_priv->color_fmt << 10) |
                                           RADEON_ZBLOCK16);

        dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt |
                                                   RADEON_Z_TEST_ALWAYS |
                                                   RADEON_STENCIL_TEST_ALWAYS |
                                                   RADEON_STENCIL_S_FAIL_KEEP |
                                                   RADEON_STENCIL_ZPASS_KEEP |
                                                   RADEON_STENCIL_ZFAIL_KEEP |
                                                   RADEON_Z_WRITE_ENABLE);

        dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
                                         RADEON_BFACE_SOLID |
                                         RADEON_FFACE_SOLID |
                                         RADEON_FLAT_SHADE_VTX_LAST |
                                         RADEON_DIFFUSE_SHADE_FLAT |
                                         RADEON_ALPHA_SHADE_FLAT |
                                         RADEON_SPECULAR_SHADE_FLAT |
                                         RADEON_FOG_SHADE_FLAT |
                                         RADEON_VTX_PIX_CENTER_OGL |
                                         RADEON_ROUND_MODE_TRUNC |
                                         RADEON_ROUND_PREC_8TH_PIX);

        /* FIXME: We want multiple shared areas, including one shared
         * only by the X Server and kernel module.
         */
        for ( i = 0 ; i < dev->map_count ; i++ ) {
                if ( dev->maplist[i]->type == _DRM_SHM ) {
                        dev_priv->sarea = dev->maplist[i];
                        break;
                }
        }

        DO_FIND_MAP( dev_priv->fb, init->fb_offset );
        DO_FIND_MAP( dev_priv->mmio, init->mmio_offset );
        DO_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
        DO_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
        DO_FIND_MAP( dev_priv->buffers, init->buffers_offset );

        if ( !dev_priv->is_pci ) {
                DO_FIND_MAP( dev_priv->agp_textures,
                             init->agp_textures_offset );
        }

        dev_priv->sarea_priv =
                (drm_radeon_sarea_t *)((u8 *)dev_priv->sarea->handle +
                                       init->sarea_priv_offset);

        DO_IOREMAP( dev_priv->cp_ring );
        DO_IOREMAP( dev_priv->ring_rptr );
        DO_IOREMAP( dev_priv->buffers );
#if 0
        if ( !dev_priv->is_pci ) {
                DO_IOREMAP( dev_priv->agp_textures );
        }
#endif

        dev_priv->agp_size = init->agp_size;
        dev_priv->agp_vm_start = RADEON_READ( RADEON_CONFIG_APER_SIZE );
        dev_priv->agp_buffers_offset = (dev_priv->buffers->offset
                                        - dev->agp->base
                                        + dev_priv->agp_vm_start);

        dev_priv->ring.head = ((__volatile__ u32 *)
                               dev_priv->ring_rptr->handle);

        dev_priv->ring.start = (u32 *)dev_priv->cp_ring->handle;
        dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
        dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );

        dev_priv->ring.tail_mask =
                (dev_priv->ring.size / sizeof(u32)) - 1;

#if 0
        /* Initialize the scratch register pointer.  This will cause
         * the scratch register values to be written out to memory
         * whenever they are updated.
         * FIXME: This doesn't quite work yet, so we're disabling it
         * for the release.
         */
        RADEON_WRITE( RADEON_SCRATCH_ADDR, (dev_priv->ring_rptr->offset +
                                            RADEON_SCRATCH_REG_OFFSET) );
        RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 );
#endif

        dev_priv->scratch = ((__volatile__ u32 *)
                             dev_priv->ring_rptr->handle +
                             (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));

        dev_priv->sarea_priv->last_frame = 0;
        RADEON_WRITE( RADEON_LAST_FRAME_REG,
                      dev_priv->sarea_priv->last_frame );

        dev_priv->sarea_priv->last_dispatch = 0;
        RADEON_WRITE( RADEON_LAST_DISPATCH_REG,
                      dev_priv->sarea_priv->last_dispatch );

        dev_priv->sarea_priv->last_clear = 0;
        RADEON_WRITE( RADEON_LAST_CLEAR_REG,
                      dev_priv->sarea_priv->last_clear );

        radeon_cp_load_microcode( dev_priv );
        radeon_cp_init_ring_buffer( dev );
        radeon_do_engine_reset( dev );

#if ROTATE_BUFS
        dev_priv->last_buf = 0;
#endif

        return 0;
}

static int radeon_do_cleanup_cp( drm_device_t *dev )
{
        if ( dev->dev_private ) {
                drm_radeon_private_t *dev_priv = dev->dev_private;

                DO_IOREMAPFREE( dev_priv->cp_ring );
                DO_IOREMAPFREE( dev_priv->ring_rptr );
                DO_IOREMAPFREE( dev_priv->buffers );
#if 0
                if ( !dev_priv->is_pci ) {
                        DO_IOREMAPFREE( dev_priv->agp_textures );
                }
#endif

                drm_free( dev->dev_private, sizeof(drm_radeon_private_t),
                          DRM_MEM_DRIVER );
                dev->dev_private = NULL;
        }

        return 0;
}

int radeon_cp_init( struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_radeon_init_t init;

        if ( copy_from_user( &init, (drm_radeon_init_t *)arg,
                             sizeof(init) ) )
                return -EFAULT;

        switch ( init.func ) {
        case RADEON_INIT_CP:
                return radeon_do_init_cp( dev, &init );
        case RADEON_CLEANUP_CP:
                return radeon_do_cleanup_cp( dev );
        }

        return -EINVAL;
}
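/* radeon_cp_init() is reached from user space through the DRM ioctl
 * interface.  The block below is an illustrative user-space-side sketch
 * only, not code that belongs in (or is compiled into) this file: it
 * assumes the DRM_IOCTL_RADEON_CP_INIT request code and drm_radeon_init_t
 * layout from radeon_drm.h, the usual user-space headers (string.h,
 * sys/ioctl.h), an already-open DRM file descriptor fd, and placeholder
 * values where the X server would normally supply real map offsets.
 * radeon_cp_init_example() is a hypothetical name.
 */
#if 0
static int radeon_cp_init_example( int fd )
{
        drm_radeon_init_t init;

        memset( &init, 0, sizeof(init) );
        init.func         = RADEON_INIT_CP;            /* dispatch to radeon_do_init_cp() */
        init.is_pci       = 0;                         /* PCI cards are rejected above */
        init.cp_mode      = RADEON_CSQ_PRIBM_INDBM;    /* bus-master ring + indirect buffers */
        init.usec_timeout = 1024;                      /* 1..RADEON_MAX_USEC_TIMEOUT */
        init.fb_bpp       = 32;
        init.depth_bpp    = 16;
        /* ... ring_size, sarea_priv_offset, buffer offsets/pitches and the
         * fb/mmio/ring/ring_rptr/buffers map offsets would go here ...
         */

        return ioctl( fd, DRM_IOCTL_RADEON_CP_INIT, &init );
}
#endif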
int radeon_cp_start( struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG( "%s\n", __FUNCTION__ );

        if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
             dev->lock.pid != current->pid ) {
                DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );