/*
 * radeon_ioctl.c — fragment recovered from a web scrape
 * (original page header: "radeon_ioctl.c", font-size control removed).
 */
/* --------------------------------------------------------------------
 * NOTE(review): this fragment begins in the middle of a function whose
 * signature lies before the visible region (it appears to be the AOS /
 * vertex-array emit path that builds a 3D_LOAD_VBPNTR packet — confirm
 * against the full file).  Code tokens below are kept exactly as found;
 * only formatting and comments were restored after scraping.
 * -------------------------------------------------------------------- */
   int sz = AOS_BUFSZ(nr);      /* packet size in bytes for 'nr' arrays */
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Reserve command-buffer space and build the packet header:
    * PACKET3 3D_LOAD_VBPNTR; the count field is dwords-after-header. */
   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz, __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;             /* remember packet start for debug dump below */
   cmd += 3;

   /* Arrays are packed two per 3-dword group: one control dword carries
    * the stride/size fields of a pair, followed by one address dword per
    * array.  Even indices start a group, odd indices complete it. */
   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
	 /* odd array: OR its stride/size into the shared control dword */
	 cmd[0].i |= ((component[i]->aos_stride << 24) |
		      (component[i]->aos_size << 16));
	 cmd[2].i = (component[i]->aos_start +
		     offset * component[i]->aos_stride * 4);
	 cmd += 3;
      }
      else {
	 /* even array: begin a new control dword */
	 cmd[0].i = ((component[i]->aos_stride << 8) |
		     (component[i]->aos_size << 0));
	 cmd[1].i = (component[i]->aos_start +
		     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      /* NOTE(review): 'sz' is a byte count (AOS_BUFSZ) but is used here
       * to index an int array, so this debug dump looks like it reads
       * ~4x past the packet — verify upstream; it only runs under
       * DEBUG_VERTS. */
      for (i = 0 ; i < sz ; i++)
	 fprintf(stderr, " %d: %x\n", i, tmp[i]);
   }
#endif
}

/* Emit a 2D blit via a CNTL_BITBLT_MULTI PACKET3.
 *
 * Uses an already-shifted color_fmt!  Pitches must be 64-byte aligned,
 * offsets 1024-byte aligned, and width/height must fit in 16 bits
 * (enforced by the asserts below).
 */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
		     GLuint color_fmt,
		     GLuint src_pitch,
		     GLuint src_offset,
		     GLuint dst_pitch,
		     GLuint dst_offset,
		     GLint srcx, GLint srcy,
		     GLint dstx, GLint dsty,
		     GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
	      __FUNCTION__,
	      src_pitch, src_offset, srcx, srcy,
	      dst_pitch, dst_offset, dstx, dsty,
	      w, h);

   /* Hardware alignment/range requirements for the blit registers. */
   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   /* 8 dwords: cmd header + BITBLT_MULTI header + 6 payload dwords. */
   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
						       __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
	       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
	       RADEON_GMC_BRUSH_NONE |
	       color_fmt |
	       RADEON_GMC_SRC_DATATYPE_COLOR |
	       RADEON_ROP3_S |
	       RADEON_DP_SRC_SOURCE_MEMORY |
	       RADEON_GMC_CLR_CMP_CNTL_DIS |
	       RADEON_GMC_WR_MSK_DIS );

   /* Pitch/offset are packed: pitch in units of 64 bytes (bits 22+),
    * offset in units of 1024 bytes (low bits). */
   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}

/* Queue a wait-for-engine-idle command (2D and/or 3D engine). */
void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   drm_radeon_cmd_header_t *cmd;

   assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
						       __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
   cmd[0].wait.flags = flags;
}

/* Submit the accumulated command buffer to the kernel via the
 * DRM_RADEON_CMDBUF ioctl and reset the store counters.
 *
 * Caller must already hold the hardware lock.  Clip rects come from the
 * software scissor when enabled, otherwise from the drawable.  Returns
 * the drmCommandWrite() result (0 on success); counters are reset even
 * on failure.
 */
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa, const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   /* Re-emit any state lost to a context switch before our commands. */
   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
	 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
	    fprintf(stderr, "%d: %x\n", i/4,
		    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
	      rmesa->dma.nr_released_bufs);

   /* Optional client-side validation pass over the buffer contents. */
   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
	 ret = radeonSanityCmdBuffer( rmesa,
				      rmesa->state.scissor.numClipRects,
				      rmesa->state.scissor.pClipRects);
      else
	 ret = radeonSanityCmdBuffer( rmesa,
				      rmesa->numClipRects,
				      rmesa->pClipRects);
      if (ret) {
	 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret);
	 goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   }
   else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CMDBUF, &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   /* Reset the store and DMA bookkeeping regardless of submit result. */
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}

/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 *
 * Locking wrapper around radeonFlushCmdBufLocked(); a submit failure is
 * fatal (the process exits with the drm error code).
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );
   ret = radeonFlushCmdBufLocked( rmesa, caller );
   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */

/* Replace rmesa->dma.current with a freshly granted kernel DMA buffer.
 *
 * Flushes any pending vertex emit, releases the old current buffer, and
 * requests one RADEON_BUFFER_SIZE buffer via drmDMA().  If the kernel
 * has none free, pending discards are flushed and the engine drained
 * before retrying once; a second failure is fatal.
 */
void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   /* Don't let too many DISCARD commands pile up unsubmitted. */
   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);	/* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way? */
      if (rmesa->dma.nr_released_bufs) {
	 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
	 fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
	 exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

/* Drop one reference on a DMA region.  When the underlying buffer's
 * refcount hits zero, queue a DMA_DISCARD command so the kernel can
 * reclaim it, free the wrapper struct, and count it as released.
 */
void radeonReleaseDmaRegion( radeonContextPtr rmesa,
			     struct radeon_dma_region *region,
			     const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
	 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
		 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
							  __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of
 * current).
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
			   struct radeon_dma_region *region,
			   int bytes,
			   int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   /* Round the current pointer up to the requested alignment
    * (alignment must be a power of two for this mask trick). */
   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   /* NOTE(review): the trailing realignment below is fixed at 8 bytes,
    * so a caller-requested alignment > 8 is not preserved for the NEXT
    * allocation — matches the "bug" comment above; confirm upstream. */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

/* Query the kernel for the last frame number the hardware completed
 * (RADEON_PARAM_LAST_FRAME getparam).  Fatal on ioctl failure.
 */
static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   drm_radeon_getparam_t gp;
   int ret;
   u_int32_t frame;

   gp.param = RADEON_PARAM_LAST_FRAME;
   gp.value = (int *)&frame;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
			      &gp, sizeof(gp) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

/* Ask the kernel to emit an IRQ; the returned sequence number is stored
 * in rmesa->iw.irq_seq for a later DRM_RADEON_IRQ_WAIT.  Fatal on
 * ioctl failure.
 */
static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
			      &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}

/* Block until the previously emitted IRQ fires, retrying on EINTR and
 * EBUSY.  Fatal on any other ioctl failure.
 */
static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
			     &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EBUSY));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}

/* Throttle SwapBuffers so the CPU doesn't run too far ahead of the GPU:
 * waits (by IRQ when available, else by polling the last-frame counter)
 * until the hardware catches up to sarea->last_frame.
 *
 * NOTE(review): this function is truncated by the scrape — its body is
 * cut off mid-loop below; the remainder lies outside the visible
 * fragment.
 */
static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
	 if (!rmesa->irqsEmitted) {
	    /* No IRQ outstanding to wait on: spin on the frame counter. */
	    while (radeonGetLastFrame (rmesa) < sarea->last_frame)
	       ;
	 }
	 else {
	    /* Sleep on the IRQ; drop the hardware lock while blocked. */
	    UNLOCK_HARDWARE( rmesa );
	    radeonWaitIrq( rmesa );
	    LOCK_HARDWARE( rmesa );
	 }
	 rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
	 radeonEmitIrqLocked( rmesa );
	 rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
	 UNLOCK_HARDWARE( rmesa );
/*
 * End of scraped fragment.  The source page's trailing UI text
 * (keyboard-shortcut help: copy Ctrl+C, search Ctrl+F, fullscreen F11,
 * theme Ctrl+Shift+D, show shortcuts ?, font size Ctrl+= / Ctrl+-)
 * was page chrome, not part of the source file.
 */