dri_bufmgr_fake.c
      } else {
         DBG("return to lru: offset %x sz %x\n",
             block->mem->ofs, block->mem->size);
         move_to_tail(&bufmgr_fake->lru, block);
      }

      ret = 1;
   } else {
      /* Blocks are ordered by fence, so if one fails, all from
       * here will fail also:
       */
      DBG("fence not passed: offset %x sz %x %d %d \n",
          block->mem->ofs, block->mem->size, block->fence,
          bufmgr_fake->last_fence);
      break;
   }
   }

   DBG("%s: %d\n", __FUNCTION__, ret);
   return ret;
}

static void
fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
{
   struct block *block, *tmp;

   foreach_s (block, tmp, &bufmgr_fake->on_hardware) {
      DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
          block, block->mem->size, block->mem->ofs, block->bo, fence);
      block->fence = fence;

      block->on_hardware = 0;
      block->fenced = 1;

      /* Move to tail of pending list here */
      move_to_tail(&bufmgr_fake->fenced, block);
   }

   assert(is_empty_list(&bufmgr_fake->on_hardware));
}

static GLboolean
evict_and_alloc_block(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   assert(bo_fake->block == NULL);

   /* Search for already free memory:
    */
   if (alloc_block(bo))
      return GL_TRUE;

   /* If we're not thrashing, allow lru eviction to dig deeper into
    * recently used textures.  We'll probably be thrashing soon:
    */
   if (!bufmgr_fake->thrashing) {
      while (evict_lru(bufmgr_fake, 0))
         if (alloc_block(bo))
            return GL_TRUE;
   }

   /* Keep thrashing counter alive?
    */
   if (bufmgr_fake->thrashing)
      bufmgr_fake->thrashing = 20;

   /* Wait on any already pending fences - here we are waiting for any
    * freed memory that has been submitted to hardware and fenced to
    * become available:
    */
   while (!is_empty_list(&bufmgr_fake->fenced)) {
      GLuint fence = bufmgr_fake->fenced.next->fence;
      _fence_wait_internal(bufmgr_fake, fence);

      if (alloc_block(bo))
         return GL_TRUE;
   }

   if (!is_empty_list(&bufmgr_fake->on_hardware)) {
      while (!is_empty_list(&bufmgr_fake->fenced)) {
         GLuint fence = bufmgr_fake->fenced.next->fence;
         _fence_wait_internal(bufmgr_fake, fence);
      }

      if (!bufmgr_fake->thrashing) {
         DBG("thrashing\n");
      }
      bufmgr_fake->thrashing = 20;

      if (alloc_block(bo))
         return GL_TRUE;
   }

   while (evict_mru(bufmgr_fake))
      if (alloc_block(bo))
         return GL_TRUE;

   DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);

   return GL_FALSE;
}

/***********************************************************************
 * Public functions
 */

/**
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
{
   unsigned int cookie;

   cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
   _fence_wait_internal(bufmgr_fake, cookie);
}

/**
 * Wait for execution pending on a buffer
 */
static void
dri_bufmgr_fake_bo_wait_idle(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   if (bo_fake->block == NULL || !bo_fake->block->fenced)
      return;

   _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}
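Both wait-idle helpers above go through the driver-supplied fence hooks stored on the buffer manager: emit a fence cookie, then block until that cookie retires. The following is a minimal standalone sketch of that callback pattern, kept separate from this file; the toy_bufmgr, toy_wait_idle, toy_emit and toy_wait names are invented for illustration and do not exist in the driver.

#include <stdio.h>

/* Toy stand-in for the fence hooks a driver registers with the fake
 * buffer manager (fence_emit / fence_wait plus an opaque priv pointer). */
typedef struct toy_bufmgr {
   unsigned int (*fence_emit)(void *priv);                 /* returns a cookie */
   void (*fence_wait)(unsigned int cookie, void *priv);    /* blocks on it */
   void *driver_priv;
} toy_bufmgr;

/* Mirrors the shape of dri_bufmgr_fake_wait_idle(): emit, then wait. */
static void
toy_wait_idle(toy_bufmgr *mgr)
{
   unsigned int cookie = mgr->fence_emit(mgr->driver_priv);
   mgr->fence_wait(cookie, mgr->driver_priv);
}

/* Trivial callbacks standing in for the real kernel/DRM hooks. */
static unsigned int
toy_emit(void *priv)
{
   unsigned int *last_fence = priv;
   return ++*last_fence;
}

static void
toy_wait(unsigned int cookie, void *priv)
{
   (void)priv;
   printf("waiting for fence %u to retire\n", cookie);
}

int main(void)
{
   unsigned int last_fence = 0;
   toy_bufmgr mgr = { toy_emit, toy_wait, &last_fence };

   toy_wait_idle(&mgr);
   return 0;
}

The point of the indirection is that the buffer manager never touches the hardware directly; everything it knows about GPU progress comes back through these two hooks.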
/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void
dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
   struct block *block, *tmp;

   bufmgr_fake->need_fence = 1;
   bufmgr_fake->fail = 0;

   /* Wait for hardware idle.  We don't know where acceleration has been
    * happening, so we'll need to wait anyway before letting anything get
    * put on the card again.
    */
   dri_bufmgr_fake_wait_idle(bufmgr_fake);

   /* Check that we hadn't released the lock without having fenced the last
    * set of buffers.
    */
   assert(is_empty_list(&bufmgr_fake->fenced));
   assert(is_empty_list(&bufmgr_fake->on_hardware));

   foreach_s(block, tmp, &bufmgr_fake->lru) {
      assert(_fence_test(bufmgr_fake, block->fence));
      set_dirty(block->bo);
   }
}

static dri_bo *
dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
                  unsigned long size, unsigned int alignment,
                  uint64_t location_mask)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake;

   bufmgr_fake = (dri_bufmgr_fake *)bufmgr;

   assert(size != 0);

   bo_fake = calloc(1, sizeof(*bo_fake));
   if (!bo_fake)
      return NULL;

   bo_fake->bo.size = size;
   bo_fake->bo.offset = -1;
   bo_fake->bo.virtual = NULL;
   bo_fake->bo.bufmgr = bufmgr;
   bo_fake->refcount = 1;

   /* Alignment must be a power of two */
   assert((alignment & (alignment - 1)) == 0);
   if (alignment == 0)
      alignment = 1;
   bo_fake->alignment = alignment;
   bo_fake->id = ++bufmgr_fake->buf_nr;
   bo_fake->name = name;
   bo_fake->flags = 0;
   bo_fake->is_static = GL_FALSE;

   DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
       bo_fake->bo.size / 1024);

   return &bo_fake->bo;
}

static dri_bo *
dri_fake_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
                         unsigned long offset, unsigned long size,
                         void *virtual, uint64_t location_mask)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake;

   bufmgr_fake = (dri_bufmgr_fake *)bufmgr;

   assert(size != 0);

   bo_fake = calloc(1, sizeof(*bo_fake));
   if (!bo_fake)
      return NULL;

   bo_fake->bo.size = size;
   bo_fake->bo.offset = offset;
   bo_fake->bo.virtual = virtual;
   bo_fake->bo.bufmgr = bufmgr;
   bo_fake->refcount = 1;
   bo_fake->id = ++bufmgr_fake->buf_nr;
   bo_fake->name = name;
   bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
   bo_fake->is_static = GL_TRUE;

   DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
       bo_fake->bo.size / 1024);

   return &bo_fake->bo;
}

static void
dri_fake_bo_reference(dri_bo *bo)
{
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   bo_fake->refcount++;
}

static void
dri_fake_bo_unreference(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
   int i;

   if (!bo)
      return;

   if (--bo_fake->refcount == 0) {
      assert(bo_fake->map_count == 0);
      /* No remaining references, so free it */
      if (bo_fake->block)
         free_block(bufmgr_fake, bo_fake->block);
      free_backing_store(bo);

      for (i = 0; i < bo_fake->nr_relocs; i++)
         dri_bo_unreference(bo_fake->relocs[i].target_buf);

      DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);

      free(bo_fake->relocs);
      free(bo);

      return;
   }
}

/**
 * Set the buffer as not requiring backing store, and instead get the callback
 * invoked whenever it would be set dirty.
 */
void
dri_bo_fake_disable_backing_store(dri_bo *bo,
                                  void (*invalidate_cb)(dri_bo *bo, void *ptr),
                                  void *ptr)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   if (bo_fake->backing_store)
      free_backing_store(bo);

   bo_fake->flags |= BM_NO_BACKING_STORE;

   DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
   bo_fake->dirty = 1;
   bo_fake->invalidate_cb = invalidate_cb;
   bo_fake->invalidate_ptr = ptr;

   /* Note that it is invalid right from the start.  Also note
    * invalidate_cb is called with the bufmgr locked, so cannot
    * itself make bufmgr calls.
    */
   if (invalidate_cb != NULL)
      invalidate_cb(bo, ptr);
}
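The invalidate-callback mechanism above is easier to follow in isolation: with BM_NO_BACKING_STORE there is no system-memory copy to restore from, so whenever the buffer's contents are thrown away the owner is simply asked to regenerate them. Below is a minimal standalone sketch of that idea, kept separate from this file; the toy_bo, toy_disable_backing_store, toy_set_dirty and rebuild_contents names are invented for illustration and are not part of the driver.

#include <stdio.h>

/* Toy buffer object carrying only the dirty flag and invalidate hook. */
typedef struct toy_bo {
   int dirty;
   void (*invalidate_cb)(struct toy_bo *bo, void *ptr);
   void *invalidate_ptr;
} toy_bo;

/* Analogue of dri_bo_fake_disable_backing_store(): register the callback
 * and, as in the real code, mark the buffer invalid right from the start. */
static void
toy_disable_backing_store(toy_bo *bo,
                          void (*invalidate_cb)(toy_bo *bo, void *ptr),
                          void *ptr)
{
   bo->invalidate_cb = invalidate_cb;
   bo->invalidate_ptr = ptr;
   bo->dirty = 1;
   if (invalidate_cb != NULL)
      invalidate_cb(bo, ptr);
}

/* Called whenever the contents are discarded (e.g. the block is evicted):
 * with no backing store, the owner must rebuild the data on next use. */
static void
toy_set_dirty(toy_bo *bo)
{
   bo->dirty = 1;
   if (bo->invalidate_cb != NULL)
      bo->invalidate_cb(bo, bo->invalidate_ptr);
}

static void
rebuild_contents(toy_bo *bo, void *ptr)
{
   (void)bo;
   printf("asked to rebuild \"%s\"\n", (const char *)ptr);
}

int main(void)
{
   char name[] = "batchbuffer";
   toy_bo bo = { 0, NULL, NULL };

   toy_disable_backing_store(&bo, rebuild_contents, name);
   toy_set_dirty(&bo);   /* simulate the block being evicted */
   return 0;
}

The real callback runs with the buffer manager locked, which is why the code above warns that it must not call back into the buffer manager itself.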
/**
 * Map a buffer into bo->virtual, allocating either card memory space (If
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   /* Static buffers are always mapped. */
   if (bo_fake->is_static)
      return 0;

   /* Allow recursive mapping.  Mesa may recursively map buffers with
    * nested display loops, and it is used internally in bufmgr_fake
    * for relocation.
    */
   if (bo_fake->map_count++ != 0)
      return 0;

   {
      DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
          bo_fake->bo.size / 1024);

      if (bo->virtual != NULL) {
         _mesa_printf("%s: already mapped\n", __FUNCTION__);
         abort();
      } else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
         if (!bo_fake->block && !evict_and_alloc_block(bo)) {
            DBG("%s: alloc failed\n", __FUNCTION__);
            bufmgr_fake->fail = 1;
            return 1;
         } else {
            assert(bo_fake->block);
            bo_fake->dirty = 0;

            if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
                bo_fake->block->fenced) {
               dri_bufmgr_fake_bo_wait_idle(bo);
            }

            bo->virtual = bo_fake->block->virtual;
         }
      } else {
         if (write_enable)
            set_dirty(bo);

         if (bo_fake->backing_store == 0)
            alloc_backing_store(bo);

         bo->virtual = bo_fake->backing_store;
      }
   }

   return 0;
}

static int
dri_fake_bo_unmap(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   /* Static buffers are always mapped. */
   if (bo_fake->is_static)
      return 0;

   assert(bo_fake->map_count != 0);
   if (--bo_fake->map_count != 0)
      return 0;

   DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
       bo_fake->bo.size / 1024);

   bo->virtual = NULL;

   return 0;
}

static void
dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
{
   struct block *block, *tmp;

   bufmgr_fake->performed_rendering = GL_FALSE;
   /* okay, for every BO that is on the HW, kick it off.
    * seriously not afraid of the POLICE right now. */
   foreach_s(block, tmp, &bufmgr_fake->on_hardware) {
      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;

      block->on_hardware = 0;
      free_block(bufmgr_fake, block);
      bo_fake->block = NULL;
      bo_fake->validated = GL_FALSE;
      if (!(bo_fake->flags & BM_NO_BACKING_STORE))
         bo_fake->dirty = 1;
   }
}

static int
dri_fake_bo_validate(dri_bo *bo, uint64_t flags)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   /* XXX: Sanity-check whether we've already validated this one under
    * different flags.  See drmAddValidateItem().