📄 dual_page.c
字号:
pMapping->CpuPAddr = ENV_SysPAddrToCpuPAddr (SysPAddr);
pMapping->CpuVAddr = (IMG_CPU_VIRTADDR) ((IMG_UINTPTR_T) pPager->pDPState->pContiguousPool[pMapping->uPoolNr].CpuVAddr + SysPAddr.uiAddr - pPager->pDPState->pContiguousPool[pMapping->uPoolNr].descriptor->BaseSysPAddr.uiAddr);
}
else
{
pMapping->eCpuMemoryOrigin = hm_env;
HostAllocMem (PVRSRV_HOST_PAGEABLE_HEAP,
pMapping->uSize,
(IMG_VOID **)&pMapping->CpuVAddr,
0);
if (pMapping->CpuVAddr==0)
{
PVR_DPF((PVR_DBG_ERROR, "CpuMemoryAlloc: ERROR HostAllocMem failed"));
goto fail_cleanup;
}
}
return IMG_TRUE;
fail_cleanup:
CpuMemoryFree (pPager, pMapping);
return IMG_FALSE;
}
/*
 * CpuMemoryWrap
 *
 * Wrap an externally supplied contiguous physical region so the CPU can
 * access it, by mapping it into the kernel linear address space.
 *
 * pMapping - mapping record to fill in; uSize must already be set.
 * base     - CPU physical base address of the region to wrap.
 *
 * Returns 1 on success, 0 if the region could not be mapped.
 * On success the mapping's origin is hm_wrapped, so CpuMemoryFree will
 * undo this with HostUnMapPhysToLin.
 */
static int
CpuMemoryWrap (DP_MAPPING *pMapping, IMG_CPU_PHYADDR base)
{
	PVR_ASSERT (pMapping != IMG_NULL);

	pMapping->eCpuMemoryOrigin = hm_wrapped;
	pMapping->CpuPAddr = base;

	/* We are requesting writecombined uncached mapping */
	pMapping->CpuVAddr = HostMapPhysToLin (pMapping->CpuPAddr,
	                                       pMapping->uSize,
	                                       CACHETYPE_WRITECOMBINED);
	if (pMapping->CpuVAddr == 0)
	{
		/* Nothing has been acquired yet, so there is nothing to clean
		 * up; a direct return is clearer than the original goto to a
		 * label that performed no cleanup. */
		return 0;
	}
	return 1;
}
/*
 * CpuMemoryFree
 *
 * Release the CPU-side resources of a mapping, dispatching on how the
 * memory was originally obtained (eCpuMemoryOrigin). Safe to call on a
 * partially constructed mapping: each case checks its own field before
 * freeing, so it doubles as the failure-path cleanup for allocation.
 *
 * pPager   - pager owning the mapping (needed for the contiguous arena).
 * pMapping - mapping whose CPU memory is to be released.
 */
static void
CpuMemoryFree (DP_PAGER *pPager,
               DP_MAPPING *pMapping)
{
	switch (pMapping->eCpuMemoryOrigin)
	{
	case hm_none:
		/* Nothing was ever allocated or mapped. */
		break;

	case hm_contiguous:
		/* Came from a system contiguous pool: return the physical span
		 * to that pool's resource arena. */
		if (pMapping->CpuPAddr.uiAddr != 0)
		{
			RA_Free (pPager->pDPState->pContiguousPool[pMapping->uPoolNr].pSysPhysArena,
			         (IMG_UINTPTR_T) ENV_CpuPAddrToSysPAddr (pMapping->CpuPAddr).uiAddr,
			         IMG_NULL);
		}
		break;

	case hm_wrapped:
		/* Externally owned physical memory: only tear down our linear
		 * mapping, never free the pages themselves. */
		if (pMapping->CpuVAddr != 0)
			HostUnMapPhysToLin ((void*) pMapping->CpuVAddr, pMapping->uSize);
		break;

	case hm_env:
		if (pMapping->CpuVAddr != 0)
		{
			/* todo: mjs this must map to vfree() */
			HostFreeMem (PVRSRV_HOST_PAGEABLE_HEAP, (void *) pMapping->CpuVAddr);
		}
		break;

	default:
		/* Unknown origin (corrupted or newly added enum value not yet
		 * handled here): there is nothing we can safely free, but do
		 * not fail silently. */
		PVR_DPF ((PVR_DBG_ERROR, "CpuMemoryFree: unknown memory origin"));
		break;
	}
}
/*----------------------------------------------------------------------------
<function>
FUNCTION: DP_Initialise
PURPOSE: Initialise the dual page module, must be called before any
other dual page module function.
PARAMETERS: pRAState - ra state
RETURNS: IMG_TRUE - Success
IMG_FALSE - Failed.
</function>
-----------------------------------------------------------------------------*/
/*
 * DP_Initialise (see banner comment above): build the dual page module
 * state - the pager and mapping object pools, the MMU support code, and
 * one resource arena per system-managed contiguous memory pool.
 *
 * pRAState - resource allocator state to create arenas against.
 * ppState  - out: receives the new DP_STATE on success.
 *
 * Returns IMG_TRUE on success, IMG_FALSE on any failure (all partially
 * constructed state is torn down via the cleanup path).
 */
IMG_BOOL
DP_Initialise (RA_STATE *pRAState, DP_STATE **ppState)
{
	DP_STATE *pDPState = IMG_NULL;
	IMG_UINT32 n;
	SYS_POOL_DESCRIPTOR *descriptor;

	PVR_DPF ((PVR_DBG_MESSAGE, "DP_Initialise ()"));
	PVR_ASSERT (pRAState != IMG_NULL);
	PVR_ASSERT (ppState != IMG_NULL);

	HostAllocMem (PVRSRV_HOST_PAGEABLE_HEAP,
	              sizeof (*pDPState),
	              (IMG_VOID **)&pDPState, 0);
	if (pDPState == IMG_NULL) goto cleanup;

	/* Pre-null everything the cleanup path inspects, so a failure at
	 * any later step frees only what was actually created. */
	pDPState->pRAState = pRAState;
	pDPState->pPagerPool = IMG_NULL;
	pDPState->pMappingPool = IMG_NULL;
	pDPState->pContiguousPool = IMG_NULL;
	pDPState->uContiguousPoolCount = 0;

	if (!MMU_Initialise ()) goto cleanup;

	pDPState->pPagerPool = POOL_Create ("img-pager", sizeof (DP_PAGER));
	if (pDPState->pPagerPool == IMG_NULL) goto cleanup;

	pDPState->pMappingPool = POOL_Create ("img-mapping", sizeof (DP_MAPPING));
	if (pDPState->pMappingPool == IMG_NULL) goto cleanup;

	/* Ask the system layer how many contiguous pools it manages; the
	 * descriptor array is owned by the system layer, we only keep
	 * pointers into it. */
	if (SysGetManagedMemoryInfo (&descriptor,
	                             &pDPState->uContiguousPoolCount) != PVRSRV_OK)
		goto cleanup;

	HostAllocMem (PVRSRV_HOST_PAGEABLE_HEAP,
	              sizeof (*pDPState->pContiguousPool) * pDPState->uContiguousPoolCount,
	              (IMG_VOID **)&pDPState->pContiguousPool,
	              0);
	if (pDPState->pContiguousPool == IMG_NULL) goto cleanup;

	/* Null all arena slots first so cleanup can tell created arenas
	 * from slots a partial RA_Create failure never reached. */
	for (n = 0; n < pDPState->uContiguousPoolCount; n++)
		pDPState->pContiguousPool[n].pSysPhysArena = IMG_NULL;

	for (n = 0; n < pDPState->uContiguousPoolCount; n++)
	{
		pDPState->pContiguousPool[n].descriptor = &descriptor[n];
		pDPState->pContiguousPool[n].bAllocated = IMG_FALSE;
		pDPState->pContiguousPool[n].pSysPhysArena =
			RA_Create (pRAState,
			           descriptor[n].name,
			           0, /* no initial allocation */
			           0, /* no initial allocation */
			           HostGetPageSize (),
			           _contiguous_alloc,
			           _contiguous_free,
			           &pDPState->pContiguousPool[n].pSysPhysArena);
		if (pDPState->pContiguousPool[n].pSysPhysArena == IMG_NULL) goto cleanup;
	}

	*ppState = pDPState;
	return IMG_TRUE;

cleanup:
	if (pDPState != IMG_NULL)
	{
		if (pDPState->pContiguousPool != IMG_NULL)
		{
			/* Guard against slots still IMG_NULL after a partial
			 * RA_Create failure - it is not established here that
			 * RA_Delete tolerates a NULL arena. */
			for (n = 0; n < pDPState->uContiguousPoolCount; n++)
			{
				if (pDPState->pContiguousPool[n].pSysPhysArena != IMG_NULL)
					RA_Delete (pDPState->pContiguousPool[n].pSysPhysArena);
			}
			HostFreeMem (PVRSRV_HOST_PAGEABLE_HEAP, pDPState->pContiguousPool);
		}
		/* NOTE(review): POOL_Delete/MMU_Finalise are presumed safe on
		 * NULL / uninitialised state here, as in the original code -
		 * confirm against their implementations. */
		POOL_Delete (pDPState->pMappingPool);
		POOL_Delete (pDPState->pPagerPool);
		MMU_Finalise ();
		HostFreeMem (PVRSRV_HOST_PAGEABLE_HEAP, pDPState);
	}
	return IMG_FALSE;
}
/*----------------------------------------------------------------------------
<function>
FUNCTION: DP_Finalise
PURPOSE: Finalise the dual page module. All page allocations must be
free'd before calling this function.
PARAMETERS: In - pDPState
RETURNS: None.
</function>
------------------------------------------------------------------------------*/
/*
 * DP_Finalise
 *
 * Tear down module state built by DP_Initialise: delete every pool's
 * physical arena, free the pool table, delete the object pools, shut
 * down the MMU support code and free the state block itself. A NULL
 * state is asserted against but tolerated as a no-op.
 */
void
DP_Finalise (DP_STATE *pDPState)
{
	IMG_UINT32 uPool;

	PVR_DPF ((PVR_DBG_MESSAGE, "DP_Finalise()"));
	PVR_ASSERT (pDPState != IMG_NULL);

	if (pDPState == IMG_NULL)
	{
		return;
	}

	/* Arenas first - they were the last things created. */
	for (uPool = 0; uPool < pDPState->uContiguousPoolCount; uPool++)
	{
		RA_Delete (pDPState->pContiguousPool[uPool].pSysPhysArena);
	}
	HostFreeMem (PVRSRV_HOST_PAGEABLE_HEAP, pDPState->pContiguousPool);

	POOL_Delete (pDPState->pMappingPool);
	POOL_Delete (pDPState->pPagerPool);

	MMU_Finalise ();

	HostFreeMem (PVRSRV_HOST_PAGEABLE_HEAP, pDPState);
}
/*----------------------------------------------------------------------------
<function>
FUNCTION: DP_Create
PURPOSE: Create a source of pages mapped into both cpu and device
virtual address spaces for a specific device.
PARAMETERS: In: device - device handle
RETURNS: dual page handle
</function>
------------------------------------------------------------------------------*/
/*
 * DP_Create: allocate a pager object for one device and configure its
 * memory-management mode. Three modes are handled:
 *   SYS_MMU_LINEAR  - create an MMU and statically map every contiguous
 *                     pool into device virtual space;
 *   SYS_MMU_NORMAL  - create an MMU, no up-front pool mappings;
 *   SYS_MMU_DISABLE - no MMU; device virtual == device physical.
 * Returns the new pager, or IMG_NULL on failure (all partial work is
 * unwound before returning).
 */
DP_PAGER *
DP_Create (DP_STATE *pDPState, struct device_tag *device)
{
DP_PAGER *pPager;
SYS_MMU_MODE eMMUMode;
PVR_ASSERT (pDPState!=IMG_NULL);
pPager = POOL_Alloc (pDPState->pPagerPool);
if (pPager == IMG_NULL)
{
return (DP_PAGER *)0;
}
/* Defaults; overwritten below according to the selected MMU mode. */
pPager->eMode = MM_Disable;
pPager->pMMU = IMG_NULL;
pPager->pDPState = pDPState;
/* A device without an MMU forces disabled mode regardless of the
 * system-wide setting. */
if (device->bHaveMMU)
eMMUMode = SysMMUMode ();
else
eMMUMode = SYS_MMU_DISABLE;
switch (eMMUMode)
{
case SYS_MMU_LINEAR:
{
IMG_UINT32 uPoolNr;
pPager->eMode = MM_Linear;
pPager->pMMU = MMU_Create (device, pDPState->pRAState, pPager);
if (!pPager->pMMU)
{
POOL_Free (pDPState->pPagerPool, pPager);
return IMG_NULL;
}
/* Reserve device virtual space for each contiguous pool and map
 * the pool's whole physical range into it. */
for (uPoolNr=0; uPoolNr<pDPState->uContiguousPoolCount; uPoolNr++)
{
IMG_DEV_PHYADDR devPAddr =
ENV_SysPAddrToDevPAddr (pDPState->pContiguousPool[uPoolNr].descriptor->BaseSysPAddr);
if (!MMU_Alloc (pPager->pMMU,
pDPState->pContiguousPool[uPoolNr].descriptor->uSize,
IMG_NULL,
0, /* map into low space */
0, /* no alignment constraint */
&(pDPState->pContiguousPool[uPoolNr].DevVAddr)))
{
IMG_UINT32 u;
PVR_DPF ((PVR_DBG_ERROR,
"insufficient device virtual space to map memory pool %d",
uPoolNr));
/* Unwind: free only the pools [0, uPoolNr) that were
 * successfully allocated, then the MMU and the pager. */
for (u=0; u<uPoolNr; u++)
MMU_Free (pPager->pMMU,
pDPState->pContiguousPool[u].DevVAddr);
MMU_Delete (pPager->pMMU);
POOL_Free (pDPState->pPagerPool, pPager);
return IMG_NULL;
}
MMU_MapPages (pPager->pMMU,
pDPState->pContiguousPool[uPoolNr].DevVAddr,
devPAddr,
pDPState->pContiguousPool[uPoolNr].descriptor->uSize);
}
MMU_Enable (pPager->pMMU);
break;
}
case SYS_MMU_NORMAL:
pPager->eMode = MM_Normal;
pPager->pMMU = MMU_Create (device, pDPState->pRAState, pPager);
if (pPager->pMMU == IMG_NULL)
{
POOL_Free (pDPState->pPagerPool, pPager);
return IMG_NULL;
}
MMU_Enable (pPager->pMMU);
break;
case SYS_MMU_DISABLE:
{
IMG_UINT32 uPoolNr;
for (uPoolNr=0; uPoolNr<pDPState->uContiguousPoolCount; uPoolNr++)
{
IMG_DEV_PHYADDR DevPAddr;
DevPAddr = ENV_SysPAddrToDevPAddr (pDPState->pContiguousPool[uPoolNr].descriptor->BaseSysPAddr);
/* With the mmu disable dev v and dev p addresses are equivalent */
pDPState->pContiguousPool[uPoolNr].DevVAddr.uiAddr = DevPAddr.uiAddr;
/* @todo: mjs fix this up in an appropriate device
* specific way */
/* Hard-coded 32MB device addressability limit - presumably a
 * placeholder per the todo above; reject pools the device
 * cannot reach. */
if (DevPAddr.uiAddr + pDPState->pContiguousPool[uPoolNr].descriptor->uSize > 0x2000000)
{
PVR_DPF ((PVR_DBG_ERROR,
"memory pool %d is not device addressable",
uPoolNr));
POOL_Free (pDPState->pPagerPool, pPager);
return IMG_NULL;
}
}
break;
}
}
return pPager;
}
/*----------------------------------------------------------------------------
<function>
FUNCTION: DP_Delete
PURPOSE: Delete a source of pages created with DP_Create ().
PARAMETERS: In: dual page handle
RETURNS: None
</function>
-----------------------------------------------------------------------------*/
/*
 * DP_Delete
 *
 * Destroy a pager created with DP_Create: in linear mode, release the
 * per-pool device virtual mappings first, then delete the MMU (if any)
 * and return the pager object to its pool. NULL is a no-op.
 */
void
DP_Delete (DP_PAGER *pPager)
{
	if (pPager != IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_MESSAGE, "DP_Delete ()"));
		PVR_ASSERT (pPager->pDPState != IMG_NULL);

		if (pPager->pMMU != IMG_NULL)
		{
			/* Decide from the mode recorded at DP_Create time rather
			 * than re-querying SysMMUMode(): per-pool device virtual
			 * mappings exist exactly when this pager was created in
			 * linear mode, so the recorded state cannot disagree with
			 * what actually needs freeing. */
			if (pPager->eMode == MM_Linear)
			{
				IMG_UINT32 uPoolNr;
				for (uPoolNr = 0; uPoolNr < pPager->pDPState->uContiguousPoolCount;
				     uPoolNr++)
				{
					MMU_Free (pPager->pMMU,
					          pPager->pDPState->pContiguousPool[uPoolNr].DevVAddr);
				}
			}
			MMU_Delete (pPager->pMMU);
		}
		POOL_Free (pPager->pDPState->pPagerPool, pPager);
	}
}
/*----------------------------------------------------------------------------
<function>
FUNCTION: DP_AllocMany
PURPOSE: Wrapper around dual_page_alloc conforming to the resource
allocator's callback resource requestor interface. Allocates
a block of pages larger than requested, allowing the resource
allocator to operate a small cache of pre allocated pages.
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -