/* -*- c-file-style: "img" -*-
<module>
* Name : dual_page.c
* Title : Dual Space Page Management.
* Author : Marcus Shawcroft
* Created : 14 May 2003
*
* Copyright : 2003, 2004 by Imagination Technologies Limited.
* All rights reserved. No part of this software, either
* material or conceptual may be copied or distributed,
* transmitted, transcribed, stored in a retrieval system
* or translated into any human or computer language in any
* form by any means, electronic, mechanical, manual or
otherwise, or disclosed to third parties without the
* express written permission of Imagination Technologies
* Limited, Unit 8, HomePark Industrial Estate,
* King's Langley, Hertfordshire, WD4 8LZ, U.K.
*
* Description : Manages pages mapped into two virtual memory spaces,
* cpu and device.
*
* Version : $Revision: 1.28 $
* Modifications :
* $Log: dual_page.c $
* ..
* --- Revision Logs Removed ---
*
</module>
*/
#include "services_headers.h"
#include "buffer_manager.h"
#include "hash.h"
#include "ra.h"
#include "dual_buffer.h"
#include "dual_page.h"
#include "hw.h"
#include "mmu.h"
#include "pool.h"
#include "env.h"
struct _DP_POOL_
{
/* Resource arena managing this pool's contiguous system
 * physical memory */
RA_ARENA *pSysPhysArena;
/* System-supplied description of the pool: base address, size
 * and first-alloc/last-free hooks */
SYS_POOL_DESCRIPTOR *descriptor;
/* IMG_TRUE while the pool's single contiguous block is mapped */
IMG_BOOL bAllocated;
/* Device virtual address for the contiguous arena mapping */
IMG_DEV_VIRTADDR DevVAddr;
/* CPU virtual address for the contiguous arena mapping */
IMG_CPU_VIRTADDR CpuVAddr;
};
struct _DP_STATE_
{
/* ra state from RA_Initialise() */
RA_STATE *pRAState;
/* Array of pools, each wrapping a resource arena for contiguous
 * blocks of system physical memory */
struct _DP_POOL_ *pContiguousPool;
IMG_UINT32 uContiguousPoolCount;
/* pool of struct mapping objects */
POOL *pMappingPool;
/* pool of DP_PAGER objects */
POOL *pPagerPool;
};
struct _DP_PAGER_
{
enum _MMU_MODE_
{
/* No device MMU: device virtual addresses equal device
 * physical addresses */
MM_Disable,
/* Device virtual space maps linearly onto the contiguous pool */
MM_Linear,
/* Full page-based MMU mapping */
MM_Normal
} eMode;
MMU *pMMU;
DP_STATE *pDPState;
};
static int
CpuMemoryWrap (DP_MAPPING *pMapping, IMG_CPU_PHYADDR base);
static void
CpuMemoryFree (DP_PAGER *pPager,
DP_MAPPING *pMapping);
/* TODO: find a proper home for this function */
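/* Map a CPU-virtually-contiguous buffer into device virtual space.
 * Walks the range a host page at a time, translating each page's
 * CPU virtual address to its CPU physical and then device physical
 * address, and maps it at the corresponding device virtual offset.
 * The buffer need not be physically contiguous. */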
static void
MMU_MapShadow (MMU *pMMU,
IMG_DEV_VIRTADDR DevVAddr,
IMG_SIZE_T uSize,
IMG_CPU_VIRTADDR CpuVAddr)
{
IMG_UINT32 uOffset;
PVR_DPF ((PVR_DBG_MESSAGE,
"MMU_MapShadow (0x%x, 0x%x, 0x%x)",
DevVAddr.uiAddr,
uSize,
CpuVAddr));
for (uOffset=0; uOffset<uSize; uOffset+=HOST_PAGESIZE())
{
IMG_CPU_PHYADDR CpuPAddr;
IMG_DEV_PHYADDR DevPAddr;
IMG_DEV_VIRTADDR MapDevVAddr;
CpuPAddr = ENV_CpuVAddrToCpuPAddr ((IMG_UINTPTR_T)CpuVAddr + uOffset);
DevPAddr = ENV_CpuPAddrToDevPAddr (CpuPAddr);
MapDevVAddr.uiAddr = DevVAddr.uiAddr+uOffset;
#if 0
PVR_DPF ((PVR_DBG_MESSAGE,
"..CpuVAddr=0x%x, CpuPAddr=0x%x, DevVAddr=0x%x, DevPAddr=0x%x",
(IMG_UINTPTR_T)CpuVAddr + uOffset, CpuPAddr.uiAddr, MapDevVAddr.uiAddr, DevPAddr.uiAddr));
#endif
MMU_MapPage (pMMU, MapDevVAddr, DevPAddr);
}
}
/* todo: boilerplate */
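/* Import callback handed to the resource allocator for a contiguous
 * pool. The pool is a single block: on the first request the whole
 * descriptor range is mapped write-combined into CPU virtual space
 * and its physical base handed to the arena; further imports fail
 * until the block is freed again. */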
static IMG_BOOL
_contiguous_alloc (void *h,
IMG_SIZE_T uSize,
IMG_SIZE_T *pActualSize,
void **ref,
IMG_UINT32 uFlags,
IMG_UINTPTR_T *pBase)
{
struct _DP_POOL_ *pPool = h;
PVR_ASSERT (pPool!=IMG_NULL);
PVR_ASSERT (pBase!=IMG_NULL);
PVR_ASSERT (pActualSize!=IMG_NULL);
PVR_DPF ((PVR_DBG_MESSAGE,
"_contiguous_alloc: pPool->bAllocated=%d, pPool->descriptor->size=%x, pPool->descriptor->start=%x",
pPool->bAllocated, pPool->descriptor->uSize,
pPool->descriptor->BaseSysPAddr.uiAddr));
if (pPool->bAllocated)
return IMG_FALSE;
*pActualSize = pPool->descriptor->uSize;
pPool->CpuVAddr =
HostMapPhysToLin (ENV_SysPAddrToCpuPAddr (pPool->descriptor->BaseSysPAddr),
pPool->descriptor->uSize,
CACHETYPE_WRITECOMBINED | EXTRA_CACHETYPE_SHARED);
if (pPool->CpuVAddr == IMG_NULL)
return IMG_FALSE;
*pBase = (IMG_UINTPTR_T) pPool->descriptor->BaseSysPAddr.uiAddr;
pPool->bAllocated = IMG_TRUE;
if (pPool->descriptor->hook_first_alloc != IMG_NULL)
(pPool->descriptor->hook_first_alloc) (pPool->descriptor->hook_handle);
return IMG_TRUE;
}
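/* Counterpart of _contiguous_alloc: unmaps the pool's single block,
 * marks the pool free and fires the last-free hook. */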
static void
_contiguous_free (void *h, IMG_UINTPTR_T base, void *ref)
{
struct _DP_POOL_ *pPool = h;
PVR_ASSERT (pPool != IMG_NULL);
PVR_ASSERT ((IMG_UINTPTR_T)pPool->descriptor->BaseSysPAddr.uiAddr == base);
PVR_DPF ((PVR_DBG_MESSAGE,
"_contiguous_free: pPool->descriptor->BaseSysPAddr=0x%08x, base=0x%08x",
pPool->descriptor->BaseSysPAddr.uiAddr, base));
pPool->bAllocated = IMG_FALSE;
/* todo: check for failure */
HostUnMapPhysToLin (pPool->CpuVAddr,
pPool->descriptor->uSize);
pPool->CpuVAddr = IMG_NULL;
if (pPool->descriptor->hook_last_free != IMG_NULL)
(pPool->descriptor->hook_last_free) (pPool->descriptor->hook_handle);
}
/*----------------------------------------------------------------------------
<function>
FUNCTION: DevMemoryAlloc
PURPOSE: Allocate using the device MMU.
PARAMETERS: In: pPager - the pager to allocate from.
Out: pMapping - the mapping descriptor to be filled in for this
allocation.
Out: pActualSize - the actual size of the block allocated in
bytes.
In: uFlags - allocation flags
In: dev_vaddr_alignment - required device virtual address
alignment, or 0.
Out: base - receives the cpu virtual base address of the
allocated block.
RETURNS: IMG_TRUE - Success
IMG_FALSE - Failed.
</function>
-----------------------------------------------------------------------------*/
static IMG_BOOL
DevMemoryAlloc (DP_PAGER *pPager,
DP_MAPPING *pMapping,
IMG_SIZE_T *pActualSize,
IMG_UINT32 uFlags,
IMG_UINT32 dev_vaddr_alignment,
IMG_UINTPTR_T *pBase)
{
switch (pPager->eMode)
{
case MM_Normal:
/* allocate device linear space */
if (!MMU_Alloc (pPager->pMMU, pMapping->uSize, pActualSize, uFlags,
dev_vaddr_alignment, &(pMapping->DevVAddr)))
return IMG_FALSE;
switch (pMapping->eCpuMemoryOrigin)
{
case hm_none:
break;
case hm_wrapped:
case hm_contiguous:
MMU_MapPages (pPager->pMMU,
pMapping->DevVAddr,
ENV_CpuPAddrToDevPAddr (pMapping->CpuPAddr),
pMapping->uSize);
MMU_FlushRange (pPager->pMMU, pMapping->DevVAddr, pMapping->uSize);
break;
case hm_env:
MMU_MapShadow (pPager->pMMU,
pMapping->DevVAddr,
pMapping->uSize,
pMapping->CpuVAddr);
MMU_FlushRange (pPager->pMMU, pMapping->DevVAddr, pMapping->uSize);
break;
default:
PVR_DPF((PVR_DBG_ERROR,"Illegal value for pMapping->eCpuMemoryOrigin"));
PVR_ASSERT (0);
return IMG_FALSE;
}
break;
case MM_Linear:
switch (pMapping->eCpuMemoryOrigin)
{
case hm_wrapped:
/* allocate device linear space */
if (!MMU_Alloc (pPager->pMMU,
pMapping->uSize,
pActualSize,
uFlags,
dev_vaddr_alignment,
&(pMapping->DevVAddr)))
return IMG_FALSE;
MMU_MapPages (pPager->pMMU,
pMapping->DevVAddr,
ENV_CpuPAddrToDevPAddr (pMapping->CpuPAddr),
pMapping->uSize);
MMU_FlushRange (pPager->pMMU, pMapping->DevVAddr, pMapping->uSize);
break;
case hm_contiguous:
{
IMG_DEV_VIRTADDR BaseDevVAddr;
IMG_DEV_PHYADDR AllocDevPAddr;
IMG_DEV_PHYADDR BaseDevPAddr;
/* with a linear mapping dev_vaddr is the dev_vaddr of the
* pool base plus the offset between the dev_paddr
* allocated and the dev_paddr of the pool base */
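/* For example (hypothetical numbers): a pool with device physical
 * base 0x80000000 mapped at device virtual 0xc0000000 places an
 * allocation at device physical 0x80003000 at device virtual
 * address 0xc0003000. */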
BaseDevVAddr = pPager->pDPState->pContiguousPool[pMapping->uPoolNr].DevVAddr;
AllocDevPAddr = ENV_CpuPAddrToDevPAddr (pMapping->CpuPAddr);
BaseDevPAddr = ENV_SysPAddrToDevPAddr (pPager->pDPState->pContiguousPool[pMapping->uPoolNr].descriptor->BaseSysPAddr);
pMapping->DevVAddr.uiAddr = BaseDevVAddr.uiAddr +
(AllocDevPAddr.uiAddr - BaseDevPAddr.uiAddr);
break;
}
default:
PVR_ASSERT (0);
break;
}
break;
case MM_Disable:
/* without an mmu dev_vaddr == dev_paddr */
pMapping->DevVAddr.uiAddr =
ENV_CpuPAddrToDevPAddr (pMapping->CpuPAddr).uiAddr;
break;
}
*pBase = pMapping->DevVAddr.uiAddr;
return IMG_TRUE;
}
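/*----------------------------------------------------------------------------
<function>
FUNCTION: DevMemoryFree
PURPOSE: Release the device virtual space claimed by DevMemoryAlloc.
Only modes that allocated from the MMU (MM_Normal always,
MM_Linear for wrapped memory) have anything to free.
PARAMETERS: In: pMapping - the mapping whose device resources to release.
RETURNS: None
</function>
-----------------------------------------------------------------------------*/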
static void
DevMemoryFree (DP_MAPPING *pMapping)
{
PVR_ASSERT (pMapping!=IMG_NULL);
switch (pMapping->pPager->eMode)
{
case MM_Disable:
break;
case MM_Linear:
if (pMapping->eCpuMemoryOrigin == hm_wrapped)
MMU_Free (pMapping->pPager->pMMU, pMapping->DevVAddr);
break;
case MM_Normal:
MMU_Free (pMapping->pPager->pMMU, pMapping->DevVAddr);
break;
}
}
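/*----------------------------------------------------------------------------
<function>
FUNCTION: CpuMemoryAlloc
PURPOSE: Allocate the cpu-side memory backing a mapping. Without a
device MMU, with a linear mapping, or when BP_CONTIGUOUS
is requested, the memory is carved from a contiguous pool.
PARAMETERS: In: pPager - the pager to allocate from.
Out: pMapping - the mapping descriptor to be filled in.
Out: pActualSize - the actual size of the block in bytes.
In: uFlags - allocation flags, including the pool number.
In: uDevVAddrAlignment - required device virtual alignment.
RETURNS: IMG_TRUE - Success
IMG_FALSE - Failed.
</function>
-----------------------------------------------------------------------------*/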
static IMG_BOOL
CpuMemoryAlloc (DP_PAGER *pPager,
DP_MAPPING *pMapping,
IMG_SIZE_T *pActualSize,
IMG_UINT32 uFlags,
IMG_UINT32 uDevVAddrAlignment)
{
PVR_ASSERT (pPager!=IMG_NULL);
PVR_ASSERT (pPager->pDPState!=IMG_NULL);
/* Without a device MMU, with a linear mapping, or when the caller
 * explicitly requests it, the allocation must come from a
 * contiguous pool. */
if (pPager->eMode==MM_Linear ||
pPager->eMode==MM_Disable ||
(uFlags & BP_CONTIGUOUS))
{
IMG_SYS_PHYADDR SysPAddr;
IMG_UINT32 uAlignmentOffset;
IMG_UINT32 pa;
IMG_UINT32 va;
pMapping->uPoolNr = uFlags & BP_POOL_MASK;
if (pMapping->uPoolNr >= pPager->pDPState->uContiguousPoolCount)
{
PVR_DPF ((PVR_DBG_ERROR, "dual_page: illegal pool number %d",
pMapping->uPoolNr));
goto fail_cleanup;
}
pMapping->eCpuMemoryOrigin = hm_contiguous;
pMapping->CpuVAddr = IMG_NULL;
pMapping->CpuPAddr.uiAddr = 0;
pa = pPager->pDPState->pContiguousPool[pMapping->uPoolNr].descriptor->BaseSysPAddr.uiAddr;
va = pPager->pDPState->pContiguousPool[pMapping->uPoolNr].DevVAddr.uiAddr;
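/* The arena allocates in system physical space, but the alignment
 * constraint applies in device virtual space. With a linear mapping
 * DevVAddr = va + (SysPAddr - pa), so the offset below is chosen so
 * that an allocation honouring it yields an aligned device virtual
 * address; this presumes RA_Alloc interprets the offset as
 * "(base + offset) mod alignment == 0" and that uDevVAddrAlignment
 * is non-zero. E.g. (hypothetical numbers) pa=0x1000, va=0x3000,
 * alignment 0x1000: the offset is 0, and any 0x1000-aligned
 * physical base maps to an aligned device virtual address. */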
if (va>pa)
uAlignmentOffset = (va - pa) % uDevVAddrAlignment;
else
uAlignmentOffset = uDevVAddrAlignment - ((pa - va) % uDevVAddrAlignment);
if (!RA_Alloc ( pPager->pDPState->pContiguousPool[pMapping->uPoolNr].pSysPhysArena,
pMapping->uSize,
pActualSize,
IMG_NULL,
0,
uDevVAddrAlignment,
uAlignmentOffset,
&SysPAddr.uiAddr))
{
PVR_DPF((PVR_DBG_ERROR, "CpuMemoryAlloc: ERROR RA_Alloc failed"));
return IMG_FALSE;
}