/*
*
* Fun: cmFree
*
* Desc: Return a memory block to the memory region.
*
*
* Ret: ROK - successful
* RFAILED - unsuccessful.
*
* Notes: The user calls this function to return a previously allocated
* memory block to the memory region. The memory manager does not
* validate the state of the memory block (for example, whether
* it was actually allocated earlier). The caller must ensure that
* the address passed in 'ptr' is valid and was previously
* allocated from the same region.
*
*
* File: cm_mem.c
*
*/
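/*
* Illustrative call sequence (a sketch only: these PRIVATE functions
* are normally reached through the system service interface, and the
* exact cmAlloc signature is assumed here):
*
*    Data *blk = NULLP;
*    Size sz = 128;
*
*    if(cmAlloc(regionCb, &sz, &blk) == ROK)
*    {
*       ... use the block ...
*       (Void) cmFree(regionCb, blk, sz);
*    }
*/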
#ifdef ANSI
PRIVATE S16 cmFree
(
Void *regionCb,
Data *ptr,
Size size
)
#else
PRIVATE S16 cmFree(regionCb, ptr, size)
Void *regionCb;
Data *ptr;
Size size;
#endif
{
U16 idx;
CmMmBkt *bkt;
CmMmRegCb *regCb;
TRC2(cmFree);
regCb = (CmMmRegCb *)regionCb;
#if (ERRCLASS & ERRCLS_INT_PAR)
/* error check on parameters */
if((regCb == NULLP) || (!size) || (ptr == NULLP))
{
RETVALUE(RFAILED);
}
/* Check if the memory block lies within the memory region */
if((ptr < regCb->regInfo.start) ||
(ptr >= regCb->regInfo.start + regCb->regInfo.size))
{
RETVALUE(RFAILED);
}
#endif
/*
* Check if the memory block was allocated from the bucket pool.
*/
if(ptr < (regCb->regInfo.start + regCb->bktSize))
{
/* The memory block was allocated from the bucket pool */
/* Get the index into the mapping table */
idx = ((size - 1) >> regCb->bktQnPwr);
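/*
* Example (assuming a quantum of 16 bytes, i.e. bktQnPwr == 4):
* sizes 1..16 map to idx 0 and sizes 17..32 to idx 1; the "size - 1"
* makes an exact multiple of the quantum fall into the lower entry.
*/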
#if (ERRCLASS & ERRCLS_DEBUG)
if(regCb->mapTbl[idx].bktIdx == 0xFF)
{
/* Some fatal error in the map table initialization. */
RETVALUE(RFAILED);
}
#endif
/* Enqueue the memory block on the bucket free list */
bkt = &(regCb->bktTbl[regCb->mapTbl[idx].bktIdx]);
while(1)
{
/*
* Check if the given size fits within this bucket's block size
*/
if(size > bkt->size)
{
/* Try to go to the next bucket if available */
if((idx < (CMM_MAX_MAP_ENT - 1)) &&
(regCb->mapTbl[++idx].bktIdx != 0xFF))
{
bkt = &(regCb->bktTbl[regCb->mapTbl[idx].bktIdx]);
}
else
{
/* Size exceeds the largest bucket although 'ptr' lies in the
* bucket area; the free request is inconsistent */
RETVALUE(RFAILED);
}
}
else
break;
}
/* Acquire the bucket lock */
(Void) SLock(&(bkt->bktLock));
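/*
* Push the block on the bucket free list (LIFO): the first bytes of
* the freed block are reused to hold the pointer to the previous
* list head.
*/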
*((CmMmEntry **)ptr) = bkt->next;
bkt->next = (CmMmEntry *)ptr;
/* Decrement the count of blocks currently allocated from this bucket */
bkt->numAlloc--;
/* Release the lock */
(Void) SUnlock(&(bkt->bktLock));
RETVALUE(ROK);
}
/* The memory block was allocated from the heap pool */
RETVALUE(cmHeapFree (&(regCb->heapCb), ptr, size));
} /* end of cmFree */
/*
*
* Fun: cmCtl
*
* Desc: Control request function.
*
*
* Ret: ROK - successful
* RFAILED - unsuccessful.
*
* Notes: The current semantics of the control function are defined for
* two types of events: virtual address to physical address
* translation and memory resource check.
*
* The physical address translation is valid only for memory
* regions that are physically contiguous and non-pageable.
*
*
*
* File: cm_mem.c
*
*/
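/*
* Illustrative resource check (a sketch; the SMemCtl field names
* follow their use in the handler below, and the type of 'status'
* is assumed):
*
*    SMemCtl memCtl;
*    U8 status;
*
*    memCtl.u.chkres.size = 256;
*    memCtl.u.chkres.status = &status;
*    if(cmCtl(regionCb, SS_MEM_CHK_RES, &memCtl) == ROK)
*    {
*       ... status holds the availability in tenths (0..10) ...
*    }
*/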
#ifdef ANSI
PRIVATE S16 cmCtl
(
Void *regionCb,
Event event,
SMemCtl *memCtl
)
#else
PRIVATE S16 cmCtl(regionCb, event, memCtl)
Void *regionCb;
Event event;
SMemCtl *memCtl;
#endif
{
CmMmRegCb *regCb;
U32 bktIdx;
TRC2(cmCtl);
regCb = (CmMmRegCb *)regionCb;
#if (ERRCLASS & ERRCLS_INT_PAR)
/* error check on parameters */
if((regCb == NULLP) || (memCtl == NULLP))
{
RETVALUE(RFAILED);
}
#endif
switch(event)
{
case SS_MEM_V_TO_P:
{
Size offset;
#if (ERRCLASS & ERRCLS_INT_PAR)
if((memCtl->u.vtop.vaddr == NULLP) ||
(memCtl->u.vtop.paddr == NULLP))
{
RETVALUE(RFAILED);
}
#endif
/* Check if the virtual to physical address translation is valid */
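/*
* Example (hypothetical addresses): if the region starts at virtual
* address 0x40000000 with physical base pAddr 0x80000000, then a
* vaddr of 0x40001000 gives offset 0x1000 and paddr 0x80001000.
*/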
if(regCb->chFlag & CMM_REG_PHY_VALID)
{
offset = memCtl->u.vtop.vaddr - regCb->regInfo.start;
*(memCtl->u.vtop.paddr) = regCb->pAddr + offset;
RETVALUE(ROK);
}
break;
}
case SS_MEM_CHK_RES:
{
#if (ERRCLASS & ERRCLS_INT_PAR)
if(!(memCtl->u.chkres.size) ||
(memCtl->u.chkres.status == NULLP))
{
RETVALUE(RFAILED);
}
#endif
/* Check if the Bucket pool is configured */
if(regCb->bktSize)
{
U16 idx;
CmMmBkt *bkt;
U32 avlSize, totSize;
/*
* The bucket pool is configured. The status value reflects the
* memory available in the bucket that would service a request of
* this size, together with the memory available in the heap pool.
*/
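/*
* Example (hypothetical figures): a bucket of 100 blocks of 128
* bytes with 40 allocated, and a heap with 3200 of 6400 bytes
* available, gives avlSize = 60 * 128 + 3200 = 10880 and
* totSize = 12800 + 6400 = 19200, so status = 108800 / 19200 = 5.
*/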
idx = ((memCtl->u.chkres.size - 1) >> regCb->bktQnPwr);
bkt = &(regCb->bktTbl[regCb->mapTbl[idx].bktIdx]);
avlSize = (bkt->numBlks - bkt->numAlloc) * bkt->size;
avlSize += regCb->heapCb.avlSize;
totSize = (bkt->numBlks * bkt->size) + regCb->heapSize;
*(memCtl->u.chkres.status) = (avlSize * 10)/totSize;
}
else
{
/* Bucket pool not configured */
/*
* Find the fraction of memory available in the heap pool, in tenths
* (0..10). The value does not account for fragmentation of the
* heap pool.
*/
*(memCtl->u.chkres.status) = (regCb->heapCb.avlSize * 10) /
(regCb->heapSize);
}
RETVALUE(ROK);
}
case SS_MEM_SHOW:
{
Txt buf[256];
SDisplay(0, "--------------------- mem show ----------------------");
for(bktIdx = 0; bktIdx < regCb->numBkts; bktIdx++)
{
sprintf(buf, "| Bucket[%6d] total %9d allocated %10d |",
regCb->bktTbl[bktIdx].size,
regCb->bktTbl[bktIdx].numBlks,
regCb->bktTbl[bktIdx].numAlloc);
SDisplay(0, buf);
}
sprintf(buf, "| Heap total %9d free %10d |",
(U32)(regCb->heapCb.vEnd - regCb->heapCb.vStart),
regCb->heapCb.avlSize);
SDisplay(0, buf);
SDisplay(0, "--------------------- mem show ----------------------");
break;
}
default:
{
/* No other event is supported currently */
RETVALUE(RFAILED);
}
}
/* shouldn't reach here */
RETVALUE(RFAILED);
} /* end of cmCtl */
/*
*
* Fun: cmMmBktInit
*
* Desc: Initialize the bucket and the map table.
*
*
* Ret: ROK - successful,
* RFAILED - unsuccessful.
*
* Notes: This function is called by cmMmRegInit.
*
* File: cm_mem.c
*
*/
#ifdef ANSI
PRIVATE Void cmMmBktInit
(
Data **memAddr,
CmMmRegCb *regCb,
CmMmRegCfg *cfg,
U16 bktIdx,
U16 *lstMapIdx
)
#else
PRIVATE Void cmMmBktInit (memAddr, regCb, cfg, bktIdx, lstMapIdx)
Data **memAddr;
CmMmRegCb *regCb;
CmMmRegCfg *cfg;
U16 bktIdx;
U16 *lstMapIdx;
#endif
{
U16 cnt;
U16 idx;
U16 numBlks;
Size size;
Data **next;
TRC2(cmMmBktInit);
size = cfg->bktCfg[bktIdx].size;
numBlks = cfg->bktCfg[bktIdx].numBlks;
/* Reset the next pointer */
regCb->bktTbl[bktIdx].next = NULLP;
/* Initialize the linked list of free memory blocks */
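/*
* Each pass stores the address of the current block in the previous
* block's first word and then advances by 'size' bytes, chaining the
* free blocks head to tail:
*
*    bktTbl[bktIdx].next -> blk0 -> blk1 -> ... -> blkN-1 -> NULLP
*/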
next = &(regCb->bktTbl[bktIdx].next);
for(cnt = 0; cnt < numBlks; cnt++)
{
*next = *memAddr;
next = (CmMmEntry **)(*memAddr);
*memAddr = (*memAddr) + size;
}
*next = NULLP;
/* Initialize the map table entries covered by this bucket */
idx = size / cfg->bktQnSize;
/*
* Check if the size is a multiple of the quantum size. If not, one
* more map table entry is needed to cover the remainder.
*/
if(size % cfg->bktQnSize)
{
idx++;
}
while( *lstMapIdx < idx)
{
regCb->mapTbl[*lstMapIdx].bktIdx = bktIdx;
#if (ERRCLASS & ERRCLS_DEBUG)
regCb->mapTbl[*lstMapIdx].numReq = 0;
regCb->mapTbl[*lstMapIdx].numFailure = 0;
#endif
(*lstMapIdx)++;
}
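/*
* Example (assuming a quantum size of 16 and buckets of 32 and 128
* bytes): the 32 byte bucket fills map entries 0..1 and the 128 byte
* bucket fills entries 2..7, so a request of 33..128 bytes maps to
* the 128 byte bucket.
*/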
/* Initialize the bucket structure */
regCb->bktTbl[bktIdx].size = size;
regCb->bktTbl[bktIdx].numBlks = numBlks;
regCb->bktTbl[bktIdx].numAlloc = 0;
/* Update the total bucket size */
regCb->bktSize += (size * numBlks);
RETVOID;
} /* end of cmMmBktInit */
#ifdef USE_TRSRC_CODE
/*
*
* Fun: cmMmHeapInit
*
* Desc: Initialize the heap pool.
*
*
* Ret: ROK - successful
* RFAILED - unsuccessful.
*
* Notes: This function is called by cmMmRegInit.
*
* File: cm_mem.c
*
*/
#ifdef ANSI
PRIVATE Void cmMmHeapInit
(
Data *memAddr,
CmMmHeapCb *heapCb,
Size size
)
#else
PRIVATE Void cmMmHeapInit (memAddr, heapCb, size)
Data *memAddr;
CmMmHeapCb *heapCb;
Size size;
#endif
{
TRC2(cmMmHeapInit);
/* Initialize the heap control block */
heapCb->vStart = memAddr;
heapCb->vEnd = memAddr + size;
heapCb->avlSize = size;
heapCb->minSize = CMM_MINBUFSIZE;
heapCb->next = (CmHEntry *)memAddr;
heapCb->next->next = NULLP;
heapCb->next->size = size;
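/*
* At this point the free list is a single CmHEntry overlaying the
* start of the heap area and spanning all of it; allocations carve
* pieces off this block and frees link pieces back into the list.
*/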
#if (ERRCLASS & ERRCLS_DEBUG)
heapCb->numFragBlk = 0;
heapCb->numReq = 0;
heapCb->numFailure = 0;
#endif
RETVOID;
} /* end of cmMmHeapInit */
/*
*
* Fun: cmHeapAlloc
*
* Desc: Allocates the memory block from the heap pool.
*
*
* Ret: ROK - successful
* RFAILED - unsuccessful.
*
* Notes: This function is called by cmAlloc when no memory block is
* available in the buckets and the heap pool is configured.
*
*
*
* File: cm_mem.c
*
*/
#ifdef ANSI
PRIVATE S16 cmHeapAlloc
(
CmMmHeapCb *heapCb,
Data **ptr,
Size *size
)
#else
PRIVATE S16 cmHeapAlloc (heapCb, ptr, size)
CmMmHeapCb *heapCb;
Data **ptr;
Size *size;
#endif
{
CmHEntry *prvHBlk; /* Previous heap block */
CmHEntry *curHBlk; /* Current heap block */
Size tmpSize;
TRC2(cmHeapAlloc);
/* Round up the requested size to a multiple of the minimum block size */
*size = CMM_DATALIGN(*size, (heapCb->minSize));
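/*
* Example (assuming CMM_MINBUFSIZE is 32): a request of 40 bytes is
* rounded up to 64, so every size handed out, and later freed, is a
* multiple of the minimum block size.
*/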
/* Check if the available total size is adequate. */
/* Acquire the heap lock */
(Void) SLock (&(heapCb->heapLock));
if((*size) >= heapCb->avlSize)
{
(Void)SUnlock(&(heapCb->heapLock));
RETVALUE(ROUTRES);
}
/*
* Search the heap block list for a block whose size is greater than
* or equal to the requested size.
*/
prvHBlk = (CmHEntry *)&(heapCb->next);
for( curHBlk = prvHBlk->next; curHBlk; prvHBlk = curHBlk, curHBlk = curHBlk->next)
{
/*
* Since the size of each heap block is always a multiple of
* CMM_MINBUFSIZE and the requested size is rounded up to a multiple
* of CMM_MINBUFSIZE, the difference between the heap block size and
* the size to allocate is either zero or a multiple of
* CMM_MINBUFSIZE.
*/
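/*
* Allocating from the tail of the block means the remaining head
* keeps its place in the free list: only its size field changes and
* no list links need to be rewritten.
*/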
if((*size) <= curHBlk->size)
{
if((tmpSize = (curHBlk->size - (*size))))
{
/* Heap block is larger; carve the allocation from its tail */
*ptr = (Data *)curHBlk + tmpSize;
curHBlk->size = tmpSize;
}
else
{
/* Heap block exactly matches the requested size; unlink it */
*ptr = (Data *)curHBlk;
prvHBlk->next = curHBlk->next;
}
heapCb->avlSize -= (*size);
/* Release the lock */
(Void) SUnlock (&(heapCb->heapLock));
RETVALUE(ROK);
}
}
/* Release the lock */
(Void) SUnlock (&(heapCb->heapLock));
RETVALUE(ROUTRES);
} /* end of cmHeapAlloc */
/*
*
* Fun: cmHeapFree
*
* Desc: Return a memory block to the heap pool.
*
*
* Ret: ROK - successful
* RFAILED - unsuccessful.
*
* Notes: This function returns the memory block to the heap pool. This
* function is called by cmFree when the memory block was
* allocated from the heap pool.
*
* File: cm_mem.c
*
*/