📄 mmulib.c
字号:
/*
 * NOTE(review): this chunk begins INSIDE the body of mmuLibInit() -- the
 * function signature, its local declarations (e.g. pL1), and the opening
 * "#if (ARMMMU != ARMMMU_NONE)" lie before the visible portion of the file.
 * Only comments have been added below; the code itself is unchanged.
 */

    /* Publish the state-translation array size and the MMU function table */

    mmuStateTransArraySize =
        sizeof (mmuStateTransArrayLocal) / sizeof (STATE_TRANS_TUPLE);

    mmuLibFuncs = mmuLibFuncsLocal;

#if (!ARM_HAS_MPU)
    mmuPageBlockSize = PAGE_BLOCK_SIZE;
#endif

    /* We only support a 4096 byte page size */

    if (pageSize != PAGE_SIZE)
        {
        errno = S_mmuLib_INVALID_PAGE_SIZE;
        return ERROR;
        }

    /* record whether the MMU is already enabled (M bit of the control reg) */

    mmuEnabled = ((mmuCrGet() & MMUCR_M_ENABLE) != 0);

    mmuPageSize = pageSize;

#if (!ARM_HAS_MPU)
    /*
     * Establish the maximum number of second level page table entries
     * in a "mini-heap". The default can be over ridden by placing
     * a non-zero value in mmuSecondLevelMiniHeap_Max. A value of 1
     * mmuSecondLevelMiniHeap_Max effectively disables the mini-heap.
     */

    if (mmuSecondLevelMiniHeap_Max == 0)
        mmuSecondLevelMiniHeap_Max = ((mmuPageSize * PAGES_PER_MINI_HEAP) /
                                      L2_PTE_SIZE);

    /*
     * Calculate mini-heap size in advance, making sure it is a multiple
     * of mmuPageSize (integer division truncates before the multiply).
     */

    mmuSecondLevelMiniHeap_Size = ((mmuSecondLevelMiniHeap_Max * L2_PTE_SIZE) /
                                   mmuPageSize) * mmuPageSize;

    /*
     * mini-heap must be at least mmuPageSize
     */

    if ( mmuSecondLevelMiniHeap_Size < mmuPageSize )
        mmuSecondLevelMiniHeap_Size = mmuPageSize;

    /*
     * Make sure count and size agree unless not using the mini-heap.
     */

    if ( mmuSecondLevelMiniHeap_Max != 1 )
        mmuSecondLevelMiniHeap_Max = mmuSecondLevelMiniHeap_Size / L2_PTE_SIZE;

    /*
     * Allocate the global page block array to keep track of which parts
     * of virtual memory are handled by the global translation tables.
     * Allocate on page boundary so we can write protect it.
     */

    globalPageBlock = (UINT8 *) memPartAlignedAlloc (mmuPageSource,
                                        NUM_L1_DESCS * sizeof(UINT8),
                                        pageSize);

    if (globalPageBlock == NULL)
        return ERROR;

    /* Set all entries to FALSE, i.e. not global */

    bzero ((char *) globalPageBlock, NUM_L1_DESCS * sizeof(UINT8));

    /*
     * Build a dummy translation table which will hold the PTEs for
     * global memory. All real translation tables will point to this
     * one for controlling the state of the global virtual memory
     */

    /* allocate space to hold the Level 1 Descriptor Table */

    mmuGlobalTransTbl.pLevel1Table = pL1 =
        (LEVEL_1_DESC *) memPartAlignedAlloc (mmuPageSource, L1_TABLE_SIZE,
                                              L1_TABLE_SIZE);

    if (pL1 == NULL)
        return ERROR;

    /*
     * Invalidate all the Level 1 table entries. This will have the effect
     * of setting all Level 1 Descriptors to type Fault.
     */

    bzero ((char *) pL1, NUM_L1_DESCS * sizeof(LEVEL_1_DESC));
#endif /* !ARM_HAS_MPU */

    initialized = TRUE;
#endif /* ARMMMU != ARMMMU_NONE */

    return OK;
    } /* mmuLibInit() */

#if (ARMMMU != ARMMMU_NONE)

#if (!ARM_HAS_MPU)
/*******************************************************************************
*
* mmuPteGet - get the PTE for a given page (ARM)
*
* mmuPteGet traverses a translation table and returns the (virtual) address of
* the PTE for the given virtual address.
*
* RETURNS: OK or ERROR if there is no virtual space for the given address
*/

LOCAL STATUS mmuPteGet
    (
    MMU_TRANS_TBL *     pTransTbl,      /* translation table */
    void *              virtAddr,       /* virtual address */
    PTE **              result          /* result is returned here */
    )
    {
    LEVEL_1_DESC *      pL1;            /* Level 1 descriptor */
    PTE *               pL2;            /* Level 2 descriptor */
    UINT                pteTableIndex;

    /* extract pointer to Level 1 Descriptor from L1 Descriptor table */

    pL1 = &pTransTbl->pLevel1Table [(UINT) virtAddr / PAGE_BLOCK_SIZE];

    /* check virtual address has a physical address */

    if (pL1->fields.type != DESC_TYPE_PAGE)
        return ERROR;

    /* Get Small Page base address from Level 1 Descriptor */

    pL2 = (PTE *) pL1->fields.addr;             /* addr of page table */
    pL2 = (PTE *) ((UINT32)pL2 << L1D_TO_BASE_SHIFT);
    pL2 = _func_armPhysToVirt (pL2);

    /* pL2 now points to (in VA) base of Level 2 Descriptor Table */

    /* create index into level 2 Descriptor (PTE) table */

    pteTableIndex = (((UINT) virtAddr & PTE_INDEX_MASK) >> PTE_INDEX_SHIFT);

    /* get PTE */

    *result = &pL2 [pteTableIndex];

    return OK;
    } /* mmuPteGet() */

#endif  /* (!ARM_HAS_MPU) */
/* (!ARM_HAS_MPU) */  /* NOTE(review): stray token -- this was the trailing
                       * comment of the preceding #endif; its comment opener
                       * was lost when the file was pasted, so it is
                       * re-wrapped as a comment here to keep it inert. */

/*******************************************************************************
*
* mmuTransTblInit - initialize a new translation table (ARM)
*
* Initialize a new translation table. The Level 1 table is copied from the
* global translation mmuGlobalTransTbl, so that we will share the global
* virtual memory with all other translation tables.
*
* RETURNS: OK or ERROR if unable to allocate memory for Level 1 table.
*/

LOCAL STATUS mmuTransTblInit
    (
    MMU_TRANS_TBL * newTransTbl         /* translation table to be inited */
    )
    {
#if (!ARM_HAS_MPU)
    FAST LEVEL_1_DESC * pL1;

    /*
     * Allocate space to hold the Level 1 Descriptor table, which must
     * reside on a 16 kbyte boundary for the ARM and we want it on a page
     * boundary to be able to write-protect it as well.
     */

    newTransTbl->pLevel1Table = pL1 =
        (LEVEL_1_DESC *) memPartAlignedAlloc (mmuPageSource, L1_TABLE_SIZE,
                                              L1_TABLE_SIZE);

    if (pL1 == NULL)
        return ERROR;

    /*
     * Copy the Level 1 descriptor table from mmuGlobalTransTbl,
     * so we get the global virtual memory.
     */

    bcopy ((char *) mmuGlobalTransTbl.pLevel1Table, (char *) pL1,
           L1_TABLE_SIZE);

    /*
     * Flush the data cache. cacheLib should have been initialized by now,
     * but even if it hasn't, it is safe to call cacheLib routines, as they
     * check before calling their function pointers.
     */

    cacheFlush (DATA_CACHE, pL1, L1_TABLE_SIZE);

    /*
     * Write protect virtual memory pointing to the the level 1 table in
     * the global translation table to ensure that it can't be corrupted.
     * Also, mark it as non-cacheable. All page tables are marked as
     * non-cacheable: there is no advantage to the MMU as the TLB
     * tree-walk hardware works direct to memory and not through the
     * cache. There is a disadvantage to us, as every time we modify
     * memory we need to flush the cache. All in all, it is easier not to
     * have the pages marked as cacheable in the first place.
     */

#if (ARMMMU == ARMMMU_XSCALE)
    mmuStateSetMultiple (&mmuGlobalTransTbl, (void *) pL1,
                         MMU_STATE_MASK_WRITABLE | MMU_STATE_MASK_CACHEABLE | \
                         MMU_STATE_MASK_EX_CACHEABLE,
                         MMU_STATE_WRITABLE_NOT | MMU_STATE_CACHEABLE_NOT | \
                         MMU_STATE_EX_CACHEABLE_NOT, L1_DESC_PAGES);
#else
    mmuStateSetMultiple (&mmuGlobalTransTbl, (void *) pL1,
                         MMU_STATE_MASK_WRITABLE | MMU_STATE_MASK_CACHEABLE,
                         MMU_STATE_WRITABLE_NOT | MMU_STATE_CACHEABLE_NOT,
                         L1_DESC_PAGES);
#endif
#else /* (!ARM_HAS_MPU) */

    /* clear out all the region definitions */

    bzero ((char *)newTransTbl, sizeof(MMU_TRANS_TBL));

    /* set the null definitions into the MPU */

    mmuPrrSet (newTransTbl->regs);
    mmuCcrSet (0);
    mmuWbcrSet (0);
    mmuPrSet (0);
#endif /* (!ARM_HAS_MPU) */

    return OK;
    } /* mmuTransTblInit() */

/*******************************************************************************
*
* mmuTransTblCreate - create a new translation table (ARM)
*
* Create an ARM translation table. Allocates space for the MMU_TRANS_TBL
* data structure and calls mmuTransTblInit on that object.
*
* RETURNS: address of new object or NULL if allocation failed,
* or NULL if initialization failed.
*/

LOCAL MMU_TRANS_TBL *mmuTransTblCreate (void)
    {
    MMU_TRANS_TBL * newTransTbl;

    newTransTbl = (MMU_TRANS_TBL *) malloc (sizeof (MMU_TRANS_TBL));

    if (newTransTbl == NULL)
        return NULL;

    /* on init failure, release the descriptor so no memory is leaked */

    if (mmuTransTblInit (newTransTbl) == ERROR)
        {
        free ((char *) newTransTbl);
        return NULL;
        }

    return newTransTbl;
    } /* mmuTransTblCreate() */

/*******************************************************************************
*
* mmuTransTblDelete - delete a translation table (ARM)
*
* mmuTransTblDelete deallocates all the memory used to store the translation
* table entries. It does not deallocate physical pages mapped into the global
* virtual memory space.
*
* This routine is only called from vmLib, not from vmBaseLib.
*
* RETURNS: OK
*/

LOCAL STATUS mmuTransTblDelete
    (
    MMU_TRANS_TBL * transTbl            /* translation table to be deleted */
    )
    {
#if (!ARM_HAS_MPU)
    int                 i;
    LEVEL_1_DESC *      pL1;    /* Level 1 Descriptor */
    PTE *               pPte;   /* page table entry */
    PTE *               tpPte;  /* page table entry */
    UINT32              *bHp;   /* Block Header Detector */

    pL1 = transTbl->pLevel1Table;       /* get Level 1 Descriptor table */

    /* write enable the physical pages containing Level 1 Descriptors */

    mmuStateSetMultiple (&mmuGlobalTransTbl, transTbl->pLevel1Table,
                         MMU_STATE_MASK_WRITABLE, MMU_STATE_WRITABLE,
                         L1_DESC_PAGES);

    /*
     * deallocate only non-global page blocks, deallocate in reverse order
     * to allow "unpacking" of second level page tables
     */

    pL1 += (NUM_L1_DESCS -1);

    for (i = (NUM_L1_DESCS -1); i >= 0; i--, pL1--)
        if ((pL1->fields.type == DESC_TYPE_PAGE) &&
            !((BOOL)globalPageBlock[i]))
            {
            /* get pointer to each Page Table */

            pPte = (PTE *) (pL1->fields.addr << L1D_TO_BASE_SHIFT);
            pPte = _func_armPhysToVirt (pPte);  /* conv to virtual address */

            /*
             * Check if this Pte is on a partially allocated mini-heap
             *
             * NOTE(review): the "||" below looks suspicious -- when
             * mmuSecondLevelMiniHeap is NULL but the index test passes,
             * tpPte is computed from a NULL base.  It is only compared,
             * never dereferenced, so this appears benign, but "&&" may
             * have been intended; confirm against the reference source.
             */

            if ( mmuSecondLevelMiniHeap != NULL ||
                 mmuSecondLevelMiniHeap_Index < mmuSecondLevelMiniHeap_Max )
                {
                tpPte = (PTE *)(mmuSecondLevelMiniHeap +
                                ((mmuSecondLevelMiniHeap_Index -1) *
                                 L2_PTE_SIZE));

                if ( pPte == tpPte )
                    {
                    /* From a partially allocated mini-heap, free it. */
                    mmuSecondLevelMiniHeap_Index--;
                    }
                }
            else
                tpPte = NULL;

            /* set the Page Table writable and cacheable */

#if (ARMMMU == ARMMMU_XSCALE)
            mmuStateSet (&mmuGlobalTransTbl,
                         (PTE *)((int)pPte & ~(mmuPageSize - 1)),
                         MMU_STATE_MASK_WRITABLE | MMU_STATE_MASK_CACHEABLE | \
                         MMU_STATE_MASK_EX_CACHEABLE,
                         MMU_STATE_WRITABLE | MMU_STATE_CACHEABLE | \
                         MMU_STATE_EX_CACHEABLE);
#else
            mmuStateSet (&mmuGlobalTransTbl,
                         (PTE *)((int)pPte & ~(mmuPageSize - 1)),
                         MMU_STATE_MASK_WRITABLE | MMU_STATE_MASK_CACHEABLE,
                         MMU_STATE_WRITABLE | MMU_STATE_CACHEABLE);
#endif

            /*
             * Peek at the allocator block header that precedes pPte to
             * decide whether pPte sits on a mini-heap allocation boundary.
             */

            bHp = (UINT32 *)pPte;

            if ( *(bHp - 1) == (mmuSecondLevelMiniHeap_Size +
                                sizeof(BLOCK_HDR)) )
                {
                /* If on an allocated boundary */

                /* Check if this is the current mini-heap */

                if ( tpPte && tpPte == (PTE *)mmuSecondLevelMiniHeap )
                    {
                    mmuSecondLevelMiniHeap = NULL;
                    mmuSecondLevelMiniHeap_Index = 0;
                    }

                /* Free the Page Table */

                memPartFree (mmuPageSource, (char *)pPte);
                }
            }

    /* free the Level 1 Descriptor table */

    memPartFree (mmuPageSource, (char *)transTbl->pLevel1Table);

    free (transTbl);    /* free the translation table data structure */

    /*
     * NOTE(review): the visible chunk ends here; the remainder of
     * mmuTransTblDelete() (its return statement, closing brace, and the
     * matching #else/#endif for ARM_HAS_MPU) lies beyond this excerpt.
     */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -