📄 vmLib.c
    if (NOT_PAGE_ALIGNED (len))
        {
        errno = S_vmLib_NOT_PAGE_ALIGNED;
        return (ERROR);
        }

    /* take mutual exclusion semaphore to protect translation table */

    semTake (&context->sem, WAIT_FOREVER);

    while (numBytesProcessed < len)
        {
        /* make sure there isn't a conflict with global virtual memory */

        if (ADDR_IN_GLOBAL_SPACE (thisVirtPage))
            {
            errno = S_vmLib_ADDR_IN_GLOBAL_SPACE;
            retVal = ERROR;
            break;
            }

        if (MMU_PAGE_MAP (context->mmuTransTbl, thisVirtPage,
                          thisPhysPage) == ERROR)
            {
            retVal = ERROR;
            break;
            }

        if (vmStateSet (context, thisVirtPage, pageSize,
                        VM_STATE_MASK_VALID | VM_STATE_MASK_WRITABLE |
                        VM_STATE_MASK_CACHEABLE | VM_STATE_MASK_EX_CACHEABLE,
                        VM_STATE_VALID | VM_STATE_WRITABLE |
                        VM_STATE_CACHEABLE | VM_STATE_EX_CACHEABLE) == ERROR)
            {
            retVal = ERROR;
            break;
            }

        thisVirtPage      += pageSize;
        thisPhysPage      += (mmuPhysAddrShift ? 1 : pageSize);
        numBytesProcessed += pageSize;
        }

    semGive (&context->sem);

    return (retVal);
    }

/*******************************************************************************
*
* vmGlobalMap - map physical pages to virtual space in shared global virtual memory (VxVMI Option)
*
* This routine maps physical pages to virtual space that is shared by all
* virtual memory contexts. Calls to vmGlobalMap() should be made before any
* virtual memory contexts are created to ensure that the shared global
* mappings are included in all virtual memory contexts. Mappings created
* with vmGlobalMap() after virtual memory contexts are created are not
* guaranteed to appear in all virtual memory contexts. After the call to
* vmGlobalMap(), the state of all pages in the newly mapped virtual
* memory is unspecified and must be set with a call to vmStateSet(), once
* the initial virtual memory context is created.
*
* This routine should not be called from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: OK, or ERROR if <virtualAddr> or <physicalAddr> are not
* on page boundaries, <len> is not a multiple of the page size,
* or the mapping fails.
*
* ERRNO: S_vmLib_NOT_PAGE_ALIGNED
*/

STATUS vmGlobalMap
    (
    void *virtualAddr,          /* virtual address */
    void *physicalAddr,         /* physical address */
    UINT  len                   /* len of virtual and physical spaces */
    )
    {
    int       pageSize          = vmPageSize;
    char *    thisVirtPage      = (char *) virtualAddr;
    char *    thisPhysPage      = (char *) physicalAddr;
    FAST UINT numBytesProcessed = 0;
    STATUS    retVal            = OK;

    if (!vmLibInfo.vmLibInstalled)
        return (ERROR);

    if (NOT_PAGE_ALIGNED (thisVirtPage))
        {
        errno = S_vmLib_NOT_PAGE_ALIGNED;
        return (ERROR);
        }

    if ((!mmuPhysAddrShift) && (NOT_PAGE_ALIGNED (thisPhysPage)))
        {
        errno = S_vmLib_NOT_PAGE_ALIGNED;
        return (ERROR);
        }

    if (NOT_PAGE_ALIGNED (len))
        {
        errno = S_vmLib_NOT_PAGE_ALIGNED;
        return (ERROR);
        }

    semTake (&globalMemMutex, WAIT_FOREVER);

    while (numBytesProcessed < len)
        {
        if (MMU_GLOBAL_PAGE_MAP (thisVirtPage, thisPhysPage) == ERROR)
            {
            retVal = ERROR;
            break;
            }

        /* mark the block containing the page in the globalPageBlockArray
         * as being global
         */

        globalPageBlockArray[(unsigned) thisVirtPage / mmuPageBlockSize] = TRUE;

        thisVirtPage      += pageSize;
        thisPhysPage      += (mmuPhysAddrShift ? 1 : pageSize);
        numBytesProcessed += pageSize;
        }

    semGive (&globalMemMutex);

    return (retVal);
    }
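Editor's note: the following sketch (not part of the original vmLib.c) illustrates the call order described in the comment above. The IO_* macros, the routine names, and the split between the "before any context exists" and "after the initial context exists" phases are assumptions for illustration; only vmGlobalMap(), vmStateSet(), and the VM_STATE constants come from this file.

#include "vxWorks.h"
#include "vmLib.h"

/* hypothetical example addresses; both must be page-aligned and
 * IO_MAP_LEN must be a multiple of the page size
 */
#define IO_VIRT_ADRS   ((void *) 0xf0000000)
#define IO_PHYS_ADRS   ((void *) 0xf0000000)
#define IO_MAP_LEN     0x10000

/* called during early initialization, before any virtual memory
 * context has been created
 */
STATUS ioGlobalMapInit (void)
    {
    return (vmGlobalMap (IO_VIRT_ADRS, IO_PHYS_ADRS, IO_MAP_LEN));
    }

/* called later, once the initial virtual memory context exists, to give
 * the newly mapped pages a defined state (their state is unspecified
 * after vmGlobalMap() alone)
 */
STATUS ioGlobalStateInit (void)
    {
    return (vmStateSet (NULL, IO_VIRT_ADRS, IO_MAP_LEN,
                        VM_STATE_MASK_VALID | VM_STATE_MASK_WRITABLE,
                        VM_STATE_VALID | VM_STATE_WRITABLE));
    }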
/*******************************************************************************
*
* vmGlobalInfoGet - get global virtual memory information (VxVMI Option)
*
* This routine provides a description of those parts of the virtual memory
* space dedicated to global memory. The routine returns a pointer to an
* array of UINT8. Each element of the array corresponds to a block of
* virtual memory, the size of which is architecture-dependent and can be
* obtained with a call to vmPageBlockSizeGet(). To determine if a
* particular address is in global virtual memory, use the following code:
*
* .CS
*     UINT8 *globalPageBlockArray = vmGlobalInfoGet ();
*     int pageBlockSize = vmPageBlockSizeGet ();
*
*     if (globalPageBlockArray[addr/pageBlockSize])
*         ...
* .CE
*
* The array pointed to by the returned pointer is guaranteed to be static as
* long as no calls are made to vmGlobalMap() while the array is being
* examined. The information in the array can be used to determine what
* portions of the virtual memory space are available for use as private
* virtual memory within a virtual memory context.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: A pointer to an array of UINT8.
*
* SEE ALSO: vmPageBlockSizeGet()
*/

UINT8 *vmGlobalInfoGet (void)
    {
    return (globalPageBlockArray);
    }

/*******************************************************************************
*
* vmPageBlockSizeGet - get the architecture-dependent page block size (VxVMI Option)
*
* This routine returns the size of a page block for the current
* architecture. Each MMU architecture constructs translation tables such
* that a minimum number of pages are pre-defined when a new section of the
* translation table is built. This minimal group of pages is referred to as
* a "page block." This routine may be used in conjunction with
* vmGlobalInfoGet() to examine the layout of global virtual memory.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: The page block size of the current architecture.
*
* SEE ALSO: vmGlobalInfoGet()
*/

int vmPageBlockSizeGet (void)
    {
    return (mmuPageBlockSize);
    }

/*******************************************************************************
*
* vmTranslate - translate a virtual address to a physical address (VxVMI Option)
*
* This routine retrieves mapping information for a virtual address from the
* page translation tables. If the specified virtual address has never been
* mapped, the returned status can be either OK or ERROR; however, if it is
* OK, then the returned physical address will be -1. If <context> is
* specified as NULL, the current context is used.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: OK, or ERROR if the validation or translation fails.
*/

STATUS vmTranslate
    (
    VM_CONTEXT_ID context,      /* context - NULL == currentContext */
    void *virtualAddr,          /* virtual address */
    void **physicalAddr         /* place to put result */
    )
    {
    STATUS retVal;

    if (context == NULL)
        context = currentContext;

    if (OBJ_VERIFY (context, vmContextClassId) != OK)
        return (ERROR);

    retVal = MMU_TRANSLATE (context->mmuTransTbl, virtualAddr, physicalAddr);

    return (retVal);
    }
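Editor's note: a minimal usage sketch for vmTranslate() follows (not part of the original source). The helper name physAddrShow is hypothetical; passing NULL selects the current context, and a returned physical address of -1 with an OK status indicates the page was never mapped, as documented above.

#include "vxWorks.h"
#include "vmLib.h"
#include "stdio.h"

/* hypothetical helper: show the physical address backing a virtual one */
void physAddrShow (void *virtAdrs)
    {
    void *physAdrs;

    if (vmTranslate (NULL, virtAdrs, &physAdrs) == ERROR)
        {
        printf ("translation failed for %p\n", virtAdrs);
        return;
        }

    /* physAdrs == (void *) -1 means the address was never mapped */
    printf ("virtual %p -> physical %p\n", virtAdrs, physAdrs);
    }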
/*******************************************************************************
*
* vmPageSizeGet - return the page size (VxVMI Option)
*
* This routine returns the architecture-dependent page size.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: The page size of the current architecture.
*/

int vmPageSizeGet (void)
    {
    return (vmPageSize);
    }

/*******************************************************************************
*
* vmCurrentGet - get the current virtual memory context (VxVMI Option)
*
* This routine returns the current virtual memory context.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: The current virtual memory context, or
* NULL if no virtual memory context is installed.
*/

VM_CONTEXT_ID vmCurrentGet (void)
    {
    return (currentContext);
    }

/*******************************************************************************
*
* vmCurrentSet - set the current virtual memory context (VxVMI Option)
*
* This routine installs a specified virtual memory context.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: OK, or ERROR if the validation or context switch fails.
*/

STATUS vmCurrentSet
    (
    VM_CONTEXT_ID context       /* context to install */
    )
    {
    if (OBJ_VERIFY (context, vmContextClassId) != OK)
        return (ERROR);

    /* XXX do we need to flush the cpu's cache on a context switch?
     * yes, if the cache operates on virtual addresses (68k does)
     */

    cacheClear (INSTRUCTION_CACHE, 0, ENTIRE_CACHE);
    cacheClear (DATA_CACHE, 0, ENTIRE_CACHE);

    currentContext = context;
    MMU_CURRENT_SET (context->mmuTransTbl);

    return (OK);
    }

/*******************************************************************************
*
* vmEnable - enable or disable virtual memory (VxVMI Option)
*
* This routine turns virtual memory on and off. Memory management should not
* be turned off once it is turned on except in the case of system shutdown.
*
* This routine is callable from interrupt level.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: OK, or ERROR if the validation or architecture-dependent code
* fails.
*/

STATUS vmEnable
    (
    BOOL enable                 /* TRUE == enable MMU, FALSE == disable MMU */
    )
    {
    return (MMU_ENABLE (enable));
    }
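Editor's note: the sketch below (not in the original file) shows the save/switch/restore pattern supported by vmCurrentGet() and vmCurrentSet(). The wrapper name withContext and the assumption that newContext was created elsewhere (for example by another vmLib routine) are illustrative only.

#include "vxWorks.h"
#include "vmLib.h"

/* hypothetical helper: temporarily install another virtual memory
 * context, then restore the previously current one
 */
STATUS withContext (VM_CONTEXT_ID newContext)
    {
    VM_CONTEXT_ID oldContext = vmCurrentGet ();

    if (vmCurrentSet (newContext) == ERROR)
        return (ERROR);

    /* ... access mappings private to newContext here ... */

    return (vmCurrentSet (oldContext));
    }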
/*******************************************************************************
*
* vmTextProtect - write-protect a text segment (VxVMI Option)
*
* This routine write-protects the VxWorks text segment and sets a flag so
* that all text segments loaded by the incremental loader will be
* write-protected. The routine should be called after both vmLibInit() and
* vmGlobalMapInit() have been called.
*
* AVAILABILITY
* This routine is distributed as a component of the unbundled virtual memory
* support option, VxVMI.
*
* RETURNS: OK, or ERROR if the text segment cannot be write-protected.
*
* ERRNO: S_vmLib_TEXT_PROTECTION_UNAVAILABLE
*/

STATUS vmTextProtect (void)
    {
    UINT begin;
    UINT end;

#if (CPU==SH7750 || CPU==SH7729 || CPU==SH7700)
    UINT memBase = (UINT) etext & 0xe0000000;   /* identify logical space */
#endif

    if (!vmLibInfo.vmLibInstalled)
        {
        errno = S_vmLib_TEXT_PROTECTION_UNAVAILABLE;
        return (ERROR);
        }

#if (CPU==SH7750 || CPU==SH7729 || CPU==SH7700)
    begin = (((UINT) sysInit & 0x1fffffff) | memBase) / vmPageSize * vmPageSize;
#else
    begin = (UINT) sysInit / vmPageSize * vmPageSize;
#endif
    end = (UINT) etext / vmPageSize * vmPageSize + vmPageSize;

    vmLibInfo.protectTextSegs = TRUE;

    return (vmStateSet (0, (void *) begin, end - begin,
                        VM_STATE_MASK_WRITABLE, VM_STATE_WRITABLE_NOT));
    }

/*******************************************************************************
*
* vmTextPageProtect - protect or unprotect a page of the text segment
*
* RETURNS: OK, or ERROR if text protection is not enabled or the page state
* cannot be changed.
*/

LOCAL STATUS vmTextPageProtect
    (
    void *textPageAddr,         /* page to change */
    BOOL protect                /* TRUE = write protect, FALSE = write enable */
    )
    {
    UINT newState = (protect ? VM_STATE_WRITABLE_NOT : VM_STATE_WRITABLE);
    int  retVal   = ERROR;

    if (vmLibInfo.protectTextSegs)
        retVal = vmStateSet (NULL, textPageAddr, vmPageSize,
                             VM_STATE_MASK_WRITABLE, newState);

    return (retVal);
    }

#endif /* (CPU_FAMILY != MIPS) && (CPU_FAMILY != PPC) */
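Editor's note: a minimal startup sketch (editor's addition, not from this file) showing where vmTextProtect() fits. The wrapper name textProtectInit and the assumption that the standard initialization sequence has already run vmLibInit() and vmGlobalMapInit() are hypothetical.

#include "vxWorks.h"
#include "vmLib.h"

/* hypothetical init fragment: write-protect the kernel text segment once
 * the virtual memory library and global mappings have been set up
 */
STATUS textProtectInit (void)
    {
    /* vmLibInit() and vmGlobalMapInit() are assumed to have run already */

    if (vmTextProtect () == ERROR)
        {
        /* errno is S_vmLib_TEXT_PROTECTION_UNAVAILABLE if vmLib
         * is not installed
         */
        return (ERROR);
        }

    return (OK);
    }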