📄 mem.c
字号:
/****************************************************************************
*
*                   SciTech OS Portability Manager Library
*
*  ========================================================================
*
*    The contents of this file are subject to the SciTech MGL Public
*    License Version 1.0 (the "License"); you may not use this file
*    except in compliance with the License. You may obtain a copy of
*    the License at http://www.scitechsoft.com/mgl-license.txt
*
*    Software distributed under the License is distributed on an
*    "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
*    implied. See the License for the specific language governing
*    rights and limitations under the License.
*
*    The Original Code is Copyright (C) 1991-1998 SciTech Software, Inc.
*
*    The Initial Developer of the Original Code is SciTech Software, Inc.
*    All Rights Reserved.
*
*  ========================================================================
*
* Language:     ANSI C
* Environment:  32-bit Windows NT device drivers.
*
* Description:  Implementation for the NT driver memory management functions
*               for the PM library.
*
****************************************************************************/

#include "pmapi.h"
#include "drvlib/os/os.h"
#include "sdd/sddhelp.h"
#include "mtrr.h"
#include "oshdr.h"

/*--------------------------- Global variables ----------------------------*/

#define MAX_MEMORY_SHARED           100
#define MAX_MEMORY_MAPPINGS         100
#define MAX_MEMORY_LOCKED           100

typedef struct {
    void    *linear;        /* Kernel linear address of the allocation  */
    ulong   length;         /* Length of the allocation in bytes        */
    PMDL    pMdl;           /* MDL used to lock the pages in memory     */
    } memshared;

typedef struct {
    void    *linear;        /* Linear address of the locked region      */
    void    *mmIoMapped;    /* MmMapIoSpace mapping for the region      */
    ulong   length;         /* Length of the locked region in bytes     */
    PMDL    pMdl;           /* MDL used to lock the pages in memory     */
    } memlocked;

typedef struct {
    ulong   physical;       /* Physical base address of the mapping     */
    ulong   linear;         /* Linear address the region is mapped at   */
    ulong   length;         /* Length of the mapping in bytes           */
    ibool   isCached;       /* True if the mapping is cached            */
    } mmapping;

static int          numMappings = 0;
/* BUGFIX: this table is indexed with MAX_MEMORY_SHARED bounds in
 * PM_mallocShared/PM_freeShared, so it must be sized with the same
 * constant (the original used MAX_MEMORY_MAPPINGS, which only works
 * by coincidence because both are currently 100).
 */
static memshared    shared[MAX_MEMORY_SHARED] = {0};
static mmapping     maps[MAX_MEMORY_MAPPINGS];
static memlocked    locked[MAX_MEMORY_LOCKED];

/*----------------------------- Implementation ----------------------------*/

ulong   PMAPI _PM_getPDB(void);

/* Page table entry flags */

#define PAGE_FLAGS_PRESENT          0x00000001
#define PAGE_FLAGS_WRITEABLE        0x00000002
#define PAGE_FLAGS_USER             0x00000004
#define PAGE_FLAGS_WRITE_THROUGH    0x00000008
#define PAGE_FLAGS_CACHE_DISABLE    0x00000010
#define PAGE_FLAGS_ACCESSED         0x00000020
#define PAGE_FLAGS_DIRTY            0x00000040
#define PAGE_FLAGS_4MB              0x00000080

/****************************************************************************
PARAMETERS:
base        - Physical base address of the memory to map in
limit       - Limit of physical memory region to map in (length-1 in bytes)
isCached    - True to map the region with caching enabled

RETURNS:
Linear address of the newly mapped memory, or 0 (NULL) if MmMapIoSpace
fails. NOTE(review): callers differ on the failure sentinel they test for
(0 vs 0xFFFFFFFF) -- a failed mapping here yields 0.

REMARKS:
Maps a physical memory range to a kernel linear memory range using
MmMapIoSpace. The matching unmap must pass the same length (limit+1)
to MmUnmapIoSpace.
****************************************************************************/
static ulong _PM_mapPhysicalToLinear(
    ulong base,
    ulong limit,
    ibool isCached)
{
    ulong               length = limit+1;
    PHYSICAL_ADDRESS    paIoBase = {0};

    /* NT loves large Ints */
    paIoBase = RtlConvertUlongToLargeInteger( base );

    /* Map IO space into Kernel */
    if (isCached)
        return (ULONG)MmMapIoSpace(paIoBase, length, MmCached );
    else
        return (ULONG)MmMapIoSpace(paIoBase, length, MmNonCached );
}

/****************************************************************************
REMARKS:
Adjust the page table caching bits directly. Requires ring 0 access and
only works with DOS4GW and compatible extenders (CauseWay also works since
it has direct support for the ring 0 instructions we need from ring 3). Will
not work in a DOS box, but we call into the ring 0 helper VxD so we should
never get here in a DOS box anyway (assuming the VxD is present).
If wedo get here and we are in windows, this code will be skipped.****************************************************************************/static void _PM_adjustPageTables( ulong linear, ulong limit, ibool isGlobal, ibool isCached){ int startPDB,endPDB,iPDB,startPage,endPage,start,end,iPage; ulong pageTable,*pPDB,*pPageTable; ulong mask = 0xFFFFFFFF; ulong bits = 0x00000000; /* Enable user level access for page table entry */ if (isGlobal) { mask &= ~PAGE_FLAGS_USER; bits |= PAGE_FLAGS_USER; } /* Disable PCD bit if page table entry should be uncached */ if (!isCached) { mask &= ~(PAGE_FLAGS_CACHE_DISABLE | PAGE_FLAGS_WRITE_THROUGH); bits |= (PAGE_FLAGS_CACHE_DISABLE | PAGE_FLAGS_WRITE_THROUGH); } pPDB = (ulong*)_PM_mapPhysicalToLinear(_PM_getPDB(),0xFFF,true); if (pPDB) { startPDB = (linear >> 22) & 0x3FF; startPage = (linear >> 12) & 0x3FF; endPDB = ((linear+limit) >> 22) & 0x3FF; endPage = ((linear+limit) >> 12) & 0x3FF; for (iPDB = startPDB; iPDB <= endPDB; iPDB++) { /* Set the bits in the page directory entry - required as per */ /* Pentium 4 manual. This also takes care of the 4MB page entries */ pPDB[iPDB] = (pPDB[iPDB] & mask) | bits; if (!(pPDB[iPDB] & PAGE_FLAGS_4MB)) { /* If we are dealing with 4KB pages then we need to iterate */ /* through each of the page table entries */ pageTable = pPDB[iPDB] & ~0xFFF; pPageTable = (ulong*)_PM_mapPhysicalToLinear(pageTable,0xFFF,true); start = (iPDB == startPDB) ? startPage : 0; end = (iPDB == endPDB) ? endPage : 0x3FF; for (iPage = start; iPage <= end; iPage++) { pPageTable[iPage] = (pPageTable[iPage] & mask) | bits; } MmUnmapIoSpace(pPageTable,0xFFF); } } MmUnmapIoSpace(pPDB,0xFFF); PM_flushTLB(); }}/****************************************************************************REMARKS:Allocate a block of shared memory. 
For NT we allocate shared memoryas locked, global memory that is accessible from any memory context(including interrupt time context), which allows us to load our importantdata structure and code such that we can access it directly from a ring0 interrupt context.****************************************************************************/void * PMAPI PM_mallocShared( long size){ int i; /* First find a free slot in our shared memory table */ for (i = 0; i < MAX_MEMORY_SHARED; i++) { if (shared[i].linear == 0) break; } if (i == MAX_MEMORY_SHARED) return NULL; /* Allocate the paged pool */ shared[i].linear = ExAllocatePool(PagedPool, size); /* Create a list to manage this allocation */ shared[i].pMdl = IoAllocateMdl(shared[i].linear,size,FALSE,FALSE,(PIRP) NULL); /* Lock this allocation in memory */ MmProbeAndLockPages(shared[i].pMdl,KernelMode,IoModifyAccess); /* Modify bits to grant user access */ _PM_adjustPageTables((ulong)shared[i].linear, size, true, true); return (void*)shared[i].linear;}/****************************************************************************REMARKS:Free a block of shared memory****************************************************************************/void PMAPI PM_freeShared( void *p){ int i; /* Find a shared memory block in our table and free it */ for (i = 0; i < MAX_MEMORY_SHARED; i++) { if (shared[i].linear == p) { /* Unlock what we locked */ MmUnlockPages(shared[i].pMdl); /* Free our MDL */ IoFreeMdl(shared[i].pMdl); /* Free our mem */ ExFreePool(shared[i].linear); /* Flag that is entry is available */ shared[i].linear = 0; break; } }}/****************************************************************************REMARKS:Map a physical address to a linear address in the callers process.****************************************************************************/void * PMAPI PM_mapPhysicalAddr( ulong base, ulong limit, ibool isCached){ ulong linear,length = limit+1; int i; /* Search table of existing mappings to see if we have already 
mapped */ /* a region of memory that will serve this purpose. */ for (i = 0; i < numMappings; i++) { if (maps[i].physical == base && maps[i].length == length && maps[i].isCached == isCached) { _PM_adjustPageTables((ulong)maps[i].linear, maps[i].length, true, isCached); return (void*)maps[i].linear; } } if (numMappings == MAX_MEMORY_MAPPINGS) return NULL; /* We did not find any previously mapped memory region, so maps it in. */ if ((linear = _PM_mapPhysicalToLinear(base,limit,isCached)) == 0xFFFFFFFF)
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -