kern_objectcache.cxx
/*
 * Copyright (C) 1998, 1999, Jonathan S. Shapiro.
 *
 * This file is part of the EROS Operating System.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <kerninc/kernel.hxx>
#include <kerninc/Check.hxx>
#include <kerninc/ObjectCache.hxx>
#include <kerninc/Node.hxx>
#include <kerninc/Depend.hxx>
#include <kerninc/Thread.hxx>
#include <kerninc/Invocation.hxx>
#include <disk/LowVolume.hxx>
#include <disk/DiskKey.hxx>
#include <kerninc/BootInfo.h>
#include <kerninc/ObjectSource.hxx>
#ifdef USES_MAPPING_PAGES
#include <arch-kerninc/PTE.hxx>
#endif
#include <kerninc/util.h>
#include <eros/memory.h>
#include <arch-kerninc/KernTune.hxx>
/* #include "Blast.hxx" */
#include <kerninc/PhysMem.hxx>

#define dbg_cachealloc 0x1   /* steps in taking snapshot */
#define dbg_ckpt       0x2   /* migration state machine */
#define dbg_map        0x4   /* migration state machine */
#define dbg_ndalloc    0x8   /* node allocation */
#define dbg_pgalloc    0x10  /* page allocation */
#define dbg_obsrc      0x20  /* addition of object sources */
#define dbg_findfirst  0x40  /* finding first subrange */

/* Following should be an OR of some of the above */
#define dbg_flags   ( 0u )

#define DEBUG(x) if (dbg_##x & dbg_flags)

struct PageInfo {
  uint32_t nPages;
  uint32_t basepa;
  ObjectHeader *firstObHdr;
};

uint32_t ObjectCache::nNodes;
uint32_t ObjectCache::nFreeNodeFrames;
Node *ObjectCache::nodeTable;
Node *ObjectCache::firstFreeNode;

ObjectHeader *ObjectCache::firstFreePage;
uint32_t ObjectCache::nPages;
uint32_t ObjectCache::nFreePageFrames;
uint32_t ObjectCache::nReservedIoPageFrames = 0;
uint32_t ObjectCache::nCommittedIoPageFrames = 0;

ObjectHeader *ObjectCache::coreTable;

/* Now that CachedDomains is a tunable, domain cache and depend
 * entries are allocated well before we get here.  The new logic only
 * needs to worry about allocating nodes, pages, and core table
 * entries.  Nodes and pages are allocated in equal proportions, with
 * one core table entry per page.
 */
void
ObjectCache::Init()
{
  InitObjectSources();

  uint32_t availBytes = PhysMem::AvailBytes(&PhysMem::any);

  DEBUG(cachealloc)
    printf("%d bytes of available storage.\n", availBytes);

  uint32_t allocQuanta =
    sizeof(Node) + EROS_PAGE_SIZE + sizeof(ObjectHeader);

  availBytes = PhysMem::AvailBytes(&PhysMem::any);

  nNodes = availBytes / allocQuanta;

#ifdef TESTING_AGEING
  nNodes = 90;  /* This is one less than logtst requires. */
#endif
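  /* Illustrative sizing arithmetic (the sizes below are assumed for
   * the example, not taken from the real structures): if sizeof(Node)
   * were 256 bytes, sizeof(ObjectHeader) 32 bytes, and EROS_PAGE_SIZE
   * 4096, each allocation quantum would be 4384 bytes, so 64 MiB of
   * available storage would yield roughly 15,000 node frames and,
   * later, about as many page frames, each page frame paired with one
   * core table entry. */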
  nodeTable = ::new (0) Node[nNodes];

  DEBUG(ndalloc)
    printf("Allocated Nodes: 0x%x at 0x%08x\n",
           sizeof(Node[nNodes]), nodeTable);

  /* Drop all the nodes on the free node list: */
  for (uint32_t i = 0; i < nNodes; i++) {
    /* object type is set in constructor... */
    assert (nodeTable[i].obType == ObType::NtFreeFrame);
    nodeTable[i].next = &nodeTable[i+1];
  }

  nodeTable[nNodes - 1].next = 0;
  firstFreeNode = &nodeTable[0];

  nFreeNodeFrames = nNodes;

  Depend_InitKeyDependTable(nNodes);

  DEBUG(cachealloc)
    printf("%d bytes of available storage after key dep tbl alloc.\n",
           availBytes);

  AllocateUserPages();

  DEBUG(cachealloc)
    printf("%d cached domains, %d nodes, %d pages\n",
           KTUNE_NCONTEXT, nNodes, nPages);

  nFreePageFrames = nPages;
}

Node *
ObjectCache::ContainingNode(void *vp)
{
  uint8_t *bp = (uint8_t *) vp;
  uint8_t *nt = (uint8_t *) nodeTable;
  int nuint8_ts = bp - nt;
  Node *nnt = (Node *) nodeTable;

  return &nnt[nuint8_ts/sizeof(Node)];
}

void
ObjectCache::AllocateUserPages()
{
  /* When we get here, we are allocating the last of the core
   * memory.  Take it all.
   *
   * A tacit assumption is made in this code that any space allocated
   * within a given memory region has been allocated from top or
   * bottom.  Any remaining space is assumed to be a single, contiguous
   * hole.  There is an assertion check that should catch cases where
   * this assumption is false, whereupon some poor soul will need to
   * fix it.
   */
  nPages = PhysMem::AvailPages(&PhysMem::pages);

  coreTable = new (0) ObjectHeader[nPages];
  assert(coreTable);

  DEBUG(pgalloc)
    printf("Allocated Page Headers: 0x%x at 0x%08x\n",
           sizeof(ObjectHeader[nPages]), coreTable);

  /* Block the pages by class, allocate them, and recompute nPages.
   * Link all pages onto the appropriate free list:
   */

  /* On the way through this loop, nPages holds the total number
   * of pages in all previous allocations until the very end.
   */
  nPages = 0;

  for (unsigned rgn = 0; rgn < PhysMem::nPmemInfo; rgn++) {
    PmemInfo *pmi = &PhysMem::pmemInfo[rgn];

    if (pmi->type != MI_MEMORY)
      continue;

    PmemConstraint xmc;
    xmc.base = pmi->base;
    xmc.bound = pmi->bound;
    xmc.align = EROS_PAGE_SIZE;

    kpsize_t np = PhysMem::ContiguousPages(&xmc);

    /* See the comment at the top of this function if this assertion
     * fails! */
    assert(np == PhysMem::AvailPages(&xmc));

#ifdef TESTING_AGEING
    np = min (np, 50);
#endif

    pmi->nPages = np;
    pmi->basepa = (uint32_t) PhysMem::Alloc(EROS_PAGE_SIZE * np, &xmc);
    pmi->firstObHdr = &coreTable[nPages];

    /* See the comment at the top of this function if this assertion
     * fails! */
    assert(PhysMem::AvailPages(&xmc) == 0);

    nPages += np;
  }

#if 0
  printf("nPages = %d (0x%x)\n", nPages, nPages);
  halt();
#endif

  /* Populate all of the page address pointers in the core table
   * entries: */
  {
#if 0
    ObjectHeader *pObHdr = coreTable;
#endif

    for (unsigned rgn = 0; rgn < PhysMem::nPmemInfo; rgn++) {
      PmemInfo *pmi = &PhysMem::pmemInfo[rgn];

      if (pmi->type != MI_MEMORY)
        continue;

      kpa_t framePa = pmi->basepa;
      ObjectHeader *pObHdr = pmi->firstObHdr;

      for (uint32_t pg = 0; pg < pmi->nPages; pg++) {
        pObHdr->pageAddr = PTOV(framePa);
        framePa += EROS_PAGE_SIZE;
        pObHdr++;
      }
    }
  }

  /* Link all of the resulting core table entries onto the free page list: */
  for (uint32_t i = 0; i < nPages; i++) {
    coreTable[i].obType = ObType::PtFreeFrame;
    coreTable[i].next = &coreTable[i+1];
  }

  coreTable[nPages - 1].next = 0;
  firstFreePage = &coreTable[0];
}
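/* A minimal consistency-check sketch of the free page list built in
 * AllocateUserPages() above.  This helper is NOT part of the original
 * source: the function name is hypothetical, and it assumes the walker
 * has access to firstFreePage/nPages and that ObjectHeader::next links
 * free frames exactly as set up above.  It is kept under #if 0,
 * matching the other disabled diagnostics in this file. */
#if 0
static void
ddb_check_free_page_list()
{
  uint32_t count = 0;

  for (ObjectHeader *pOb = ObjectCache::firstFreePage; pOb; pOb = pOb->next) {
    /* Every frame on the free list should still be marked free: */
    assert(pOb->obType == ObType::PtFreeFrame);
    count++;
  }

  /* Immediately after AllocateUserPages(), every core table entry
   * should be on the list: */
  assert(count == ObjectCache::nPages);
}
#endif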
void
ObjectCache::AddDevicePages(PmemInfo *pmi)
{
  /* Not all BIOSes report a page-aligned start address for everything. */
  pmi->basepa = (pmi->base & ~EROS_PAGE_MASK);
  pmi->nPages = (pmi->bound - pmi->basepa) / EROS_PAGE_SIZE;

  /* About to call operator new(): */
  pmi->firstObHdr = new ((void *)1) ObjectHeader[pmi->nPages];

  kpa_t framePa = pmi->basepa;
  ObjectHeader *pObHdr = pmi->firstObHdr;

  for (uint32_t pg = 0; pg < pmi->nPages; pg++) {
    pObHdr->pageAddr = PTOV(framePa);
    framePa += EROS_PAGE_SIZE;
    pObHdr++;
  }

  /* Note that these pages do NOT go on the free list! */
}

ObjectHeader*
ObjectCache::OIDtoObHdr(uint32_t /*cdaLo*/, uint16_t /*cdaHi*/)
{
  /* FIX: implement me */
  fatal("OIDtoObHdr unimplemented!\n");

  return 0;
}

ObjectHeader *
ObjectCache::GetCorePageFrame(uint32_t ndx)
{
  for (unsigned rgn = 0; rgn < PhysMem::nPmemInfo; rgn++) {
    PmemInfo *pmi = &PhysMem::pmemInfo[rgn];

    if (pmi->type != MI_MEMORY)
      continue;

    if (ndx < pmi->nPages)
      return &pmi->firstObHdr[ndx];

    ndx -= pmi->nPages;
  }

  return 0;
}

Node *
ObjectCache::GetCoreNodeFrame(uint32_t ndx)
{
  return &nodeTable[ndx];
}

ObjectHeader *
ObjectCache::PhysPageToObHdr(kpa_t pagepa)
{
  ObjectHeader *pHdr = 0;

  for (unsigned rgn = 0; rgn < PhysMem::nPmemInfo; rgn++) {
    PmemInfo *pmi = &PhysMem::pmemInfo[rgn];

    if (pmi->type != MI_MEMORY && pmi->type != MI_DEVICEMEM &&
        pmi->type != MI_BOOTROM)
      continue;

    kva_t startpa = pmi->basepa;
    kva_t endpa = startpa + pmi->nPages * EROS_PAGE_SIZE;

    if (pagepa < startpa || pagepa >= endpa)
      continue;

    assert (pagepa >= pmi->basepa);

    uint32_t pageNo = (pagepa - pmi->basepa) / EROS_PAGE_SIZE;

    pHdr = &pmi->firstObHdr[pageNo];
    break;
  }

  return pHdr;
}

#ifndef NDEBUG
bool
ObjectCache::ValidPagePtr(const ObjectHeader *pObj)
{
  uint32_t wobj = (uint32_t) pObj;

  for (unsigned rgn = 0; rgn < PhysMem::nPmemInfo; rgn++) {
    PmemInfo *pmi = &PhysMem::pmemInfo[rgn];

    if (pmi->type != MI_MEMORY && pmi->type != MI_DEVICEMEM &&
        pmi->type != MI_BOOTROM)
      continue;

    uint32_t wbase = (uint32_t) pmi->firstObHdr;
    uint32_t top = (uint32_t) (pmi->firstObHdr + pmi->nPages);

    if (wobj < wbase)
      continue;
    if (wobj >= top)
      continue;

    uint32_t delta = wobj - wbase;

    if (delta % sizeof(ObjectHeader))
      return false;

    return true;
  }

  return false;
}
#endif

#ifndef NDEBUG
bool
ObjectCache::ValidNodePtr(const Node *pObj)
{
  uint32_t wobj = (uint32_t) pObj;
  uint32_t wbase = (uint32_t) nodeTable;
  uint32_t wtop = (uint32_t) (nodeTable + nNodes);

  if (wobj < wbase)
    return false;
  if (wobj >= wtop)
    return false;

  uint32_t delta = wobj - wbase;

  if (delta % sizeof(Node))
    return false;

  return true;
}
#endif
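/* Worked example of the range/stride test used by ValidNodePtr()
 * above, with purely illustrative numbers: if nodeTable were at
 * 0xC0100000 and sizeof(Node) were 0x230, then a pointer at
 * 0xC0100460 lies in range and 0x460 % 0x230 == 0, so it is accepted,
 * while 0xC0100470 fails the modulus test and is rejected as not
 * pointing at the start of a Node frame. */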
#ifndef NDEBUG
bool
ObjectCache::ValidKeyPtr(const Key *pKey)
{
  uint32_t wobj = (uint32_t) pKey;
  uint32_t wbase = (uint32_t) nodeTable;
  uint32_t wtop = (uint32_t) (nodeTable + nNodes);

  if (inv.IsInvocationKey(pKey))
    return true;

  if ( Process::ValidKeyReg(pKey) )
    return true;

  if ( Thread::ValidThreadKey(pKey) )
    return true;

  if (wobj < wbase)
    return false;
  if (wobj >= wtop)
    return false;

  /* It's in the right range to be a pointer into a node.  See if it's
   * at a valid slot: */
  uint32_t delta = wobj - wbase;

  delta /= sizeof(Node);

  Node *pNode = nodeTable + delta;

  for (uint32_t i = 0; i < EROS_NODE_SIZE; i++) {
    if ( & ((*pNode)[i]) == pKey )
      return true;
  }

  return false;
}
#endif

#ifdef OPTION_DDB
void
ObjectCache::ddb_dump_pinned_objects()
{
  extern void db_printf(const char *fmt, ...);

  uint32_t userPins = 0;

  for (uint32_t nd = 0; nd < nNodes; nd++) {
    Node *pObj = GetCoreNodeFrame(nd);
    if (pObj->IsUserPinned() || pObj->IsKernelPinned()) {
      if (pObj->IsUserPinned())
        userPins++;
      printf("node 0x%08x%08x\n",
             (uint32_t) (pObj->ob.oid >> 32),
             (uint32_t) pObj->ob.oid);
    }
  }

  for (uint32_t pg = 0; pg < nPages; pg++) {
    ObjectHeader *pObj = GetCorePageFrame(pg);
    if (pObj->IsUserPinned() || pObj->IsKernelPinned()) {
      if (pObj->IsUserPinned())
        userPins++;
      printf("page 0x%08x%08x\n",
             (uint32_t) (pObj->ob.oid >> 32),
             (uint32_t) pObj->ob.oid);
    }
  }

#ifdef OLD_PIN
  printf("User pins found: %d official count: %d\n",
         userPins, ObjectHeader::PinnedObjectCount);
#else
  printf("User pins found: %d \n", userPins);
#endif
}

void
ObjectCache::ddb_dump_pages()
{
  uint32_t nFree = 0;

  extern void db_printf(const char *fmt, ...);

  for (uint32_t pg = 0; pg < nPages; pg++) {
    ObjectHeader *pObj = GetCorePageFrame(pg);

    if (pObj->IsFree()) {
      nFree++;
      continue;
    }

    char producerType = 'p';

    if (pObj->obType == ObType::PtMappingPage) {
      if (pObj->producer == 0) {
        producerType = '?';
      }
      else if (pObj->producer->obType <= ObType::NtLAST_NODE_TYPE) {
        producerType = 'n';
      }
      else {
        producerType = 'p';
      }
    }

#ifdef OPTION_OB_MOD_CHECK
    char goodSum = (pObj->ob.check == pObj->CalcCheck()) ? 'y' : 'n';
#else
    char goodSum = '?';
#endif

    printf("%02d: %s oid %c0x%08x%08x up:%c cr:%c ck:%c drt:%c%c io:%c sm:%c dc:%c\n",
           pg,
           ObType::ddb_name(pObj->obType),
           producerType,
           (uint32_t) (pObj->ob.oid >> 32),
           (uint32_t) (pObj->ob.oid),
           pObj->IsUserPinned() ? 'y' : 'n',
           pObj->GetFlags(OFLG_CURRENT) ? 'y' : 'n',
           pObj->GetFlags(OFLG_CKPT) ? 'y' : 'n',
           pObj->GetFlags(OFLG_DIRTY) ? 'y' : 'n',