kern_objectcache.cxx
         pObj->GetFlags(OFLG_REDIRTY) ? 'y' : 'n',
         pObj->GetFlags(OFLG_IO) ? 'y' : 'n',
         goodSum,
         pObj->GetFlags(OFLG_DISKCAPS) ? 'y' : 'n');
  }

  printf("Total of %d pages, of which %d are free\n", nPages, nFree);
}

void
ObjectCache::ddb_dump_nodes()
{
  uint32_t nFree = 0;
  extern void db_printf(const char *fmt, ...);

  for (uint32_t nd = 0; nd < nNodes; nd++) {
    ObjectHeader *pObj = GetCoreNodeFrame(nd);

    if (pObj->IsFree()) {
      nFree++;
      continue;
    }

    if (pObj->obType > ObType::NtLAST_NODE_TYPE)
      fatal("Node @0x%08x: object type %d is broken\n", pObj, pObj->obType);

#ifdef OPTION_OB_MOD_CHECK
    char goodSum = (pObj->ob.check == pObj->CalcCheck()) ? 'y' : 'n';
#else
    char goodSum = '?';
#endif

    printf("%02d: %s oid 0x%08x%08x up:%c cr:%c ck:%c drt:%c%c io:%c sm:%d dc:%c\n",
           nd, ObType::ddb_name(pObj->obType),
           (uint32_t) (pObj->ob.oid >> 32), (uint32_t) (pObj->ob.oid),
           pObj->IsUserPinned() ? 'y' : 'n',
           pObj->GetFlags(OFLG_CURRENT) ? 'y' : 'n',
           pObj->GetFlags(OFLG_CKPT) ? 'y' : 'n',
           pObj->GetFlags(OFLG_DIRTY) ? 'y' : 'n',
           pObj->GetFlags(OFLG_REDIRTY) ? 'y' : 'n',
           pObj->GetFlags(OFLG_IO) ? 'y' : 'n',
           goodSum,
           pObj->GetFlags(OFLG_DISKCAPS) ? 'y' : 'n');
  }

  printf("Total of %d nodes, of which %d are free\n", nNodes, nFree);
}
#endif

/* Queue for threads that are waiting for available page frames: */
static ThreadPile PageAvailableQueue;

void
ObjectCache::AgeNodeFrames()
{
  static uint32_t curNode = 0;
  uint32_t nStuck = 0;
  uint32_t nPinned = 0;

#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Before AgeNodeFrames()");
#endif

  for (int pass = 0; pass <= Age::PageOut; pass++) {
    nPinned = 0;
    nStuck = 0;

    for (uint32_t count = 0; count < nNodes; count++, curNode++) {
      if (curNode >= nNodes)
        curNode = 0;

      Node *pObj = GetCoreNodeFrame(curNode);

      assert(pObj->age <= Age::PageOut);
      assert(pObj->GetFlags(OFLG_IO) == 0);

      if (pObj->IsUserPinned() || pObj->IsKernelPinned()) {
        nPinned++;
        nStuck++;
        continue;
      }

      if (pObj->IsFree())
        continue;

#ifdef OPTION_DISKLESS
      /* In the diskless kernel, dirty objects are only removed as a
       * result of being destroyed.  Otherwise, they linger pointlessly
       * in a kind of Camus-esque twilight of nonexistence. */
      if (pObj->IsDirty())
        continue;
#endif

      /* DO NOT AGE OUT CONSTITUENTS OF AN ACTIVE CONTEXT
       *
       * If a node has an active context structure, it is not a
       * candidate for ageing.  This pins a (relatively) small number
       * of nodes in memory, but guarantees that the invoker and the
       * invokee in a given invocation will not get aged out.
       * Simultaneously, it guarantees that on invocation of a process
       * capability the process context will not go out due to ageing.
       *
       * The reason this is an issue is because we short-circuit the
       * pin of the constituents in Process::Prepare() by testing for
       * a valid saveArea.
       *
       * In SMP implementations we will need to pin contexts in the
       * same way that we pin nodes, in order to ensure that one
       * processor does not remove a context that is active on
       * another.
       *
       * One unfortunate aspect of this design rule is that it becomes
       * substantially harder to test the ageing mechanism.  This rule
       * has the effect of increasing the minimum number of nodes
       * required to successfully operate the system.  This issue can
       * be eliminated once we start pinning process structures.
       *
       * FIX: This is actually not good, as stale processes can
       * potentially live in the context cache for quite a long
       * time.  At some point, we need to introduce a context cache
       * cleaning mechanism so that the non-persistent kernel will
       * have one.  Hmm.  I suppose that context cache flush can
       * continue to be triggered by the Checkpoint timer even in the
       * non-persistent kernel. */
      if (pObj->obType == ObType::NtProcessRoot
          || pObj->obType == ObType::NtRegAnnex
          || pObj->obType == ObType::NtKeyRegs) {
        if (pObj->context
            && ((pObj->context == Thread::Current()->context)
                || (inv.IsActive() && (inv.invokee == pObj->context)))) {
          nStuck++;
          pObj->age = Age::LiveProc;
        }
      }

      /* THIS ALGORITHM IS NOT THE SAME AS THE PAGE AGEING ALGORITHM!!!
       *
       * While nodes are promptly cleanable (just write them to a log
       * pot and let the page cleaner work on them), there is still no
       * sense tying up log I/O bandwidth writing the ones that are
       * highly active.  We therefore invalidate them, but we don't try
       * to write them until they hit the ageout age. */

      if (pObj->age == Age::Invalidate)
        /* Clean the frame, but do not invalidate products yet,
         * because the object may get resurrected. */
        CleanFrame(pObj, false);

      if (pObj->age < Age::PageOut) {
        pObj->age++;
        continue;
      }

      DEBUG(ckpt)
        dprintf(false, "Ageing out node=0x%08x oty=%d dirty=%c oid=0x%08x%08x\n",
                pObj, pObj->obType, (pObj->IsDirty() ? 'y' : 'n'),
                (uint32_t) (pObj->ob.oid >> 32), (uint32_t) (pObj->ob.oid));

      CleanFrame(pObj);

      /* Make sure that the next process that wants a frame is
       * unlikely to choose the same node frame: */
      curNode++;

      assert(!pObj->IsDirty());

      /* Remove this page from the cache and return it to the free page
       * list: */
      ReleaseFrame(pObj);

#if defined(TESTING_AGEING) && 0
      dprintf(true, "AgeNodeFrame(): Object evicted\n");
#endif

#ifdef DBG_WILD_PTR
      if (dbg_wild_ptr)
        Check::Consistency("After AgeNodeFrames()");
#endif

      return;
    }
  }

  fatal("%d stuck nodes of which %d are pinned\n", nStuck, nPinned);
}
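/* A minimal sketch (not code from this file) of how an allocation
 * path can drive the ager: AgeNodeFrames() frees at most one frame
 * per call (or fatal()s), so a caller such as GrabNodeFrame() would
 * loop until a frame turns up.  HaveFreeNodeFrame() and
 * firstFreeNodeFrame are assumed names, not the actual
 * implementation: */
#if 0
Node *
ObjectCache::GrabNodeFrame()
{
  while (!HaveFreeNodeFrame())	/* assumed free-list check */
    AgeNodeFrames();		/* frees exactly one frame or fatal()s */

  return firstFreeNodeFrame;	/* assumed free-list head */
}
#endif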
#ifdef USES_MAPPING_PAGES
void
ObjectCache::ReleaseMappingFrame(ObjectHeader *pObj)
{
  ObjectHeader *pProducer = pObj->producer;
  assert(pProducer);
  assert(pProducer->kr.IsValid(pProducer));
  assert(pProducer->IsUserPinned() == false);
  assert(pProducer->IsKernelPinned() == false);

  /* Zapping the key ring will help if producer was a page or
   * was of perfect height -- ensures that the PTE in the next
   * higher level of the table gets zapped. */
  pProducer->kr.UnprepareAll();

  if (pProducer->obType == ObType::NtSegment) {
    /* FIX: What follows is perhaps a bit too strong:
     *
     * Unpreparing the producer will have invalidated ALL of its
     * products, including this one.  We should probably just be
     * disassociating THIS product from the producer.
     *
     * While this is overkill, it definitely works... */
    ((Node*)pProducer)->Unprepare(false);
  }

  if (pObj->obType == ObType::PtMappingPage) {
    kva_t pgva = ObjectCache::ObHdrToPage(pObj);

    DEBUG(map) printf("Blasting mapping page at 0x%08x\n", pgva);

#if defined(OPTION_SMALL_SPACES) && 0
    if (pObj->producerNdx == EROS_NODE_LGSIZE)
      printf("Blasting pg dir 0x%08x\n", pgva);
#endif

    PTE::ZapMappingPage(pgva);
  }

  ReleaseFrame(pObj);

  /* This product (and all its siblings) are now on the free
   * list.  The possibility exists, however, that we contrived
   * to invalidate some address associated with the current
   * thread by yanking this mapping table, so we need to do a
   * Yield() here to force the current process to retry: */
  Thread::Current()->Wakeup();
  Thread::Current()->Yield();
}
#endif
ObjectHeader *
ObjectCache::CopyObject(ObjectHeader *pObj)
{
  ObjectHeader *newObj;

  assert(pObj->products == 0);
  assert(pObj->kr.IsEmpty());

  pObj->TransLock();

  assert(pObj->obType != ObType::PtDevicePage);

  if (pObj->obType == ObType::PtDataPage) {
    /* Object is now free of encumbrance, but it wasn't an evictable
     * object, and it may be dirty.  We need to find another location
     * for it. */
    assert(PTE::ObIsNotWritable(pObj));

    newObj = ObjectCache::GrabPageFrame();
    assert(newObj != pObj);

    kva_t fromAddr = ObHdrToPage(pObj);
    kva_t toAddr = ObHdrToPage(newObj);

    bcopy((void *) fromAddr, (void *) toAddr, EROS_PAGE_SIZE);
  }
  else {
    /* It's a node */
    assert(pObj->obType <= ObType::NtLAST_NODE_TYPE);

    Node *oldNode = (Node *) pObj;
    Node *newNode = ObjectCache::GrabNodeFrame();
    newObj = newNode;
    assert(newObj != pObj);

    newNode->callCount = oldNode->callCount;

    for (unsigned i = 0; i < EROS_NODE_SIZE; i++)
      (*newNode)[i].NH_Set((*oldNode)[i]);
  }

  newObj->ob.oid = pObj->ob.oid;
  newObj->ob.allocCount = pObj->ob.allocCount;
  newObj->age = Age::NewBorn;	/* FIX: is this right? */

  /* The copy is now current.  The old object is still the checkpoint
   * version. */
  newObj->SetFlags(OFLG_CURRENT);
  pObj->ClearFlags(OFLG_CURRENT);

  assert(pObj->GetFlags(OFLG_IO) == 0);

  newObj->SetFlags(pObj->GetFlags(OFLG_DISKCAPS));
  newObj->ob.ioCount = 0;
  newObj->obType = pObj->obType;

#ifdef OPTION_OB_MOD_CHECK
  newObj->ob.check = newObj->CalcCheck();
#endif

  assert(newObj->kr.IsEmpty());
  newObj->Intern();

  return newObj;
}

/* Evict the current resident of the node/page frame.  This is called
 * when we need to claim a particular object frame in the object
 * cache.  It is satisfactory to accomplish this by grabbing some
 * other frame and moving the object to it. */
bool
ObjectCache::EvictFrame(ObjectHeader *pObj)
{
  /* This logic probably will not work for nodes, because we might
   * well be zapping the current process.  If you change this, be sure
   * to appropriately conditionalize various checks below. */
  assert(pObj->obType > ObType::NtLAST_NODE_TYPE);

  if (!CleanFrame(pObj)) {
    (void) CopyObject(pObj);

    /* Since we could not write the old frame out, we assume that it
     * is not backed by anything.  In this case, the right thing to do
     * is to simply mark the old one clean, turn off its checkpoint
     * bit if any (it's not writable anyway), and allow ReleaseFrame()
     * to release it. */
    pObj->ClearFlags(OFLG_CKPT | OFLG_DIRTY);
  }

  ReleaseFrame(pObj);
  GrabThisFrame(pObj);

  return true;
}
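/* A hedged usage sketch for EvictFrame() (ClaimPageFrame is a
 * hypothetical caller, not code from this file): a caller that must
 * own one particular physical frame evicts the current tenant, which
 * EvictFrame() either cleans in place or relocates via CopyObject()
 * before re-grabbing the frame: */
#if 0
ObjectHeader *
ClaimPageFrame(uint32_t frameNdx)
{
  ObjectHeader *pObj = ObjectCache::GetCorePageFrame(frameNdx);

  if (!pObj->IsFree())			/* the already-free case (which */
    ObjectCache::EvictFrame(pObj);	/* still needs a grab) is elided */

  return pObj;
}
#endif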
bool
ObjectCache::CleanFrame(ObjectHeader *pObj, bool invalidateProducts)
{
  /* If this object is due to go out and actively involved in I/O,
   * then we are still waiting for the effects of the last call to
   * complete, and we should put the current thread to sleep on this
   * object: */
  if (pObj->GetFlags(OFLG_IO)) {
    Thread::Current()->SleepOn(pObj->ObjectSleepQueue());
    Thread::Current()->Yield();
    assert(false);
  }

  /* Clean up the object we are reclaiming so we can free it: */
  pObj->kr.UnprepareAll();	/* This zaps any PTEs as a side effect. */

  if (invalidateProducts == false)
    return true;

  if (pObj->obType <= ObType::NtLAST_NODE_TYPE)
    ((Node *)pObj)->DoClearThisNode();
  else
    pObj->InvalidateProducts();

  /* Object must be paged out if dirty: */
  if (pObj->IsDirty()) {
    if (IsRemovable(pObj) == false)
      return false;

    /* If the object got rescued, it won't have hit ageout age, so
     * the only way it should still be dirty is if the write has not
     * completed: */
    DEBUG(ckpt)
      dprintf(true, "ty %d oid 0x%08x%08x slq=0x%08x\n",
              pObj->obType,
              (uint32_t) (pObj->ob.oid >> 32), (uint32_t) pObj->ob.oid,
              &pObj->ObjectSleepQueue());

    WriteBack(pObj);
  }

  return true;
}

/* The page frame ager is one of the places where a bunch of sticky
 * issues all collide.  Some design principles that we would LIKE to
 * satisfy:
 *
 * 1. Pages should be aged without regard to cleanliness.  The issue
 *    is that ageing which favors reclaiming clean pages will tend to
 *    reclaim code pages.
 *
 * 2. The CPU should not sit idle when frames are reclaimable.  This
 *    conflicts with principle (1), since any policy that satisfies
 *    (1) implies stalling somewhere.
 *
 * 3. Real-time and non-real-time processes should not have resource
 *    conflicts imposed by the ager.  This means both on the frames
 *    themselves (easy to solve by coloring) and on the I/O Request
 *    structures (which is much harder).
 *
 * I can see no way to satisfy all of these constraints at once.  For
 * that matter, I can't see any solution that always satisfies (1) and
 * (2) simultaneously.  The problem is that cleaning takes time that
 * is many orders of magnitude longer than a context switch.
 *
 * The best solution I have come up with is to try to ameliorate
 * matters by impedance matching.  Under normal circumstances, the
 * ager is the only generator of writes in the system.  During
 * stabilization and migration, any write it may do is more likely to
 * help than hurt.  These plus the journaling logic are ALL of the
 * sources of writes in the system, and we know that object reads and
 * writes can never conflict (if the object is already in core to be
 * written, then we won't be reading it from the disk).
 *
 * This leaves two concerns:
 *
 * 1. Ensuring that there is no contention on the I/O request pool.
 * 2. Ensuring that there is limited contention for disk bandwidth.
 *
 * Since reads and writes do not conflict, (1) can be resolved by
 * splitting the I/O request pools by read/write (not currently
 * implemented).  If this split is implemented, (2) can be
 * accomplished by the simple expedient of restricting the relative
 * I/O request pool sizes.
 *
 * In an attempt to limit the impact of delays due to dirty objects,
 * the ager attempts to write objects long before they are candidates
 * for reclamation (i.e. we run a unified ageing and cleaning policy
 * -- this may want to be revisited in the future, since if the
 * outbound I/O bandwidth is available we might as well use it).
 *
 * The ageing policy proceeds as follows: when ageing is invoked, run
 * the ager until one of the following happens:
 *
 * 1. We find a page to reclaim.
 * 2. We find a dirty page due to be written.
 *
 * If (2) occurs, initiate the write and attempt to find a bounded
 * number (currently 5) of additional writes to initiate. */
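/* A condensed sketch of the policy just described, using this file's
 * own helpers (GetCorePageFrame, ReleaseFrame, WriteBack).  It only
 * illustrates the two stopping conditions and the bounded write
 * batch; it is not the AgePageFrames() implementation that follows: */
#if 0
void
SketchOfPageAgeingPolicy()
{
  uint32_t nWritesLeft = 5;	/* bounded batch of write initiations */

  for (uint32_t f = 0; f < nPages; f++) {
    ObjectHeader *pObj = GetCorePageFrame(f);

    if (pObj->IsFree() || pObj->GetFlags(OFLG_IO))
      continue;

    if (pObj->age < Age::PageOut) {	/* not yet due for reclaim */
      pObj->age++;
      continue;
    }

    if (!pObj->IsDirty()) {
      ReleaseFrame(pObj);	/* outcome (1): reclaim and stop */
      return;			/* (cleaning details elided) */
    }

    WriteBack(pObj);		/* outcome (2): start the write... */
    if (--nWritesLeft == 0)	/* ...and a few more before stopping */
      return;
  }
}
#endif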
void
ObjectCache::AgePageFrames()
{
  static uint32_t curPage = 0;

  uint32_t nStuck = 0;
  uint32_t nPasses = 200;	/* arbitrary - catches kernel bugs and */
				/* dickhead kernel hackers (like me) */

#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Before AgePageFrames()");
#endif

  ObjectHeader *reclaimedObject = 0;

  do {
    for (uint32_t count = 0; count < nPages; count++, curPage++) {
      if (curPage >= nPages)
        curPage = 0;

      ObjectHeader *pObj = GetCorePageFrame(curPage);

      /* Some page types do not get aged: */
      if (pObj->obType == ObType::PtNewAlloc) {
        nStuck++;
        continue;
      }

      if (pObj->obType == ObType::PtDevicePage) {
        nStuck++;
        continue;
      }

      if (pObj->obType == ObType::PtKernelHeap) {
        nStuck++;
        continue;
      }

      if (pObj->IsFree())
        continue;

      /* Some pages cannot be aged because they are active or pinned: */