
📄 ck_checkpoint.cxx

📁 EROS RTOS written in C++
💻 CXX
📖 Page 1 of 3
#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Post retag()");
#endif

  /* Count is *usually* the allocation count, but for processes it is
   * the greater of the allocation count and the call count.
   */
  if (pPagePot->count[fi.tagEntry] < count) {
    pPagePot->count[fi.tagEntry] = count;
    pTagHdr->SetDirtyFlag();
  }

#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Exit UpdateTagPot()");
#endif

  return result;
}

/* The first time a node migrates to a pot that is not currently a node
 * pot, we need to reformat that pot.  It is guaranteed that all nodes
 * within the pot are present in the ckpt generation, except those
 * superceded in the current generation.  We need, however, to make
 * sure that the content of the pot is validly formatted with nodes so
 * that the RetagFrame logic will compute correct allocation counts.
 * Since the ckpt/current nodes are guaranteed to have higher counts,
 * we simply set all alloc/call counts to ZERO here.
 */
void
Checkpoint::InitNodePot(DiskNode *pDiskNode, OID oid)
{
  for (int nd = 0; nd < EROS_NODES_PER_FRAME; nd++, oid++) {
    pDiskNode[nd].oid = oid;
    pDiskNode[nd].allocCount = 0llu;
    pDiskNode[nd].callCount = 0llu;

    for (unsigned int k = 0; k < EROS_NODE_SIZE; k++)
      pDiskNode[nd][k].KS_VoidInitKey();
  }
}

void
Checkpoint::MigrateNode(CoreDirent *cd)
{
#if defined(DBG_WILD_PTR)
  if (dbg_wild_ptr)
    Check::Consistency("Top MigrateNode()");
#endif

  assert (cd->type == FRM_TYPE_NODE || cd->type == FRM_TYPE_ZNODE);

  DEBUG(mignode)
    MsgLog::printf("Migrating node 0x%08x%08x, ty=%d lid=0x%x\n",
                   (uint32_t) (cd->oid >> 32),
                   (uint32_t) (cd->oid),
                   cd->type, cd->lid);

  /* If the node is still in core, it is the checkpoint version,
   * because we will not be asked to migrate it unless it is the
   * latest one.  If so, take it from that version:
   */
  Node *pNode = ObjectHeader::LookupNode(cd->oid);
  if (pNode)
    pNode->TransLock();

#if 0
  /* This test is bogus.  Under extreme pressure, the object may be
   * aged to the disk and reloaded between the time it is stabilized
   * and the time we migrate it.  It need not be modified when
   * reloaded, but the ckpt flag will not be set if that event
   * sequence has occurred.  It's much more likely to happen with
   * nodes than with pages, since node pots age out relatively slowly.
   */
  assert (pNode == 0 || pNode->flags.ckpt);
#endif
  assert (pNode == 0 || pNode->IsDirty() == false);

  ObjectHeader *pPot = GetObjectPot(cd->oid);
  pPot->TransLock();

  assert (pPot->obType == ObType::PtObjectPot);

  uint32_t offset = (uint32_t) (cd->oid - pPot->ob.oid);
  assert( offset < DISK_NODES_PER_PAGE );

  DiskNode *pDiskNode = (DiskNode *) ObjectCache::ObHdrToPage(pPot);

  /* The pot we are writing must be marked dirty, but no log space
   * should be allocated for it, so just do it by hand:
   */
  pPot->SetDirtyFlag();

  if (pNode) {
    /* Call count gets bumped whenever allocation count gets bumped,
     * so just use that:
     */
    assert (pNode->callCount >= pNode->ob.allocCount);

    if ( UpdateTagPot(cd, pNode->callCount) )
      InitNodePot(pDiskNode, pPot->ob.oid);

#if defined(DBG_WILD_PTR)
    if (dbg_wild_ptr)
      Check::Consistency("pNode nonzero, post UpdateTagPot()");
#endif

    pDiskNode[offset] = *pNode;
  }
  else if (cd->type == FRM_TYPE_ZNODE) {
    /* Could use LoadCurrentNode above, but that would force an
     * allocation of a node frame for a node that isn't really active:
     */

    assert(cd->lid == ZERO_LID);

    if ( UpdateTagPot(cd, cd->count) )
      InitNodePot(pDiskNode, pPot->ob.oid);

#if defined(DBG_WILD_PTR)
    if (dbg_wild_ptr)
      Check::Consistency("pNode nonzero, post UpdateTagPot()");
#endif

    pDiskNode[offset].oid = cd->oid;
    pDiskNode[offset].allocCount = cd->count;
    pDiskNode[offset].callCount = cd->count;

    for (uint32_t i = 0; i < EROS_NODE_SIZE; i++) {
      /* not hazarded because disk key */
      pDiskNode[offset][i].KS_VoidInitKey();
    }
  }
  else {
    assert(CONTENT_LID(cd->lid));

    /* Otherwise, we need to bring in the associated log pot if not
     * already present:
     */
    ObjectHeader *pLogPot = Persist::GetCkFrame(cd->lid);
    pLogPot->TransLock();
    assert (pLogPot->IsDirty() == false);

    assert( pLogPot );

    DiskNode *which = (DiskNode*) ObjectCache::ObHdrToPage(pLogPot);
    which += (cd->lid % EROS_OBJECTS_PER_FRAME);

    /* Call count gets bumped whenever allocation count gets bumped,
     * so just use that:
     */
    assert (which->callCount >= which->allocCount);

    if ( UpdateTagPot(cd, which->callCount) )
      InitNodePot(pDiskNode, pPot->ob.oid);

#if defined(DBG_WILD_PTR)
    if (dbg_wild_ptr)
      Check::Consistency("pNode zero, post UpdateTagPot()");
#endif

    pDiskNode[offset] = *which;
  }

#if defined(DBG_WILD_PTR)
  if (dbg_wild_ptr)
    Check::Consistency("After node migration");
#endif
}

void
Checkpoint::MigratePage(CoreDirent *cd)
{
  assert (cd->type == FRM_TYPE_ZDPAGE ||
          cd->type == FRM_TYPE_DPAGE);

  DEBUG(migpage)
    MsgLog::printf("Migrating page 0x%08x%08x, ty=%d lid=0x%x\n",
                   (uint32_t) (cd->oid >> 32),
                   (uint32_t) (cd->oid),
                   cd->type, cd->lid);

  /* If the page is still in core, it MAY still be the checkpoint
   * version.  If so, take it from that version:
   */
  ObjectHeader *pPage = ObjectHeader::Lookup(ObType::PtDataPage, cd->oid);
  pPage->TransLock();

#if 0
  /* This test is bogus.  Under extreme pressure, the object may be
   * aged to the disk and reloaded between the time it is stabilized
   * and the time we migrate it.  It need not be modified when
   * reloaded, but the ckpt flag will not be set if that event
   * sequence has occurred.  It's much more likely to happen with
   * nodes than with pages, since node pots age out relatively slowly.
   * We shouldn't be migrating if it's been dirtied since anyway.
   */
  assert (pPage == 0 || pPage->flags.ckpt);
#endif
  assert (pPage == 0 || pPage->IsDirty() == false);

  UpdateTagPot(cd, cd->count);

  if (cd->type == FRM_TYPE_DPAGE) {
    assert(CONTENT_LID(cd->lid));

    /* If it wasn't in core, we need it now: */
    if (pPage == 0)
      pPage = Persist::GetCkFrame(cd->lid);

    assert(pPage);
    assert (pPage->IsDirty() == false);
    assert ( PTE::ObIsNotWritable(pPage) );

    Persist::WritePageToHome(pPage, cd->oid);
  }
  else {
    assert (cd->type == FRM_TYPE_ZDPAGE);
    assert(cd->lid == ZERO_LID);
  }
}

void
Checkpoint::MigrateObjects()
{
  assert (Thread::Current());

  for ( ; mg_nextDirent != CkNIL;
        mg_nextDirent = mg_nextDirent->Successor() ) {
    if (Thread::Current()->pageWriteCount >= Thread::FairMigrateWrites) {
      Thread::Current()->pageWriteCount = 0;
      return;
    }

    /* If object has been redirtied, no need to migrate it: */
    if (coreGeneration[current].FindObject(mg_nextDirent->oid) != CkNIL)
      continue;

    switch(mg_nextDirent->type) {
    case FRM_TYPE_ZDPAGE:
    case FRM_TYPE_DPAGE:
      MigratePage(mg_nextDirent);
      break;
    case FRM_TYPE_NODE:
    case FRM_TYPE_ZNODE:
      MigrateNode(mg_nextDirent);
      break;
    default:
      MsgLog::fatal("Attempt to migrate unknown type %d\n",
                    mg_nextDirent->type);
    }
  }

  DrainLastObjectPot();

  DrainLastTagPot();

  mg_objectPotOID = 0;
  mg_nextDirent = coreGeneration[last_ckpt].FirstDirEntry();

  migrationStatus = mg_DrainMigration;
}

void
Checkpoint::DrainLastTagPot()
{
  ObjectHeader *pPot = ObjectHeader::Lookup(ObType::PtAllocPot,
                                            mg_tagPotOID);

  if (pPot && (pPot->IsDirty()))
    Persist::WritePage(pPot);
}

void
Checkpoint::DrainMigrationCallback(DuplexedIO *)
{
  DEBUG(mig) MsgLog::dprintf(true, "Migration drainage callback happened\n");

  migrationStatus = mg_UpdateRangeHeaders;
}

void
Checkpoint::DrainMigration()
{
  BlockDev::PlugAllBlockDevices(DrainMigrationCallback);
}

void
Checkpoint::UpdateRangeHeaders()
{
  /* We should be able to simply scribble the last checkpoint header
   * at the front of every range...
   */
  Persist::WriteRangeHeaders(lastCkptObHdr);

  migrationStatus = mg_Idle;
}

void
Checkpoint::DrainCheckpointCallback(DuplexedIO *)
{
  DEBUG(mig) MsgLog::dprintf(true, "Checkpoint drainage callback happened\n");

  migrationStatus = mg_WriteHeader;
}

void
Checkpoint::DrainCheckpoint()
{
  BlockDev::PlugAllBlockDevices(DrainCheckpointCallback);
}

bool
Checkpoint::ProcessMigration()
{
#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Top ProcessMigration()");
#endif

#ifndef NDEBUG
  Checkpoint::CheckConsistency("Top ProcessMigration", false);
#endif

  if (migrationStatus == mg_Idle) {
    return true;
  }

  Console::ShowTwiddleChar(MigStateAbbrev(),1);

  DEBUG(mig)
    MsgLog::dprintf(true, "Checkpoint::ProcessMigration(): enter in state %s\n",
                    MigStateName());

  /* In the current design, we no longer need to wait for I/O to
   * stabilize, because if outbound I/O is in progress on the object
   * then it is on the way to the log, and the attempt to write it
   * again will block while allocating the IoRequest structure.
   */

  if (migrationStatus == mg_StartCheckpoint)
    migrationStatus = mg_StabilizeNodes;

  if (migrationStatus == mg_StabilizeNodes)
    StabilizeNodes();

  if (migrationStatus == mg_StabilizePages)
    StabilizePages();

  if (migrationStatus == mg_WriteDir)
    WriteCkptDirectory();

  if (migrationStatus == mg_DrainCheckpoint)
    DrainCheckpoint();

  if (migrationStatus == mg_WriteHeader)
    WriteCkptHeader();

  if (migrationStatus == mg_StartMigration)
    migrationStatus = mg_MigrateObjects;

#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Before MigrateObjects()");
#endif

  if (migrationStatus == mg_MigrateObjects)
    MigrateObjects();

#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("After MigrateObjects()");
#endif

  if (migrationStatus == mg_DrainMigration)
    DrainMigration();

  if (migrationStatus == mg_UpdateRangeHeaders)
    UpdateRangeHeaders();

  DEBUG(mig) MsgLog::dprintf(true, "Checkpoint::ProcessMigration(): exit in state %s\n",
                  MigStateName());

  DEBUG(migfinish)
    if (migrationStatus == mg_Idle) {
      MsgLog::dprintf(false, "Migration completed\n");
      /* Check::Pages(); */
    }

#ifdef DBG_WILD_PTR
  if (dbg_wild_ptr)
    Check::Consistency("Bottom ProcessMigration()");
#endif

  return false;
}

bool
#ifndef NDEBUG
Checkpoint::CheckConsistency(const char *msg, bool allAllocated)
#else
Checkpoint::CheckConsistency(const char *, bool allAllocated)
#endif
{
  bool ckresult = true;

  IRQ::DISABLE();

#ifndef NDEBUG
  REQUIRE( CoreDirent::CheckConsistency(msg) );
#endif

  if (! coreGeneration[0].CheckConsistency(false) )
    ckresult = false;

  if ( !coreGeneration[1].CheckConsistency(allAllocated) )
    ckresult = false;

  REQUIRE (nAllocatedLogFrame <= nReservedLogFrame);
  REQUIRE (nReservedLogFrame <= nAvailLogFrame);

  uint32_t totRsrv = 0;
  uint32_t totAlloc = 0;
  uint32_t totRelease = 0;

  for (uint32_t i = 0; i < nGeneration; i++) {
    totRsrv += coreGeneration[i].rsrv.nFrames;
    totAlloc += coreGeneration[i].alloc.nFrames;
    totRelease += coreGeneration[i].release.nFrames;
  }

  REQUIRE ( nReservedLogFrame == totRsrv - totRelease);
  REQUIRE ( nAllocatedLogFrame == totAlloc - totRelease);

  REQUIRE (nAllocatedLogFrame + nMasterPage == allocMap.NumAllocated());

  IRQ::ENABLE();

  return ckresult;
}

#ifdef OPTION_DDB
void
Checkpoint::ddb_dump_mig_status()
{
  extern void db_printf(const char *fmt, ...);

  db_printf("Migration state: %s nCheckpointsCompleted: %d\n",
            MigStateName(), nCheckpointsCompleted);
  db_printf("nxtCrPg      %6d nxtCrNd      %6d nxtCrDirent 0x%x\n",
            mg_nextCorePage, mg_nextCoreNode, mg_nextDirent);
  db_printf("nxtThrdDirPg %6d nxtRsrvDirPg %6d\n",
            mg_nextThreadDirPg, mg_nextReserveDirPg);
  db_printf("obPotOid 0x%08x%08x tagPotOid 0x%08x%08x\n",
            (uint32_t) (mg_objectPotOID >> 32),
            (uint32_t) (mg_objectPotOID),
            (uint32_t) (mg_tagPotOID >> 32),
            (uint32_t) (mg_tagPotOID));
}
#endif
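
Checkpoint::ProcessMigration() in the listing above is a re-entrant phase driver: each call runs whatever phase migrationStatus currently names, each phase advances the state itself when it finishes, and the boolean return tells the caller whether migration is idle. The standalone sketch below (not part of ck_checkpoint.cxx; all names are illustrative) shows the same pattern in miniature.

// Minimal sketch of the phase-driver pattern used by ProcessMigration().
// Not EROS code; states and phase bodies are stand-ins.
#include <cstdio>

enum MigState { mg_Idle, mg_StabilizeNodes, mg_WriteDir, mg_MigrateObjects };

static MigState migrationStatus = mg_StabilizeNodes;

// Each phase does its work and then names the next state itself.
static void StabilizeNodes()     { migrationStatus = mg_WriteDir; }
static void WriteCkptDirectory() { migrationStatus = mg_MigrateObjects; }
static void MigrateObjects()     { migrationStatus = mg_Idle; }

// Returns true when there is no migration work left to do.
bool ProcessMigration()
{
  if (migrationStatus == mg_Idle)
    return true;

  // Falling through the state tests lets several phases complete in a
  // single pass when each finishes immediately, mirroring the if-chain
  // in the real routine.
  if (migrationStatus == mg_StabilizeNodes)  StabilizeNodes();
  if (migrationStatus == mg_WriteDir)        WriteCkptDirectory();
  if (migrationStatus == mg_MigrateObjects)  MigrateObjects();

  return false;
}

int main()
{
  while (!ProcessMigration())
    std::puts("migration pass completed, more work pending");
  std::puts("migration idle");
}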
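The fragment at the top of the listing (the tail of UpdateTagPot()) only raises a tag-pot count, and only dirties the pot when the stored value actually changes; for processes the incoming count is the greater of the allocation and call counts. A minimal sketch of that rule, with assumed types and field names:

// Sketch of the tag-pot count-update rule. Not EROS code; the struct,
// entry count, and field names are assumptions for illustration.
#include <algorithm>
#include <cassert>
#include <cstdint>

struct TagPot {
  uint64_t count[64] = {};   // per-entry counts (64 is illustrative)
  bool     dirty     = false;

  // Raise entry 'slot' to at least 'newCount'; dirty the pot only on change.
  void UpdateCount(unsigned slot, uint64_t newCount) {
    if (count[slot] < newCount) {
      count[slot] = newCount;
      dirty = true;
    }
  }
};

int main() {
  TagPot pot;
  uint64_t allocCount = 3, callCount = 5;

  // For process-like objects, use the greater of the two counts.
  pot.UpdateCount(7, std::max(allocCount, callCount));
  assert(pot.count[7] == 5 && pot.dirty);

  // A lower count never rewrites the pot or dirties it again.
  pot.dirty = false;
  pot.UpdateCount(7, 4);
  assert(pot.count[7] == 5 && !pot.dirty);
}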
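CheckConsistency() cross-checks the global log-frame counters against per-generation reserve/allocate/release totals; the arithmetic it REQUIREs reduces to reserved = sum(rsrv) - sum(release) and allocated = sum(alloc) - sum(release). A tiny self-contained illustration with made-up numbers (not EROS code; field names are assumptions):

// Sketch of the log-frame accounting invariant checked by CheckConsistency().
#include <cassert>
#include <cstdint>

struct GenFrames { uint32_t rsrv, alloc, release; };

int main() {
  GenFrames gen[2] = { {10, 8, 2}, {6, 5, 1} };
  uint32_t nReservedLogFrame  = 13;   // (10 + 6) - (2 + 1)
  uint32_t nAllocatedLogFrame = 10;   // (8 + 5)  - (2 + 1)

  uint32_t totRsrv = 0, totAlloc = 0, totRelease = 0;
  for (auto &g : gen) {
    totRsrv    += g.rsrv;
    totAlloc   += g.alloc;
    totRelease += g.release;
  }

  // The same relations the kernel REQUIREs:
  assert(nAllocatedLogFrame <= nReservedLogFrame);
  assert(nReservedLogFrame  == totRsrv  - totRelease);
  assert(nAllocatedLogFrame == totAlloc - totRelease);
}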
