/* rf_paritylogging.c -- parity-logging RAID layout module (partial file) */
}	/* closes a function whose beginning lies before this chunk -- kept as-is */

/*
 * Shutdown hook: release the pool of region buffers.
 * Logs a per-thread debug line when rf_parityLogDebug is set.
 */
static void rf_ShutdownParityLoggingRegionBufferPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *)arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingRegionBufferPool\n", tid);
	}
	FreeRegionBufferQueue(&raidPtr->regionBufferPool);
}

/*
 * Shutdown hook: release the pool of parity buffers.
 * Note this reuses FreeRegionBufferQueue -- both pools share the
 * same queue structure.
 */
static void rf_ShutdownParityLoggingParityBufferPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *)arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingParityBufferPool\n", tid);
	}
	FreeRegionBufferQueue(&raidPtr->parityBufferPool);
}

/*
 * Shutdown hook: tear down the parity-log disk queue.
 * The work queues (bufHead/bufTail, reintHead/reintTail) must already be
 * empty (asserted below); only the free lists remain to be walked and
 * released.  Each common-log entry owns a mutex that is destroyed before
 * the entry itself is freed.
 */
static void rf_ShutdownParityLoggingDiskQueue(RF_ThreadArg_t arg)
{
	RF_ParityLogData_t *d;
	RF_CommonLogData_t *c;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *)arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingDiskQueue\n", tid);
	}
	/* free disk manager stuff */
	RF_ASSERT(raidPtr->parityLogDiskQueue.bufHead == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.bufTail == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.reintHead == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.reintTail == NULL);
	/* drain the free list of per-log data records */
	while (raidPtr->parityLogDiskQueue.freeDataList) {
		d = raidPtr->parityLogDiskQueue.freeDataList;
		raidPtr->parityLogDiskQueue.freeDataList =
			raidPtr->parityLogDiskQueue.freeDataList->next;
		RF_Free(d, sizeof(RF_ParityLogData_t));
	}
	/* drain the free list of common-log records (mutex destroyed first) */
	while (raidPtr->parityLogDiskQueue.freeCommonList) {
		c = raidPtr->parityLogDiskQueue.freeCommonList;
		rf_mutex_destroy(&c->mutex);
		raidPtr->parityLogDiskQueue.freeCommonList =
			raidPtr->parityLogDiskQueue.freeCommonList->next;
		RF_Free(c, sizeof(RF_CommonLogData_t));
	}
}

/*
 * Shutdown hook: stop parity logging.
 * In the non-simulated build this asks the parity-log disk thread to
 * terminate (set RF_PLOG_TERMINATE under the queue mutex, signal the
 * condition) and then blocks until the thread acknowledges by setting
 * RF_PLOG_SHUTDOWN.  In the SIMULATE build there is no disk thread, so
 * rf_ShutdownLogging() is called directly instead.
 */
static void rf_ShutdownParityLogging(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;
	int status;	/* NOTE(review): declared but never used in this function */

	raidPtr = (RF_Raid_t *)arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLogging\n", tid);
	}
#ifndef SIMULATE
	/* shutdown disk thread */
	/* This has the desirable side-effect of forcing all regions to be
	 * reintegrated.  This is necessary since all parity log maps are
	 * currently held in volatile memory. */
	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	raidPtr->parityLogDiskQueue.threadState |= RF_PLOG_TERMINATE;
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	RF_SIGNAL_COND(raidPtr->parityLogDiskQueue.cond);
	/*
	 * pLogDiskThread will now terminate when queues are cleared;
	 * now wait for it to be done
	 */
	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	while (!(raidPtr->parityLogDiskQueue.threadState & RF_PLOG_SHUTDOWN)) {
		RF_WAIT_COND(raidPtr->parityLogDiskQueue.cond,
			raidPtr->parityLogDiskQueue.mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
#else /* !SIMULATE */
	/* explicitly call shutdown routines which force reintegration */
	rf_ShutdownLogging(raidPtr);
#endif /* !SIMULATE */
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLogging done (thread completed)\n", tid);
	}
}

/* default number of floating reconstruction buffers for this layout */
int rf_GetDefaultNumFloatingReconBuffersParityLogging(RF_Raid_t *raidPtr)
{
	return (20);
}

/* default head-separation limit for this layout */
RF_HeadSepLimit_t rf_GetDefaultHeadSepLimitParityLogging(RF_Raid_t *raidPtr)
{
	return (10);
}

/*
 * Return the region ID for a given RAID address.
 * The address is divided by the per-region parity range; the final
 * region absorbs any remainder (it may be larger than the others),
 * hence the decrement when the quotient lands exactly on
 * rf_numParityRegions.  The asserts verify the address falls inside
 * the chosen region's parity span.
 */
RF_RegionId_t rf_MapRegionIDParityLogging(
	RF_Raid_t *raidPtr,
	RF_SectorNum_t address)
{
	RF_RegionId_t regionID;

/*	regionID = address / (raidPtr->regionParityRange * raidPtr->Layout.numDataCol); */
	regionID = address / raidPtr->regionParityRange;
	if (regionID == rf_numParityRegions) {
		/* last region may be larger than other regions */
		regionID--;
	}
	RF_ASSERT(address >= raidPtr->regionInfo[regionID].parityStartAddr);
	RF_ASSERT(address < raidPtr->regionInfo[regionID].parityStartAddr +
		raidPtr->regionInfo[regionID].numSectorsParity);
	RF_ASSERT(regionID < rf_numParityRegions);
	return (regionID);
}

/*
 * Given a logical RAID sector, determine the physical disk address of
 * the data.  Row is always 0 (single-row layout here); the column is
 * the stripe-unit index modulo the number of data columns, and the
 * sector is rebuilt from the per-column stripe-unit number plus the
 * offset within the stripe unit.  'remap' is accepted for interface
 * compatibility but unused.
 */
void rf_MapSectorParityLogging(
	RF_Raid_t *raidPtr,
	RF_RaidAddr_t raidSector,
	RF_RowCol_t *row,
	RF_RowCol_t *col,
	RF_SectorNum_t *diskSector,
	int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*row = 0;
	/* *col = (SUID % (raidPtr->numCol - raidPtr->Layout.numParityLogCol)); */
	*col = SUID % raidPtr->Layout.numDataCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) *
		raidPtr->Layout.sectorsPerStripeUnit +
		(raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}

/*
 * Given a logical RAID sector, determine the physical disk address of
 * the parity.  Parity lives on a dedicated column (index numDataCol)
 * rather than rotating -- see the commented-out rotated-parity formula.
 * 'remap' is accepted for interface compatibility but unused.
 */
void rf_MapParityParityLogging(
	RF_Raid_t *raidPtr,
	RF_RaidAddr_t raidSector,
	RF_RowCol_t *row,
	RF_RowCol_t *col,
	RF_SectorNum_t *diskSector,
	int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*row = 0;
	/* *col = raidPtr->Layout.numDataCol-(SUID/raidPtr->Layout.numDataCol)%(raidPtr->numCol - raidPtr->Layout.numParityLogCol); */
	*col = raidPtr->Layout.numDataCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) *
		raidPtr->Layout.sectorsPerStripeUnit +
		(raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}

/*
 * Given a region ID and a sector offset within that region, determine
 * the physical disk address of the parity log.  The log lives on the
 * last column (numCol - 1).
 */
void rf_MapLogParityLogging(
	RF_Raid_t *raidPtr,
	RF_RegionId_t regionID,
	RF_SectorNum_t regionOffset,
	RF_RowCol_t *row,
	RF_RowCol_t *col,
	RF_SectorNum_t *startSector)
{
	*row = 0;
	*col = raidPtr->numCol - 1;
	*startSector = raidPtr->regionInfo[regionID].regionStartAddr + regionOffset;
}

/*
 * Given a region ID, determine the physical disk address and length of
 * the logged parity for that region.  Region parity lives on the
 * next-to-last column (numCol - 2).
 */
void rf_MapRegionParity(
	RF_Raid_t *raidPtr,
	RF_RegionId_t regionID,
	RF_RowCol_t *row,
	RF_RowCol_t *col,
	RF_SectorNum_t *startSector,
	RF_SectorCount_t *numSector)
{
	*row = 0;
	*col = raidPtr->numCol - 2;
	*startSector = raidPtr->regionInfo[regionID].parityStartAddr;
	*numSector = raidPtr->regionInfo[regionID].numSectorsParity;
}

/*
 * Given a logical RAID address, return the list of disks participating
 * in its stripe, via the layout-specific stripe-identifier table
 * (indexed by stripe ID modulo the column count).
 */
void rf_IdentifyStripeParityLogging(
	RF_Raid_t *raidPtr,
	RF_RaidAddr_t addr,
	RF_RowCol_t **diskids,
	RF_RowCol_t *outRow)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_ParityLoggingConfigInfo_t *info = (RF_ParityLoggingConfigInfo_t *)
		raidPtr->Layout.layoutSpecificInfo;

	*outRow = 0;
	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}

/*
 * Stripe ID -> parity stripe ID mapping: the identity for this layout
 * (one reconstruction unit per parity stripe).
 */
void rf_MapSIDToPSIDParityLogging(
	RF_RaidLayout_t *layoutPtr,
	RF_StripeNum_t stripeID,
	RF_StripeNum_t *psID,
	RF_ReconUnitNum_t *which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}

/* select an algorithm for performing an access. Returns two pointers,
 * one to a function that will return information about the DAG, and
 * another to a function that will create the dag.
 *
 * Flow:
 *  - more than one failed disk in the group: abort (createFunc = NULL);
 *  - exactly one failure: if the failed unit has already been
 *    reconstructed (reconfigured array, or in-progress reconstruction
 *    that has passed this RU), redirect the access to the spare --
 *    either distributed spare space (re-run the Map routines with
 *    RF_REMAP) or a dedicated spare disk -- and clear the failure
 *    counts so the access proceeds as fault-free;
 *  - then pick the DAG-creation function based on I/O type and the
 *    (possibly cleared) failure counts.
 */
void rf_ParityLoggingDagSelect(
	RF_Raid_t *raidPtr,
	RF_IoType_t type,
	RF_AccessStripeMap_t *asmp,
	RF_VoidFuncPtr *createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA;
	RF_RowCol_t frow, fcol;
	RF_RowStatus_t rstat;
	int prior_recon;
	int tid;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if (asmp->numDataFailed + asmp->numParityFailed > 1) {
		RF_ERRORMSG("Multiple disks failed in a single group! Aborting I/O operation.\n");
		/* *infoFunc = */ *createFunc = NULL;
		return;
	} else if (asmp->numDataFailed + asmp->numParityFailed == 1) {
		/* if under recon & already reconstructed, redirect the access
		 * to the spare drive and eliminate the failure indication */
		failedPDA = asmp->failedPDA;
		frow = failedPDA->row;
		fcol = failedPDA->col;
		rstat = raidPtr->status[failedPDA->row];
		prior_recon = (rstat == rf_rs_reconfigured) || (
			(rstat == rf_rs_reconstructing) ?
			rf_CheckRUReconstructed(raidPtr->reconControl[frow]->reconMap,
				failedPDA->startSector) : 0
			);
		if (prior_recon) {
			/* remember the original location for the debug printf below */
			RF_RowCol_t or = failedPDA->row, oc = failedPDA->col;
			RF_SectorNum_t oo = failedPDA->startSector;

			if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
				/* redirect to dist spare space */
				if (failedPDA == asmp->parityInfo) {
					/* parity has failed */
					(layoutPtr->map->MapParity)(raidPtr,
						failedPDA->raidAddress,
						&failedPDA->row, &failedPDA->col,
						&failedPDA->startSector, RF_REMAP);
					if (asmp->parityInfo->next) {
						/* redir 2nd component, if any */
						RF_PhysDiskAddr_t *p = asmp->parityInfo->next;
						RF_SectorNum_t SUoffs = p->startSector %
							layoutPtr->sectorsPerStripeUnit;

						p->row = failedPDA->row;
						p->col = failedPDA->col;
						p->startSector =
							rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr,
								failedPDA->startSector) + SUoffs;
						/* cheating: startSector is not really a RAID address */
					}
				} else if (asmp->parityInfo->next &&
					failedPDA == asmp->parityInfo->next) {
					RF_ASSERT(0); /* should not ever happen */
				} else {
					/* data has failed */
					(layoutPtr->map->MapSector)(raidPtr,
						failedPDA->raidAddress,
						&failedPDA->row, &failedPDA->col,
						&failedPDA->startSector, RF_REMAP);
				}
			} else {
				/* redirect to dedicated spare space */
				failedPDA->row = raidPtr->Disks[frow][fcol].spareRow;
				failedPDA->col = raidPtr->Disks[frow][fcol].spareCol;
				/* the parity may have two distinct components, both
				 * of which may need to be redirected */
				if (asmp->parityInfo->next) {
					if (failedPDA == asmp->parityInfo) {
						failedPDA->next->row = failedPDA->row;
						failedPDA->next->col = failedPDA->col;
					} else if (failedPDA == asmp->parityInfo->next) {
						/* paranoid: should never occur */
						asmp->parityInfo->row = failedPDA->row;
						asmp->parityInfo->col = failedPDA->col;
					}
				}
			}
			RF_ASSERT(failedPDA->col != -1);
			if (rf_dagDebug || rf_mapDebug) {
				rf_get_threadid(tid);
				printf("[%d] Redirected type '%c' r %d c %d o %ld -> r %d c %d o %ld\n",
					tid, type, or, oc, oo,
					failedPDA->row, failedPDA->col,
					failedPDA->startSector);
			}
			/* access now targets a good (spare) location */
			asmp->numDataFailed = asmp->numParityFailed = 0;
		}
	}
	if (type == RF_IO_TYPE_READ) {
		if (asmp->numDataFailed == 0)
			*createFunc = rf_CreateFaultFreeReadDAG;
		else
			*createFunc = rf_CreateRaidFiveDegradedReadDAG;
	} else {
		/* if mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write.
		 * The first conditional (numStripeUnitsAccessed <=
		 * numDataCol/2) uses a less-than-or-equal rather than just a
		 * less-than because when G is 3 or 4, numDataCol/2 is 1, and
		 * I want single-stripe-unit updates to use just one disk. */
		if ((asmp->numDataFailed + asmp->numParityFailed) == 0) {
			if (((asmp->numStripeUnitsAccessed <=
				(layoutPtr->numDataCol / 2)) &&
				(layoutPtr->numDataCol != 1)) ||
				(asmp->parityInfo->next != NULL) ||
				rf_CheckStripeForFailures(raidPtr, asmp)) {
				*createFunc = rf_CreateParityLoggingSmallWriteDAG;
			} else
				*createFunc = rf_CreateParityLoggingLargeWriteDAG;
		} else if (asmp->numParityFailed == 1)
			*createFunc = (void (*)()) rf_CreateNonRedundantWriteDAG;
		else if (asmp->numStripeUnitsAccessed != 1 &&
			failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
			*createFunc = NULL;
		else
			*createFunc = rf_CreateDegradedWriteDAG;
	}
}
#endif /* RF_INCLUDE_PARITYLOGGING > 0 */