📄 dcachecbio.c
            pData = pDc->dc_BigBufPtr ;
            pContig = pTmp ;

            for (ix = 0 ; ix < burstCount ; ix++)
                {
                bcopy (pContig->data, pData, dev->params.cbio_bytesPerBlk);
                pData += dev->params.cbio_bytesPerBlk;
                pContig = (DCACHE_DESC *) DLL_NEXT( pContig );
                }

            ret = pDc->dc_subDev->pFuncs->cbio_blkRW(
                    pDc->dc_subDev, pTmp->block, burstCount,
                    pDc->dc_BigBufPtr, CBIO_WRITE, NULL );

            pDc->dc_lastAccBlock = pTmp->block + burstCount ;
            } /* else - burst write */

        /* write error processing */

        if (ret == ERROR)
            {
            retstat |= dcacheErrorHandler( dev, pTmp, CBIO_WRITE );

            /*
             * NOTE - returning here won't get all blocks back on the
             * list, so we continue as usual, returning error at end
             * of function.
             * we start invalidating from this point on, because cache
             * coherency is questionable after a write error
             */

            doInvalidate = TRUE ;
            }

        for (ix = 0; ix < burstCount ; ix++)
            {
            pDc->dc_dirtyCount -- ;

            if (doInvalidate)
                {
                dcacheHashRemove (dev, pTmp);
                }
            else
                {
                pTmp->state = CB_STATE_CLEAN;
                }

            pContig = (DCACHE_DESC *) DLL_NEXT( pTmp );

            /* Makes this block LRU now */

            dllRemove( pList, &pTmp->lruList );
            dllAdd( & pDc->dc_LRU, &pTmp->lruList );

            pTmp = pContig ;
            }
        } /* for */

    pDc->dc_actTick = tickGet() ;

    return( retstat );
    }

/*******************************************************************************
*
* dcacheManyFlushInval - Flush and/or Invalidate many blocks at once
*
* Walk through the entire block list, and perform the requested action
* for each of the blocks that fall within the specified range.
* All of these operations are done without releasing the mutex, so as to avoid
* anyone filling or modifying any of the blocks or otherwise rearranging the
* LRU list while it is being traversed.
*
*/
LOCAL STATUS dcacheManyFlushInval
    (
    CBIO_DEV_ID dev,            /* device handle */
    block_t     start_block,    /* range start - inclusive */
    block_t     end_block,      /* range end - inclusive */
    BOOL        doFlush,        /* TRUE : flush DIRTY blocks */
    BOOL        doInvalidate,   /* TRUE : Invalidate blocks */
    u_long *    pWriteCounter   /* where to count # of writes */
    )
    {
    DCACHE_DESC * pDesc, * pNext ;
    STATUS stat = OK ;
    DL_LIST flushList ;
    u_long writeCounter = 0 ;

    /* init the list in which we store all blocks to be flushed */

    dllInit( & flushList );

    /* entering this internal function, we should already own the mutex */

    assert( dev->cbio_mutex.semOwner == (void *) taskIdSelf() );

    /* start with Least Recently Used block, from tail of list */

    pDesc = (DCACHE_DESC *) DLL_LAST( & dev->pDc->dc_LRU );

    /* walk through the list towards the top */

    while ((pDesc != NULL) && (stat == OK))
        {
        /* next, please ... */

        pNext = (DCACHE_DESC *) DLL_PREVIOUS(pDesc);

        if ((pDesc->block < start_block) || (pDesc->block > end_block))
            goto next;

        switch (pDesc->state)
            {
            case CB_STATE_EMPTY:        /* no valid block is assigned */
            case CB_STATE_UNSTABLE:
                break;                  /* ditto */

            case CB_STATE_DIRTY:        /* contains a valid block, modified in memory */
                if (doFlush)
                    {
                    /* remove from LRU list, add to flush batch */

                    dllRemove( & dev->pDc->dc_LRU, &pDesc->lruList );
                    dcacheListAddSort( &flushList, pDesc );
                    writeCounter ++ ;

                    goto next ; /* because of batch flush, these are invalidated later */
                    }
                /* FALLTHROUGH */

            case CB_STATE_CLEAN:        /* contains a valid block, unmodified */
                if (doInvalidate)
                    {
                    dcacheHashRemove (dev, pDesc);
                    }
                break ;

            default:
                assert (pDesc->state == CB_STATE_DIRTY);
                break ;
            }
next:
        pDesc = pNext ;
        }

    if (! DLL_EMPTY( &flushList ))
        {
        stat = dcacheFlushBatch( dev, &flushList, doInvalidate ) ;

        if (pWriteCounter != NULL)
            *pWriteCounter += writeCounter ;
        }

    return (stat);
    }

/*******************************************************************************
*
* dcacheBlockAllocate - allocate a cache block with disk data
*
* Allocate a cache block to be associated with a certain disk block.
* At this point we assume the block IS NOT already on the list.
*
* NOTE: The cbio_mutex must already be taken when entering this function.
*/
LOCAL DCACHE_DESC * dcacheBlockAllocate (CBIO_DEV_ID dev, block_t block)
    {
    FAST DCACHE_DESC *  pDesc ;
    FAST DCACHE_DESC ** pHashSlot ;
    FAST struct dcache_ctrl * pDc = dev->pDc ;
    int retry = 10 ;

    /* entering this internal function, we should already own the mutex */

    assert( dev->cbio_mutex.semOwner == (void *) taskIdSelf() );
    assert( dev == pDc->cbio_dev );

again:
    /* start with Least Recently Used block, from tail of list */

    pDesc = (DCACHE_DESC *) DLL_LAST( & pDc->dc_LRU );

    if (retry -- <= 0)
        {
        errno = EAGAIN;
        return( NULL );
        }

    /* never reuse blocks which are Dirty or Unstable */

    while ((pDesc->state == CB_STATE_DIRTY) ||
           (pDesc->state == CB_STATE_UNSTABLE))
        {
        pDesc = (DCACHE_DESC *) DLL_PREVIOUS(pDesc);

        if (pDesc == NULL)
            {
            break;
            }
        }

    if (pDesc == NULL)
        {
        STATUS stat ;

        /* every block is dirty or unstable - flush them all and retry */

        stat = dcacheManyFlushInval( dev, 0, NONE, TRUE, FALSE,
                                     & pDc->dc_writesForeground );
        if (stat == ERROR)
            return (NULL);

        goto again;
        }

    assert( (pDesc->state == CB_STATE_EMPTY) ||
            (pDesc->state == CB_STATE_CLEAN));

    /* in case this descriptor held some valid block, remove it from the hash */

    if (pDesc->state != CB_STATE_EMPTY)
        dcacheHashRemove (dev, pDesc);

    /* mark block fields */

    pDesc->state = CB_STATE_UNSTABLE ;
    pDesc->block = block ;

    assert( dev == pDc->cbio_dev );

    if (pDc->dc_hashSize > 0)
        {
        /* insert the block into the hash table too */

        pHashSlot = &(pDc->dc_hashBase [ block % pDc->dc_hashSize ]);

        if (*pHashSlot != NULL)
            assert( (block % pDc->dc_hashSize) ==
                    ((*pHashSlot)->block % pDc->dc_hashSize));

        pDesc->hashNext = *pHashSlot ;
        *pHashSlot = pDesc ;
        pDesc->busy = 1;
        }

    return (pDesc);
    }

/*******************************************************************************
*
* dcacheBlockFill - fill a cache block with disk data
*
* Reading can be costly if done a block at a time, hence
* the use of the BigBuf to read ahead as many blocks as configured
* fit into BigBuf, and spread them into cache buffers.
*/
LOCAL STATUS dcacheBlockFill (CBIO_DEV_ID dev, DCACHE_DESC * pDesc)
    {
    FAST struct dcache_ctrl * pDc = dev->pDc ;
    STATUS stat = OK;
    int off ;
    int numBlks ;
    block_t block ;
    block_t startBlock ;

    if (dev->params.cbio_removable &&
        (pDesc->block != DCACHE_BOOT_BLOCK_NUM) &&
        (pDc->dc_actTick < tickGet() - sysClkRateGet() * DCACHE_IDLE_SECS))
        {
        stat = dcacheChangeDetect( dev, FALSE );

        if (stat == ERROR)
            return (stat);
        }

    /* if read ahead is possible ... */

    if ((pDc->dc_BigBufSize > 1) && (pDc->dc_readAhead > 1))
        {
        block = startBlock = pDesc->block ;

        /* actually this is the read-ahead size we are dealing with */

        numBlks = min( pDc->dc_BigBufSize, pDc->dc_readAhead );

        /* and don't use up all the clean blocks we have right now */

        numBlks = min( (u_long) numBlks,
                       (u_long)(pDc->dc_numCacheBlocks - pDc->dc_dirtyCount)/2 );

        /* and don't read past the end of the device */

        numBlks = min( (u_long)numBlks,
                       (u_long)dev->params.cbio_nBlocks - block );

        /* BEGIN - Hidden write handling */

        /* make sure the jump is Forward and Significantly large */

        if (pDc->dc_lastAccBlock < (pDesc->block - pDc->dc_cylinderSize))
            {
            stat = dcacheManyFlushInval( dev, pDc->dc_lastAccBlock,
                                         startBlock + numBlks, TRUE, FALSE,
                                         & pDc->dc_writesHidden );
            }

        if (stat == ERROR)              /* write errors can't be ignored */
            return( stat );

        /* END - Hidden write handling */

        stat = pDc->dc_subDev->pFuncs->cbio_blkRW(
                pDc->dc_subDev, startBlock, numBlks,
                pDc->dc_BigBufPtr, CBIO_READ, NULL );

        if (stat == ERROR)
            goto read_single ;

        pDc->dc_lastAccBlock = startBlock + numBlks;

        /* dump each prefetched block into its own block buffer */

        do
            {
            /* calculate how far into the BigBuf we need to go */

            off = (pDesc->block - startBlock) * dev->params.cbio_bytesPerBlk ;

            /* perform the sweep: move from bigBuf to normal blocks */

            pDesc->state = CB_STATE_UNSTABLE ;

            bcopy( pDc->dc_BigBufPtr + off, pDesc->data,
                   dev->params.cbio_bytesPerBlk );

            /* make this block MRU */

            dllRemove( & pDc->dc_LRU, &pDesc->lruList );
            dllInsert( & pDc->dc_LRU, NULL, &pDesc->lruList );

            pDesc->state = CB_STATE_CLEAN ;

            /* another one is done */

            numBlks -- ;
            block ++ ;
            pDesc = NULL ;

            /* get more blocks to hold the read-ahead data */

            if (numBlks > 0)
                {
                /* locate block in LRU list, although it shouldn't be there */

                if (dcacheBlockLocate (dev, block) != NULL)
                    {
                    break;      /* if found, terminate read-ahead */
                    }

                /* now really, allocate one */

                pDesc = dcacheBlockAllocate (dev, block);

                /* no clean blocks left, stop here */

                if (pDesc == NULL)
                    {
                    break ;     /* from while loop */
                    }
                } /* if numBlks > 0 */
            } while (numBlks > 0);

        /* return here if read ahead was successful */

        pDc->dc_actTick = tickGet() ;

        return (OK);
        } /* if read ahead is possible ... */

    /* if read-ahead was not possible, read a single block */

read_single:
    {
    /* get the actual data from disk */

    stat = pDc->dc_subDev->pFuncs->cbio_blkRW(
            pDc->dc_subDev, pDesc->block, 1,
            pDesc->data, CBIO_READ, NULL );

    if (stat == ERROR)
        {
        stat = dcacheErrorHandler( dev, pDesc, CBIO_READ );
        }
    else
        {
        pDesc->state = CB_STATE_CLEAN ;
        }

    pDc->dc_lastAccBlock = pDesc->block;
    pDc->dc_actTick = tickGet() ;

    return (stat);
    }

    pDc->dc_actTick = tickGet() ;

    return (OK);
    }

/*******************************************************************************
*
* dcacheBlkBypassRW - read or write operation bypassing the disk cache
*
* Just call the underlying block driver Read or Write function.
* This should be called when we own the mutex, to avoid conflict
* in case some other task messes with the same blocks we use,
* and to maintain predictable I/O performance.
*/
LOCAL STATUS dcacheBlkBypassRW
    (
    CBIO_DEV_ID dev,
    block_t     start_block,
    block_t     num_blocks,
    addr_t      buffer,
    enum cbio_rw rw
    )
    {
    STATUS stat = OK ;
    FAST struct dcache_ctrl * pDc = dev->pDc ;

    /* NOTE: no need to do change detect here, it's been done by ManyInval */

    stat = pDc->dc_subDev->pFuncs->cbio_blkRW(
            pDc->dc_subDev, start_block, num_blocks, buffer, rw, NULL );

    if (stat == ERROR)
        stat = dcacheErrorHandler( dev, NULL, rw );

    pDc->dc_lastAccBlock = start_block + num_blocks ;
    pDc->dc_actTick = tickGet() ;

    return (stat);
    }

/*******************************************************************************
*
* dcacheBlockLocate - locate a block in the cache list
*
* First, search for the block in the hash table; if that is not
* possible, find the block in the LRU list.
*/
LOCAL DCACHE_DESC * dcacheBlockLocate (CBIO_DEV_ID dev, block_t block)
    {
    FAST DCACHE_DESC *pDesc = NULL;
    FAST struct dcache_ctrl * pDc = dev->pDc ;

    if (pDc->dc_hashSize > 0)
        {
        /* First, search in the hash table */

        pDesc = pDc->dc_hashBase [ block % pDc->dc_hashSize ] ;

        pDc->dc_hashHits ++ ;   /* think positively */

        while (pDesc != NULL)
            {
            /* verify all blocks are in the correct hash slot */

            assert( (block % pDc->dc_hashSize) ==
                    (pDesc->block % pDc->dc_hashSize) );

            if (pDesc->block == block)
                return (pDesc);
            else
                pDesc = pDesc->hashNext ;
            }

        /* hash miss, block not in cache */

        pDc->dc_hashHits -- ;
        pDc->dc_hashMisses ++ ;

        return NULL;
        }

    /* Now search the LRU list - linear search */
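/*******************************************************************************
*
* The private declarations for DCACHE_DESC and struct dcache_ctrl are not part
* of this listing.  The sketch below reconstructs only the fields the routines
* above actually touch, to make them easier to follow.  It is inferred purely
* from usage - it is NOT the real private header, and the exact types, field
* order and anything not referenced above are assumptions.  The one structural
* hint the code does give is that the LRU node is very likely the first member,
* since DLL_LAST()/DLL_NEXT() results are cast straight to DCACHE_DESC *.
*/

#if 0   /* illustrative sketch only - not part of the driver */

#include <vxWorks.h>
#include <dllLib.h>
#include <cbioLib.h>

typedef struct dcacheDesc               /* one cache block descriptor */
    {
    DL_NODE             lruList ;       /* node on dc_LRU; assumed first member */
    struct dcacheDesc * hashNext ;      /* next descriptor in the same hash chain */
    block_t             block ;         /* disk block currently held here */
    int                 state ;         /* CB_STATE_EMPTY/CLEAN/DIRTY/UNSTABLE */
    int                 busy ;          /* descriptor currently referenced */
    caddr_t             data ;          /* cbio_bytesPerBlk bytes of payload */
    } DCACHE_DESC ;

struct dcache_ctrl                      /* per-device cache state (partial) */
    {
    CBIO_DEV_ID    cbio_dev ;           /* back pointer to this CBIO layer */
    CBIO_DEV_ID    dc_subDev ;          /* underlying CBIO device (the disk) */
    DL_LIST        dc_LRU ;             /* descriptors, most recently used first */
    DCACHE_DESC ** dc_hashBase ;        /* dc_hashSize chains, slot = block % size */
    u_long         dc_hashSize ;        /* 0 means no hash table, scan the LRU list */
    u_long         dc_numCacheBlocks ;  /* total number of descriptors */
    u_long         dc_dirtyCount ;      /* how many are CB_STATE_DIRTY */
    u_long         dc_readAhead ;       /* max blocks to prefetch */
    u_long         dc_BigBufSize ;      /* burst buffer size, in blocks */
    caddr_t        dc_BigBufPtr ;       /* contiguous buffer for burst R/W */
    block_t        dc_lastAccBlock ;    /* block following the last disk access */
    u_long         dc_cylinderSize ;    /* "significant seek" threshold */
    u_long         dc_actTick ;         /* tick of last disk activity */
    u_long         dc_hashHits, dc_hashMisses ;
    u_long         dc_writesForeground, dc_writesHidden ;
    } ;

#endif  /* 0 */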
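/*******************************************************************************
*
* For orientation only: a hypothetical caller showing how the LOCAL helpers
* above would typically combine on a cached single-block read - look the block
* up, allocate a victim descriptor on a miss, fill it (with read-ahead), then
* copy out of the cache buffer.  The file's real dcacheBlkRead/dcacheBlkWrite
* entry points are not part of this excerpt and certainly differ in detail;
* the function name below is made up for this illustration.
*/

#if 0   /* illustrative sketch only */

LOCAL STATUS dcacheCachedReadSketch
    (
    CBIO_DEV_ID dev,            /* cache CBIO handle */
    block_t     block,          /* block number to read */
    addr_t      buffer          /* destination, cbio_bytesPerBlk bytes */
    )
    {
    DCACHE_DESC * pDesc ;
    STATUS        stat = OK ;

    /* dev->cbio_mutex must already be held, as the asserts above require */

    pDesc = dcacheBlockLocate (dev, block);         /* hash (or LRU) lookup */

    if (pDesc == NULL)                              /* cache miss */
        {
        pDesc = dcacheBlockAllocate (dev, block);   /* steal an LRU victim */

        if (pDesc == NULL)
            return (ERROR);                         /* cache exhausted (EAGAIN) */

        stat = dcacheBlockFill (dev, pDesc);        /* read it in, plus read-ahead */
        }

    if (stat == OK)
        bcopy (pDesc->data, buffer, dev->params.cbio_bytesPerBlk);

    return (stat);
    }

#endif  /* 0 */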