
📄 dcachecbio.c

📁 VxWorks source code: reading the source is the best way to learn VxWorks
💻 C
📖 Page 1 of 5
    for( ; num_blocks > 0; num_blocks-- )	/* For each block req'ed */
	{
	if( dev->pDc->dc_dirtyCount > dev->pDc->dc_dirtyMax )
	    if( dcacheManyFlushInval( pDev, 0, NONE, TRUE, FALSE,
				    & pDev->pDc->dc_writesForeground) == ERROR )
		goto _error ;

	/* get a source block, with data in it */

	pDescSrc = dcacheBlockGet(pDev, src_block, NULL, TRUE);

	if( pDescSrc == NULL )
	    goto _error ;

	/* Debug: make sure the block still has got the data */

	assert(  (pDescSrc->state == CB_STATE_CLEAN) ||
		 (pDescSrc->state == CB_STATE_DIRTY) );

	/* get a destination block, with or without data */

	pDescDst = dcacheBlockGet(pDev, dst_block, NULL, FALSE);

	if( pDescDst == NULL )
	    goto _error ;

	/* Debug: make sure the block still has got the data */

	assert(  (pDescDst->state == CB_STATE_CLEAN) ||
		 (pDescDst->state == CB_STATE_DIRTY) );

	/* move data between the cached blocks */

	saveState = pDescDst->state ;
	pDescDst->state = CB_STATE_UNSTABLE ;

	bcopy( pDescSrc->data, pDescDst->data, pDev->params.cbio_bytesPerBlk );

	if( saveState != CB_STATE_DIRTY )
	    dev->pDc->dc_dirtyCount ++ ;

	pDescDst->state = CB_STATE_DIRTY ;

	src_block ++ ;	/* prepare for next */
	dst_block ++ ;
	}	/* end of: For each block req'ed */

    semGive(&pDev->cbio_mutex);
    return OK;

_error:
    semGive(&pDev->cbio_mutex);
    return ERROR;
    }
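/*
 * Illustrative caller-side sketch, not part of the original dcachecbio.c:
 * the block-copy loop above is normally reached through the generic cbioLib
 * entry point cbioBlkCopy(), which dispatches to this module via the device's
 * function table.  The handle name and the block numbers below are assumptions
 * chosen only for the example.
 */

#include "cbioLib.h"

STATUS blkCopyExample
    (
    CBIO_DEV_ID cacheCbio	/* handle returned by dcacheDevCreate() */
    )
    {
    /* duplicate blocks 0..31 to blocks 1000..1031 through the disk cache */

    if( cbioBlkCopy( cacheCbio, (block_t) 0, (block_t) 1000,
		     (block_t) 32 ) == ERROR )
	return ERROR;		/* errno is set by the CBIO layer */

    return OK;
    }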
/*******************************************************************************
*
* dcacheIoctl - Misc control operations
*
* This performs the requested ioctl() operation.
*
* CBIO modules can expect the following ioctl() codes from cbioLib.h:
* CBIO_RESET - reset the CBIO device and the lower layer
* CBIO_STATUS_CHK - check device status of CBIO device and lower layer
* CBIO_DEVICE_LOCK - Prevent disk removal
* CBIO_DEVICE_UNLOCK - Allow disk removal
* CBIO_DEVICE_EJECT - Unmount and eject device
* CBIO_CACHE_FLUSH - Flush any dirty cached data
* CBIO_CACHE_INVAL - Flush & Invalidate all cached data
* CBIO_CACHE_NEWBLK - Allocate scratch block
*
* dev - the CBIO handle of the device being accessed (from creation routine)
*
* command - ioctl() command being issued
*
* arg - specific to the particular ioctl() function requested, or unused
*
* RETURNS: OK or ERROR, and may otherwise set errno.
*/

LOCAL STATUS dcacheIoctl
    (
    CBIO_DEV_ID	dev,
    int		command,
    addr_t	arg
    )
    {
    STATUS stat = OK ;
    FAST struct dcache_ctrl * pDc = dev->pDc ;
    DCACHE_DESC *pDesc = NULL ;

    if( OBJ_VERIFY( dev, cbioClassId ) != OK)
	{
	DEBUG_MSG("dcacheIoctl: invalid handle\n",0,0,0,0,0,0);
	errno = S_objLib_OBJ_ID_ERROR;
	return ERROR;
	}

    if( (u_long) TRUE == dev->cbio_readyChanged  &&
        ( command != (int) CBIO_RESET ))
	{
	errno = S_ioLib_DISK_NOT_PRESENT ;
	return ERROR;
	}

    if( semTake( &dev->cbio_mutex, WAIT_FOREVER) == ERROR )
	return ERROR;

    switch ( command )
	{
	case CBIO_RESET :
	    /* reset subordinate device, pass along 3rd argument */

	    pDc->dc_subDev->pFuncs->cbio_ioctl(
		pDc->dc_subDev, command, arg);

	    /* if device present, subordinate has its readyChanged FALSE */

	    dev->cbio_readyChanged = pDc->dc_subDev->cbio_readyChanged ;

	    /* since the disk geometry may have changed, we must re-init */

	    if( FALSE == dev->cbio_readyChanged )
		{
		stat = dacacheDevInit(dev);

		/* read new signature */

		(void) dcacheChangeDetect( dev, TRUE );
		dev->cbio_readyChanged = FALSE ;
		}

	    if( stat == ERROR && errno == OK )
		errno = S_ioLib_DISK_NOT_PRESENT ;
	    break;

	case CBIO_CACHE_FLUSH :
	    {
	    if( (arg < (addr_t)0 ) || (pDc->dc_syncInterval == 0) ||
		(dcacheUpdTaskId == 0))
		{
		/* if forced, or if no updater task, FLUSH inline */

		stat = dcacheManyFlushInval( dev, 0, NONE, TRUE, FALSE,
			    & pDc->dc_writesForced );
		}
	    else if (arg == 0)
		{
		int pri;

		/* default action for the most common situation */

		pDc->dc_updTick = tickGet() ;

		/* wakeup the updater task */

		taskUndelay( dcacheUpdTaskId );

		/* loan him our current priority */

		taskPriorityGet( 0, &pri  );
		taskPrioritySet( dcacheUpdTaskId, pri );
		}
	    else
		{
		/* lazy flush request */

		pDc->dc_updTick = (int) arg * sysClkRateGet() +
					tickGet();
		}
	    }
	    break;

	case CBIO_CACHE_INVAL :
	    stat = dcacheManyFlushInval( dev, 0, NONE, TRUE, TRUE,
			    & pDc->dc_writesForced );
	    break;

	case CBIO_STATUS_CHK :
	    /* avoid checking too frequently */

	    if ( pDc->dc_actTick <
			tickGet() - sysClkRateGet() * DCACHE_IDLE_SECS )
		{
		stat = pDc->dc_subDev->pFuncs->cbio_ioctl(
			pDc->dc_subDev, CBIO_STATUS_CHK, 0);

		dev->cbio_readyChanged = pDc->dc_subDev->cbio_readyChanged ;

		/* disk seems ready, but is it the same one ? */

		if( stat != ERROR )
		    {
		    stat = dcacheChangeDetect( dev, TRUE );
		    pDc->dc_actTick = tickGet() ;
		    }
		}
	    break;

	case CBIO_CACHE_NEWBLK :	/* Allocate scratch block */
	    arg += dev->params.cbio_offset ;

	    /* get that block */

	    pDesc = dcacheBlockGet(dev, (block_t) arg, NULL, FALSE);

	    /* zero-fill the block so that unwritten data reads 0s */

	    bzero( pDesc->data, dev->params.cbio_bytesPerBlk );
	    break ;

	case CBIO_DEVICE_LOCK :		/* these belong to low-level */
	case CBIO_DEVICE_UNLOCK :
	case CBIO_DEVICE_EJECT :
	default:
	    stat = pDc->dc_subDev->pFuncs->cbio_ioctl(
			pDc->dc_subDev, command, arg);
	}

    semGive(&dev->cbio_mutex);
    return (stat);
    }
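/*
 * Illustrative sketch, not part of the original dcachecbio.c, of how the
 * CBIO_CACHE_FLUSH argument handled above is typically passed in through the
 * generic cbioIoctl() entry point: a negative <arg> (or a zero sync interval,
 * or no updater task) makes the flush happen inline, zero wakes the updater
 * task immediately, and a positive value schedules a lazy flush that many
 * seconds from now.  The handle name is an assumption for the example.
 */

#include "cbioLib.h"

void cacheFlushExample
    (
    CBIO_DEV_ID cacheCbio	/* handle returned by dcacheDevCreate() */
    )
    {
    /* force all dirty blocks out to disk before a critical point */

    (void) cbioIoctl( cacheCbio, CBIO_CACHE_FLUSH, (addr_t) -1 );

    /* ... or merely request that the disk be updated within ~5 seconds */

    (void) cbioIoctl( cacheCbio, CBIO_CACHE_FLUSH, (addr_t) 5 );
    }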
/*******************************************************************************
*
* shiftCalc - calculate how many shift bits
*
* How many shifts <n> are needed such that <mask> == 1 << <n>.
* This is very useful for replacing multiplication with shifts,
* where it is known a priori that the multiplier is 2^k.
*
* RETURNS: Number of shifts.
*/

LOCAL int shiftCalc
    (
    u_long mask
    )
    {
    FAST int i;

    for (i = 0; i < 32; i++)
	{
	if (mask & 1)
	    break ;
	mask = mask >> 1 ;
	}

    return( i );
    }

/*******************************************************************************
*
* dcacheTunableVerify - verify the tunable parameters for sanity
*
*/

LOCAL void dcacheTunableVerify( CBIO_DEV_ID dev )
    {
    FAST struct dcache_ctrl * pDc = dev->pDc ;

    /* read-ahead - shouldn't be less than 2 */

    pDc->dc_readAhead = max( 2, pDc->dc_readAhead );

    /* read-ahead - shouldn't be more than BigBuf size */

    pDc->dc_readAhead = min( pDc->dc_BigBufSize,
	pDc->dc_readAhead );

    /* dirty max - should be at least as large as BigBuf*2 to be worthwhile */

    pDc->dc_dirtyMax = max( pDc->dc_BigBufSize*2,
	pDc->dc_dirtyMax );

    /* dirty max - must not be more than say 80% of total cache */

    pDc->dc_dirtyMax = min( (pDc->dc_numCacheBlocks*4)/5,
	pDc->dc_dirtyMax );

    /* bypass count could be anything, but no less than two */

    pDc->dc_bypassCount = max( 2, pDc->dc_bypassCount );

    /* don't allow a long sync period for removable devices */

    if( dev->params.cbio_removable )
	pDc->dc_syncInterval = min( 1, pDc->dc_syncInterval );

    /* cylinder size is used to rationalize hidden write attempts */

    pDc->dc_cylinderSize = max( (u_long) pDc->dc_dirtyMax,
	(u_long) dev->params.cbio_blksPerTrack * dev->params.cbio_nHeads );
    }

/*******************************************************************************
*
* dcacheMemInit - initialize disk cache memory structure
*
*/

LOCAL STATUS dcacheMemInit( CBIO_DEV_ID dev )
    {
    FAST struct dcache_ctrl * pDc = dev->pDc ;
    DCACHE_DESC *pDesc ;
    caddr_t	pData ;
    int		size ;
    block_t	nBlk ;
    u_long	n ;
    int		sizeBBlk = 0;

    /* Init LRU list to be empty */

    dllInit( & pDc->dc_LRU );

    /* calculate how many blocks we can afford to keep in our cache */

    size = dev->cbio_memSize ;

    sizeBBlk = size / dev->params.cbio_bytesPerBlk / 4 ;

    if( sizeBBlk < 2 )
	sizeBBlk = 0; /* FIXME - avoid using it if it isn't there */

    /* too big a burst is counter-productive, so limit it to typically 63K */

    sizeBBlk = min( sizeBBlk, 127 );

    size -= sizeBBlk * dev->params.cbio_bytesPerBlk ;

    nBlk = size / ( dev->params.cbio_bytesPerBlk + sizeof(DCACHE_DESC) );

    pDc->dc_numCacheBlocks	= nBlk ;

    /* Lookup an appropriate entry in the presets table */

    for( n = 0; n < (NELEMENTS( dcacheTunablePresets ) - 1); n++ )
	{
	if( nBlk <= dcacheTunablePresets[n].dc_numCacheBlocks )
	    break;
	}

    /* having found the right entry in the presets table, use it */

    pDc->dc_dirtyMax	= dcacheTunablePresets[n].dc_dirtyMax;
    pDc->dc_bypassCount	= dcacheTunablePresets[n].dc_bypassCount;
    pDc->dc_readAhead	= dcacheTunablePresets[n].dc_readAhead;
    pDc->dc_hashSize	= dcacheTunablePresets[n].dc_hashSize;
    pDc->dc_syncInterval= dcacheTunablePresets[n].dc_syncInterval;

    /* re-calculate nBlk, now accounting for the hash table size too */

    size -= pDc->dc_hashSize * sizeof(caddr_t) ;

    nBlk = size / ( dev->params.cbio_bytesPerBlk + sizeof(DCACHE_DESC) );

    if( pDc->dc_numCacheBlocks != nBlk )
	{
#ifdef	DEBUG
	INFO_MSG("dcacheMemInit: # of blocks reduced, %d to %d due to hash\n",
		pDc->dc_numCacheBlocks, nBlk, 0,0,0,0);
#endif	/*DEBUG*/
	}

    pDc->dc_numCacheBlocks	= nBlk ;

    /* set address for first descriptor and data block */

    pDesc = (void *) dev->cbio_memBase ;
    pData = dev->cbio_memBase + (nBlk * sizeof(DCACHE_DESC));

    DEBUG_MSG("dcacheMemInit: cache size %d bytes, carved %d blocks, at %#x\n",
	dev->cbio_memSize, nBlk, (int) dev->cbio_memBase, 0,0,0 );

    for( n = nBlk ; n > 0; n -- )
	{
	/* init fields */

	pDesc->block = NONE;
	pDesc->state = CB_STATE_EMPTY ;
	pDesc->data  = pData ;
	pDesc->busy = 0;
	pDesc->hashNext = NULL;

	/* add node to list */

	dllAdd( & pDc->dc_LRU, &pDesc->lruList );

	/* advance pointers */

	pDesc ++ ;
	pData += dev->params.cbio_bytesPerBlk ;
	}

    /* record size and location of burst block */

    pDc->dc_BigBufSize = sizeBBlk ;

    if( sizeBBlk > 0)
	pDc->dc_BigBufPtr = pData ;
    else
	pDc->dc_BigBufPtr = NULL ;

    pData += sizeBBlk * dev->params.cbio_bytesPerBlk ;	/* forw mark */

    /* rest of memory is used for hash table */

    pDc->dc_hashBase = (DCACHE_DESC **) pData ;
    pData += pDc->dc_hashSize * sizeof(caddr_t) ;

    bzero( (caddr_t) pDc->dc_hashBase,
		pDc->dc_hashSize * sizeof(caddr_t) );

    dcacheTunableVerify( dev );

    DEBUG_MSG("dcacheMemInit: pDesc %x, pData %x, sizeBBlk %d nBlk %d\n",
	(int) pDesc, (int) pData, sizeBBlk, nBlk, 0, 0);

    /* self-test memory allocations */

    assert( (int) pDesc <=
	(int) (dev->cbio_memBase + (nBlk * sizeof(DCACHE_DESC)))) ;
    assert( pData <= (dev->cbio_memBase + dev->cbio_memSize));

    return OK;
    }
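/*
 * Worked sizing sketch, not part of the original dcachecbio.c: the carving
 * arithmetic above, repeated with plain integers so the resulting layout can
 * be checked on a host.  The 128 KB pool, 512-byte block size, 32-byte
 * descriptor, 64-entry hash table and 4-byte pointer are assumed example
 * values only, and the preset-table lookup plus the two-pass recalculation
 * are compressed into a single pass.  With these numbers the cache ends up
 * with a 64-block burst buffer and 180 cached blocks.
 */

#include <stdio.h>

static void dcacheSizingExample (void)
    {
    int memSize     = 128 * 1024;	/* assumed cache memory pool */
    int bytesPerBlk = 512;		/* assumed device block size */
    int descSize    = 32;		/* assumed sizeof(DCACHE_DESC) */
    int hashSize    = 64;		/* assumed preset hash table size */
    int ptrSize     = 4;		/* assumed sizeof(caddr_t) */

    int size     = memSize;
    int sizeBBlk = size / bytesPerBlk / 4;	/* burst buffer, in blocks */

    if (sizeBBlk < 2)				/* too small to be useful */
	sizeBBlk = 0;
    if (sizeBBlk > 127)				/* cap the burst size */
	sizeBBlk = 127;

    size -= sizeBBlk * bytesPerBlk;		/* reserve the burst buffer */
    size -= hashSize * ptrSize;			/* reserve the hash table */

    /* each cached block costs one descriptor plus one data block */

    printf ("burst blocks %d, cache blocks %d\n",
	    sizeBBlk, size / (bytesPerBlk + descSize));
    }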
/*******************************************************************************
*
* dcacheDevTune - modify tunable disk cache parameters
*
* This function allows the user to tune some disk cache parameters
* to obtain better performance for a given application or workload pattern.
* These parameters are checked for sanity before being used, hence it is
* recommended to verify the actual parameters being set with dcacheShow().
*
* Following is the description of each tunable parameter:
*
* .IP <bypassCount>
* In order to achieve maximum performance, the Disk Cache is bypassed for very
* large requests. This parameter sets the threshold number of blocks for
* bypassing the cache, usually resulting in the data being transferred by
* the low-level driver directly to/from application data buffers (also
* known as cut-through DMA). Passing the value of 0 in this argument
* preserves the previous value of the associated parameter.
*
* .IP <syncInterval>
* The Disk Cache provides a low-priority task that periodically updates all
* modified blocks onto the disk. This parameter controls the time between
* these updates in seconds. The longer this period, the better the throughput
* that is likely to be achieved, at the risk of losing more data in the event
* of a failure. For removable devices this interval is fixed at 1 second.
* Setting this parameter to 0 results in immediate writes to disk when
* requested, resulting in minimal data loss risk at the cost of somewhat
* degraded performance.
*
* .IP <readAhead>
* In order to avoid accessing the disk in small units, the Disk Cache
* will read many contiguous blocks once a block which is absent from the
* cache is needed. Increasing this value increases read performance, but a
* value which is too large may cause blocks which are frequently used to
* be removed from the cache, resulting in a low Hit Ratio and increasing
* the number of Seeks, slowing down performance dramatically. Passing the
* value of 0 in this argument preserves the previous value of the
* associated parameter.
*
* .IP <dirtyMax>
* Routinely the Disk Cache will keep modified blocks in memory until it is
* specifically instructed to update these blocks to the disk, or until the
* specified time interval between disk updates has elapsed, or until the
* number of modified blocks is large enough to justify an update. Because
* the disk is updated in an ordered manner, and the blocks are written in
* groups when adjacent blocks have been modified, a larger dirtyMax
* parameter will minimize the number of Seek operations, but a value which
* is too large may decrease the Hit Ratio, thus degrading performance.
* Passing the value of 0 in this argument preserves the previous value of
* the associated parameter.
*
* RETURNS: OK, or ERROR if the device handle is invalid.
* A parameter value which is out of range will be silently corrected.
*
* SEE ALSO: dcacheShow()
*/

STATUS dcacheDevTune
    (
    CBIO_DEV_ID dev,		/* device handle */
    int		dirtyMax,	/* max # of dirty cache blocks allowed */
    int		bypassCount,	/* request size for bypassing cache */
    int		readAhead,	/* how many blocks to read ahead */
    int		syncInterval	/* how many seconds between disk updates */
    )
    {
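/*
 * The body of dcacheDevTune() continues on the following pages of this
 * listing.  Below is an illustrative usage sketch, not part of the original
 * file: it creates a disk cache on top of an existing block-device CBIO
 * handle and then tunes it.  The handle names, the 256 KB pool size, and the
 * header names are assumptions; a NULL memory address is assumed to let the
 * cache allocate its own pool, and the result should be confirmed with
 * dcacheShow() since out-of-range values are silently corrected.
 */

#include "cbioLib.h"
#include "dcacheCbio.h"

CBIO_DEV_ID cacheCreateAndTuneExample
    (
    CBIO_DEV_ID blkCbio		/* CBIO handle of the underlying device */
    )
    {
    CBIO_DEV_ID cacheCbio;

    /* create the cache layer with a 256 KB pool allocated internally */

    cacheCbio = dcacheDevCreate( blkCbio, NULL, 256 * 1024, "exampleCache" );

    if( cacheCbio == NULL )
	return NULL;

    /* allow up to 64 dirty blocks, keep bypassCount and readAhead (0 means
     * "preserve previous value"), and sync to disk every 2 seconds
     */

    (void) dcacheDevTune( cacheCbio, 64, 0, 0, 2 );

    return cacheCbio;
    }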
