cachefilewithcache.java
boolean buffer_cached = false;
try{
// must allocate space OUTSIDE sync block (see manager for details)
CacheEntry entry =
manager.allocateCacheSpace(
CacheEntry.CT_READ_AHEAD,
this,
cache_buffer, file_position, actual_read_ahead );
entry.setClean();
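// (presumably because allocateCacheSpace may itself need to free up space by
// flushing other entries via the manager, which shouldn't happen while holding
// this file's monitor - an assumption based on the comment above; the details
// live in the manager)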
try{
this_mon.enter();
// flush (and release) this range before reading so that any dirty data in the
// cache reaches disk and is picked up correctly by the read
flushCache( file_position, actual_read_ahead, true, -1, 0, -1 );
getFMFile().read( cache_buffer, file_position );
read_ahead_bytes_made += actual_read_ahead;
manager.fileBytesRead( actual_read_ahead );
cache_buffer.position( DirectByteBuffer.SS_CACHE, 0 );
cache.add( entry );
manager.addCacheSpace( entry );
}finally{
this_mon.exit();
}
buffer_cached = true;
}finally{
if ( !buffer_cached ){
// if the read operation failed, and hence the buffer
// wasn't added to the cache, then release it here
cache_buffer.returnToPool();
}
}
// recursively read from the cache - this should hit the data we just read, although
// there is the possibility that it could be flushed before then; hence the
// recursion flag, which prevents a further read-ahead attempt next time around
readCache( file_buffer, file_position, true );
}else{
if ( TRACE ){
LGLogger.log( "\tnot performing read-ahead" );
}
try{
this_mon.enter();
flushCache( file_position, read_length, true, -1, 0, -1 );
getFMFile().read( file_buffer, file_position );
}finally{
this_mon.exit();
}
manager.fileBytesRead( read_length );
}
break;
}catch( CacheFileManagerException e ){
if ( i == 1 ){
throw( e );
}
}catch( FMFileManagerException e ){
if ( i == 1 ){
manager.rethrow(e);
}
}
}
}
}else{
try{
getFMFile().read( file_buffer, file_position );
manager.fileBytesRead( read_length );
}catch( FMFileManagerException e ){
manager.rethrow(e);
}
}
}finally{
if ( AEDiagnostics.CHECK_DUMMY_FILE_DATA ){
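// sanity check for dummy-data mode: each byte of the file is expected to equal
// the low byte of its absolute offset within the torrent, e.g. the byte at
// torrent offset 0x1234 should be 0x34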
long temp_position = file_position + file_offset_in_torrent;
file_buffer.position( DirectByteBuffer.SS_CACHE, file_buffer_position );
while( file_buffer.hasRemaining( DirectByteBuffer.SS_CACHE )){
byte v = file_buffer.get( DirectByteBuffer.SS_CACHE );
if ((byte)temp_position != v ){
System.out.println( "readCache: read is bad at " + temp_position +
": expected = " + (byte)temp_position + ", actual = " + v );
file_buffer.position( DirectByteBuffer.SS_CACHE, file_buffer_limit );
break;
}
temp_position++;
}
}
}
}
protected void
writeCache(
DirectByteBuffer file_buffer,
long file_position,
boolean buffer_handed_over )
throws CacheFileManagerException
{
boolean buffer_cached = false;
boolean failed = false;
try{
int file_buffer_position = file_buffer.position(DirectByteBuffer.SS_CACHE);
int file_buffer_limit = file_buffer.limit(DirectByteBuffer.SS_CACHE);
int write_length = file_buffer_limit - file_buffer_position;
if ( write_length == 0 ){
return; // nothing to do
}
if ( AEDiagnostics.CHECK_DUMMY_FILE_DATA ){
long temp_position = file_position + file_offset_in_torrent;
while( file_buffer.hasRemaining( DirectByteBuffer.SS_CACHE )){
byte v = file_buffer.get( DirectByteBuffer.SS_CACHE );
if ((byte)temp_position != v ){
System.out.println( "writeCache: write is bad at " + temp_position +
": expected = " + (byte)temp_position + ", actual = " + v );
break;
}
temp_position++;
}
file_buffer.position( DirectByteBuffer.SS_CACHE, file_buffer_position );
}
if ( manager.isWriteCacheEnabled() ){
if ( TRACE ){
LGLogger.log(
"writeCache: " + getName() + ", " + file_position + " - " + (file_position + write_length - 1 ) +
":" + file_buffer_position + "/" + file_buffer_limit );
}
// if the data is smaller than a piece and the buffer hasn't been handed over
// then it is most likely part of a piece at the start or end of a file. If so,
// copy it and insert the copy into the cache
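// for example (illustrative figures), the final 4K of a file whose length is not
// piece-aligned: the caller keeps ownership of its buffer, so a pooled copy is
// cached in its place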
if ( ( !buffer_handed_over ) &&
write_length < piece_size ){
if ( TRACE ){
LGLogger.log( " making copy of non-handedover buffer" );
}
DirectByteBuffer cache_buffer = DirectByteBufferPool.getBuffer( DirectByteBuffer.AL_CACHE_WRITE, write_length );
cache_buffer.put( DirectByteBuffer.SS_CACHE, file_buffer );
cache_buffer.position( DirectByteBuffer.SS_CACHE, 0 );
// make it look like this buffer has been handed over
file_buffer = cache_buffer;
file_buffer_position = 0;
file_buffer_limit = write_length;
buffer_handed_over = true;
}
if ( buffer_handed_over ){
// cache this write, allocate outside sync block (see manager for details)
CacheEntry entry =
manager.allocateCacheSpace(
CacheEntry.CT_DATA_WRITE,
this,
file_buffer,
file_position,
write_length );
try{
this_mon.enter();
// if we are overwriting stuff already in the cache then force-write the overlapped
// data (easiest solution as this should only occur on hash-fails)
// do the flush and add synchronized to avoid the possibility of another
// thread getting in between and adding the same block, thus causing multiple
// entries for the same space
flushCache( file_position, write_length, true, -1, 0, -1 );
cache.add( entry );
manager.addCacheSpace( entry );
}finally{
this_mon.exit();
}
manager.cacheBytesWritten( write_length );
buffer_cached = true;
}else{
// not handed over, invalidate any cache that exists for the area
// as it is now out of date
try{
this_mon.enter();
flushCache( file_position, write_length, true, -1, 0, -1 );
getFMFile().write( file_buffer, file_position );
}finally{
this_mon.exit();
}
manager.fileBytesWritten( write_length );
}
}else{
getFMFile().write( file_buffer, file_position );
manager.fileBytesWritten( write_length );
}
}catch( CacheFileManagerException e ){
failed = true;
throw( e );
}catch( FMFileManagerException e ){
failed = true;
manager.rethrow(e);
}finally{
if ( buffer_handed_over ){
if ( !(failed || buffer_cached )){
file_buffer.returnToPool();
}
}
}
}
protected void
flushCache(
long file_position,
long length, // -1 -> do all from position onwards
boolean release_entries,
long minimum_to_release, // -1 -> all
long oldest_dirty_time, // dirty entries newer than this won't be flushed
// 0 -> now
long min_chunk_size ) // minimum contiguous size for flushing, -1 -> no limit
throws CacheFileManagerException
{
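// example parameter combinations (the first is how this file calls it on the
// read/write paths, the second is hypothetical):
//   flushCache( pos, len, true, -1, 0, -1 ) - flush and release every entry
//       overlapping [pos, pos+len), regardless of age or run size
//   flushCache( 0, -1, true, -1, 0, -1 )    - flush and release the whole file
//       from position 0 onwards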
try{
this_mon.enter();
if ( cache.size() == 0 ){
return;
}
Iterator it = cache.iterator();
Throwable last_failure = null;
long entry_total_released = 0;
List multi_block_entries = new ArrayList();
long multi_block_start = -1;
long multi_block_next = -1;
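// contiguous dirty entries are accumulated into [multi_block_start, multi_block_next)
// and flushed as a single write; e.g. dirty entries covering [0,16K) and [16K,32K)
// coalesce into one 32K flush, whereas a positional gap causes the accumulated run
// to be flushed before a new run is started (illustrative sizes)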
while( it.hasNext()){
CacheEntry entry = (CacheEntry)it.next();
long entry_file_position = entry.getFilePosition();
int entry_length = entry.getLength();
if ( entry_file_position + entry_length <= file_position ){
// to the left
continue;
}else if ( length != -1 && file_position + length <= entry_file_position ){
// to the right, give up
break;
}
// overlap!!!!
// we're going to deal with this entry one way or another. In particular if
// we are releasing entries then this is guaranteed to be released, either directly
// or via a flush if dirty
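// worked example of the test above: flushing file_position=100, length=100
// (i.e. [100,200)) against an entry at position 150 with length 80 ([150,230)):
// 230 > 100 so it isn't wholly to the left, and 200 > 150 so it isn't wholly to
// the right - hence it overlaps and is processed (illustrative numbers)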
boolean dirty = entry.isDirty();
try{
if ( dirty &&
( oldest_dirty_time == 0 ||
entry.getLastUsed() < oldest_dirty_time )){
if ( multi_block_start == -1 ){
// start of day
multi_block_start = entry_file_position;
multi_block_next = entry_file_position + entry_length;
multi_block_entries.add( entry );
}else if ( multi_block_next == entry_file_position ){
// continuation, add in
multi_block_next = entry_file_position + entry_length;
multi_block_entries.add( entry );
}else{
// we've got a gap - flush current and start another series
// set up ready for the next block now, in case the flush fails - we try to
// flush as much as possible in the face of failure
boolean skip_chunk = false;
if ( min_chunk_size != -1 ){
if ( release_entries ){
Debug.out( "CacheFile: can't use min chunk with release option" );
}else{
skip_chunk = multi_block_next - multi_block_start < min_chunk_size;
}
}
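// e.g. with a min_chunk_size of 128K (illustrative), an accumulated dirty run of
// only 64K is left in the cache to grow further rather than being flushed now;
// this only applies when entries aren't being released, as checked above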
List f_multi_block_entries = multi_block_entries;
long f_multi_block_start = multi_block_start;
long f_multi_block_next = multi_block_next;
multi_block_start = entry_file_position;
multi_block_next = entry_file_position + entry_length;
multi_block_entries = new ArrayList();
multi_block_entries.add( entry );
if ( skip_chunk ){
if ( TRACE ){
LGLogger.log( "flushCache: skipping " + multi_block_entries.size() + " entries, [" + multi_block_start + "," + multi_block_next + "] as too small" );
}
}else{
multiBlockFlush(
f_multi_block_entries,
f_multi_block_start,
f_multi_block_next,
release_entries );
}
}
}
}catch( Throwable e ){
Debug.out( "cacheFlush fails: " + e.getMessage());
last_failure = e;
}finally{
if ( release_entries ){
it.remove();
// if it is dirty it will be released when the flush is done
if ( !dirty ){
manager.releaseCacheSpace( entry );
}
entry_total_released += entry.getLength();
if ( minimum_to_release != -1 && entry_total_released > minimum_to_release ){
// if this entry needs flushing this is done outside the loop