/*
* Created on 03-Aug-2004
* Created by Paul Gardner
* Copyright (C) 2004, 2005, 2006 Aelitis, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* AELITIS, SAS au capital de 46,603.30 euros
* 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
*
*/
package com.aelitis.azureus.core.diskmanager.cache.impl;
/**
* @author parg
*
*/
import java.io.File;
import java.util.*;
import org.gudy.azureus2.core3.util.*;
import org.gudy.azureus2.core3.torrent.*;
import org.gudy.azureus2.core3.config.COConfigurationManager;
import org.gudy.azureus2.core3.logging.*;
import com.aelitis.azureus.core.diskmanager.cache.*;
import com.aelitis.azureus.core.diskmanager.file.*;
public class
CacheFileWithCache
implements CacheFile
{
// Make code prettier by bringing over SS_CACHE from DirectByteBuffer
private static final byte SS_CACHE = DirectByteBuffer.SS_CACHE;
private static final LogIDs LOGID = LogIDs.CACHE;
protected static Comparator comparator = new
Comparator()
{
public int
compare(
Object _o1,
Object _o2)
{
// entries in the cache should never overlap
CacheEntry o1 = (CacheEntry)_o1;
CacheEntry o2 = (CacheEntry)_o2;
long offset1 = o1.getFilePosition();
int length1 = o1.getLength();
long offset2 = o2.getFilePosition();
int length2 = o2.getLength();
if ( offset1 + length1 <= offset2 ||
offset2 + length2 <= offset1 ){
}else{
Debug.out( "Overlapping cache entries - " + o1.getString() + "/" + o2.getString());
}
return( offset1 - offset2 < 0 ? -1 : 1 );
}
};
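// Example of the resulting order, for two valid (non-overlapping)
// entries A = [offset 0, length 16384) and B = [offset 16384, length 16384):
// compare( A, B ) returns -1, so the TreeSet iterates entries in
// ascending file-position order. Equal start offsets can only arise
// from an overlap, which is reported via Debug.out above.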
protected static boolean TRACE = false;
protected final static boolean TRACE_CACHE_CONTENTS = false;
static{
TRACE = COConfigurationManager.getBooleanParameter( "diskmanager.perf.cache.trace" );
if ( TRACE ){
System.out.println( "**** Disk Cache tracing enabled ****" );
}
}
protected final static int READAHEAD_LOW_LIMIT = 64*1024;
protected final static int READAHEAD_HIGH_LIMIT = 256*1024;
protected final static int READAHEAD_HISTORY = 32;
protected CacheFileManagerImpl manager;
protected FMFile file;
protected int access_mode = CF_READ;
protected TOTorrentFile torrent_file;
protected TOTorrent torrent;
protected long file_offset_in_torrent;
protected long[] read_history = new long[ READAHEAD_HISTORY ];
protected int read_history_next = 0;
protected TreeSet cache = new TreeSet(comparator);
protected int current_read_ahead_size = 0;
protected static final int READ_AHEAD_STATS_WAIT_TICKS = 10*1000 / CacheFileManagerImpl.STATS_UPDATE_FREQUENCY;
protected int read_ahead_stats_wait = READ_AHEAD_STATS_WAIT_TICKS;
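// Worked example, assuming STATS_UPDATE_FREQUENCY is in milliseconds
// (e.g. 1000): READ_AHEAD_STATS_WAIT_TICKS = 10*1000/1000 = 10, i.e.
// the read-ahead size is only recalculated every 10th stats update,
// roughly every ten seconds.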
protected Average read_ahead_made_average = Average.getInstance(CacheFileManagerImpl.STATS_UPDATE_FREQUENCY, 5);
protected Average read_ahead_used_average = Average.getInstance(CacheFileManagerImpl.STATS_UPDATE_FREQUENCY, 5);
protected long read_ahead_bytes_made;
protected long last_read_ahead_bytes_made;
protected long read_ahead_bytes_used;
protected long last_read_ahead_bytes_used;
protected int piece_size = 0;
protected int piece_offset = 0;
protected AEMonitor this_mon = new AEMonitor( "CacheFile" );
protected volatile CacheFileManagerException pending_exception;
protected
CacheFileWithCache(
CacheFileManagerImpl _manager,
FMFile _file,
TOTorrentFile _torrent_file )
{
manager = _manager;
file = _file;
Arrays.fill( read_history, -1 );
if ( _torrent_file != null ){
torrent_file = _torrent_file;
torrent = torrent_file.getTorrent();
piece_size = (int)torrent.getPieceLength();
for (int i=0;i<torrent.getFiles().length;i++){
TOTorrentFile f = torrent.getFiles()[i];
if ( f == torrent_file ){
break;
}
file_offset_in_torrent += f.getLength();
}
piece_offset = piece_size - (int)( file_offset_in_torrent % piece_size );
if ( piece_offset == piece_size ){
piece_offset = 0;
}
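// Worked example: with piece_size = 262144 (256K) and
// file_offset_in_torrent = 300000, the file starts 300000 % 262144 =
// 37856 bytes into a piece, so the first piece_offset = 262144 - 37856
// = 224288 bytes of this file complete that partial piece; piece
// boundaries within the file then fall at piece_offset + n*piece_size.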
current_read_ahead_size = Math.min( READAHEAD_LOW_LIMIT, piece_size );
}
}
public TOTorrentFile
getTorrentFile()
{
return( torrent_file );
}
protected void
updateStats()
{
long made = read_ahead_bytes_made;
long used = read_ahead_bytes_used;
long made_diff = made - last_read_ahead_bytes_made;
long used_diff = used - last_read_ahead_bytes_used;
read_ahead_made_average.addValue( made_diff );
read_ahead_used_average.addValue( used_diff );
last_read_ahead_bytes_made = made;
last_read_ahead_bytes_used = used;
// give changes made to read ahead size a chance to work through the stats
// before recalculating
if ( --read_ahead_stats_wait == 0 ){
read_ahead_stats_wait = READ_AHEAD_STATS_WAIT_TICKS;
// see if we need to adjust the read-ahead size
double made_average = read_ahead_made_average.getAverage();
double used_average = read_ahead_used_average.getAverage();
// if used average > 75% of made average then increase
double ratio = used_average*100/made_average;
if ( ratio > 75 ){
current_read_ahead_size += 16*1024;
// no bigger than a piece
current_read_ahead_size = Math.min( current_read_ahead_size, piece_size );
// no bigger than the fixed max size
current_read_ahead_size = Math.min( current_read_ahead_size, READAHEAD_HIGH_LIMIT );
// no bigger than a 16th of the cache, in case it's really small (e.g. 1M)
current_read_ahead_size = Math.min( current_read_ahead_size, (int)(manager.getCacheSize()/16 ));
}else if ( ratio < 50 ){
current_read_ahead_size -= 16*1024;
// no smaller than the min
current_read_ahead_size = Math.max( current_read_ahead_size, READAHEAD_LOW_LIMIT );
}
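// Worked example: with made_average = 100K and used_average = 80K,
// ratio = 80K*100/100K = 80 > 75, so the read-ahead size grows by 16K
// (capped by the piece size, READAHEAD_HIGH_LIMIT and a 16th of the
// cache). Were only 40K used, ratio = 40 < 50 and the size would
// shrink by 16K towards READAHEAD_LOW_LIMIT.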
}
// System.out.println( "read-ahead: done = " + read_ahead_bytes_made + ", used = " + read_ahead_bytes_used + ", done_av = " + read_ahead_made_average.getAverage() + ", used_av = " + read_ahead_used_average.getAverage()+ ", size = " + current_read_ahead_size );
}
protected void
readCache(
final DirectByteBuffer file_buffer,
final long file_position,
final boolean recursive,
final boolean disable_read_cache )
throws CacheFileManagerException
{
checkPendingException();
final int file_buffer_position = file_buffer.position(SS_CACHE);
final int file_buffer_limit = file_buffer.limit(SS_CACHE);
final int read_length = file_buffer_limit - file_buffer_position;
try{
if ( manager.isCacheEnabled()){
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "readCache: " + getName()
+ ", " + file_position + " - "
+ (file_position + read_length - 1) + ":" + file_buffer_position
+ "/" + file_buffer_limit));
if ( read_length == 0 ){
return; // nothing to do
}
long writing_file_position = file_position;
int writing_left = read_length;
boolean ok = true;
int used_entries = 0;
long used_read_ahead = 0;
// if we can totally satisfy the read from the cache, then use it
// otherwise flush the cache (not so smart here to only read missing)
try{
this_mon.enter();
// record the position of the byte *following* the end of this read
read_history[read_history_next++] = file_position + read_length;
if ( read_history_next == READAHEAD_HISTORY ){
read_history_next = 0;
}
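// Example: a read of 500 bytes at position 1000 records 1500 here; a
// later read starting at exactly 1500 then looks like sequential
// access and becomes a read-ahead candidate (see the do_read_ahead
// check below).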
Iterator it = cache.iterator();
while( ok && writing_left > 0 && it.hasNext()){
CacheEntry entry = (CacheEntry)it.next();
long entry_file_position = entry.getFilePosition();
int entry_length = entry.getLength();
if ( entry_file_position > writing_file_position ){
// data missing at the start of the read section
ok = false;
break;
}else if ( entry_file_position + entry_length <= writing_file_position ){
// not got there yet
}else{
// copy required amount into read buffer
int skip = (int)(writing_file_position - entry_file_position);
int available = entry_length - skip;
if ( available > writing_left ){
available = writing_left;
}
DirectByteBuffer entry_buffer = entry.getBuffer();
int entry_buffer_position = entry_buffer.position(SS_CACHE);
int entry_buffer_limit = entry_buffer.limit(SS_CACHE);
try{
entry_buffer.limit( SS_CACHE, entry_buffer_position + skip + available );
entry_buffer.position( SS_CACHE, entry_buffer_position + skip );
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID, "cacheRead: using "
+ entry.getString() + "["
+ entry_buffer.position(SS_CACHE) + "/"
+ entry_buffer.limit(SS_CACHE) + "] to write to ["
+ file_buffer.position(SS_CACHE) + "/"
+ file_buffer.limit(SS_CACHE) + "]"));
used_entries++;
file_buffer.put( SS_CACHE, entry_buffer );
manager.cacheEntryUsed( entry );
}finally{
entry_buffer.limit( SS_CACHE, entry_buffer_limit );
entry_buffer.position( SS_CACHE, entry_buffer_position );
}
writing_file_position += available;
writing_left -= available;
if ( entry.getType() == CacheEntry.CT_READ_AHEAD ){
used_read_ahead += available;
}
}
}
}finally{
if ( ok ){
read_ahead_bytes_used += used_read_ahead;
}
this_mon.exit();
}
if ( ok && writing_left == 0 ){
// only record this as a cache read hit if we haven't just read the
// data from the file system
if ( !recursive ){
manager.cacheBytesRead( read_length );
}
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID,
"cacheRead: cache use ok [entries = " + used_entries + "]"));
}else{
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID,
"cacheRead: cache use fails, reverting to plain read"));
// reset in case we've done some partial reads
file_buffer.position( SS_CACHE, file_buffer_position );
// If read-ahead fails then we resort to a straight read
// Read-ahead can fail if a cache-flush fails (e.g. out of disk space
// on a file belonging to a different torrent than this).
// We don't want such a failure to break this read operation
for (int i=0;i<2;i++){
try{
boolean do_read_ahead =
i == 0 && // first time round
!recursive &&
!disable_read_cache &&
manager.isReadCacheEnabled() &&
read_length < current_read_ahead_size &&
file_position + current_read_ahead_size <= file.getLength();
if ( do_read_ahead ){
// only read ahead if this is a continuation of a prior read within history
do_read_ahead = false;
for (int j=0;j<READAHEAD_HISTORY;j++){
if ( read_history[j] == file_position ){
do_read_ahead = true;
break;
}
}
}
int actual_read_ahead = current_read_ahead_size;
if ( do_read_ahead ){
// don't read ahead over the end of a piece
int request_piece_offset = (int)((file_position - piece_offset ) % piece_size);
if ( request_piece_offset < 0 ){
request_piece_offset += piece_size;
}
//System.out.println( "request offset = " + request_piece_offset );
int data_left = piece_size - request_piece_offset;
if ( data_left < actual_read_ahead ){
actual_read_ahead = data_left;
// no point in using read-ahead logic if the actual read-ahead
// is smaller than or the same as the request size!
if ( actual_read_ahead <= read_length ){
do_read_ahead = false;
}
//System.out.println( " trimmed to " + data_left );
}
}
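// Worked example: with piece_size = 262144 and piece_offset = 224288,
// a read at file_position = 230000 is (230000 - 224288) % 262144 =
// 5712 bytes into its piece, leaving data_left = 256432 before the
// piece boundary. For file_position = 1000 (inside the first partial
// piece) the raw result is -223288, adjusted to 38856 by adding
// piece_size.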
if ( do_read_ahead ){
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID,
"\tperforming read-ahead"));
DirectByteBuffer cache_buffer =
DirectByteBufferPool.getBuffer( DirectByteBuffer.AL_CACHE_READ, actual_read_ahead );
boolean buffer_cached = false;
try{
// must allocate space OUTSIDE sync block (see manager for details)
CacheEntry entry =
manager.allocateCacheSpace(
CacheEntry.CT_READ_AHEAD,
this,
cache_buffer, file_position, actual_read_ahead );
entry.setClean();
try{
this_mon.enter();
// flush before read so that any bits in cache get re-read correctly on read
flushCache( file_position, actual_read_ahead, true, -1, 0, -1 );
getFMFile().read( cache_buffer, file_position );
read_ahead_bytes_made += actual_read_ahead;
manager.fileBytesRead( actual_read_ahead );
cache_buffer.position( SS_CACHE, 0 );
cache.add( entry );
manager.addCacheSpace( entry );
}finally{
this_mon.exit();
}
buffer_cached = true;
}finally{
if ( !buffer_cached ){
// if the read operation failed, and hence the buffer
// wasn't added to the cache, then release it here
cache_buffer.returnToPool();
}
}
// recursively read from the cache, should hit the data we just read although
// there is the possibility that it could be flushed before then - hence the
// recursion flag that will avoid this happening next time around
readCache( file_buffer, file_position, true, disable_read_cache );
}else{
if (TRACE)
Logger.log(new LogEvent(torrent, LOGID,
"\tnot performing read-ahead"));
try{
this_mon.enter();
flushCache( file_position, read_length, true, -1, 0, -1 );
getFMFile().read( file_buffer, file_position );
}finally{
this_mon.exit();
}
manager.fileBytesRead( read_length );
}