📄 cachefilewithcache.java

📁 P2P BitTorrent source code written in Java
💻 JAVA
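
The listing below covers the cache read, write, and flush paths of the class. The flush path coalesces contiguous dirty cache entries into runs so that each run can be written to disk with a single gather-write (see flushCacheSupport and multiBlockFlush further down). As a standalone illustration of that coalescing idea, here is a minimal sketch using hypothetical Entry and Run types; the names are illustrative only and are not part of the client's API.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: Entry and Run are hypothetical stand-ins for the
// client's CacheEntry and the multi_block_* bookkeeping in flushCacheSupport.
public class CoalesceSketch {

	static class Entry {
		final long	position;	// file position of the cached block
		final int	length;		// block length in bytes

		Entry( long position, int length ){
			this.position	= position;
			this.length		= length;
		}
	}

	static class Run {
		final long			start;		// first byte of the contiguous run
		final long			next;		// first byte after the run
		final List<Entry>	entries;	// entries making up the run

		Run( long start, long next, List<Entry> entries ){
			this.start		= start;
			this.next		= next;
			this.entries	= entries;
		}
	}

		// group position-ordered entries into maximal contiguous runs, mirroring
		// the multi_block_start/multi_block_next logic in flushCacheSupport

	static List<Run> coalesce( List<Entry> ordered ){
		List<Run>	runs	= new ArrayList<>();
		List<Entry>	current	= new ArrayList<>();
		long		start	= -1;
		long		next	= -1;

		for ( Entry e: ordered ){
			if ( start == -1 ){
				start = e.position;					// start of day
			}else if ( e.position != next ){
				runs.add( new Run( start, next, current ));	// gap - close the run
				current	= new ArrayList<>();
				start	= e.position;
			}
			next = e.position + e.length;
			current.add( e );
		}

		if ( start != -1 ){
			runs.add( new Run( start, next, current ));
		}

		return runs;
	}

	public static void main( String[] args ){
		List<Entry>	entries = new ArrayList<>();
		entries.add( new Entry(     0, 16384 ));
		entries.add( new Entry( 16384, 16384 ));	// contiguous with the first
		entries.add( new Entry( 65536, 16384 ));	// gap -> starts a second run

		for ( Run r: coalesce( entries )){
			System.out.println( "run [" + r.start + "," + r.next + ") with "
					+ r.entries.size() + " entries" );
		}
	}
}

Unlike the sketch, the real class flushes each run as soon as a gap is found (and applies the min_chunk_size and dirty-age filters) rather than collecting the runs into a list.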
							break;
							
						}catch( CacheFileManagerException e ){
							
							if ( i == 1 ){
								
								throw( e );
							}
							
						}catch( FMFileManagerException e ){
							
							if ( i == 1 ){
								
								manager.rethrow(this,e);
							}
						}
					}				
				}
			}else{
				
				try{			
					getFMFile().read( file_buffer, file_position );
					
					manager.fileBytesRead( read_length );
		
				}catch( FMFileManagerException e ){
						
					manager.rethrow(this,e);
				}
			}
		}finally{
			
			if ( AEDiagnostics.CHECK_DUMMY_FILE_DATA ){
				
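					// in dummy-data diagnostic mode every byte of the torrent is expected
					// to equal the low 8 bits of its absolute offset within the torrent,
					// so the data just read can be verified byte-by-byte
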
				long	temp_position = file_position + file_offset_in_torrent;
			
				file_buffer.position( SS_CACHE, file_buffer_position );
				
				while( file_buffer.hasRemaining( SS_CACHE )){
						
					byte	v = file_buffer.get( SS_CACHE );
						
					if ((byte)temp_position != v ){
							
						System.out.println( "readCache: read is bad at " + temp_position +
											": expected = " + (byte)temp_position + ", actual = " + v );
				
						file_buffer.position( SS_CACHE, file_buffer_limit );
						
						break;
					}
						
					temp_position++;
				}
			}					
		}
	}
	
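	// write the supplied buffer to the file at 'file_position'. When
	// 'buffer_handed_over' is true, ownership of the buffer passes to this
	// method: the buffer is either retained in the cache or returned to the
	// pool in the finally block below
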
	protected void
	writeCache(
		DirectByteBuffer	file_buffer,
		long				file_position,
		boolean				buffer_handed_over )
	
		throws CacheFileManagerException
	{
		checkPendingException();
		
		boolean	buffer_cached	= false;
		boolean	failed			= false;
		
		try{
			int	file_buffer_position	= file_buffer.position(SS_CACHE);
			int file_buffer_limit		= file_buffer.limit(SS_CACHE);
			
			int	write_length = file_buffer_limit - file_buffer_position;
			
			if ( write_length == 0 ){
				
				return;	// nothing to do
			}
			
			if ( AEDiagnostics.CHECK_DUMMY_FILE_DATA ){
			
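				// same dummy-data pattern check as in readCache, here validating the
				// bytes about to be written before restoring the buffer position
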
				long	temp_position = file_position + file_offset_in_torrent;
				
				while( file_buffer.hasRemaining( SS_CACHE )){
					
					byte	v = file_buffer.get( SS_CACHE );
					
					if ((byte)temp_position != v ){
						
						System.out.println( "writeCache: write is bad at " + temp_position +
											": expected = " + (byte)temp_position + ", actual = " + v );
						
						break;
					}
					
					temp_position++;
				}
				
				file_buffer.position( SS_CACHE, file_buffer_position );
			}
			
			if ( manager.isWriteCacheEnabled() ){
				
				if (TRACE)
					Logger.log(new LogEvent(torrent, LOGID, "writeCache: " + getName()
							+ ", " + file_position + " - "
							+ (file_position + write_length - 1) + ":" + file_buffer_position
							+ "/" + file_buffer_limit));
				
					// if the data is smaller than a piece and not handed over then it is
					// most likely a part of a piece at the start or end of a file. If so,
					// copy it and insert the copy into cache
							
				if ( 	( !buffer_handed_over ) &&
						write_length < piece_size ){
				
					if (TRACE)
						Logger.log(new LogEvent(torrent, LOGID,
								"    making copy of non-handedover buffer"));
					
					DirectByteBuffer	cache_buffer = DirectByteBufferPool.getBuffer( DirectByteBuffer.AL_CACHE_WRITE, write_length );
										
					cache_buffer.put( SS_CACHE, file_buffer );
					
					cache_buffer.position( SS_CACHE, 0 );
					
						// make it look like this buffer has been handed over
					
					file_buffer				= cache_buffer;
					
					file_buffer_position	= 0;
					file_buffer_limit		= write_length;
					
					buffer_handed_over	= true;
				}
				
				if ( buffer_handed_over ){
					
						// cache this write, allocate outside sync block (see manager for details)
	
					CacheEntry	entry = 
						manager.allocateCacheSpace(
								CacheEntry.CT_DATA_WRITE,
								this, 
								file_buffer, 
								file_position, 
								write_length );
					
					try{
						this_mon.enter();
						
						if ( access_mode != CF_WRITE ){
							
							throw( new CacheFileManagerException( this,"Write failed - cache file is read only" ));
						}
						
							// if we are overwriting stuff already in the cache then force-write overlapped
							// data (easiest solution as this should only occur on hash-fails)

							// do the flush and add synchronized to avoid possibility of another
							// thread getting in-between and adding same block thus causing multiple entries
							// for same space
						
						flushCache( file_position, write_length, true, -1, 0, -1 );
						
						cache.add( entry );
					
						manager.addCacheSpace( entry );
						
					}finally{
						
						this_mon.exit();
					}
																
					manager.cacheBytesWritten( write_length );
					
					buffer_cached	= true;
					
				}else{

						// not handed over, invalidate any cache that exists for the area
						// as it is now out of date
					
					try{
						
						this_mon.enter();
						
						flushCache( file_position, write_length, true, -1, 0, -1 );

						getFMFile().write( file_buffer, file_position );
						
					}finally{
						
						this_mon.exit();
					}
					
					manager.fileBytesWritten( write_length );
				}
			}else{
				
				getFMFile().write( file_buffer, file_position );
				
				manager.fileBytesWritten( write_length );
			}
			
		}catch( CacheFileManagerException e ){
			
			failed	= true;
			
			throw( e );
			
		}catch( FMFileManagerException e ){
			
			failed	= true;
			
			manager.rethrow(this,e);
			
		}finally{
			
			if ( buffer_handed_over ){
				
				if ( !(failed || buffer_cached )){
					
					file_buffer.returnToPool();
				}
			}
		}
	}
	
	protected void
	flushCache(
		long				file_position,
		long				length,					// -1 -> do all from position onwards
		boolean				release_entries,
		long				minimum_to_release,		// -1 -> all
		long				oldest_dirty_time, 		// dirty entries newer than this won't be flushed
													// 0 -> now
		long				min_chunk_size )		// minimum contiguous size for flushing, -1 -> no limit
	
		throws CacheFileManagerException
	{
		try{
			flushCacheSupport( file_position, length, release_entries, minimum_to_release, oldest_dirty_time, min_chunk_size );
			
		}catch( CacheFileManagerException	e ){
			
			if ( !release_entries ){
			
				// make sure we release the offending buffer entries otherwise they'll hang around
				// in memory causing grief when the next attempt is made to flush them...
				
				flushCacheSupport( 0, -1, true, -1, 0, -1 );
			}
			
			throw( e );
		}
	}
	
	protected void
	flushCacheSupport(
		long				file_position,
		long				length,					// -1 -> do all from position onwards
		boolean				release_entries,
		long				minimum_to_release,		// -1 -> all
		long				oldest_dirty_time, 		// dirty entries newer than this won't be flushed
													// 0 -> now
		long				min_chunk_size )		// minimum contiguous size for flushing, -1 -> no limit
	
		throws CacheFileManagerException
	{
		try{
			this_mon.enter();	

			if ( cache.size() == 0 ){
				
				return;
			}
			
			Iterator	it = cache.iterator();
			
			Throwable	last_failure = null;
			
			long	entry_total_released = 0;
			
			List	multi_block_entries		= new ArrayList();
			long	multi_block_start		= -1;
			long	multi_block_next		= -1;
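
				// contiguous dirty entries are coalesced into runs delimited by
				// [multi_block_start, multi_block_next) so that each run can be
				// written with a single gather-write in multiBlockFlush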
			
			while( it.hasNext()){
			
				CacheEntry	entry = (CacheEntry)it.next();
				
				long	entry_file_position 	= entry.getFilePosition();
				int		entry_length			= entry.getLength();
			
				if ( entry_file_position + entry_length <= file_position ){
					
						// to the left
				
					continue;
					
				}else if ( length != -1 && file_position + length <= entry_file_position ){
					
						// to the right, give up
					
					break;
				}
				
					// overlap!!!!
					// we're going to deal with this entry one way or another. In particular if
					// we are releasing entries then this is guaranteed to be released, either directly
					// or via a flush if dirty
				
				boolean	dirty = entry.isDirty();

				try{
						
					if ( 	dirty && 
							(	oldest_dirty_time == 0 ||
								entry.getLastUsed() < oldest_dirty_time )){
																	
						if ( multi_block_start == -1 ){
							
								// start of day
							
							multi_block_start	= entry_file_position;
							
							multi_block_next	= entry_file_position + entry_length;
							
							multi_block_entries.add( entry );
							
						}else if ( multi_block_next == entry_file_position ){
							
								// continuation, add in
							
							multi_block_next = entry_file_position + entry_length;
					
							multi_block_entries.add( entry );
							
						}else{
							
								// we've got a gap - flush current and start another series
							
								// set up ready for next block in case the flush fails - we try
								// and flush as much as possible in the face of failure
							
							boolean	skip_chunk	= false;
							
							if ( min_chunk_size != -1 ){
								
								if ( release_entries ){
								
									Debug.out( "CacheFile: can't use min chunk with release option" );
								}else{
									
									skip_chunk	= multi_block_next - multi_block_start < min_chunk_size;
								}
							}
							
							List	f_multi_block_entries	= multi_block_entries;
							long	f_multi_block_start		= multi_block_start;
							long	f_multi_block_next		= multi_block_next;
							
							multi_block_start	= entry_file_position;
							
							multi_block_next	= entry_file_position + entry_length;
							
							multi_block_entries	= new ArrayList();
							
							multi_block_entries.add( entry );
							
							if ( skip_chunk ){
								if (TRACE)
									Logger.log(new LogEvent(torrent, LOGID,
											"flushCache: skipping " + f_multi_block_entries.size()
													+ " entries, [" + f_multi_block_start + ","
													+ f_multi_block_next + "] as too small"));
							}else{
								
								multiBlockFlush(
										f_multi_block_entries,
										f_multi_block_start,
										f_multi_block_next,
										release_entries );
							}
						}
					}
				}catch( Throwable e ){
										
					last_failure	= e;
					
				}finally{
					
					if ( release_entries ){
					
						it.remove();
						
							// if it is dirty it will be released when the flush is done
						
						if ( !dirty ){
	
							manager.releaseCacheSpace( entry );
						}
						
						entry_total_released += entry.getLength();

						if ( minimum_to_release != -1 && entry_total_released > minimum_to_release ){
							
								// if this entry needs flushing this is done outside the loop
							
							break;
						}
					}
				}
			}
			
			if ( multi_block_start != -1 ){
				
				boolean	skip_chunk	= false;
				
				if ( min_chunk_size != -1 ){
					
					if ( release_entries ){
					
						Debug.out( "CacheFile: can't use min chunk with release option" );
					}else{
						
						skip_chunk	= multi_block_next - multi_block_start < min_chunk_size;
					}
				}

				if ( skip_chunk ){
					
				if (TRACE)
					Logger.log(new LogEvent(torrent, LOGID, "flushCache: skipping "
							+ multi_block_entries.size() + " entries, ["
							+ multi_block_start + "," + multi_block_next
							+ "] as too small"));
					
				}else{
					
					multiBlockFlush(
							multi_block_entries,
							multi_block_start,
							multi_block_next,
							release_entries );
				}
			}
			
			if ( last_failure != null ){
				
				if ( last_failure instanceof CacheFileManagerException ){
					
					throw((CacheFileManagerException)last_failure );
				}
				
				throw( new CacheFileManagerException( this,"cache flush failed", last_failure ));
			}
		}finally{
			
			this_mon.exit();
		}
	}
	
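	// write a contiguous run of dirty cache entries covering [multi_block_start,
	// multi_block_next) to the underlying file with a single gather-write, then
	// release the entries or mark them clean depending on 'release_entries'
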
	protected void
	multiBlockFlush(
		List		multi_block_entries,
		long		multi_block_start,
		long		multi_block_next,
		boolean		release_entries )
	
		throws CacheFileManagerException
	{
		boolean	write_ok	= false;
		
		try{
			if (TRACE)
				Logger.log(new LogEvent(torrent, LOGID, "multiBlockFlush: writing "
						+ multi_block_entries.size() + " entries, [" + multi_block_start
						+ "," + multi_block_next + "," + release_entries + "]"));			
			
			DirectByteBuffer[]	buffers = new DirectByteBuffer[ multi_block_entries.size()];
			
			long	expected_per_entry_write = 0;
			
			for (int i=0;i<buffers.length;i++){
				
				CacheEntry	entry = (CacheEntry)multi_block_entries.get(i);
				
					// sanity check - we should always be flushing entire entries
			
				DirectByteBuffer	buffer = entry.getBuffer();
				
				if ( buffer.limit(SS_CACHE) - buffer.position(SS_CACHE) != entry.getLength()){
					
					throw( new CacheFileManagerException( this,"flush: inconsistent entry length, position wrong" ));
				}
				
				expected_per_entry_write	+= entry.getLength();
				
				buffers[i] = buffer;
			}
			
			long	expected_overall_write	= multi_block_next - multi_block_start;

			if ( expected_per_entry_write != expected_overall_write ){
		
				throw( new CacheFileManagerException( this,"flush: inconsistent write length, entries = " + expected_per_entry_write + " overall = " + expected_overall_write ));
				
			}
			
			getFMFile().write( buffers, multi_block_start );
									
			manager.fileBytesWritten( expected_overall_write );
			
			write_ok	= true;
			
		}catch( FMFileManagerException e ){
			
			throw( new CacheFileManagerException( this,"flush fails", e ));
			
		}finally{			
			
			for (int i=0;i<multi_block_entries.size();i++){
				
				CacheEntry	entry = (CacheEntry)multi_block_entries.get(i);
				
				if ( release_entries ){

					manager.releaseCacheSpace( entry );
					
				}else{
					
					entry.resetBufferPosition();
			
					if ( write_ok ){
						
						entry.setClean();
					}
				}
			}
		}
	}
	
	protected void
	flushCache(
		long				file_start_position,
		boolean				release_entries,
		long				minimum_to_release )
