
📄 RDResumeHandler.java

📁 A Java-based multi-torrent download program
💻 Java
📖 Page 1 of 2
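A minimal usage sketch (an assumption for illustration only: the DiskManagerImpl and DMChecker instances, here called disk_manager and checker, are obtained elsewhere in the Azureus core; only the constructor and method signatures are taken from the listing below):

	RDResumeHandler resume_handler = new RDResumeHandler( disk_manager, checker );

	resume_handler.start();					// register the "Use Resume" configuration listener

	resume_handler.checkAllPieces( false );	// verify piece state, using fast-resume data if enabled

	try{
		resume_handler.dumpResumeDataToDisk( true, false );	// persist the rebuilt resume state

	}catch( Exception e ){

		Debug.printStackTrace( e );
	}

	resume_handler.stop();					// unregister the listener and flag any running check to stop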
/*
 * Created on 31-Jul-2004
 * Created by Paul Gardner
 * Copyright (C) 2004, 2005, 2006 Aelitis, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * 
 * AELITIS, SAS au capital de 46,603.30 euros
 * 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
 *
 */

package org.gudy.azureus2.core3.disk.impl.resume;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.io.File;

import org.gudy.azureus2.core3.logging.*;
import org.gudy.azureus2.core3.torrent.TOTorrent;
import org.gudy.azureus2.core3.util.*;

import org.gudy.azureus2.core3.config.*;
import org.gudy.azureus2.core3.download.*;
import org.gudy.azureus2.core3.disk.impl.*;
import org.gudy.azureus2.core3.disk.impl.access.*;
import org.gudy.azureus2.core3.disk.impl.piecemapper.DMPieceList;
import org.gudy.azureus2.core3.disk.impl.piecemapper.DMPieceMapEntry;
import org.gudy.azureus2.core3.disk.*;

import com.aelitis.azureus.core.diskmanager.cache.CacheFileManagerException;

/**
 * Fast-resume handler for the disk manager: establishes piece completion state from
 * saved resume data (re-checking where necessary) and writes rebuilt resume data back
 * to disk.
 *
 * @author parg
 */
public class 
RDResumeHandler
	implements ParameterListener
{
	private static final LogIDs LOGID = LogIDs.DISK;

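		// per-piece states as stored in the "resume data" byte array of the saved resume map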
	private static final byte		PIECE_NOT_DONE			= 0;
	private static final byte		PIECE_DONE				= 1;
	private static final byte		PIECE_RECHECK_REQUIRED	= 2;
		
	private DiskManagerImpl		disk_manager;
	private DMChecker			checker;
		
	private boolean				started;
	private boolean				stopped;
	private boolean				bStoppedMidCheck;
	
	protected boolean useFastResume = COConfigurationManager.getBooleanParameter("Use Resume", true);

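	/**
	 * Creates a resume handler for the given disk manager, using the supplied
	 * checker to queue piece hash-check requests.
	 */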
	public 
	RDResumeHandler(
		DiskManagerImpl		_disk_manager,
		DMChecker			_writer_and_checker )
	{
		disk_manager		= _disk_manager;
		checker				= _writer_and_checker;
	}
	
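	/**
	 * Starts listening for "Use Resume" configuration changes. An instance is
	 * single-use; starting it a second time is reported as an error.
	 */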
	public void
	start()
	{
		if ( started ){
			
			Debug.out( "RDResumeHandler: reuse not supported" );	
		}
		
		started	= true;
		
		COConfigurationManager.addParameterListener("Use Resume", this);
	}
	
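	/**
	 * Flags the handler as stopped (terminating any in-progress check loop) and
	 * removes the configuration listener.
	 */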
	public void
	stop()
	{	
		stopped	= true;
		
		COConfigurationManager.removeParameterListener("Use Resume", this);
	}
	
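	/**
	 * Re-reads the "Use Resume" setting whenever the parameter changes.
	 */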
	public void 
	parameterChanged(
		String parameterName )
	{
	    useFastResume = COConfigurationManager.getBooleanParameter("Use Resume", true);
	}
	
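	/**
	 * Establishes the completion state of every piece. With fast resume enabled and
	 * valid resume data present, pieces recorded as done are accepted after a file-size
	 * sanity check and only the remainder are queued for hash re-checking; otherwise
	 * every piece is re-checked. Unless the check was stopped part way through, or the
	 * resume data was already complete, the rebuilt resume data is then saved.
	 *
	 * @param newfiles true if a new file was created, which disables fast resume
	 *                 for this run
	 */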
	public void 
	checkAllPieces(
		boolean newfiles ) 
	{
		//long	start = System.currentTimeMillis();
				
		DiskManagerRecheckInstance	recheck_inst = disk_manager.getRecheckScheduler().register( disk_manager, false );

		try{			
			disk_manager.setState( DiskManager.CHECKING );
					
			
			boolean resumeEnabled = useFastResume;
			
				//disable fast resume if a new file was created
			
			if (newfiles){
				
				resumeEnabled = false;
			}
			
			boolean	resume_data_complete = false;
			
			
			final AESemaphore	pending_checks_sem 	= new AESemaphore( "RD:PendingChecks" );
			int					pending_check_num	= 0;

			DiskManagerPiece[]	pieces	= disk_manager.getPieces();

			if ( resumeEnabled ){
				
				boolean resumeValid = false;
				
				byte[] resume_pieces = null;
				
				Map partialPieces = null;
				
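					// the saved resume map is read below via three keys: "resume data" (byte[]
					// of per-piece states), "blocks" (map of piece number, as a String, to a
					// list of written block numbers) and "valid" (Long flag, 1 meaning the data
					// reflects the on-disk state)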
				Map	resume_data = getResumeData();							
				
				if ( resume_data != null ){
					
					try {
						
						resume_pieces = (byte[])resume_data.get("resume data");
						
						if ( resume_pieces != null ){
							
							if ( resume_pieces.length != pieces.length ){
							
								Debug.out( "Resume data array length mismatch: " + resume_pieces.length + "/" + pieces.length );
								
								resume_pieces	= null;
							}
						}
						
						partialPieces = (Map)resume_data.get("blocks");
						
						resumeValid = ((Long)resume_data.get("valid")).intValue() == 1;
						
							// if the torrent download is complete we don't need to invalidate the
							// resume data
						
						if ( isTorrentResumeDataComplete( disk_manager.getDownloadManager(), resume_data )){
							
							resume_data_complete	= true;
									
						}else{
							
								// set it so that if we crash the NOT_DONE pieces will be
								// rechecked
							
							resume_data.put("valid", new Long(0));
							
							saveResumeData( resume_data );
						}
						
					}catch(Exception ignore){
						
						// ignore.printStackTrace();
					}
					
				}else{
					
					// System.out.println( "resume dir not found");
				}
								
				if ( resume_pieces == null ){
					
					resumeValid	= false;
					
					resume_pieces	= new byte[pieces.length];
				}
				
					// calculate the current file sizes up front for performance reasons
				
				DiskManagerFileInfo[]	files = disk_manager.getFiles();
				
				Map	file_sizes = new HashMap();
				
				for (int i=0;i<files.length;i++){
					
					try{
						Long	len = new Long(((DiskManagerFileInfoImpl)files[i]).getCacheFile().getLength());
					
						file_sizes.put( files[i], len );
						
					}catch( CacheFileManagerException e ){
						
						Debug.printStackTrace(e);
					}
				}
				
				for (int i = 0; i < pieces.length; i++){
					
					DiskManagerPiece	dm_piece	= pieces[i];
					
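						// progress is reported in thousandths (0 - 1000)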
					disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces() );
					
					byte	piece_state = resume_pieces[i];
					
						// valid resume data means that the resume array correctly represents
						// the state of pieces on disk, be they done or not
					
					if ( piece_state == PIECE_DONE ){
					
							// at least check that file sizes are OK for this piece to be valid
						
						DMPieceList list = disk_manager.getPieceList(i);
						
						for (int j=0;j<list.size();j++){
							
							DMPieceMapEntry	entry = list.get(j);
							
							Long	file_size 		= (Long)file_sizes.get(entry.getFile());
							
							if ( file_size == null ){
								
								piece_state	= PIECE_NOT_DONE;
								
								if (Logger.isEnabled())
									Logger.log(new LogEvent(disk_manager, LOGID,
											LogEvent.LT_WARNING, "Piece #" + i
													+ ": file is missing, " + "fails re-check."));

								break;
							}
							
							long	expected_size 	= entry.getOffset() + entry.getLength();
							
							if ( file_size.longValue() < expected_size ){
								
								piece_state	= PIECE_NOT_DONE;
								
								if (Logger.isEnabled())
									Logger.log(new LogEvent(disk_manager, LOGID,
											LogEvent.LT_WARNING, "Piece #" + i
													+ ": file is too small, fails re-check. File size = "
													+ file_size + ", piece needs " + expected_size));

								break;
							}
						}
					}
					
					if ( piece_state == PIECE_DONE ){
						
						dm_piece.setDone( true );
						
					}else{								
						
							// We only need to recheck pieces that are marked as not-ok
							// if the resume data is invalid or an explicit recheck is needed
						
						if ( piece_state == PIECE_RECHECK_REQUIRED || !resumeValid ){
													
							while( !stopped ){
									
								if ( recheck_inst.getPermission()){
									
									break;
								}
							}
							
							if ( stopped ){
								
									// we only flag as stopped mid-check if the stop action has prevented
									// a hash check from occurring
								
								bStoppedMidCheck	= true;
								
								break;
								
							}else{
								
								try{	
									DiskManagerCheckRequest	request = disk_manager.createCheckRequest( i, null );
									
									request.setLowPriority( true );
									
									checker.enqueueCheckRequest(
										request,
										new DiskManagerCheckRequestListener()
										{
											public void 
											checkCompleted( 
												DiskManagerCheckRequest 	request,
												boolean						passed )
											{
												complete();
											}
											 
											public void
											checkCancelled(
												DiskManagerCheckRequest		request )
											{
												complete();
											}
											
											public void 
											checkFailed( 
												DiskManagerCheckRequest 	request, 
												Throwable		 			cause )
											{
												complete();
											}
											
											protected void
											complete()
											{
												pending_checks_sem.release();
											}
										});
									
									pending_check_num++;
									
								}catch( Throwable e ){
								
									Debug.printStackTrace(e);
								}
							}
						}
					}
				}
					
				if ( partialPieces != null && resumeValid ){
														
					Iterator iter = partialPieces.entrySet().iterator();
					
					while (iter.hasNext()) {
						
						Map.Entry key = (Map.Entry)iter.next();
						
						int pieceNumber = Integer.parseInt((String)key.getKey());
													
						List blocks = (List)partialPieces.get(key.getKey());
						
						Iterator iterBlock = blocks.iterator();
						
						while (iterBlock.hasNext()) {
							
							pieces[pieceNumber].setWritten(((Long)iterBlock.next()).intValue());
						}
					}
				}
			}else{
				
				while( ! stopped ){
					
					if ( recheck_inst.getPermission()){
						
						break;
					}
				}
				
					// resume not enabled, recheck everything
				
				for (int i = 0; i < pieces.length; i++){
					
					if ( stopped ){
						
						bStoppedMidCheck = true;
						
						break;
					}
										
					disk_manager.setPercentDone(((i + 1) * 1000) / disk_manager.getNbPieces() );						
						
					try{
						DiskManagerCheckRequest	request = disk_manager.createCheckRequest( i, null );
						
						request.setLowPriority( true );

						checker.enqueueCheckRequest(
								request, 
								new DiskManagerCheckRequestListener()
								{
									public void 
									checkCompleted( 
										DiskManagerCheckRequest 	request,
										boolean						passed )
									{
										complete();
									}
									 
									public void
									checkCancelled(
										DiskManagerCheckRequest		request )
									{
										complete();
									}
									
									public void 
									checkFailed( 
										DiskManagerCheckRequest 	request, 
										Throwable		 			cause )
									{
										complete();
									}
									
									protected void
									complete()
									{
										pending_checks_sem.release();
									}
								});
						
						pending_check_num++;
						
					}catch( Throwable e ){
					
						Debug.printStackTrace(e);
					}
				}								
			}
						
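				// wait for all the asynchronous hash-check requests queued above to complete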
			while( pending_check_num > 0 ){
				
				pending_checks_sem.reserve();
				
				pending_check_num--;
			}
			
				//dump the newly built resume data to the disk/torrent
			
			if ( !( stopped || resume_data_complete )){
				
				try{
					dumpResumeDataToDisk(false, false);
					
				}catch( Exception e ){
					
					Debug.out( "Failed to dump initial resume data to disk" );
					
					Debug.printStackTrace( e );
				}
			}
		}catch( Throwable e ){
			
				// if something went wrong then log and continue. 
			
			Debug.printStackTrace(e);
			
		}finally{
			
			recheck_inst.unregister();
       		
			// System.out.println( "Check of '" + disk_manager.getDownloadManager().getDisplayName() + "' completed in " + (System.currentTimeMillis() - start));
		}
	}
	
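	/**
	 * Rebuilds the resume data from the current piece state and writes it back to the
	 * torrent, flushing the file cache first so that the recorded state matches what is
	 * actually on disk (see the comment below).
	 *
	 * @param savePartialPieces whether block-level (partially written piece) state is recorded
	 * @param force_recheck     presumably forces all pieces to be re-checked on the next
	 *                          start (inferred from the parameter name)
	 */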
	public void 
	dumpResumeDataToDisk(
		boolean savePartialPieces, 
		boolean force_recheck )
	
		throws Exception
	{
    
			// if file caching is enabled then this is an important time to ensure that the cache is
			// flushed as we are going to record details about the accuracy of written data.
			// First build the resume map from the data (as updates can still be going on)
			// Then, flush the cache. This means that on a successful flush the built resume
			// data matches at least the valid state of the data
			// Then update the torrent
		
		DiskManagerFileInfo[]	files = disk_manager.getFiles();
		
		if ( !useFastResume ){
			
				// flush cache even if resume is disabled
			
			for (int i=0;i<files.length;i++){
