
TrackerStatus.java

This is P2P BitTorrent source code written in Java.
Page 1 of 3
/*
 * Created on 22 Jul. 2003
 * Copyright (C) 2003, 2004, 2005, 2006 Aelitis, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * AELITIS, SAS au capital de 46,603.30 euros
 * 8 Allee Lenotre, La Grille Royale, 78600 Le Mesnil le Roi, France.
 *
 */
package org.gudy.azureus2.core3.tracker.client.impl.bt;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.FileNotFoundException;
import java.net.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.zip.GZIPInputStream;

import javax.net.ssl.*;

import org.gudy.azureus2.core3.config.COConfigurationManager;
import org.gudy.azureus2.core3.internat.MessageText;
import org.gudy.azureus2.core3.logging.*;
import org.gudy.azureus2.core3.security.SESecurityManager;
import org.gudy.azureus2.core3.tracker.client.TRTrackerAnnouncer;
import org.gudy.azureus2.core3.tracker.client.TRTrackerScraperClientResolver;
import org.gudy.azureus2.core3.tracker.client.TRTrackerScraperResponse;
import org.gudy.azureus2.core3.tracker.client.impl.TRTrackerScraperImpl;
import org.gudy.azureus2.core3.tracker.client.impl.TRTrackerScraperResponseImpl;
import org.gudy.azureus2.core3.tracker.protocol.udp.*;
import org.gudy.azureus2.core3.tracker.util.TRTrackerUtils;
import org.gudy.azureus2.core3.util.*;
import org.gudy.azureus2.plugins.clientid.ClientIDException;
import org.gudy.azureus2.plugins.clientid.ClientIDGenerator;
import org.gudy.azureus2.pluginsimpl.local.clientid.ClientIDManagerImpl;

import com.aelitis.azureus.core.networkmanager.impl.udp.UDPNetworkManager;
import com.aelitis.net.udp.uc.PRUDPPacket;
import com.aelitis.net.udp.uc.PRUDPPacketHandler;
import com.aelitis.net.udp.uc.PRUDPPacketHandlerException;
import com.aelitis.net.udp.uc.PRUDPPacketHandlerFactory;

/**
 * @author Olivier
 * 
 */
 
/** One TrackerStatus object handles scrape functionality for all torrents
 * on one tracker.
 */
public class TrackerStatus {
  // Used to be componentID 2
	private final static LogIDs LOGID = LogIDs.TRACKER;
  // prefix for our MessageText message keys.  Used to shorten code. 
  private final static String SS = "Scrape.status.";
  private final static String SSErr = "Scrape.status.error.";

  private final static int FAULTY_SCRAPE_RETRY_INTERVAL = 60 * 10 * 1000;
  private final static int NOHASH_RETRY_INTERVAL = 1000 * 60 * 60 * 3; // 3 hrs
  
  /**
   * When scraping a single hash, also scrape other hashes that are going to
   * be scraped within this range.
   */
  private final static int GROUP_SCRAPES_MS 	= 60 * 15 * 1000;
  private final static int GROUP_SCRAPES_LIMIT	= 20;
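  // e.g. with the 15-minute GROUP_SCRAPES_MS window, a scrape due at 12:00 also
  // picks up any hash whose next scrape falls between 11:45 and 12:15, capped
  // at GROUP_SCRAPES_LIMIT (20) hashes per request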
  
  
  static{
  	PRUDPTrackerCodecs.registerCodecs();
  }
  
  private static List	logged_invalid_urls	= new ArrayList();
  
  private static ThreadPool	thread_pool = new ThreadPool( "TrackerStatus", 8, true );	// queue when full rather than block
  
  private final URL		tracker_url;
  private boolean		az_tracker;
  
  private String 	scrapeURL = null;
 
  /** key = Torrent hash.  values = TRTrackerScraperResponseImpl */
  private HashMap 					hashes;
  /** only needed to notify listeners */ 
  private TRTrackerScraperImpl		scraper;
  
  private boolean bSingleHashScrapes = false;
    
  protected AEMonitor hashes_mon 	= new AEMonitor( "TrackerStatus:hashes" );
  private final TrackerChecker checker;

  public 
  TrackerStatus(
  	TrackerChecker 			_checker, 
  	TRTrackerScraperImpl	_scraper, 
	URL 					_tracker_url ) 
  {    	
  	checker 	= _checker;
	scraper		= _scraper;
    tracker_url	= _tracker_url;
    
    az_tracker = TRTrackerUtils.isAZTracker( tracker_url );
    
    bSingleHashScrapes	= COConfigurationManager.getBooleanParameter( "Tracker Client Scrape Single Only" );
    
    String trackerUrl	= tracker_url.toString();
    
    hashes = new HashMap();
    
    try {
      trackerUrl = trackerUrl.replaceAll(" ", "");
      int position = trackerUrl.lastIndexOf('/');
      if(	position >= 0 &&
      		trackerUrl.length() >= position+9 && 
      		trackerUrl.substring(position+1,position+9).equals("announce")) {

        this.scrapeURL = trackerUrl.substring(0,position+1) + "scrape" + trackerUrl.substring(position+9);
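        // e.g. "http://tracker.example.com/announce?passkey=ab" becomes
        // "http://tracker.example.com/scrape?passkey=ab" (hypothetical URL)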
        // System.out.println( "url = " + trackerUrl + ", scrape =" + scrapeURL );
        
      }else if ( trackerUrl.toLowerCase().startsWith("udp:")){
      		// UDP scrapes aren't based on URL rewriting, just carry on
      	
      	scrapeURL = trackerUrl;
      	
       }else if ( position >= 0 && trackerUrl.lastIndexOf('.') < position ){
       	
       		// some trackers support /scrape appended but don't have an /announce
       		// don't do this though if the URL ends with .php (or in fact .<anything>)
       	
       	scrapeURL = trackerUrl + (trackerUrl.endsWith("/")?"":"/") + "scrape";
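       	// e.g. "http://tracker.example.com/" becomes "http://tracker.example.com/scrape"
       	// (hypothetical URL; this branch is skipped when the last path component contains a '.')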
       	
      } else {
        if (!logged_invalid_urls.contains(trackerUrl)) {

          logged_invalid_urls.add(trackerUrl);
          // Error logging is done by the caller, since it has the hash/torrent info
        }
      }
    } catch (Throwable e) {
    	Debug.printStackTrace( e );
    } 
  }

  
  protected boolean 
  isTrackerScrapeUrlValid() 
  {
    return scrapeURL != null;
  }
  
  
  
  protected TRTrackerScraperResponseImpl getHashData(HashWrapper hash) {
  	try{
  		hashes_mon.enter();
  		
  		return (TRTrackerScraperResponseImpl) hashes.get(hash);
  	}finally{
  		
  		hashes_mon.exit();
  	}
  }

 


  protected void 
  updateSingleHash(
	HashWrapper hash, 
	boolean force) 
  {
    updateSingleHash(hash, force, true);
  }

  	protected void 
  	updateSingleHash(
  		HashWrapper 	hash, 
  		boolean 		force, 
  		boolean 		async ) 
  	{      
  		//LGLogger.log( "updateSingleHash():: force=" + force + ", async=" +async+ ", url=" +scrapeURL+ ", hash=" +ByteFormatter.nicePrint(hash, true) );
    
  		if ( scrapeURL == null ){
      
  			return;
  		}
    
  		try {
  			ArrayList responsesToUpdate = new ArrayList();

  			TRTrackerScraperResponseImpl response;
    
  			try{
  				hashes_mon.enter();
   		
	    		response = (TRTrackerScraperResponseImpl)hashes.get( hash );
		    
	    		if (response == null) {
	    			
	    			response = addHash(hash);
	    		}
	    	}finally{
	    	
	    		hashes_mon.exit();
	    	}
	
	    	long lMainNextScrapeStartTime = response.getNextScrapeStartTime();
	
	    	if( !force && lMainNextScrapeStartTime > SystemTime.getCurrentTime() ) {
	    		
	    		return;
	    	}
    
	    		// Set status id to SCRAPING, but leave status string until we actually
	    		// do the scrape
	    	
	    	response.setStatus(TRTrackerScraperResponse.ST_SCRAPING, null);
	
	    	responsesToUpdate.add(response);
	    
	    		// Go through hashes and pick out other scrapes that are "close to" wanting a new scrape.
	    
		    if (!bSingleHashScrapes){
		    	
		    	try{
		    	  hashes_mon.enter();
		    		
			      Iterator iterHashes = hashes.values().iterator();
			      
			      	// if we hit trackers with excessive scrapes they respond in varying fashions - from no reply
			      	// to returning 414 to whatever. Rather than hit trackers with large payloads that they then
			      	// reject we limit to GROUP_SCRAPES_LIMIT in one go
			      
			      while( iterHashes.hasNext() && responsesToUpdate.size() < GROUP_SCRAPES_LIMIT ){
			      	
			        TRTrackerScraperResponseImpl r = (TRTrackerScraperResponseImpl)iterHashes.next();
			        
			        if ( !r.getHash().equals( hash )) {
			        	
			          long lTimeDiff = Math.abs(lMainNextScrapeStartTime - r.getNextScrapeStartTime());
			          
			          if (lTimeDiff <= GROUP_SCRAPES_MS && r.getStatus() != TRTrackerScraperResponse.ST_SCRAPING) {
			          	
			            r.setStatus(TRTrackerScraperResponse.ST_SCRAPING, null);
			            
			            responsesToUpdate.add(r);
			          }
			        }
			      }
		      }finally{
		      	
		      	hashes_mon.exit();
		      }
		    }
		    
		    runScrapes(responsesToUpdate,  force, async);
		    
  		}catch( Throwable t ) {
      
  			Debug.out( "updateSingleHash() exception", t );
  		}
  	}
	
  	protected void
  	runScrapes(
  		final ArrayList 	responses, 
  	    final boolean 		force, 
  	    boolean 			async) 
  	{
  		if ( async ){
  			
  			thread_pool.run( 
  				new AERunnable()
  				{
  					public void
  					runSupport()
  					{
  						runScrapesSupport( responses, force );
  					}
  				});
  			
  		}else{
  		
  			runScrapesSupport( responses, force );
  		}
  	}
  	
 
  	protected void 
    runScrapesSupport(
    	ArrayList 	responses, 
    	boolean 	force ) 
    {
		try {
			if (Logger.isEnabled())
				Logger.log(new LogEvent(LOGID, "TrackerStatus: scraping '"
						+ scrapeURL + "', number of hashes = " + responses.size()
						+ ", single_hash_scrapes = " + bSingleHashScrapes));

			boolean original_bSingleHashScrapes = bSingleHashScrapes;

			boolean disable_all_scrapes = !COConfigurationManager
					.getBooleanParameter("Tracker Client Scrape Enable");
			boolean disable_stopped_scrapes = !COConfigurationManager
					.getBooleanParameter("Tracker Client Scrape Stopped Enable");

			try {
				// if URL already includes a query component then just append our
				// params

				HashWrapper one_of_the_hashes = null;
				TRTrackerScraperResponseImpl one_of_the_responses = null;

				char first_separator = scrapeURL.indexOf('?') == -1 ? '?' : '&';
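				// e.g. "http://host/scrape" gives '?', "http://host/scrape?passkey=x" gives '&'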

				String info_hash = "";

				String flags = "";
				
				for (int i = 0; i < responses.size(); i++) {
					TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses.get(i);

					HashWrapper hash = response.getHash();

					if (Logger.isEnabled())
						Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
								"TrackerStatus: scraping, single_hash_scrapes = "
										+ bSingleHashScrapes));

					if (!scraper.isNetworkEnabled(hash, tracker_url)) {

						response.setNextScrapeStartTime(SystemTime.getCurrentTime()
								+ FAULTY_SCRAPE_RETRY_INTERVAL);

						response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
								.getString(SS + "networkdisabled"));

						scraper.scrapeReceived(response);

					} else if ( !force && ( 
								disable_all_scrapes ||
								(disable_stopped_scrapes && !scraper.isTorrentRunning(hash)))){

						response.setNextScrapeStartTime(SystemTime.getCurrentTime()
								+ FAULTY_SCRAPE_RETRY_INTERVAL);

						response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
								.getString(SS + "disabled"));

						scraper.scrapeReceived(response);

					} else {

						response.setStatus(TRTrackerScraperResponse.ST_SCRAPING,
								MessageText.getString(SS + "scraping"));

						// technically we haven't received a scrape yet, but we need
						// to notify listeners (the ones that display status)
						scraper.scrapeReceived(response);

						// the client-id stuff RELIES on info_hash being the FIRST parameter
						// added by us to the URL, so don't change it!

						info_hash += ((one_of_the_hashes != null) ? '&' : first_separator)
								+ "info_hash=";

						info_hash += URLEncoder.encode(
								new String(hash.getBytes(), Constants.BYTE_ENCODING),
								Constants.BYTE_ENCODING).replaceAll("\\+", "%20");
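						// URLEncoder turns the 0x20 byte into '+', but trackers expect
						// percent-encoding, hence the "+" -> "%20" substitution above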

						Object[]	extensions = scraper.getExtensions(hash);
						
						if ( extensions != null ){
							
							if ( extensions[0] != null ){
								
								info_hash += (String)extensions[0]; 
							}
							
							flags += (Character)extensions[1];
							
						}else{
							
							flags += TRTrackerScraperClientResolver.FL_NONE;
						}
						
						one_of_the_responses = response;
						one_of_the_hashes = hash;
					}
				} // for responses

				if (one_of_the_hashes == null)
					return;

				// set context in case authentication dialog is required
				TorrentUtils.setTLSTorrentHash(one_of_the_hashes);

				String	request = scrapeURL + info_hash;
				
				if ( az_tracker ){
					
					String	port_details = TRTrackerUtils.getPortsForURL();
					
					request += port_details;
					
					request += "&azsf=" + flags + "&azver=" + TRTrackerAnnouncer.AZ_TRACKER_VERSION_CURRENT;
				}
				
				URL reqUrl = new URL( request );

				if (Logger.isEnabled())
					Logger.log(new LogEvent(LOGID,
							"Accessing scrape interface using url : " + reqUrl));

				ByteArrayOutputStream message = new ByteArrayOutputStream();

				long scrapeStartTime = SystemTime.getCurrentTime();
