// TrackerStatus.java — Azureus tracker scrape handler.
// (Transcribed from a code-viewer page: chunk 1 of 3 of a 917-line file;
// the viewer's page chrome has been reduced to this note.)
/*
 * Created on 22 juil. 2003
 *
 */
package org.gudy.azureus2.core3.tracker.client.classic;

import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.zip.GZIPInputStream;

import javax.net.ssl.*;

import org.gudy.azureus2.core3.config.COConfigurationManager;
import org.gudy.azureus2.core3.internat.MessageText;
import org.gudy.azureus2.core3.logging.LGLogger;
import org.gudy.azureus2.core3.security.SESecurityManager;
import org.gudy.azureus2.core3.tracker.client.TRTrackerScraperResponse;
import org.gudy.azureus2.core3.tracker.protocol.udp.*;
import org.gudy.azureus2.core3.tracker.util.TRTrackerUtils;
import org.gudy.azureus2.core3.util.*;

import com.aelitis.azureus.core.proxy.AEProxyFactory;

/**
 * @author Olivier
 * 
 */
 
/** One TrackerStatus object handles scrape functionality for all torrents
 * on one tracker.
 */
public class TrackerStatus {
	// LGLogger component/event ids used by this class's log calls.
	public final static int componentID = 2;
	public final static int evtLifeCycle = 0;
	public final static int evtFullTrace = 1;
	public final static int evtErrors = 2;

  // Back-off before retrying after a failed scrape: 10 minutes, in ms.
  private final static int FAULTY_SCRAPE_RETRY_INTERVAL = 60 * 10 * 1000;
  
  // The tracker's announce URL as supplied to the constructor.
  private URL		tracker_url;
  
  // Scrape URL derived from the announce URL; stays null when no scrape
  // form could be derived, which disables scraping for this tracker.
  private String 	scrapeURL = null;
 
  /** key = Torrent hash.  values = TRTrackerScraperResponseImpl */
  // NOTE(review): lookups in this file pass raw byte[] keys, whose equals()
  // is reference equality — verify against addHash() (not visible in this
  // chunk) that the stored key type actually matches.
  private HashMap 					hashes;
  /** only needed to notify listeners */ 
  private TRTrackerScraperImpl		scraper;
  
  // When true the tracker only accepts one info_hash per scrape request,
  // so multi-hash batching is skipped.
  private boolean bSingleHashScrapes = false;
    
  // Guards all access to the hashes map.
  protected AEMonitor hashes_mon 	= new AEMonitor( "TrackerStatus:hashes" );

  /** Derives this tracker's scrape URL from its announce URL.
    *
    * Supported forms:
    *  - ".../announce..."  the "announce" path component is rewritten to "scrape"
    *  - "udp:..."          kept as-is (UDP scraping is not URL-rewrite based)
    *  - a path whose last component contains no '.'  ->  "/scrape" is appended
    * Anything else leaves scrapeURL null, which disables scraping.
    *
    * @param _scraper      owning scraper, used to notify listeners
    * @param _tracker_url  the tracker's announce URL
    */
  public 
  TrackerStatus(
  	TRTrackerScraperImpl	_scraper, 
	URL 					_tracker_url ) 
  {    	
  	scraper		= _scraper;
    tracker_url	= _tracker_url;
    
    String trackerUrl	= tracker_url.toString();
    
    hashes = new HashMap();
    
    try {
      // strip any stray spaces before parsing the URL text
      trackerUrl = trackerUrl.replaceAll(" ", "");
      int position = trackerUrl.lastIndexOf('/');
      if(	position >= 0 &&
      		trackerUrl.length() >= position+9 && 
      		trackerUrl.substring(position+1,position+9).equals("announce")) {

        // standard case: swap the "announce" path component for "scrape",
        // keeping whatever query/suffix follows it
        this.scrapeURL = trackerUrl.substring(0,position+1) + "scrape" + trackerUrl.substring(position+9);
        
      }else if ( trackerUrl.toLowerCase().startsWith("udp:")){
      		// UDP scrapes aren't based on URL rewriting, just carry on
      	
      	scrapeURL = trackerUrl;
      	
       }else if ( position >= 0 && trackerUrl.lastIndexOf('.') < position ){
       	
       		// some trackers support /scrape appended but don't have an /announce;
       		// don't do this though if the URL ends with .php (or in fact .<anything>)
       	
       	scrapeURL = trackerUrl + (trackerUrl.endsWith("/")?"":"/") + "scrape";
       	
    }else {
        LGLogger.log(componentID, evtErrors, LGLogger.ERROR,
                     "can't scrape using '" + trackerUrl + "' as it doesn't end in '/announce'");		
       }
    } catch (Exception e) {
    	Debug.printStackTrace( e );
    } 
  }

  /** Looks up the scrape response recorded for the given torrent hash.
    *
    * @param hash wrapper around the torrent's hash bytes
    * @return the stored response, or null when this tracker has no entry
    */
  protected TRTrackerScraperResponseImpl getHashData(HashWrapper hash) {
    try {
      hashes_mon.enter();

      Object entry = hashes.get(hash.getHash());

      return (TRTrackerScraperResponseImpl) entry;
    } finally {
      hashes_mon.exit();
    }
  }

  /** Looks up the scrape response stored under the given raw hash key.
    *
    * @param hash raw torrent hash bytes used as the map key
    * @return the stored response, or null when none is recorded
    */
  protected TRTrackerScraperResponseImpl getHashData(byte[] hash) {
    try {
      hashes_mon.enter();

      Object entry = hashes.get(hash);

      return (TRTrackerScraperResponseImpl) entry;
    } finally {
      hashes_mon.exit();
    }
  }


  /** Scrapes the given torrent (and any near-due siblings) asynchronously. */
  protected void updateSingleHash(HashWrapper hash, boolean force) {
    updateSingleHash(hash.getHash(), force, true);
  }

  /** Scrapes the given torrent (and any near-due siblings) asynchronously. */
  protected void updateSingleHash(byte[] hash, boolean force) {
    updateSingleHash(hash, force, true);
  }

  /** Schedules a scrape for the given torrent, batching in any other torrents
    * on this tracker whose next-scrape time is within 30 seconds of this one
    * so they can share a single multi-hash scrape request.
    *
    * @param hash   torrent hash bytes (also used as the key into the hashes map)
    * @param force  when true, scrape even if the next-scrape time hasn't arrived
    * @param async  when true, run the scrape on a daemon thread; otherwise inline
    */
  protected void updateSingleHash(byte[] hash, boolean force, boolean async) {
    if (scrapeURL == null)  {
      // the constructor couldn't derive a scrape URL for this tracker
      return;
    }

    ArrayList responsesToUpdate = new ArrayList();

    TRTrackerScraperResponseImpl response;

    try {
      hashes_mon.enter();

      response = (TRTrackerScraperResponseImpl) hashes.get(hash);

      if (response == null) {
        response = addHash(hash);
      }
    } finally {
      hashes_mon.exit();
    }

    long lMainNextScrapeStartTime = response.getNextScrapeStartTime();

    // skip if the scrape isn't due yet (unless forced, or the system clock
    // has recently misbehaved and schedules can't be trusted)
    if (!SystemTime.isErrorLast1min() && !force &&
        lMainNextScrapeStartTime >= SystemTime.getCurrentTime()) {
      return;
    }

    // Set status id to SCRAPING, but leave status string until we actually
    // do the scrape
    response.setStatus(TRTrackerScraperResponse.ST_SCRAPING, null);

    responsesToUpdate.add(response);

    // Go through hashes and pick out other scrapes that are "close to" wanting
    // a new scrape, so they can ride along in the same request.
    if (!bSingleHashScrapes) {

      try {
        hashes_mon.enter();

        Iterator iterHashes = hashes.values().iterator();

        while (iterHashes.hasNext()) {

          TRTrackerScraperResponseImpl r = (TRTrackerScraperResponseImpl) iterHashes.next();

          // BUGFIX: getHash() returns byte[], and byte[].equals() is reference
          // equality, so the original test only excluded the identical array
          // *instance*. Compare the hash contents instead.
          if (!Arrays.equals(r.getHash(), hash)) {

            long lTimeDiff = Math.abs(lMainNextScrapeStartTime - r.getNextScrapeStartTime());

            if (lTimeDiff <= 30000 && r.getStatus() != TRTrackerScraperResponse.ST_SCRAPING) {

              r.setStatus(TRTrackerScraperResponse.ST_SCRAPING, null);

              responsesToUpdate.add(r);
            }
          }
        }
      } finally {

        hashes_mon.exit();
      }
    }

    new ThreadedScrapeRunner(responsesToUpdate, force, async);
  }
  
  /** Does the scrape and decoding asynchronously (or inline on the caller's
    * thread when constructed with async == false).
    *
    * TODO: Allow handling of multiple TRTrackerScraperResponseImpl objects
    *       on one URL
    */
  private class ThreadedScrapeRunner extends AEThread {
    // whether the caller requested a forced (ignore-schedule) scrape
    boolean force;
    // the TRTrackerScraperResponseImpl entries this scrape will update
    ArrayList responses;

    public ThreadedScrapeRunner(ArrayList _responses, boolean _force, boolean async) {
      super("ThreadedScrapeRunner");
      force = _force;
      responses = _responses;

      // NOTE(review): the thread is started (or run) from inside the
      // constructor, so the work begins before construction completes;
      // kept as-is to preserve existing behaviour.
      if (async) {
        setDaemon(true);
        start();
      } else {
        // synchronous mode: execute the scrape on the caller's thread
        run();
      }
    }

    public void runSupport() {
      if (scrapeURL == null)  {
        return;
      }
      
      //LGLogger.log( "ThreadedScrapeRunner:: responses.size()=" +responses.size()+ ", bSingleHashScrapes=" +bSingleHashScrapes );
            
      boolean	original_bSingleHashScrapes = bSingleHashScrapes;
      
      try {
      		// if URL already includes a query component then just append our params
      	
      	byte[]	one_of_the_hashes	= null;
      	
      	char	first_separator = scrapeURL.indexOf('?')==-1?'?':'&';
      	
        String info_hash = "";
        for (int i = 0; i < responses.size(); i++) {
          TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
          response.setStatus(TRTrackerScraperResponse.ST_SCRAPING,
                             MessageText.getString("Scrape.status.scraping"));
          byte[] hash = response.getHash();
          
          one_of_the_hashes	= hash;
          
          info_hash += ((i > 0) ? '&' : first_separator) + "info_hash=";
          info_hash += URLEncoder.encode(new String(hash, Constants.BYTE_ENCODING), 
                                         Constants.BYTE_ENCODING).replaceAll("\\+", "%20");
        }

        if ( one_of_the_hashes == null ){
        	
        	Debug.out( "No hashes for scrape" );
        	
        }else{
        	
        		// set context in case authentication dialog is required
        	
        	TorrentUtils.setTLSTorrentHash( one_of_the_hashes );
        }
        
        URL reqUrl = new URL(scrapeURL + info_hash);
        
        LGLogger.log(componentID, evtLifeCycle, LGLogger.SENT,
                     "Accessing scrape interface using url : " + reqUrl);
   
        ByteArrayOutputStream message = new ByteArrayOutputStream();
        
        long scrapeStartTime = SystemTime.getCurrentTime();
        
        if ( reqUrl.getProtocol().equalsIgnoreCase( "udp" )){
          // TODO: support multi hash scrapes on UDP
        	scrapeUDP( reqUrl, message, ((TRTrackerScraperResponseImpl)responses.get(0)).getHash());
        	bSingleHashScrapes = true;
        }else{
        	scrapeHTTP( reqUrl, message );
        }
                
        Map map = BDecoder.decode(message.toByteArray());
        
        Map mapFiles = map==null?null:(Map) map.get("files");

        LGLogger.log(componentID, evtLifeCycle, LGLogger.RECEIVED,
                     "Response from scrape interface " + scrapeURL + ": " + 
                     ((mapFiles == null) ? "null" : ""+mapFiles.size()) +
                     " returned");

        if (mapFiles == null || mapFiles.size() == 0) {
        	
     		// azureus extension here to handle "failure reason" returned for scrapes
        	
 		     byte[]	failure_reason_bytes = map==null?null:(byte[]) map.get("failure reason");

		     if ( failure_reason_bytes != null ){
		     	
	            for (int i = 0; i < responses.size(); i++) {
			       
	            	TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
		       
	            	response.setNextScrapeStartTime(
	            			SystemTime.getCurrentTime() + 
                            FAULTY_SCRAPE_RETRY_INTERVAL);

// [transcription truncated mid-method: the remainder of runSupport() and the
//  rest of the 917-line file continue on the source's subsequent pages]