
📄 trackerstatus.java

📁 Java-based P2P source code for a BitTorrent client
💻 JAVA
	  				});
	  		
	  		con = ssl_con;
	  		
	  	} else {
	  		con = (HttpURLConnection) reqUrl.openConnection();
	  	}

		String	user_agent = (String)http_properties.get( ClientIDGenerator.PR_USER_AGENT );
 		
 		if ( user_agent != null ){
 			
 			con.setRequestProperty("User-Agent", user_agent );
 		}
 		
 			// some trackers support gzip encoding of replies
 		
	    con.addRequestProperty("Accept-Encoding","gzip");
	    
	    con.setRequestProperty("Connection", "close" );
	    
	  	con.connect();

	  	is = con.getInputStream();
	
		String	resulting_url_str = con.getURL().toString();
			
		if ( !reqUrl.toString().equals( resulting_url_str )){
			
				// some kind of redirect has occurred. Unfortunately we can't get at the underlying
				// redirection reason (temp, perm etc) so we support the use of an explicit indicator
				// in the resulting url
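				// e.g. a final URL of ".../scrape?x=1&permredirect=1" gives a
				// redirect_url of ".../scrape?x=1"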
			
			String	marker = "permredirect=1";
			
			int	pos = resulting_url_str.indexOf( marker );
		
			if ( pos != -1 ){
				
				pos = pos-1;	// back up over the preceding '&' or '?' so it is stripped too
				
				try{
					redirect_url = 
						new URL( resulting_url_str.substring(0,pos));
								
				}catch( Throwable e ){
					Debug.printStackTrace(e);
				}
			}
		}
		
	  	String encoding = con.getHeaderField( "content-encoding");
	  	
	  	boolean	gzip = encoding != null && encoding.equalsIgnoreCase("gzip");
	  	
	  	// System.out.println( "encoding = " + encoding );
	  	
	  	if ( gzip ){
	  		
	  		is = new GZIPInputStream( is );
	  	}
	  	
	  	byte[]	data = new byte[1024];
	  	
	  	int num_read = 0;
	  	
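	  		// read the response in 1KB chunks; anything over 128KB is treated
	  		// as junk and abandons the scrape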
	  	while( true ){
	  		
	  		try {
				int	len = is.read(data);
					
				if ( len > 0 ){
					
					message.write(data, 0, len);
					
					num_read += len;
					
					if ( num_read > 128*1024 ){
						
							// someone's sending us junk, bail out
					   
						message.reset();
						
						throw( new Exception( "Tracker response invalid (too large)" ));
						
					}
				}else if ( len == 0 ){
					
					Thread.sleep(20);
					
				}else{
					
					break;
				}
	  		} catch (Exception e) {
	  			
	  			if (Logger.isEnabled())
						Logger.log(new LogEvent(LOGID, LogEvent.LT_ERROR,
								"Error from scrape interface " + scrapeURL + " : "
										+ Debug.getNestedExceptionMessage(e)));

	  			return( null );
	  		}
	  	}
	  } finally {
	  	if (is != null) {
	  		try {
	  			is.close();
	  		} catch (IOException e1) {
	  			// ignore errors closing the stream
	  		}
	  	}
	  }
	  
	  return( redirect_url );
  }
  
  protected void
  scrapeUDP(
  	URL								reqUrl,
	ByteArrayOutputStream			message,
	HashWrapper						hash,
	TRTrackerScraperResponseImpl	current_response )
  
  		throws Exception
  {
  		/* reduce network traffic by only scraping over UDP when the torrent isn't
  		 * running, as version 2 of the UDP protocol already includes scrape data
  		 * in the announce response
  		 */
  	
  	if ( 	PRUDPPacketTracker.VERSION == 2 &&
  			scraper.isTorrentDownloading( hash )){
  	
  		if (Logger.isEnabled())
				Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
						LogEvent.LT_WARNING, "Scrape of " + reqUrl
								+ " skipped as torrent running and therefore "
								+ "scrape data available in announce replies"));

			// easiest approach here is to brew up a response that looks like the current one
		
		Map	map = new HashMap();

		Map	files = new ByteEncodedKeyHashMap();
		
		map.put( "files", files );
									
		Map	file = new HashMap();
			
		byte[]	resp_hash = hash.getBytes();
		
		// System.out.println("got hash:" + ByteFormatter.nicePrint( resp_hash, true ));
	
		files.put( new String(resp_hash, Constants.BYTE_ENCODING), file );
		
		file.put( "complete", new Long( current_response.getSeeds()));
		file.put( "downloaded", new Long(-1));	// unknown
		file.put( "incomplete", new Long(current_response.getPeers()));
		
		byte[] data = BEncoder.encode( map );
		
		message.write( data );
		
  		return;
  	}
  	
	reqUrl = TRTrackerUtils.adjustURLForHosting( reqUrl );

	PasswordAuthentication	auth 	= null;
	boolean					auth_ok	= false;
	
	try{
		String	query = reqUrl.getQuery();

		if ( query != null && query.toLowerCase().indexOf("auth") != -1 ){
					
			auth = SESecurityManager.getPasswordAuthentication( "UDP Tracker", reqUrl );
		}		
	
		int port = UDPNetworkManager.getSingleton().getUDPNonDataListeningPortNumber();
		
		PRUDPPacketHandler handler = PRUDPPacketHandlerFactory.getHandler( port );
		
		InetSocketAddress destination = new InetSocketAddress(reqUrl.getHost(),reqUrl.getPort()==-1?80:reqUrl.getPort());
		
		String	failure_reason = null;
		
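		// retry the whole exchange up to DEFAULT_RETRY_COUNT times; only
		// timeouts are retried, any other error propagates immediately
		// (see the catch clause below)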
		for (int retry_loop=0;retry_loop<PRUDPPacketTracker.DEFAULT_RETRY_COUNT;retry_loop++){
		
			try{
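				// standard UDP tracker handshake: first obtain a connection id
				// with a connect request, then issue the scrape using that id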
				PRUDPPacket connect_request = new PRUDPPacketRequestConnect();
				
				PRUDPPacket reply = handler.sendAndReceive( auth, connect_request, destination );
				
				if ( reply.getAction() == PRUDPPacketTracker.ACT_REPLY_CONNECT ){
					
					PRUDPPacketReplyConnect connect_reply = (PRUDPPacketReplyConnect)reply;
					
					long	my_connection = connect_reply.getConnectionId();
					
					PRUDPPacketRequestScrape scrape_request = new PRUDPPacketRequestScrape( my_connection, hash.getBytes() );
									
					reply = handler.sendAndReceive( auth, scrape_request, destination );
					
					if ( reply.getAction() == PRUDPPacketTracker.ACT_REPLY_SCRAPE ){
	
						auth_ok	= true;
	
						if ( PRUDPPacketTracker.VERSION == 1 ){
							PRUDPPacketReplyScrape	scrape_reply = (PRUDPPacketReplyScrape)reply;
							
							Map	map = new HashMap();
							
							/*
							int	interval = scrape_reply.getInterval();
							
							if ( interval != 0 ){
								
								map.put( "interval", new Long(interval ));
							}
							*/
							
							byte[][]	reply_hashes 	= scrape_reply.getHashes();
							int[]		complete 		= scrape_reply.getComplete();
							int[]		downloaded 		= scrape_reply.getDownloaded();
							int[]		incomplete 		= scrape_reply.getIncomplete();
							
							Map	files = new ByteEncodedKeyHashMap();
							
							map.put( "files", files );
							
							for (int i=0;i<reply_hashes.length;i++){
								
								Map	file = new HashMap();
								
								byte[]	resp_hash = reply_hashes[i];
								
								// System.out.println("got hash:" + ByteFormatter.nicePrint( resp_hash, true ));
							
								files.put( new String(resp_hash, Constants.BYTE_ENCODING), file );
								
								file.put( "complete", new Long(complete[i]));
								file.put( "downloaded", new Long(downloaded[i]));
								file.put( "incomplete", new Long(incomplete[i]));
							}
							
							byte[] data = BEncoder.encode( map );
							
							message.write( data );
							
							return;
						}else{
							PRUDPPacketReplyScrape2	scrape_reply = (PRUDPPacketReplyScrape2)reply;
							
							Map	map = new HashMap();
							
							/*
							int	interval = scrape_reply.getInterval();
							
							if ( interval != 0 ){
								
								map.put( "interval", new Long(interval ));
							}
							*/
							
							int[]		complete 	= scrape_reply.getComplete();
							int[]		downloaded 	= scrape_reply.getDownloaded();
							int[]		incomplete 	= scrape_reply.getIncomplete();
							
							Map	files = new ByteEncodedKeyHashMap();
							
							map.put( "files", files );
														
							Map	file = new HashMap();
								
							byte[]	resp_hash = hash.getBytes();
							
							// System.out.println("got hash:" + ByteFormatter.nicePrint( resp_hash, true ));
						
							files.put( new String(resp_hash, Constants.BYTE_ENCODING), file );
							
							file.put( "complete", new Long(complete[0]));
							file.put( "downloaded", new Long(downloaded[0]));
							file.put( "incomplete", new Long(incomplete[0]));
							
							byte[] data = BEncoder.encode( map );
							
							message.write( data );
							
							return;
						}
					}else{
						
						failure_reason = ((PRUDPPacketReplyError)reply).getMessage();
						
						if (Logger.isEnabled())
								Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash),
										LOGID, LogEvent.LT_ERROR,
										"Response from scrape interface : " + failure_reason));
						
						break;
					}
				}else{
	
					failure_reason = ((PRUDPPacketReplyError)reply).getMessage();
					
					if (Logger.isEnabled())
							Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
									LogEvent.LT_ERROR, "Response from scrape interface : "
											+ ((PRUDPPacketReplyError) reply).getMessage()));
				
					break;
				}
	
			}catch( PRUDPPacketHandlerException e ){
				
				if ( e.getMessage() == null || e.getMessage().indexOf("timed out") == -1 ){
					
					throw( e );
				}
				
				failure_reason	= "Timeout";
			}
		}
		
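		// report the failure bencoded the same way an HTTP tracker would
		// ("failure reason"), so callers can parse both transports uniformly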
		if ( failure_reason != null ){
			
			Map	map = new HashMap();
			
			map.put( "failure reason", failure_reason.getBytes());
			
			byte[] data = BEncoder.encode( map );
			
			message.write( data );

		}
	}finally{
		if ( auth != null ){
			
			SESecurityManager.setPasswordAuthenticationOutcome( TRTrackerBTAnnouncerImpl.UDP_REALM, reqUrl, auth_ok );
		}
	}
  }
  
  protected String
  getURLParam(
  		String		url,
		String		param )
  {
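  		// e.g. getURLParam( "...?a=1&permredirect=1", "permredirect" ) returns "1"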
  	int	p1 = url.indexOf( param + "=" );
  	
  	if ( p1 == -1 ){
  		
  		return( null );
  	}
  	
  	int	p2 = url.indexOf( "&", p1 );
  	
  	if ( p2 == -1 ){
  		
  		return( url.substring(p1+param.length()+1));
  	}
  	
  	return( url.substring(p1+param.length()+1,p2));
  }
  

  protected TRTrackerScraperResponseImpl addHash(HashWrapper hash) {
    TRTrackerScraperResponseImpl response = new TRTrackerBTScraperResponseImpl(this, hash);
    if (scrapeURL == null)  {
      response.setStatus(TRTrackerScraperResponse.ST_ERROR,
                         MessageText.getString(SS + "error") + 
                         MessageText.getString(SSErr + "badURL"));
    } else {
      response.setStatus(TRTrackerScraperResponse.ST_INITIALIZING,
                         MessageText.getString(SS + "initializing"));
    }
    
    response.setNextScrapeStartTime(checker.getNextScrapeCheckOn());
  	try{
  		hashes_mon.enter();
  	
  		hashes.put( hash, response);
      
  	}finally{
  		
  		hashes_mon.exit();
  	}

    // notify listeners
    scraper.scrapeReceived( response );

  	return response;
  }
  
  protected void removeHash(HashWrapper hash) {
  	try{
  		hashes_mon.enter();
  	
  		hashes.remove( hash );
  		
  	}finally{
  		
  		hashes_mon.exit();
  	}
  }
  
  protected URL
  getTrackerURL()
  {
  	return( tracker_url );
  }
  
  protected Map getHashes() {
    return hashes;
  }
  
  protected AEMonitor
  getHashesMonitor()
  {
  	return( hashes_mon );
  }

	protected void scrapeReceived(TRTrackerScraperResponse response) {
	  scraper.scrapeReceived(response);
	}

	public boolean getSupportsMultipeHashScrapes() {
		return !bSingleHashScrapes;
	}
	
	protected String
	getString()
	{	  
	  return( tracker_url + ", " + scrapeURL + ", multi-scrape=" + !bSingleHashScrapes );
	}
}
