// TrackerStatus.java (Azureus tracker scrape handling, excerpt)
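// the tracker returned a bencoded "failure reason", so the scrape response
// is marked errored with the tracker-supplied message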
response.setStatus(
TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
new String( failure_reason_bytes, Constants.DEFAULT_ENCODING ));
//notify listeners
scraper.scrapeReceived( response );
}
}else{
if (responses.size() > 1) {
// multi were requested, 0 returned. Therefore, multi not supported
bSingleHashScrapes = true;
LGLogger.log(componentID, evtFullTrace, LGLogger.INFORMATION,
scrapeURL + " doesn't properly support multi-hash scrapes");
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("Scrape.status.error.invalid"));
//notify listeners
scraper.scrapeReceived( response );
}
} else {
// 1 was requested, 0 returned. Therefore, hash not found.
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(0);
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("Scrape.status.error.nohash"));
//notify listeners
scraper.scrapeReceived( response );
}
}
return;
}
/* If we requested multiple hashes but only one was returned, revert to
single-hash scrapes, but continue on to process the one hash that was
returned (it may be a random one from the list)
*/
if (!bSingleHashScrapes && responses.size() > 1 && mapFiles.size() == 1) {
bSingleHashScrapes = true;
LGLogger.log(componentID, evtFullTrace, LGLogger.INFORMATION,
scrapeURL + " only returned " + mapFiles.size() +
" hash scrape(s), but we asked for " + responses.size());
}
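// (background) a multi-hash scrape appends several info_hash parameters to a
// single scrape URL; tracker support is optional, so a tracker may answer
// with fewer hashes than were requested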
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
//LGLogger.log( "decoding response #" +i+ ": " + ByteFormatter.nicePrint( response.getHash(), true ) );
//retrieve the scrape data for the relevant infohash
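// the bencoded "files" dictionary is keyed by the raw 20-byte info_hash;
// Constants.BYTE_ENCODING (a byte-preserving 8-bit charset) lets the key
// bytes round-trip through String for this lookup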
Map scrapeMap = (Map)mapFiles.get(new String(response.getHash(), Constants.BYTE_ENCODING));
if ( scrapeMap == null ){
// some trackers that return only 1 hash return a random one!
if (responses.size() == 1 || mapFiles.size() != 1) {
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("Scrape.status.error.nohash"));
//notify listeners
scraper.scrapeReceived( response );
} else {
// This tracker doesn't support multiple hash requests.
// revert status to what it was
response.revertStatus();
if (response.getStatus() == TRTrackerScraperResponse.ST_SCRAPING) {
System.out.println("Hash " + ByteFormatter.nicePrint(response.getHash(), true) + " mysteriously reverted to ST_SCRAPING!");
//response.setStatus(TRTrackerScraperResponse.ST_ONLINE, "");
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus( TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("Scrape.status.error.invalid"));
}else{
// force single-hash scrapes here
bSingleHashScrapes = true;
// only leave the next retry time if this is the first single hash fail
if ( original_bSingleHashScrapes ){
response.setNextScrapeStartTime(
SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
}
}
//notify listeners
scraper.scrapeReceived( response );
// if this was the first scrape request in the list, TrackerChecker
// will attempt to scrape again because we didn't reset the
// nextScrapeStartTime. But the next time, bSingleHashScrapes
// will be true, and only 1 hash will be requested, so there
// will be no infinite looping
}
// System.out.println("scrape: hash missing from reply");
} else {
//retrieve values
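// bencoded integers decode to Long, hence the casts; by scrape convention
// "complete" counts seeds and "incomplete" counts downloading peers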
int seeds = ((Long)scrapeMap.get("complete")).intValue();
int peers = ((Long)scrapeMap.get("incomplete")).intValue();
//make sure we don't use invalid replies
if ( seeds < 0 || peers < 0 ) {
System.out.println("scrapeMap="+scrapeMap);
// We requested multiple hashes, but tracker didn't support
// multiple hashes and returned 1 hash. However, that hash is
// invalid because seeds or peers was < 0. So, exit. Scrape
// manager will run scrapes for each individual hash.
if (responses.size() > 1 && bSingleHashScrapes){
response.setStatus( TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("Scrape.status.error.invalid"));
scraper.scrapeReceived( response );
continue;
}
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("Scrape.status.error.invalid") + " " +
(seeds < 0 ? MessageText.getString("MyTorrentsView.seeds") + " == " + seeds + ". " : "") +
(peers < 0 ? MessageText.getString("MyTorrentsView.peers") + " == " + peers + ". " : "")
);
scraper.scrapeReceived( response );
continue;
}
// decode additional flags - see http://anime-xtreme.com/tracker/blah.txt for example
/*
files
infohash
complete
incomplete
downloaded
name
flags
min_request_interval
*/
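// an illustrative decoded reply with flags (field values made up):
// { "files" : { <20-byte hash> : { "complete" : 10, "incomplete" : 4, "downloaded" : 812 } },
//   "flags" : { "min_request_interval" : 1800 } }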
// Min 15 min, plus 10 seconds for every seed
// ex. 10 seeds = 15m + 100s = ~16.7m
// 60 seeds = 15m + 600s = 25m
// 1000 seeds = 15m + 10000s = ~3h 2m (clamped to 3h below)
int scrapeInterval = 15 * 60 + (seeds * 10);
Map mapFlags = (Map) map.get("flags");
if (mapFlags != null) {
Long lNewScrapeInterval = (Long)mapFlags.get("min_request_interval");
// guard against a "flags" dictionary that omits min_request_interval
if (lNewScrapeInterval != null && lNewScrapeInterval.intValue() > scrapeInterval)
scrapeInterval = lNewScrapeInterval.intValue();
//Debug.out("scrape min_request_interval = " + lNewScrapeInterval);
}
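// clamp the final interval to a window of 10 minutes to 3 hours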
if (scrapeInterval < 10*60) scrapeInterval = 10*60;
if (scrapeInterval > 3*60*60) scrapeInterval = 3*60*60;
long nextScrapeTime = SystemTime.getCurrentTime() + (scrapeInterval * 1000);
response.setNextScrapeStartTime(nextScrapeTime);
//fill in the scrape results
response.setScrapeStartTime(scrapeStartTime);
response.seeds = seeds;
response.peers = peers;
response.setStatus(TRTrackerScraperResponse.ST_ONLINE,
MessageText.getString("Scrape.status.ok") );
//LGLogger.log("finished decoding #" +i);
//notify listeners
scraper.scrapeReceived( response );
}
} // for responses
} catch (NoClassDefFoundError ignoreSSL) { // javax/net/ssl/SSLSocket
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
ignoreSSL.getMessage());
//notify listeners
scraper.scrapeReceived( response );
}
} catch (FileNotFoundException e) {
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") +
MessageText.getString("DownloadManager.error.filenotfound"));
//notify listeners
scraper.scrapeReceived( response );
}
} catch (ConnectException e) {
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") + e.getLocalizedMessage());
//notify listeners
scraper.scrapeReceived( response );
}
} catch (Exception e) {
// Apache (among others) can return HTTP 414 (Request-URI Too Long); the
// simplest recovery is to fall back to single-hash scraping
String error_message = e.getMessage();
if ( error_message != null && error_message.indexOf( "414" ) != -1 ){
bSingleHashScrapes = true;
}
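// (rough figure) each info_hash adds about 70 URL-encoded characters
// ("&info_hash=" plus up to 60 escaped bytes), so a large batch can
// overflow a server's URL length limit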
LGLogger.log(componentID, evtErrors, LGLogger.ERROR,
"Error from scrape interface " + scrapeURL + " : " + Debug.getNestedExceptionMessage(e));
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl)responses.get(i);
response.setNextScrapeStartTime(SystemTime.getCurrentTime() +
FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString("Scrape.status.error") + Debug.getNestedExceptionMessage(e));
//notify listeners
scraper.scrapeReceived( response );
}
}
}
}
protected void scrapeHTTP(URL reqUrl, ByteArrayOutputStream message)
throws IOException
{
TRTrackerUtils.checkForBlacklistedURLs( reqUrl );
reqUrl = TRTrackerUtils.adjustURLForHosting( reqUrl );
reqUrl = AEProxyFactory.getAddressMapper().internalise( reqUrl );
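// the three helpers above (behaviour inferred from their names) reject
// blacklisted tracker URLs, redirect self-hosted torrents to the local
// tracker, and map the address for the AEProxy layer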
// System.out.println( "scraping " + reqUrl.toString());
InputStream is = null;
try{
HttpURLConnection con = null;
if ( reqUrl.getProtocol().equalsIgnoreCase("https")){
// see ConfigurationChecker for SSL client defaults
HttpsURLConnection ssl_con = (HttpsURLConnection)reqUrl.openConnection();
// allow for certs that contain IP addresses rather than DNS names
ssl_con.setHostnameVerifier(
new HostnameVerifier() {
public boolean verify(String host, SSLSession session) {
return( true );
}
});
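// note: always returning true disables hostname verification entirely,
// accepting any certificate/host mismatch (not just IP-address certs);
// the certificate chain itself is still checked by the SSL context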
con = ssl_con;
} else {
con = (HttpURLConnection) reqUrl.openConnection();
}
con.setRequestProperty("User-Agent", Constants.AZUREUS_NAME + " " + Constants.AZUREUS_VERSION);
// some trackers support gzip encoding of replies
con.addRequestProperty("Accept-Encoding","gzip");
con.connect();
is = con.getInputStream();
String encoding = con.getHeaderField( "content-encoding");
boolean gzip = encoding != null && encoding.equalsIgnoreCase("gzip");
// System.out.println( "encoding = " + encoding );
if ( gzip ){
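// assumed completion (the excerpt is truncated here): the standard pattern
// is to inflate the reply transparently before reading, e.g.
// is = new GZIPInputStream( is ); // requires java.util.zip.GZIPInputStream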