📄 trtrackerservertorrentimpl.java
字号:
}
}
}
if ( to_remove == 0 ){
break;
}
}
}finally{
peer_list_compaction_suspended = false;
}
checkForPeerListCompaction( false );
}
}
if ( deferred_failure != null ){
if ( peer != null && !peer_already_removed ){
removePeer( peer, TRTrackerServerTorrentPeerListener.ET_FAILED, url_parameters );
}
throw( deferred_failure );
}
return( peer );
}catch( UnsupportedEncodingException e ){
throw( new TRTrackerServerException( "Encoding fails", e ));
}finally{
// note we can bail out here through a return when there are too many IP overrides
this_mon.exit();
}
}
public void
peerQueued(
String ip,
int tcp_port,
int udp_port,
int http_port,
byte crypto_level,
byte az_ver,
int timeout_secs,
boolean seed )
{
	// Records a peer for possible later use without it having announced directly.
	// Skipped for large swarms and for peers that can't accept incoming
	// connections (tcp_port == 0).
	if ( tcp_port == 0 || peer_map.size() >= QUEUED_PEERS_MAX_SWARM_SIZE ){
		return;
	}
	try{
		this_mon.enter();
		QueuedPeer candidate = new QueuedPeer( ip, tcp_port, udp_port, http_port, crypto_level, az_ver, timeout_secs, seed );
		String reuse_key = new String( candidate.getIP(), Constants.BYTE_ENCODING ) + ":" + tcp_port;
		// if still active then drop it
		if ( peer_reuse_map.containsKey( reuse_key )){
			return;
		}
		if ( queued_peers == null ){
			queued_peers = new LinkedList();
		}else{
			// an equivalent entry already queued gets replaced with the fresh one
			for ( Iterator it = queued_peers.iterator(); it.hasNext(); ){
				QueuedPeer existing = (QueuedPeer)it.next();
				if ( existing.sameAs( candidate )){
					it.remove();
					// NOTE(review): refreshed entries are appended while brand-new
					// ones go to the front - presumably intentional, confirm
					queued_peers.add( candidate );
					return;
				}
			}
			// queue full: evict the entry with the earliest create time
			if ( queued_peers.size() >= QUEUED_PEERS_MAX ){
				QueuedPeer victim = null;
				for ( Iterator it = queued_peers.iterator(); it.hasNext(); ){
					QueuedPeer existing = (QueuedPeer)it.next();
					if ( victim == null || existing.getCreateTime() < victim.getCreateTime()){
						victim = existing;
					}
				}
				queued_peers.remove( victim );
			}
		}
		queued_peers.addFirst( candidate );
	}catch( UnsupportedEncodingException e ){
		Debug.printStackTrace(e);
	}finally{
		this_mon.exit();
	}
}
// Convenience overload used when the peer's position within peer_list is
// not known: delegates to the four-argument form with an index of -1,
// which triggers a linear search of peer_list.
protected void
removePeer(
TRTrackerServerPeerImpl peer,
int reason,
String url_parameters )
{
removePeer( peer, -1, reason, url_parameters );
}
/**
 * Removes a peer from all tracking structures (peer_map, peer_list,
 * peer_reuse_map and biased_peers), fires the removal peer event and
 * updates the torrent's stats/counters.
 *
 * @param peer             the peer to remove
 * @param peer_list_index  index of the peer within peer_list, or -1 if not
 *                         known (a linear search is performed in that case)
 * @param reason           removal reason (TRTrackerServerTorrentPeerListener.ET_*)
 * @param url_parameters   originating announce URL parameters, may be null
 */
protected void
removePeer(
TRTrackerServerPeerImpl peer,
int peer_list_index, // -1 if not known
int reason,
String url_parameters )
{
try{
this_mon.enter();
if ( peer.isIPOverride()){
ip_override_count--;
}
stats.removeLeft( peer.getAmountLeft());
// peer_map and peer_reuse_map should track each other - report (once) if not
if ( peer_map.size() != peer_reuse_map.size()){
if ( !map_size_diff_reported ){
map_size_diff_reported = true;
Debug.out( "TRTrackerServerTorrent::removePeer: maps size different ( " + peer_map.size() + "/" + peer_reuse_map.size() +")");
}
}
{
Object o = peer_map.remove( peer.getPeerId());
if ( o == null ){
Debug.out(" TRTrackerServerTorrent::removePeer: peer_map doesn't contain peer");
}else{
try{
peerEvent( peer, reason, url_parameters );
}catch( TRTrackerServerException e ){
// ignore during peer removal
}
}
}
// peer_list entries are nulled rather than removed; holes are reclaimed
// later by checkForPeerListCompaction
if ( peer_list_index == -1 ){
int peer_index = peer_list.indexOf( peer );
if ( peer_index == -1){
Debug.out(" TRTrackerServerTorrent::removePeer: peer_list doesn't contain peer");
}else{
peer_list.set( peer_index, null );
}
}else{
if ( peer_list.get( peer_list_index ) == peer ){
peer_list.set( peer_list_index, null );
}else{
Debug.out(" TRTrackerServerTorrent::removePeer: peer_list doesn't contain peer at index");
}
}
peer_list_hole_count++;
checkForPeerListCompaction( false );
try{
Object o = peer_reuse_map.remove( new String( peer.getIPAsRead(), Constants.BYTE_ENCODING ) + ":" + peer.getTCPPort());
if ( o == null ){
Debug.out(" TRTrackerServerTorrent::removePeer: peer_reuse_map doesn't contain peer");
}
}catch( UnsupportedEncodingException e ){
// shouldn't happen as BYTE_ENCODING is used successfully elsewhere;
// log rather than silently swallow (was an empty catch)
Debug.printStackTrace(e);
}
if ( biased_peers != null ){
biased_peers.remove( peer );
}
if ( peer.isSeed()){
seed_count--;
}
removed_count++;
}finally{
this_mon.exit();
}
}
// Re-evaluates the biased flag of every known peer against the supplied set
// of raw IPs, keeping the biased_peers list in sync with the flags.
protected void
updateBiasedPeers(
Set biased_peers_set )
{
	try{
		this_mon.enter();
		Iterator it = peer_list.iterator();
		// lazily create the list the first time there are peers to scan
		if ( biased_peers == null && it.hasNext()){
			biased_peers = new ArrayList();
		}
		while( it.hasNext()){
			TRTrackerServerPeerImpl entry = (TRTrackerServerPeerImpl)it.next();
			if ( entry == null ){
				continue;	// peer_list may contain null holes
			}
			boolean is_biased = biased_peers_set.contains( entry.getIPRaw());
			entry.setBiased( is_biased );
			if ( !is_biased ){
				biased_peers.remove( entry );
			}else if ( !biased_peers.contains( entry )){
				biased_peers.add( entry );
			}
		}
	}finally{
		this_mon.exit();
	}
}
public Map
exportAnnounceToMap(
HashMap preprocess_map,
TRTrackerServerPeerImpl requesting_peer, // maybe null for an initial announce from a stopped peer
boolean include_seeds,
int num_want,
long interval,
long min_interval,
boolean no_peer_id,
byte compact_mode,
byte crypto_level,
DHTNetworkPosition network_position )
{
try{
this_mon.enter();
long now = SystemTime.getCurrentTime();
// we have to force non-caching for nat_warnings responses as they include
// peer-specific data
boolean nat_warning = requesting_peer != null && requesting_peer.getNATStatus() == TRTrackerServerPeerImpl.NAT_CHECK_FAILED;
int total_peers = peer_map.size();
int cache_millis = TRTrackerServerImpl.getAnnounceCachePeriod();
boolean send_peer_ids = TRTrackerServerImpl.getSendPeerIds();
// override if client has explicitly not requested them
if ( no_peer_id || compact_mode != COMPACT_MODE_NONE ){
send_peer_ids = false;
}
boolean add_to_cache = false;
int max_peers = TRTrackerServerImpl.getMaxPeersToSend();
// num_want < 0 -> not supplied so give them max
if ( num_want < 0 ){
num_want = total_peers;
}
// trim back to max_peers if specified
if ( max_peers > 0 && num_want > max_peers ){
num_want = max_peers;
}
if ( caching_enabled &&
(!nat_warning) &&
preprocess_map.size() == 0 && // don't cache if we've got pre-process stuff to add
cache_millis > 0 &&
num_want >= MIN_CACHE_ENTRY_SIZE &&
total_peers >= TRTrackerServerImpl.getAnnounceCachePeerThreshold() &&
crypto_level != TRTrackerServerPeer.CRYPTO_REQUIRED ){ // no cache for crypto required peers
// too busy to bother with network position stuff
network_position = null;
// note that we've got to select a cache entry that is somewhat
// relevant to the num_want param (but NOT greater than it)
// remove stuff that's too old
Iterator it = announce_cache.keySet().iterator();
while( it.hasNext() ){
Integer key = (Integer)it.next();
announceCacheEntry entry = (announceCacheEntry)announce_cache.get( key );
if ( now - entry.getTime() > cache_millis ){
it.remove();
}
}
// look for an entry with a reasonable num_want
// e.g. for 100 look between 50 and 100
for (int i=num_want/10;i>num_want/20;i--){
announceCacheEntry entry = (announceCacheEntry)announce_cache.get(new Integer(i));
if( entry != null ){
if ( now - entry.getTime() > cache_millis ){
announce_cache.remove( new Integer(i));
}else{
// make sure this is compatible
if ( entry.getSendPeerIds() == send_peer_ids &&
entry.getCompactMode() == compact_mode ){
return( entry.getData());
}
}
}
}
add_to_cache = true;
}
LinkedList rep_peers = new LinkedList();
// System.out.println( "exportPeersToMap: num_want = " + num_want + ", max = " + max_peers );
// if they want them all simply give them the set
if ( num_want > 0 ){
if ( num_want >= total_peers){
// if they want them all simply give them the set
for (int i=0;i<peer_list.size();i++){
TRTrackerServerPeerImpl peer = (TRTrackerServerPeerImpl)peer_list.get(i);
if ( peer == null || peer == requesting_peer ){
}else if ( now > peer.getTimeout()){
// System.out.println( "removing timed out client '" + peer.getString());
removePeer( peer, i, TRTrackerServerTorrentPeerListener.ET_TIMEOUT, null );
}else if ( peer.getTCPPort() == 0 ){
// a port of 0 means that the peer definitely can't accept incoming connections
}else if ( crypto_level == TRTrackerServerPeer.CRYPTO_NONE && peer.getCryptoLevel() == TRTrackerServerPeer.CRYPTO_REQUIRED ){
// don't return "crypto required" peers to those that can't correctly connect to them
}else if ( include_seeds || !peer.isSeed()){
if ( peer.isBiased()){
// only interested in negative bias here to remove from result
int bias = peer.getBias();
if ( bias < 0 ){
bias = -bias;
// -100 -> always drop, -1 -> drop 1% of the time
int rand = random.nextInt( 100 );
if ( rand < bias ){
continue;
}
}
}
Map rep_peer = new HashMap(3);
if ( send_peer_ids ){
rep_peer.put( "peer id", peer.getPeerId().getHash());
}
if ( compact_mode != COMPACT_MODE_NONE ){
byte[] peer_bytes = peer.getIPBytes();
if ( peer_bytes == null ){
continue;
}
rep_peer.put( "ip", peer_bytes );
if ( compact_mode >= COMPACT_MODE_AZ ){
rep_peer.put( "azver", new Long( peer.getAZVer()));
rep_peer.put( "azudp", new Long( peer.getUDPPort()));
if ( peer.isSeed()){
rep_peer.put( "azhttp", new Long( peer.getHTTPPort()));
}
rep_peer.put( "azup", new Long( peer.getUpSpeed()));
if ( peer.isBiased()){
rep_peer.put( "azbiased", "" );
}
if ( network_position != null ){
DHTNetworkPosition peer_pos = peer.getNetworkPosition();
if ( peer_pos != null && network_position.getPositionType() == peer_pos.getPositionType()){
rep_peer.put( "azrtt", new Long( (long)peer_pos.estimateRTT(network_position )));
}
}
}
}else{
rep_peer.put( "ip", peer.getIPAsRead() );
}
rep_peer.put( "port", new Long( peer.getTCPPort()));
if ( crypto_level != TRTrackerServerPeer.CRYPTO_NONE ){
rep_peer.put( "crypto_flag", new Long( peer.getCryptoLevel() == TRTrackerServerPeer.CRYPTO_REQUIRED?1:0));
}
if ( peer.isBiased()){
rep_peers.addFirst( rep_peer );
}else{
rep_peers.addLast( rep_peer );
}
}
}
}else{
int peer_list_size = peer_list.size();
// to avoid returning duplicates when doing the two-loop check
// for nat selection we maintain an array of markers
if ( duplicate_peer_checker.length < peer_list_size ){
duplicate_peer_checker = new byte[peer_list_size*2];
duplicate_peer_checker_index = 1;
}else if ( duplicate_peer_checker.length > (peer_list_size*2)){
duplicate_peer_checker = new byte[(3*peer_list_size)/2];
duplicate_peer_checker_index = 1;
}else{
duplicate_peer_checker_index++;
if ( duplicate_peer_checker_index == 0 ){
Arrays.fill( duplicate_peer_checker, (byte)0);
duplicate_peer_checker_index = 1;
}
}
boolean peer_removed = false;
try{
// got to suspend peer list compaction as we rely on the
// list staying the same size during processing below
peer_list_compaction_suspended = true;
// too costly to randomise as below. use more efficient but slightly less accurate
// approach
// two pass process if bad nat detection enabled
int added = 0;
//int bad_nat_added = 0;
for (int bad_nat_loop=TRTrackerServerNATChecker.getSingleton().isEnabled()?0:1;bad_nat_loop<2;bad_nat_loop++){
int limit = num_want*2; // some entries we find might not be usable
// so in the limit search for more
if ( num_want*3 > total_peers ){
limit++;
}
if ( biased_peers != null && biased_peers.size() > 1 ){
// juggle things a bit
Object x = biased_peers.remove(0);
biased_peers.add( random.nextInt( biased_peers.size()), x);
}
for (int i=0;i<limit && added < num_want;i++){
int peer_index;
TRTrackerServerPeerImpl peer;
// deal with bias up front
if ( bad_nat_loop == 1 && biased_peers != null && i < biased_peers.size()){
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -