// PEPeerControlImpl.java (fragment)
pc.setSnubbed(false);
}
// Disconnect seeds
checkSeeds(true);
boolean checkPieces =COConfigurationManager.getBooleanParameter("Check Pieces on Completion", true);
// re-check all pieces to make sure they are not corrupt, but only if we weren't already complete
if (checkPieces &&!start_of_day)
{
DiskManagerCheckRequest req =disk_mgr.createCheckRequest(-1, new Integer(CHECK_REASON_COMPLETE));
disk_mgr.enqueueCompleteRecheckRequest(req, this);
}
disk_mgr.downloadEnded();
_timeStartedSeeding =SystemTime.getCurrentTime();
adapter.setStateSeeding(start_of_day);
}
}
/**
 * Locates expired block requests on every transferring peer, cancels them,
 * and marks a peer as snubbed if we haven't received useful data from
 * it within the last 60 seconds.  Peers that stay completely unresponsive
 * for 240+ seconds may be disconnected (availability permitting).
 */
private void checkRequests()
{
final long now =SystemTime.getCurrentTime();
//for every connection (iterate the copy-on-write snapshot backwards)
final List peer_transports = peer_transports_cow;
for (int i =peer_transports.size() -1; i >=0 ; i--)
{
final PEPeerTransport pc =(PEPeerTransport)peer_transports.get(i);
if (pc.getPeerState() ==PEPeer.TRANSFERING)
{
final boolean isSeed =pc.isSeed();
final List expired = pc.getExpiredRequests();
if (expired !=null &&expired.size() >0)
{
// snub peers that haven't sent any good data for a minute
// (-1 appears to mean "never received any" - behavior of
// getTimeSinceGoodDataReceived() not visible here)
final long goodTime =pc.getTimeSinceGoodDataReceived();
if (goodTime ==-1 ||goodTime >60 *1000)
{
pc.setSnubbed(true);
}
// seeds get a longer silence allowance (120s) than downloaders (60s)
final long dataTime =pc.getTimeSinceLastDataMessageReceived();
final boolean noData =(dataTime ==-1) ||((now -dataTime) >1000 *(isSeed ?120 :60));
for (int j =0; j <expired.size(); j++)
{
//for every expired request
//get the request object
final DiskManagerReadRequest request =(DiskManagerReadRequest) expired.get(j);
//Only cancel the first (oldest) request if the peer is silent AND more
//than 2 mins have passed since it was created; all later requests
//(j > 0) are always cancelled
if (j >0 ||(noData &&(now -request.getTimeCreated() >120 *1000)))
{
pc.sendCancel(request); //cancel the request object
//get the piece number
final int pieceNumber = request.getPieceNumber();
final PEPiece pePiece =pePieces[pieceNumber];
//unmark the request on the block within the active piece (if any)
if (pePiece !=null)
pePiece.clearRequested(request.getOffset() /DiskManager.BLOCK_SIZE);
//set piece to not fully requested
dm_pieces[pieceNumber].clearRequested();
//outside end-game mode, let the picker reconsider a now-empty piece
if (!piecePicker.isInEndGameMode())
checkEmptyPiece(pieceNumber);
}
}
// if they never respond to our requests, must disconnect them - after 240 seconds -
// but only while the swarm can spare them (min availability check)
if (noData &&(goodTime ==-1 ||goodTime >240 *1000)
&&piecePicker.getMinAvailability() >(isSeed ?2 :1))
closeAndRemovePeer(pc, "Peer not responsive to piece requests.", true);
}
}
}
}
/**
 * Dynamically adjusts the tracker announce interval, based on how many more
 * connections we want versus how many the swarm can plausibly supply.
 * Runs only on the five-second mainloop tick.
 */
private void
updateTrackerAnnounceInterval()
{
	if ( mainloop_loop_count % MAINLOOP_FIVE_SECOND_INTERVAL != 0 ){
		return;
	}
	//upper bound on how many new connections we will tell the tracker we want
	final int WANT_LIMIT = 100;
	int num_wanted = getMaxNewConnectionsAllowed();
	boolean has_remote = adapter.isNATHealthy();
	if( has_remote ) {
		//is not firewalled, so can accept incoming connections,
		//which means no need to continually keep asking the tracker for peers
		num_wanted = (int)(num_wanted / 1.5);
	}
	//negative presumably signals "unlimited" - TODO confirm against getMaxNewConnectionsAllowed()
	if ( num_wanted < 0 || num_wanted > WANT_LIMIT ) {
		num_wanted = WANT_LIMIT;
	}
	int current_connection_count = PeerIdentityManager.getIdentityCount( _hash );
	TRTrackerScraperResponse tsr = adapter.getTrackerScrapeResponse();
	if( tsr != null && tsr.isValid() ) { //we've got valid scrape info
		int num_seeds = tsr.getSeeds();
		int num_peers = tsr.getPeers();
		int swarm_size;
		if( seeding_mode ) {
			//Only use peer count when seeding, as other seeds are unconnectable.
			//Since trackers return peers randomly (some of which will be seeds),
			//backoff by the seed2peer ratio since we're given only that many peers
			//on average each announce.
			final int total = num_seeds + num_peers;
			if( total > 0 ) {
				float ratio = (float)num_peers / total;
				swarm_size = (int)(num_peers * ratio);
			}
			else {
				//empty scrape: the old code produced a NaN ratio here and relied
				//on the (int) cast silently turning NaN into 0 - make it explicit
				swarm_size = 0;
			}
		}
		else {
			swarm_size = num_peers + num_seeds;
		}
		if( swarm_size < num_wanted ) { //lower limit to swarm size if necessary
			num_wanted = swarm_size;
		}
	}
	if( num_wanted < 1 ) { //we dont need any more connections
		adapter.setTrackerRefreshDelayOverrides( 100 ); //use normal announce interval
		return;
	}
	if( current_connection_count == 0 ) current_connection_count = 1; //fudge it :)
	//the closer we are to fully-connected, the closer to the normal (100%) interval
	int current_percent = (current_connection_count * 100) / (current_connection_count + num_wanted);
	adapter.setTrackerRefreshDelayOverrides( current_percent ); //set dynamic interval override
}
/** @return true if the piece picker still has at least one downloadable piece */
public boolean hasDownloadablePiece()
{
	return piecePicker.hasDownloadablePiece();
}
/** @return per-piece availability counts, as reported by the piece picker */
public int[] getAvailability()
{
	return( piecePicker.getAvailability());
}
/**
 * @return the minimum piece availability, as reported by the piece picker.
 * Only gets called when the My Torrents view is displayed.
 */
public float getMinAvailability()
{
	return( piecePicker.getMinAvailability());
}
/** @return the average piece availability, as reported by the piece picker */
public float getAvgAvail()
{
	return( piecePicker.getAvgAvail());
}
/**
 * Registers a new peer transport, unless its IP is blocked by the filter
 * or it is somehow already registered (which is reported as an error).
 * In either rejection case the connection is closed with a reason.
 */
public void addPeerTransport( PEPeerTransport transport ) {
	if (ip_filter.isInRange(transport.getIp(), adapter.getDisplayName())) {
		transport.closeConnection( "IP address blocked by filters" );
		return;
	}
	final ArrayList current = peer_transports_cow;
	if (current.contains( transport )) {
		Debug.out( "addPeerTransport():: peer_transports.contains(transport): SHOULD NEVER HAPPEN !" );
		transport.closeConnection( "already connected" );
		return;
	}
	addToPeerTransports(transport);
}
/**
 * Do all peer choke/unchoke processing.  Selects the unchoker implementation
 * matching the current mode (seeding vs downloading), performs the full
 * choke/unchoke calculation every 10 seconds (passing a refresh flag every
 * 30 seconds), and a quick unchoke-only pass every second.
 */
private void doUnchokes() {
int max_to_unchoke = adapter.getMaxUploads(); //how many simultaneous uploads we should consider
ArrayList peer_transports = peer_transports_cow;
//determine proper unchoker - swap implementations if the mode changed
if( seeding_mode ) {
if( unchoker == null || !(unchoker instanceof SeedingUnchoker) ) {
unchoker = new SeedingUnchoker();
}
}
else {
if( unchoker == null || !(unchoker instanceof DownloadingUnchoker) ) {
unchoker = new DownloadingUnchoker();
}
}
//do main choke/unchoke update every 10 secs
if( mainloop_loop_count % MAINLOOP_TEN_SECOND_INTERVAL == 0 ) {
boolean refresh = mainloop_loop_count % MAINLOOP_THIRTY_SECOND_INTERVAL == 0;
unchoker.calculateUnchokes( max_to_unchoke, peer_transports, refresh );
ArrayList peers_to_choke = unchoker.getChokes();
ArrayList peers_to_unchoke = unchoker.getUnchokes();
//do chokes (only for peers we haven't already choked)
for( int i=0; i < peers_to_choke.size(); i++ ) {
PEPeerTransport peer = (PEPeerTransport)peers_to_choke.get( i );
if( !peer.isChokedByMe() ) {
peer.sendChoke();
}
}
//do unchokes (only for peers that are currently choked)
for( int i=0; i < peers_to_unchoke.size(); i++ ) {
PEPeerTransport peer = (PEPeerTransport)peers_to_unchoke.get( i );
if( peer.isChokedByMe() ) {
peer.sendUnChoke();
}
}
}
else if( mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL == 0 ) { //do quick unchoke check every 1 sec
ArrayList peers_to_unchoke = unchoker.getImmediateUnchokes( max_to_unchoke, peer_transports );
//ensure that lan-local peers always get unchoked
//NOTE(review): a lan-local peer already present in the list is added again;
//harmless because the send below is guarded by isChokedByMe(), but confirm
//it is intentional
for( int i=0; i < peer_transports.size(); i++ ) {
PEPeerTransport peer = (PEPeerTransport)peer_transports.get( i );
if( peer.isLANLocal() ) {
peers_to_unchoke.add( peer );
}
}
//do unchokes
for( int i=0; i < peers_to_unchoke.size(); i++ ) {
PEPeerTransport peer = (PEPeerTransport)peers_to_unchoke.get( i );
if( peer.isChokedByMe() ) {
peer.sendUnChoke();
}
}
}
}
/**
 * Broadcasts a HAVE message for the given piece to every connected peer.
 * @param pieceNumber the piece to announce
 */
private void sendHave(int pieceNumber) {
	final List transports = peer_transports_cow;
	final int count = transports.size();
	for (int i = 0; i < count; i++) {
		final PEPeerTransport peer = (PEPeerTransport) transports.get(i);
		peer.sendHave(pieceNumber);
	}
}
/**
 * Checks whether we are connected to other seeds and, if so, disconnects
 * them.  Acts only when the "disconnect seeds when seeding" option is set,
 * and only when we are seeding or a disconnect is being forced.
 */
private void checkSeeds(boolean forceDisconnect) {
//proceed on mainloop 1 second intervals if we're a seed and we want to force disconnects
//NOTE(review): this gate also applies when forceDisconnect is true, so a
//forced call landing off the 1-second tick is silently skipped - confirm intended
if ((mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL) != 0)
return;
if (!disconnect_seeds_when_seeding ||(!forceDisconnect &&!seeding_mode))
return;
ArrayList to_close = null;
List peer_transports = peer_transports_cow;
for (int i = 0; i < peer_transports.size(); i++) {
PEPeerTransport pc = (PEPeerTransport) peer_transports.get(i);
if (pc != null && pc.getPeerState() == PEPeer.TRANSFERING && pc.isSeed()) {
//lazily allocate - most of the time there is nothing to close
if( to_close == null ) to_close = new ArrayList();
to_close.add( pc );
}
}
if( to_close != null ) {
for( int i=0; i < to_close.size(); i++ ) {
closeAndRemovePeer( (PEPeerTransport)to_close.get(i), "disconnect other seed when seeding", false );
}
}
}
/**
 * Recomputes the cached seed / peer / remote-incoming counters from the
 * current peer transport snapshot.  Only transferring peers are counted.
 */
private void updateStats() {
	_seeds = 0;
	_peers = 0;
	_remotes = 0;
	final List transports = peer_transports_cow;
	final int count = transports.size();
	for (int i = 0; i < count; i++) {
		final PEPeerTransport peer = (PEPeerTransport) transports.get(i);
		if (peer.getPeerState() != PEPeer.TRANSFERING) {
			continue;
		}
		if (peer.isSeed()) {
			_seeds++;
		}
		else {
			_peers++;
		}
		if (((PEPeer) peer).isIncoming()) {
			_remotes++;
		}
	}
}
/**
 * Unmarks a block request as being downloaded.  Also called by peer
 * connection objects when a connection is closed or choked.
 * @param request the outstanding read request to unmark
 */
public void requestCanceled(DiskManagerReadRequest request)
{
	final int pieceNumber = request.getPieceNumber();
	final int blockNumber = request.getOffset() / DiskManager.BLOCK_SIZE;
	final PEPiece pePiece = pePieces[pieceNumber];
	if (pePiece != null) {
		pePiece.clearRequested(blockNumber);
	}
	//the piece is no longer fully requested in any case
	dm_pieces[pieceNumber].clearRequested();
}
/** @return this controller instance itself */
public PEPeerControl getControl()
{
	return this;
}
/**
 * @return the torrent's hash, or null if it could not be obtained
 */
public byte[] getTorrentHash()
{
	try {
		return disk_mgr.getTorrent().getHash();
	}
	catch (Throwable e) {
		//log and degrade gracefully rather than propagate
		Debug.printStackTrace(e);
		return null;
	}
}
/** @return the hash value (the data ID of the peer identity) */
public byte[] getHash() {
	return( _hash.getDataID());
}
/** @return the peer identity data ID for this download */
public PeerIdentityDataID getPeerIdentityDataID()
{
	return _hash;
}
/** @return our own peer id value */
public byte[] getPeerId() {
	return( _myPeerId );
}
/** @return the remaining amount as reported by the disk manager */
public long getRemaining() {
	return( disk_mgr.getRemaining());
}
/** Records discarded received bytes in the stats (non-positive lengths ignored). */
public void discarded(int length) {
	if (length <= 0) {
		return;
	}
	_stats.discarded(length);
}
/** Records received payload bytes in the stats and the reception-speed average. */
public void dataBytesReceived(int length) {
	if (length <= 0) {
		return;
	}
	_stats.dataBytesReceived(length);
	_averageReceptionSpeed.addValue(length);
}
/** Records received protocol-overhead bytes in the stats (non-positive lengths ignored). */
public void protocolBytesReceived( int length ) {
	if (length <= 0) {
		return;
	}
	_stats.protocolBytesReceived(length);
}
/** Records sent payload bytes in the stats (non-positive lengths ignored). */
public void dataBytesSent(int length) {
	if (length <= 0) {
		return;
	}
	_stats.dataBytesSent(length);
}
/** Records sent protocol-overhead bytes in the stats (non-positive lengths ignored). */
public void protocolBytesSent( int length ) {
	if (length <= 0) {
		return;
	}
	_stats.protocolBytesSent(length);
}
/** DiskManagerWriteRequestListener message: a block write has completed.
 * Marks the corresponding block as written on the active piece, unless the
 * piece is already done.
 * @see org.gudy.azureus2.core3.disk.DiskManagerWriteRequestListener
 */
public void writeCompleted(DiskManagerWriteRequest request)
{
	final int pieceNumber = request.getPieceNumber();
	if (dm_pieces[pieceNumber].isDone()) {
		return; //piece already complete - nothing to record
	}
	final PEPiece pePiece = pePieces[pieceNumber];
	if (pePiece != null) {
		final int blockNumber = request.getOffset() / DiskManager.BLOCK_SIZE;
		pePiece.setWritten((PEPeer) request.getUserData(), blockNumber);
	}
}
/**
 * DiskManagerWriteRequestListener failure callback.
 * NOTE(review): write failures are silently ignored here - presumably the
 * error is handled elsewhere (e.g. by the disk manager itself); confirm.
 */
public void
writeFailed(
DiskManagerWriteRequest request,
Throwable cause )
{
}
/** This method will queue up a disk manager write request for the block if the block is not already written.
 * It will send out cancels for the block to all peers, either if in end-game mode or per the cancel param.
 * @param pieceNumber to potentially write to
 * @param offset within piece to queue write for
 * @param data to be written
 * @param sender peer that sent this data
 * @param cancel if cancels definitely need to be sent to all peers for this request
 */
public void writeBlock(int pieceNumber, int offset, DirectByteBuffer data, PEPeer sender, boolean cancel)
{
final int blockNumber =offset /DiskManager.BLOCK_SIZE;
final DiskManagerPiece dmPiece =dm_pieces[pieceNumber];
if (dmPiece.isWritten(blockNumber))
{
data.returnToPool();
return;
}
DiskManagerWriteRequest request =disk_mgr.createWriteRequest(pieceNumber, offset, data, sender);
disk_mgr.enqueueWriteRequest(request, this);
if (piecePicker.isInEndGameMode())
{
// In case we are in endGame mode, remove the block from the chunk list
piecePicker.removeFromEndGameModeChunks(pieceNumber, offset);
cancel =true;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -