⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 pepeercontrolimpl.java

📁 这是一个基于java编写的torrent的P2P源码
💻 JAVA
📖 第 1 页 / 共 5 页
字号:
  	}
  	
	/**
	 * Returns the per-piece availability counts as tracked by the piece picker.
	 * @return array indexed by piece number giving each piece's availability
	 */
	public int[] getAvailability() {
		return( piecePicker.getAvailability());
	}
	
	/**
	 * Returns the minimum availability across all pieces.
	 * Only gets called when the My Torrents view is displayed.
	 */
	public float getMinAvailability() {
		return( piecePicker.getMinAvailability());
	}

	/**
	 * Returns the average piece availability as tracked by the piece picker.
	 */
	public float getAvgAvail() {
		return( piecePicker.getAvgAvail());
	}

	/**
	 * Registers a new peer transport, unless its IP address is blocked by the
	 * configured filters or the transport is already known (in which case the
	 * connection is closed).
	 * @param transport the peer connection to register
	 */
	public void addPeerTransport( PEPeerTransport transport ) {
		if ( ip_filter.isInRange( transport.getIp(), adapter.getDisplayName())) {
			transport.closeConnection( "IP address blocked by filters" );
			return;
		}

		final ArrayList current = peer_transports_cow;

		if ( current.contains( transport )) {
			// duplicate registration indicates a logic error elsewhere
			Debug.out( "addPeerTransport():: peer_transports.contains(transport): SHOULD NEVER HAPPEN !" );
			transport.closeConnection( "already connected" );
		}else{
			addToPeerTransports( transport );
		}
	}
	
	
	/**
	 * Do all peer choke/unchoke processing.
	 * Performs the full choke/unchoke recalculation every 10 seconds and a
	 * quick unchoke pass every second. Only active in manual per-torrent
	 * unchoke slot mode (no-op when the auto slot manager is enabled).
	 */
	private void doUnchokes() {  
			
			// logic below is either 1 second or 10 secondly, bail out early if neither
		
		if( !UploadSlotManager.AUTO_SLOT_ENABLE ) {		 //manual per-torrent unchoke slot mode
		
			if( mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL != 0 ) { 
				return;
			}

			final int max_to_unchoke = adapter.getMaxUploads();  //how many simultaneous uploads we should consider
			final ArrayList peer_transports = peer_transports_cow;
		
			//determine proper unchoker; instanceof is false for null, so no
			//separate null check is needed
			if( seeding_mode ) {
				if( !(unchoker instanceof SeedingUnchoker) ) {
					unchoker = new SeedingUnchoker();
				}
			}
			else {
				if( !(unchoker instanceof DownloadingUnchoker) ) {
					unchoker = new DownloadingUnchoker();
				}
			}
			
			//do main choke/unchoke update every 10 secs
			if( mainloop_loop_count % MAINLOOP_TEN_SECOND_INTERVAL == 0 ) {
			
				//full (more expensive) refresh of the unchoke calculation every 30 secs
				final boolean refresh = mainloop_loop_count % MAINLOOP_THIRTY_SECOND_INTERVAL == 0;
			
				unchoker.calculateUnchokes( max_to_unchoke, peer_transports, refresh );
			
				UnchokerUtil.performChokes( unchoker.getChokes(), unchoker.getUnchokes() );
			}
			else if( mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL == 0 ) {  //do quick unchoke check every 1 sec
			
				final ArrayList peers_to_unchoke = unchoker.getImmediateUnchokes( max_to_unchoke, peer_transports );
			
				//ensure that lan-local peers always get unchoked   //TODO
				for( Iterator it=peer_transports.iterator();it.hasNext();) {
					final PEPeerTransport peer = (PEPeerTransport)it.next();				
					if( peer.isLANLocal() ) {
						peers_to_unchoke.add( peer );
					}else if ( fast_unchoke_new_peers ){
						
						if ( peer.getConnectionState() == PEPeerTransport.CONNECTION_FULLY_ESTABLISHED ){						
							//give each newly-established peer a one-off immediate unchoke
							if ( peer.getData( "fast_unchoke_done" ) == null ){
			
								peers_to_unchoke.add( peer );
																
								peer.setData( "fast_unchoke_done", "" );
							}
						}
					}
				}
						
				UnchokerUtil.performChokes( null, peers_to_unchoke );
			}
		
		}
		
	}
	
	
//	send the have requests out
	private void sendHave(int pieceNumber) {
		//fo
		final ArrayList peer_transports = peer_transports_cow;
		
		for (int i = 0; i < peer_transports.size(); i++) {
			//get a peer connection
			final PEPeerTransport pc = (PEPeerTransport) peer_transports.get(i);
			//send the have message
			pc.sendHave(pieceNumber);
		}
		
	}
	
  /**
   * Checks whether we are connected to other seeds and, if the
   * disconnect-seeds-when-seeding option is enabled, disconnects them.
   */
  private void checkSeeds() {
    // proceed only on mainloop 1 second intervals
    if ((mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL) != 0) {
      return;
    }
    if (!disconnect_seeds_when_seeding) {
      return;
    }

    final ArrayList current = peer_transports_cow;
    ArrayList seeds_found = null;  // lazily created to avoid garbage on the common path

    for (Iterator it = current.iterator(); it.hasNext();) {
      final PEPeerTransport pc = (PEPeerTransport) it.next();
      if (pc != null && pc.getPeerState() == PEPeer.TRANSFERING && pc.isSeed()) {
        if (seeds_found == null) {
          seeds_found = new ArrayList();
        }
        seeds_found.add(pc);
      }
    }

    if (seeds_found != null) {
      for (int i = 0; i < seeds_found.size(); i++) {
        closeAndRemovePeer((PEPeerTransport) seeds_found.get(i), "disconnect other seed when seeding", false);
      }
    }
  }

  
  
  
	/**
	 * Recomputes the cached seed / peer / remote-connection counts.
	 * Runs only on mainloop 1 second intervals.
	 */
	private void updateStats() {
		if ((mainloop_loop_count % MAINLOOP_ONE_SECOND_INTERVAL) != 0) {
			return;
		}

		// calculate seeds vs peers over a snapshot of the connection list
		final ArrayList current = peer_transports_cow;

		int seed_count = 0;
		int peer_count = 0;
		int remote_count = 0;

		for (int i = 0; i < current.size(); i++) {
			final PEPeerTransport pc = (PEPeerTransport) current.get(i);
			if (pc.getPeerState() != PEPeer.TRANSFERING) {
				continue;
			}
			if (pc.isSeed()) {
				seed_count++;
			} else {
				peer_count++;
			}
			if (pc.isIncoming()) {
				remote_count++;
			}
		}

		_seeds = seed_count;
		_peers = peer_count;
		_remotes = remote_count;
	}

	/**
	 * The way to unmark a request as being downloaded; also called by peer
	 * connection objects when a connection is closed or choked.
	 * @param request a DiskManagerReadRequest holding details of what was canceled
	 */
	public void requestCanceled(DiskManagerReadRequest request)
	{
		final int piece_number = request.getPieceNumber();
		final PEPiece pe_piece = pePieces[piece_number];
		if (pe_piece != null) {
			pe_piece.clearRequested(request.getOffset() / DiskManager.BLOCK_SIZE);
		}
	}
	
	
	/** @return this peer control instance */
	public PEPeerControl getControl() {
		return this;
	}
	
	/**
	 * Delegates to the adapter for the crypto secrets at the given level.
	 * @param crypto_level requested crypto level
	 */
	public byte[][] getSecrets( int crypto_level ) {
		return adapter.getSecrets( crypto_level );
	}
	
//	get the hash value
	public byte[] getHash() {
		return _hash.getDataID();
	}
	
	/** @return the peer identity data ID for this download */
	public PeerIdentityDataID getPeerIdentityDataID() {
		return _hash;
	}
	
//	get the peer id value
	public byte[] getPeerId() {
		return _myPeerId;
	}
	
//	get the remaining percentage
	public long getRemaining() {
		return disk_mgr.getRemaining();
	}
	
	
	/**
	 * Records discarded (wasted) bytes against the stats.
	 * Non-positive lengths are ignored.
	 */
	public void discarded(PEPeer peer, int length) {
		if (length <= 0) {
			return;
		}
		_stats.discarded(peer, length);
	}
	
	/**
	 * Records received data bytes in the stats and the reception-speed average.
	 * Non-positive lengths are ignored.
	 */
	public void dataBytesReceived(PEPeer peer, int length) {
		if (length <= 0) {
			return;
		}
		_stats.dataBytesReceived(peer, length);
		_averageReceptionSpeed.addValue(length);
	}
	
	
	/**
	 * Records received protocol-overhead bytes in the stats.
	 * Non-positive lengths are ignored.
	 */
	public void protocolBytesReceived(PEPeer peer, int length) {
		if (length <= 0) {
			return;
		}
		_stats.protocolBytesReceived(peer, length);
	}
	
	/**
	 * Records sent data bytes in the stats.
	 * Non-positive lengths are ignored.
	 */
	public void dataBytesSent(PEPeer peer, int length) {
		if (length <= 0) {
			return;
		}
		_stats.dataBytesSent(peer, length);
	}
	
	
	/**
	 * Records sent protocol-overhead bytes in the stats.
	 * Non-positive lengths are ignored.
	 */
	public void protocolBytesSent(PEPeer peer, int length) {
		if (length <= 0) {
			return;
		}
		_stats.protocolBytesSent(peer, length);
	}
	
	/** DiskManagerWriteRequestListener message: a queued block write finished.
     * @see org.gudy.azureus2.core3.disk.DiskManagerWriteRequestListener
	 */
	public void writeCompleted(DiskManagerWriteRequest request)
	{
		final int piece_number = request.getPieceNumber();
		final DiskManagerPiece dm_piece = dm_pieces[piece_number];

		if (dm_piece.isDone()) {
			return;
		}

		final int block_number = request.getOffset() / DiskManager.BLOCK_SIZE;
		final PEPiece pe_piece = pePieces[piece_number];

		if (pe_piece != null) {
			pe_piece.setWritten((PEPeer) request.getUserData(), block_number);
		} else {
			// this is a way of fixing a 99.9% bug where a dmpiece is left in a
			// fully downloaded state with the underlying pe_piece null. Possible explanation is
			// that a slow peer sends an entire piece at around the time a pe_piece gets reset
			// due to inactivity.

			// we also get here when recovering data that has come in late after the piece has
			// been abandoned
			dm_piece.setWritten(block_number);
		}
	}

  /**
   * DiskManagerWriteRequestListener failure callback.
   * If the write has failed then the download will have been stopped, so there
   * is no need to try and reset the piece here.
   */
  public void writeFailed( DiskManagerWriteRequest request, Throwable cause ) {
	  // intentionally empty - see class comment above
  }
  
    /** Queues up a disk manager write request for the block if the block is not
     * already written. Sends out cancels for the block to all peers either when
     * in end-game mode, or per the cancel parameter.
     * @param pieceNumber piece to potentially write to
     * @param offset byte offset within the piece to queue the write for
     * @param data the block data to be written
     * @param sender peer that sent this data
     * @param cancel true if cancels definitely need to be sent to all peers for this request
     */
    public void writeBlock(int pieceNumber, int offset, DirectByteBuffer data, PEPeer sender, boolean cancel)
    {
        final int blockNumber = offset / DiskManager.BLOCK_SIZE;
        final DiskManagerPiece dmPiece = dm_pieces[pieceNumber];

        if (dmPiece.isWritten(blockNumber)) {
            // duplicate block - hand the buffer back and bail out
            data.returnToPool();
            return;
        }

        final PEPiece pe_piece = pePieces[pieceNumber];
        if (pe_piece != null) {
            pe_piece.setDownloaded(offset);
        }

        final DiskManagerWriteRequest request = disk_mgr.createWriteRequest(pieceNumber, offset, data, sender);
        disk_mgr.enqueueWriteRequest(request, this);

        // in case we are in endGame mode, remove the block from the chunk list
        if (piecePicker.isInEndGameMode()) {
            piecePicker.removeFromEndGameModeChunks(pieceNumber, offset);
        }

        if (cancel || piecePicker.isInEndGameMode()) {
            // cancel any matching outstanding download requests on every connection
            final ArrayList current = peer_transports_cow;
            for (int i = 0; i < current.size(); i++) {
                final PEPeerTransport connection = (PEPeerTransport) current.get(i);
                final DiskManagerReadRequest dmr = disk_mgr.createReadRequest(pieceNumber, offset, dmPiece.getBlockSize(blockNumber));
                connection.sendCancel(dmr);
            }
        }
    }
	
	
//	/**
//	 * This method is only called when a block is received after the initial request expired,
//	 * but the data has not yet been fulfilled by any other peer, so we use the block data anyway
//	 * instead of throwing it away, and cancel any outstanding requests for that block that might have
//	 * been sent after initial expiry.
//	 */
//	public void writeBlockAndCancelOutstanding(int pieceNumber, int offset, DirectByteBuffer data,PEPeer sender) {
//        final int blockNumber =offset /DiskManager.BLOCK_SIZE;
//        final DiskManagerPiece dmPiece =dm_pieces[pieceNumber];
//        if (dmPiece.isWritten(blockNumber))
//        {
//            data.returnToPool();
//            return;
//        }
//		DiskManagerWriteRequest request =disk_mgr.createWriteRequest(pieceNumber, offset, data, sender);
//		disk_mgr.enqueueWriteRequest(request, this);
//
//		// cancel any matching outstanding download requests
//		List peer_transports =peer_transports_cow;
//		for (int i =0; i <peer_transports.size(); i++)
//		{
//			PEPeerTransport connection =(PEPeerTransport) peer_transports.get(i);
//			DiskManagerReadRequest dmr =disk_mgr.createReadRequest(pieceNumber, offset, dmPiece.getBlockSize(blockNumber));
//			connection.sendCancel(dmr);
//		}
//	}
	
	
	/**
	 * @return true if the block containing the given offset within the piece
	 *         has already been written to disk
	 */
	public boolean isWritten(int piece_number, int offset)
	{
		final int block_number = offset / DiskManager.BLOCK_SIZE;
		return dm_pieces[piece_number].isWritten(block_number);
	}

	/**
	 * Validates a peer's read (upload) request against the disk manager and,
	 * when seeding with piece rechecks enabled, bumps the piece's read counter.
	 * @return true if the block coordinates are consistent
	 */
	public boolean validateReadRequest( int pieceNumber, int offset, int length ) {
		if ( !disk_mgr.checkBlockConsistency( pieceNumber, offset, length )){
			return( false );
		}

		if ( enable_seeding_piece_rechecks && isSeeding()){
			final DiskManagerPiece dm_piece = dm_pieces[pieceNumber];
			// read count is stored in a short; mask to treat it as unsigned
			int read_count = dm_piece.getReadCount() & 0xffff;
			if ( read_count < SEED_CHECK_WAIT_MARKER - 1 ){
				dm_piece.setReadCount((short)( read_count + 1 ));
			}
		}

		return( true );
	}

	/** @return true if the received piece data is consistent with the stated coordinates */
	public boolean validatePieceReply(int pieceNumber, int offset, DirectByteBuffer data) {
		return( disk_mgr.checkBlockConsistency( pieceNumber, offset, data ));
	}
	
	/**
	 * @param pieceNumber piece to query
	 * @return the availability of that single piece
	 */
	public int getAvailability(int pieceNumber) {
		return( piecePicker.getAvailability(pieceNumber));
	}
	
	/**
	 * Handles a HAVE announcement from a peer: records the new availability,
	 * updates stats, performs super-seed bookkeeping, and for rare pieces
	 * statistically attributes the piece's bytes to the peers that could have
	 * supplied it.
	 * @param pieceNumber piece the peer now has
	 * @param pieceLength length of that piece in bytes
	 * @param pcOrigin the peer announcing the piece
	 */
	public void havePiece(int pieceNumber, int pieceLength, PEPeer pcOrigin) {
		piecePicker.addHavePiece(pcOrigin, pieceNumber);
		_stats.haveNewPiece(pieceLength);
		
		if(superSeedMode) {
			superSeedPieces[pieceNumber].peerHasPiece(pcOrigin);
			// if this was the piece we uniquely announced to this peer, that
			// announce has now been satisfied
			if(pieceNumber == pcOrigin.getUniqueAnnounce()) {
				pcOrigin.setUniqueAnnounce(-1);
				superSeedModeNumberOfAnnounces--;
			}      
		}
		// availability excluding the announcing peer itself
		int availability =piecePicker.getAvailability(pieceNumber) -1;
		if (availability < 4) {
			// don't count our own completed copy either
			if (dm_pieces[pieceNumber].isDone())
				availability--;
			if (availability <= 0)
				return;
            //for all peers

			final ArrayList peer_transports = peer_transports_cow;

			// credit each other peer that has this piece with a statistical
			// share of the piece's bytes
			for (int i = peer_transports.size() - 1; i >= 0; i--) {
				final PEPeerTransport pc = (PEPeerTransport) peer_transports.get(i);
				if (pc !=pcOrigin &&pc.getPeerState() ==PEPeer.TRANSFERING &&pc.isPieceAvailable(pieceNumber))
					((PEPeerStatsImpl)pc.getStats()).statisticalSentPiece(pieceLength / availability);
			}
		}
	}
	

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -