
📄 btcontent.cpp

📁 cTorrent advanced 3.3.2: an improved version of CTorrent. This is currently the latest release.
💻 CPP
📖 Page 1 of 4
    if( !pBMasterFilter ) ERR_RETURN();
#endif
    if( arg_file_to_download ) SetFilter();
  }

  m_left_bytes = m_btfiles.GetTotalLength() / m_piece_length;
  if( m_btfiles.GetTotalLength() % m_piece_length ) m_left_bytes++;
  if( m_left_bytes != m_npieces ) ERR_RETURN();

  m_left_bytes = m_btfiles.GetTotalLength();

  if( arg_flg_check_only ){
    struct stat sb;
    if( stat(arg_bitfield_file, &sb) == 0 ){
      if( remove(arg_bitfield_file) < 0 ){
        CONSOLE.Warning(2, "warn, couldn't delete bit field file \"%s\":  %s",
          arg_bitfield_file, strerror(errno));
      }
    }
    if( r ){
      if( CheckExist() < 0 ) ERR_RETURN();
      if( !pBF->IsEmpty() )
        m_btfiles.PrintOut(); // show file completion
    }
    CONSOLE.Print("Already/Total: %d/%d (%d%%)", (int)(pBF->Count()),
      (int)m_npieces, (int)(100 * pBF->Count() / m_npieces));
    if( !arg_flg_force_seed_mode ){
      SaveBitfield();
      if( arg_completion_exit ) CompletionCommand();
      exit(0);
    }
  }else if( r ){  // files exist already
    if( pBRefer->SetReferFile(arg_bitfield_file) < 0 ){
      if( !arg_flg_force_seed_mode ){
        CONSOLE.Warning(2,
          "warn, couldn't set bit field refer file \"%s\":  %s",
          arg_bitfield_file, strerror(errno));
        CONSOLE.Warning(2, "This is normal if you are seeding.");
      }
      pBRefer->SetAll();  // need to check all pieces
    }else{
      CONSOLE.Interact("Found bit field file; %s previous state.",
        arg_flg_force_seed_mode ? "resuming download from" : "verifying");
      if( unlink(arg_bitfield_file) < 0 ){
        CONSOLE.Warning(2, "warn, couldn't delete bit field file \"%s\":  %s",
          arg_bitfield_file, strerror(errno));
      }
      // Mark missing pieces as "checked" (eligible for download).
      *pBChecked = *pBRefer;
      pBChecked->Invert();
    }
  }

  if( !r ){  // don't hash-check if the files were just created
    m_check_piece = m_npieces;
    pBChecked->SetAll();
    if( arg_flg_force_seed_mode ){
      CONSOLE.Warning(2, "Files were not present; overriding force mode!");
    }
  }else if( arg_flg_force_seed_mode && !arg_flg_check_only ){
    size_t idx = 0;
    *pBF = *pBRefer;
    if( pBF->IsFull() ){
      CONSOLE.Interact("Skipping hash checks and forcing seed mode.");
      CONSOLE.Interact(
       "-----> STOP NOW if you have not downloaded the whole torrent! <-----");
      m_left_bytes = 0;
    }else for( ; idx < m_npieces; idx++ ){
      if( pBF->IsSet(idx) )
        m_left_bytes -= GetPieceLength(idx);
    }
    m_check_piece = m_npieces;
    pBChecked->SetAll();
  }
  delete pBRefer;

  m_cache = new BTCACHE *[m_npieces];
  if( !m_cache ){
    CONSOLE.Warning(1, "error, allocate cache index failed");
    ERR_RETURN();
  }
  memset(m_cache, 0, m_npieces * sizeof(BTCACHE*));
  CacheConfigure();

  *ptr = (unsigned char) 19; ptr++; // protocol string length
  memcpy(ptr,"BitTorrent protocol",19); ptr += 19; //  protocol string
  memset(ptr,0,8);  // reserved set zero.
  {  // peer id
    char *sptr = arg_user_agent;
    char *dptr = (char *)m_shake_buffer + 48;
    char *eptr = dptr + PEER_ID_LEN;
    while (*sptr) *dptr++ = *sptr++;
    while (dptr < eptr) *dptr++ = (unsigned char)random();
  }

  if( arg_announce ){
    int n;
    delete []m_announce;
    if( (n = atoi(arg_announce)) && n <= 9 && m_announcelist[n-1] )
      m_announce = m_announcelist[n-1];
    else m_announce = arg_announce;
    CONSOLE.Print("Using announce URL:  %s", m_announce);
  }

  return 0;
}

btContent::~btContent()
{
  if(m_hash_table) delete []m_hash_table;
  if(m_announce) delete []m_announce;
  if(global_piece_buffer) delete []global_piece_buffer;
  if(pBF) delete pBF;
}

void btContent::_Set_InfoHash(unsigned char buf[20])
{
  memcpy(m_shake_buffer + 28, buf, 20);
}

// returns <0 if error; if using cache: 1 if read from disk, 0 otherwise
ssize_t btContent::ReadSlice(char *buf,size_t idx,size_t off,size_t len)
{
  ssize_t retval = 0;
  uint64_t offset = (uint64_t)idx * (uint64_t)m_piece_length + off;

  if( !m_cache_size ) return buf ? m_btfiles.IO(buf, offset, len, 0) : 0;
  else{
    size_t len2;
    BTCACHE *p;

    p = m_cache[idx];
    while( len && p ){
      while( p && offset + len > p->bc_off && !CACHE_FIT(p, offset, len) ){
        p = p->bc_next;
      }
      if( !p || !CACHE_FIT(p, offset, len) ) break;
      if( offset < p->bc_off ){
        len2 = p->bc_off - offset;
        if( CacheIO(buf, offset, len2, 0) < 0 ) return -1;
        retval = 1;
        if(buf) m_cache_miss += len2 / DEFAULT_SLICE_SIZE +
                                ((len2 % DEFAULT_SLICE_SIZE) ? 1 : 0);
        else m_cache_pre += len2 / DEFAULT_SLICE_SIZE +
                            ((len2 % DEFAULT_SLICE_SIZE) ? 1 : 0);
        p = m_cache[idx];  // p may not be valid after CacheIO
      }else{
        char *src;
        if( offset > p->bc_off ){
          len2 = p->bc_off + p->bc_len - offset;
          if( len2 > len ) len2 = len;
          src = p->bc_buf + offset - p->bc_off;
        }else{
          len2 = (len > p->bc_len) ? p->bc_len : len;
          src = p->bc_buf;
        }
        if( buf ){
          memcpy(buf, src, len2);
          m_cache_hit += len2 / DEFAULT_SLICE_SIZE +
                         ((len2 % DEFAULT_SLICE_SIZE) ? 1 : 0);
        }else{  // prefetch only, update the age
          if( m_cache_newest != p ){
            if( m_cache_oldest == p ) m_cache_oldest = p->age_next;
            else p->age_prev->age_next = p->age_next;
            p->age_next->age_prev = p->age_prev;
            m_cache_newest->age_next = p;
            p->age_next = (BTCACHE *)0;
            p->age_prev = m_cache_newest;
            m_cache_newest = p;
          }
        }
        p = p->bc_next;
      }
      if( buf ) buf += len2;
      offset += len2;
      len -= len2;
    }  // end while

    if( len ){
      if(buf) m_cache_miss += len / DEFAULT_SLICE_SIZE +
                              ((len % DEFAULT_SLICE_SIZE) ? 1 : 0);
      else m_cache_pre += len / DEFAULT_SLICE_SIZE +
                          ((len % DEFAULT_SLICE_SIZE) ? 1 : 0);
      retval = CacheIO(buf, offset, len, 0);
      return (retval < 0) ?
        retval : 1;
    }
  }
  return retval;
}

void btContent::CacheClean(size_t need)
{
  BTCACHE *p, *pnext;
  int f_flush = 0;

  if( m_flush_failed ) FlushCache();  // try again

 again:
  for( p=m_cache_oldest; p && m_cache_size < m_cache_used + need; p=pnext ){
    pnext = p->age_next;
    if( f_flush ){
      if(arg_verbose)
        CONSOLE.Debug("Flushing %d/%d/%d", (int)(p->bc_off / m_piece_length),
          (int)(p->bc_off % m_piece_length), (int)(p->bc_len));
      FlushEntry(p);
    }
    if( !p->bc_f_flush ){
      if(arg_verbose)
        CONSOLE.Debug("Expiring %d/%d/%d", (int)(p->bc_off / m_piece_length),
          (int)(p->bc_off % m_piece_length), (int)(p->bc_len));
      if( m_cache_oldest == p ) m_cache_oldest = p->age_next;
      else p->age_prev->age_next = p->age_next;
      if( m_cache_newest == p ) m_cache_newest = p->age_prev;
      else p->age_next->age_prev = p->age_prev;
      if( p->bc_prev ) p->bc_prev->bc_next = p->bc_next;
      else m_cache[p->bc_off / m_piece_length] = p->bc_next;
      if( p->bc_next ) p->bc_next->bc_prev = p->bc_prev;
      m_cache_used -= p->bc_len;
      delete []p->bc_buf;
      delete p;
    }
  }

  if( m_cache_size < m_cache_used + need ){  // still not enough
    if( m_cache_size < cfg_cache_size*1024*1024 ){  // can alloc more
      m_cache_size = (m_cache_used + need > cfg_cache_size*1024*1024) ?
        cfg_cache_size*1024*1024 : (m_cache_used + need);
    }
    if( m_cache_size < m_cache_used + need && m_cache_used && !f_flush ){
      if(arg_verbose) CONSOLE.Debug("CacheClean flushing to obtain space");
      f_flush = 1;
      goto again;
    }  // else we tried...
  }
}

// Don't call this function if cfg_cache_size==0 !
void btContent::CacheEval()
{
  BTCACHE *p = m_cache_oldest;
  size_t interval;
  size_t unflushed = 0, dlnext, upadd = 0, upmax = 0, upmin = 0, total;

  size_t rateup = Self.RateUL();
  size_t ratedn = Self.RateDL();
  size_t unchoked = WORLD.GetUnchoked();

  // Time until next cache size eval: unchoke interval or time to dl a piece.
  if( ratedn ){
    interval = m_piece_length / ratedn;
    if( interval > WORLD.GetUnchokeInterval() )
      interval = WORLD.GetUnchokeInterval();
    else if( 0==interval ) interval = 1;
  }else interval = WORLD.GetUnchokeInterval();

  // Download: total unflushed data + data to dl before next eval
  // Hold the first piece a bit to let uploading begin.
  if( pBF->IsFull() ) dlnext = 0;
  else{
    if( pBF->Count() < 2 ) unflushed = m_cache_used;
    else for( ; p; p = p->age_next )
      if( p->bc_f_flush ) unflushed += p->bc_len;
    // Make sure we can read back and check a completed piece.
    // But free some cache if download has completely stalled.
    dlnext = ratedn ? (ratedn * interval + m_piece_length) : 0;
  }

  // Upload: need enough to hold read/dl'd data until it can be sent
  upmin = DEFAULT_SLICE_SIZE * unchoked;
  upmax = cfg_cache_size*1024*1024;
  if( pBF->IsFull() ){
    // Seed mode.  All cache data is prefetched, and we don't normally need to
    // keep prefetched data longer than 2.5 unchoke intervals.
    if( rateup && unchoked ){
      // A very slow peer can't possibly benefit from cache--don't grow for it.
      size_t slowest = (size_t)( 1 + DEFAULT_SLICE_SIZE /
                                 ((double)cfg_cache_size*1024*1024 / rateup) );
      // Lead cache: data we need to cache to keep the slowest up's data cached
      // Add a slice per up for timing uncertainty
      if( slowest = WORLD.GetSlowestUp(slowest) )
        upadd = DEFAULT_SLICE_SIZE * ( rateup / slowest + unchoked-1 );
      else upadd = DEFAULT_SLICE_SIZE * unchoked;
      upmin = DEFAULT_SLICE_SIZE * unchoked;
      upmax = (size_t)( DEFAULT_SLICE_SIZE * (unchoked-1) +
        rateup * 2.5 * WORLD.GetUnchokeInterval() );
    }
  }else{
    if( rateup > ratedn ){
      size_t slowest = (size_t)( 1 +
        cfg_req_slice_size * ((double)ratedn / cfg_cache_size*1024*1024) +
        DEFAULT_SLICE_SIZE * ((double)rateup / cfg_cache_size*1024*1024) );
      if( slowest = WORLD.GetSlowestUp(slowest) )
        // lead cache is how much we'll use while uploading a slice to slowest
        // (default_slice_size / slowest) * (ratedn + rateup)
        upadd = (size_t)( ((double)DEFAULT_SLICE_SIZE / slowest) *
                          (ratedn + rateup + 1) );
      else upadd = m_piece_length * unchoked;
    }
    else if( rateup ){
      // same as m_piece_length / (cfg_cache_size*1024*1024 / (double)ratedn)
      size_t slowest = (size_t)( 1 +
        ratedn * ((double)m_piece_length / (cfg_cache_size*1024*1024)) );
      if( slowest = WORLD.GetSlowestUp(slowest) ){
        // m_piece_length / (double)slowest * ratedn
        // optimize, then round up a piece and add a piece
        upadd = m_piece_length * (ratedn / slowest + 2);
      }else{  // gimme 10 seconds worth (unchoke interval)
        // Can't keep pieces in cache long enough to upload them.
        // Rely on prefetching slices from disk instead.
        upadd = ratedn * WORLD.GetUnchokeInterval() +
                DEFAULT_SLICE_SIZE * unchoked;
      }
    }
  }

  if( upadd < upmin ) upadd = upmin;

  // Add a slice to round up
  total = unflushed + dlnext + upadd + cfg_req_slice_size;

  // Limit to max configured size
  if( total > cfg_cache_size*1024*1024 ) total = cfg_cache_size*1024*1024;

  // Don't decrease cache size if flush failed.
  if( !m_flush_failed || total > m_cache_size ) m_cache_size = total;

  if(arg_verbose)
    CONSOLE.Debug("DL need: %dK  UL need: %dK  Cache: %dK  Used: %dK",
    (int)((unflushed+dlnext)/1024), (int)(upadd/1024),
    (int)(m_cache_size/1024), (int)(m_cache_used/1024));

  m_cache_eval_time = now + interval;
}

void btContent::CacheConfigure()
{
  if( cfg_cache_size ){
    if( cfg_cache_size > GetTotalFilesLength()/1024/1024 )
      cfg_cache_size = (GetTotalFilesLength()+1024*1024-1)/1024/1024;
    CacheEval();
  }else m_cache_size = 0;

  if( m_cache_size < m_cache_used && !m_flush_failed ) CacheClean(0);
}

int btContent::NeedFlush() const
{
  if( m_flush_failed ){
    if( now > m_flush_tried ) return 1;
  }else
    return (m_flushq ||
            (m_cache_oldest && m_cache_oldest->bc_f_flush &&
             m_cache_used >= cfg_cache_size*1024*1024-cfg_req_slice_size+1)) ?
           1 : 0;
}

void btContent::FlushCache()
{
  if(arg_verbose) CONSOLE.Debug("Flushing all cache");
  for( int i=0; i < m_npieces; i++ ){
    if( m_cache[i] ) FlushPiece(i);
    if( m_flush_failed ) break;
  }
}

void btContent::FlushPiece(size_t idx)
{
  BTCACHE *p;

  p = m_cache[idx];

  for( ; p; p = p->bc_next ){
    // Update the age if piece is complete, as this should mean we've just
    // completed the piece and made it available.
    if( pBF->IsSet(idx) && m_cache_newest != p ){
      if( m_cache_oldest == p ) m_cache_oldest = p->age_next;
      else p->age_prev->age_next = p->age_next;
      p->age_next->age_prev = p->age_prev;
      m_cache_newest->age_next = p;
      p->age_next = (BTCACHE *)0;
      p->age_prev = m_cache_newest;
      m_cache_newest = p;
    }
    if( p->bc_f_flush ) FlushEntry(p);
  }
}

void btContent::FlushEntry(BTCACHE *p)
{
  if( p->bc_f_flush ){
    if( m_btfiles.IO(p->bc_buf, p->bc_off, p->bc_len, 1) < 0 ){
      m_flush_tried = now;
      if( now >= m_flush_failed + 300 ){
        if( !m_flush_failed )
          m_cache_size += cfg_req_slice_size * WORLD.GetDownloads() * 2;
        CONSOLE.Warning(1, "warn, write file failed while flushing cache.");
        CONSOLE.Warning(1,
          "You need to have at least %llu bytes free on this filesystem!",
          (unsigned long long)(m_left_bytes + m_cache_used));
        CONSOLE.Warning(1,
          "This could also be caused by a conflict or disk error.");
        if( !IsFull() ||
            (!m_flush_failed && m_cache_size > cfg_cache_size*1024*1024) ){
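Note on the cache bookkeeping above: ReadSlice(), CacheClean() and FlushPiece() all maintain one doubly linked "age" list over the BTCACHE entries (m_cache_oldest .. m_cache_newest, via age_prev/age_next), and they get LRU behaviour by unlinking a freshly touched entry and re-attaching it at the newest end. The repeated expressions of the form len / DEFAULT_SLICE_SIZE + ((len % DEFAULT_SLICE_SIZE) ? 1 : 0) are simply ceiling division, counting how many whole slices a request spans for the hit/miss/prefetch statistics. The sketch below is not part of btcontent.cpp; it reproduces only the promote-to-newest pointer surgery on a simplified node type (Node, AgeList, push_newest and touch are illustrative names) so the relinking in the listing is easier to follow.

#include <cassert>

// Simplified stand-in for BTCACHE: only the age-list links are modeled.
struct Node {
  Node *age_prev = nullptr;
  Node *age_next = nullptr;
};

struct AgeList {
  Node *oldest = nullptr;   // plays the role of m_cache_oldest
  Node *newest = nullptr;   // plays the role of m_cache_newest

  // Append a node at the "newest" end of the list.
  void push_newest(Node *p) {
    p->age_prev = newest;
    p->age_next = nullptr;
    if( newest ) newest->age_next = p;
    else oldest = p;
    newest = p;
  }

  // Promote p to newest, mirroring the relink done in ReadSlice()/FlushPiece():
  // unlink p from its current position, then hang it off the tail.
  void touch(Node *p) {
    if( newest == p ) return;                // already newest: nothing to do
    if( oldest == p ) oldest = p->age_next;  // p was the head of the list
    else p->age_prev->age_next = p->age_next;
    p->age_next->age_prev = p->age_prev;     // safe: p != newest, so age_next != 0
    newest->age_next = p;
    p->age_next = nullptr;
    p->age_prev = newest;
    newest = p;
  }
};

int main() {
  Node a, b, c;
  AgeList lru;
  lru.push_newest(&a);
  lru.push_newest(&b);
  lru.push_newest(&c);    // order (oldest -> newest): a, b, c

  lru.touch(&a);          // a was oldest; order becomes b, c, a
  assert( lru.oldest == &b && lru.newest == &a );
  assert( b.age_next == &c && c.age_next == &a && a.age_next == nullptr );
  return 0;
}

CacheClean() walks the same links in the other direction: it scans from m_cache_oldest forward and frees the entries that no longer need flushing, oldest first, which is exactly an LRU eviction pass.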
