
📄 rerequester.py

📁 BitTorrent source code in Python. Please enjoy.
💻 PY
    def _announce(self, event=None):
        assert not self.dead
        assert thread.get_ident() == self.rawserver.ident
        self.current_started = bttime()
        self.errorfunc(logging.INFO, 'announce: ' + str(self.current_started))
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
             (self.url, str(self.up()*self.config.get('lie',1) - self.previous_up),
              str(self.down() - self.previous_down), str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.config['max_initiate']:
            s += '&numwant=0'
        else:
            s += '&compact=1'
        if event is not None:
            s += '&event=' + event

        def _start_announce(*a):
            self.running_df = ThreadedDeferred(_wrap_task(self.externalsched),
                                               self._rerequest, s, self.peerid,
                                               daemon=True)
            def _rerequest_finish(x):
                self.running_df = None
            def _rerequest_error(e):
                self.errorfunc(logging.ERROR, _("Rerequest failed!"),
                               exception=True, exc_info=e)
            self.running_df.addCallbacks(_rerequest_finish, _rerequest_error)
            if event == 'stopped':
                # if self._rerequest needs any state, pass it through args
                self.cleanup()

        if not event:
            assert self.running_df == None, "Previous rerequest event is still running!"

        if self.running_df:
            self.running_df.addCallback(_start_announce)
        else:
            _start_announce()

    # Must destroy all references that could cause reference circles
    def cleanup(self):
        assert thread.get_ident() == self.rawserver.ident
        self.dead = True
        self.sched = None
        self.howmany = None
        self.connect = None
        self.externalsched = lambda *args: None
        self.amount_left = None
        self.up = None
        self.down = None
        # don't zero this one, we need it on shutdown w/ error
        #self.errorfunc = None
        self.upratefunc = None
        self.downratefunc = None
        self.ever_got_incoming = None
        self.diefunc = None
        self.successfunc = None

    def _rerequest(self, url, peerid):
        if self.config['ip']:
            try:
                url += '&ip=' + socket.gethostbyname(self.config['ip'])
            except:
                self.errorfunc(logging.WARNING,
                               _("Problem resolving config ip (%s), gethostbyname failed") % self.config['ip'],
                               exc_info=sys.exc_info())
        request = Request(url)
        request.add_header('User-Agent', 'BitTorrent/' + version)
        if self.config['tracker_proxy']:
            request.set_proxy(self.config['tracker_proxy'], 'http')
        try:
            h = urlopen(request)
            data = h.read()
            h.close()
        # urllib2 can raise various crap that doesn't have a common base
        # exception class especially when proxies are used, at least
        # ValueError and stuff from httplib
        except Exception, e:
            try:
                s = unicode(e.args[0])
            except:
                s = unicode(e)
            r = _("Problem connecting to tracker - %s: %s") % (e.__class__, s)
            def f():
                self._postrequest(errormsg=r, exc=e, peerid=peerid)
        else:
            def f():
                self._postrequest(data=data, peerid=peerid)
        self.externalsched(0, f)

    def _give_up(self):
        if self.howmany() == 0 and self.amount_left() > 0:
            # sched shouldn't be strictly necessary
            def die():
                self.diefunc(logging.CRITICAL,
                             _("Aborting the torrent as it could not "
                               "connect to the tracker while not "
                               "connected to any peers. "))
            self.sched(0, die)

    def _fail(self, exc=None, rejected=False):
        assert thread.get_ident() == self.rawserver.ident
        if self.announce_list:
            restarted = self.announce_list_fail()
            if restarted:
                self.fail_wait = None
                if rejected:
                    self._give_up()
            else:
                self.baseurl = self.announce_list_next()
                self.peerid = None
                # If it was a socket error, try the new url right away. it's
                # probably not abusive since there was no one there to abuse.
                # In the timeout case, our socket timeout is high enough that
                # we simulate fail_wait anyway.
                # URLError is here because of timeouts.
                if isinstance(exc, socket.error) or isinstance(exc, URLError):
                    self._check()
                    return
        else:
            if rejected:
                self._give_up()

        if self.fail_wait is None:
            self.fail_wait = 50
        else:
            self.fail_wait *= 1.4 + random.random() * .2
        self.fail_wait = min(self.fail_wait,
                             self.config['max_announce_retry_interval'])

    def _postrequest(self, data=None, errormsg=None, exc=None, peerid=None):
        assert thread.get_ident() == self.rawserver.ident
        self.current_started = None
        self.errorfunc(logging.INFO, 'postrequest: ' + str(self.current_started))
        self.last_time = bttime()
        if self.dead:
            return
        if errormsg is not None:
            self.errorfunc(logging.WARNING, errormsg)
            self._fail(exc)
            return
        try:
            r = bdecode(data)
            check_peers(r)
        except BTFailure, e:
            if data != '':
                self.errorfunc(logging.ERROR,
                               _("bad data from tracker (%s)") % repr(data),
                               exc_info=sys.exc_info())
            self._fail()
            return
        if type(r.get('complete')) in (int, long) and \
           type(r.get('incomplete')) in (int, long):
            self.tracker_num_seeds = r['complete']
            self.tracker_num_peers = r['incomplete']
        else:
            self.tracker_num_seeds = self.tracker_num_peers = None
        if r.has_key('failure reason'):
            self.errorfunc(logging.ERROR, _("rejected by tracker - ") +
                           r['failure reason'])
            self._fail(rejected=True)
            return
        self.fail_wait = None
        if r.has_key('warning message'):
            self.errorfunc(logging.ERROR, _("warning from tracker - ") +
                           r['warning message'])
        self.announce_interval = r.get('interval', self.announce_interval)
        self.config['rerequest_interval'] = r.get('min interval',
                                                  self.config['rerequest_interval'])
        self.trackerid = r.get('tracker id', self.trackerid)
        self.last = r.get('last')

        p = r['peers']
        peers = {}
        if type(p) == str:
            for x in xrange(0, len(p), 6):
                ip = socket.inet_ntoa(p[x:x+4])
                port = struct.unpack('>H', p[x+4:x+6])[0]
                peers[(ip, port)] = None
        else:
            for x in p:
                peers[(x['ip'], x['port'])] = x.get('peer id')
        ps = len(peers) + self.howmany()
        if ps < self.config['max_initiate']:
            if self.doneflag.isSet():
                if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                    self.last = None
            else:
                if r.get('num peers', 1000) > ps * 1.2:
                    self.last = None
        for addr, id in peers.iteritems():
            self.connect(addr, id)
        if peerid == self.wanted_peerid:
            self.successfunc()
        if self.announce_list:
            self.announce_list_success()
        self._check()


class DHTRerequester(Rerequester):

    def __init__(self, config, sched, howmany, connect, externalsched, rawserver,
                 amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
                 upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc, dht):
        self.dht = dht
        Rerequester.__init__(self, "http://localhost/announce", [], config, sched,
                             externalsched, rawserver, howmany, connect,
                             amount_left, up, down, port, myid, infohash,
                             errorfunc, doneflag, upratefunc, downratefunc,
                             ever_got_incoming, diefunc, sfunc)

    def _announce(self, event=None):
        self.current_started = bttime()
        self._rerequest("", self.peerid)

    def _rerequest(self, url, peerid):
        self.peers = ""
        try:
            self.dht.getPeersAndAnnounce(str(self.announce_infohash), self.port,
                                         self._got_peers)
        except Exception, e:
            self._postrequest(errormsg=_("Trackerless lookup failed: ") + unicode(e.args[0]),
                              peerid=self.wanted_peerid)

    def _got_peers(self, peers):
        if not self.howmany:
            return
        if not peers:
            self._postrequest(bencode({'peers':''}), peerid=self.wanted_peerid)
        else:
            self._postrequest(bencode({'peers':peers[0]}), peerid=None)

    def _announced_peers(self, nodes):
        pass

    def announce_stop(self):
        # don't do anything
        pass
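For reference, the compact tracker response handled in _postrequest above packs each peer into 6 bytes: a 4-byte IPv4 address followed by a 2-byte big-endian port. Below is a minimal standalone sketch of that decoding in the same Python 2 style as the listing; the helper name decode_compact_peers and the sample byte string are made up for illustration.

import socket
import struct

def decode_compact_peers(p):
    # Each entry is 6 bytes: 4 for the IPv4 address, 2 for the port ('>H').
    peers = {}
    for x in xrange(0, len(p), 6):
        ip = socket.inet_ntoa(p[x:x+4])
        port = struct.unpack('>H', p[x+4:x+6])[0]
        peers[(ip, port)] = None  # compact responses carry no peer id
    return peers

# Hypothetical sample: two peers, 10.0.0.1:6881 and 10.0.0.2:51413.
sample = '\x0a\x00\x00\x01\x1a\xe1' + '\x0a\x00\x00\x02\xc8\xd5'
print decode_compact_peers(sample)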
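The tracker retry timing in _fail above is a capped, jittered exponential backoff: the first failure waits 50 seconds, each later failure multiplies the wait by a random factor between 1.4 and 1.6, and the result is clamped to max_announce_retry_interval. The sketch below shows that schedule in isolation, again in Python 2 style; the 1800-second cap and the helper name next_fail_wait are assumed for illustration, not taken from the listing.

import random

def next_fail_wait(fail_wait, max_interval=1800):
    # Mirrors the backoff in Rerequester._fail: start at 50s, then grow by
    # a factor in [1.4, 1.6), clamped to the configured maximum.
    if fail_wait is None:
        fail_wait = 50
    else:
        fail_wait *= 1.4 + random.random() * .2
    return min(fail_wait, max_interval)

wait = None
for attempt in xrange(6):
    wait = next_fail_wait(wait)
    print 'retry %d in about %.0f seconds' % (attempt + 1, wait)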
