
storagewrapper.py

BitTorrent source, in Python. Please enjoy.
Page 1 of 3
        # working_path or the destination_path.
        d = cPickle.loads(f.read())

        try:
            snapshot = d['snapshot']
            work_or_dest = 0
            for filename, s in snapshot.iteritems():
                # all files should reside in either the working path or the
                # destination path.  For batch torrents, the file may have a
                # relative path so compare common path.
                if len(snapshot) > 1:  # batch torrent.
                    commonw = os.path.commonprefix([filename, working_path])
                    commond = os.path.commonprefix([filename, destination_path])
                else:
                    commonw = commond = filename
                # first file determines whether all are in work or dest path.
                if work_or_dest == 0:
                    if commonw == working_path:
                        work_or_dest = -1
                    elif commond == destination_path:
                        work_or_dest = 1
                    else:
                        return False
                elif work_or_dest == -1 and commonw != working_path:
                    return False
                elif work_or_dest == 1 and commond != destination_path:
                    return False

                # this could be a lot smarter, like punching holes in the
                # ranges on failed files in a batch torrent.
                if not os.path.exists(filename):
                    raise ValueError("No such file or directory: %s" % filename)
                if os.path.getsize(filename) < s['size']:
                    raise ValueError("File sizes do not match.")
                if os.path.getmtime(filename) < (s['mtime'] - 5):
                    raise ValueError("File modification times do not match.")

            self.places = array(self.typecode)
            self.places.fromstring(d['places'])
            self.rplaces = array(self.typecode)
            self.rplaces.fromstring(d['rplaces'])
            self.have = d['have']
            self.have_set = d['have_set']
            # We are reading the undownloaded section from the fast resume.
            # We should check whether the file exists.  If it doesn't then
            # we should not read from fastresume.
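            # Restore the storage layer's undownloaded map and the byte
            # counters below directly from the pickled dict, avoiding a full
            # hashcheck rescan of the data files.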
            self.storage.undownloaded = d['undownloaded']
            self.amount_left = d['amount_left']

            self.amount_inactive = self.amount_left
            # all unwritten partials are now inactive
            self.inactive_requests = d['unwritten_partials']
            self.amount_left_with_partials = self.amount_left
            for k, v in self.inactive_requests.iteritems():
                s = [b for b, e in v]
                if s:
                    self._make_pending(k, s)
                self.active_requests.setdefault(k, [])
            if self.amount_left_with_partials < 0:
                raise ValueError("Amount left < 0: %d" %
                                 self.amount_left_with_partials)
            if self.amount_left_with_partials > self.total_length:
                raise ValueError("Amount left > total length: %d > %d" %
                                 (self.amount_left_with_partials,
                                  self.total_length))
            self._initialized(True)
        except:
            self.have = Bitfield(self.numpieces)
            self.have_set = SparseSet()
            self.inactive_requests = {}
            self.active_requests = {}
            self.places = array(self.typecode, [NO_PLACE] * self.numpieces)
            self.rplaces = array(self.typecode, range(self.numpieces))
            raise

        return True

    def write_fastresume(self, resumefile):
        try:
            self._write_fastresume_v2(resumefile)
        except:
            import traceback
            traceback.print_exc()

    def _write_fastresume_v1(self, resumefile):
        if not self.initialized:
            return
        global_logger.debug('Writing fast resume: %s' % version_string)
        resumefile.write(version_string + '\n')
        # write fake junk
        resumefile.write(str(0) + '\n')
        for b, e, filename in self.storage.ranges:
            resumefile.write(str(0) + ' ' + str(0) + '\n')

        # copy the array so as not to screw the current state of things
        rplaces = array(self.rplaces.typecode, list(self.rplaces))
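        # Any slot that holds data for a piece we don't fully have is written
        # out as FASTRESUME_PARTIAL, so a v1 reader knows to re-request it.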
        # Ow. -G
        for i in xrange(self.numpieces):
            if rplaces[i] >= 0 and not self.have[rplaces[i]]:
                rplaces[i] = FASTRESUME_PARTIAL
        rplaces.tofile(resumefile)
        self.fastresume_dirty = False

    def _write_fastresume_v2(self, resumefile):
        if not self.initialized:
            return

        global_logger.debug('Writing fast resume: %s' % version_string)
        resumefile.write(version_string + '\n')
        d = {}
        snapshot = {}
        for filename in self.storage.range_by_name.iterkeys():
            if not os.path.exists(filename):
                continue
            s = {}
            s['size'] = os.path.getsize(filename)
            s['mtime'] = os.path.getmtime(filename)
            snapshot[filename] = s
        d['snapshot'] = snapshot

        d['places'] = self.places.tostring()
        d['rplaces'] = self.rplaces.tostring()
        d['have'] = self.have
        d['have_set'] = self.have_set
        d['undownloaded'] = self.storage.undownloaded
        d['amount_left'] = self.amount_left
        # collapse inactive and active requests into unwritten partials
        unwritten_partials = {}
        for k, v in self.inactive_requests.iteritems():
            if v:
                unwritten_partials.setdefault(k, []).extend(v)
        for k, v in self.active_requests.iteritems():
            if v:
                unwritten_partials.setdefault(k, []).extend(v)
        d['unwritten_partials'] = unwritten_partials
        resumefile.write(cPickle.dumps(d))
        self.fastresume_dirty = False

    ############################################################################

    def _realize_partials(self, partials):
        self.amount_left_with_partials = self.amount_left
        for piece in partials:
            if self.places[piece] < 0:
                pos = partials[piece][0]
                self.places[piece] = pos
                self.rplaces[pos] = piece

    def _markgot(self, piece, pos):
        if self.have[piece]:
            if piece != pos:
                return
            self.rplaces[self.places[pos]] = ALLOCATED
            self.places[pos] = self.rplaces[pos] = pos
            return
        self.places[piece] = pos
        self.rplaces[pos] = piece
        self.have[piece] = True
        self.have_set.add(piece)
        plen = self._piecelen(piece)
        self.storage.downloaded(self.piece_size * piece, plen)
        self.amount_left -= plen
        self.amount_inactive -= plen
        assert piece not in self.inactive_requests

    ## hashcheck
    ############################################################################

    def _get_data(self, i):
        if i in self._pieces_in_buf:
            p = i - self._pieces_in_buf[0]
            return buffer(self._piece_buf, p * self.piece_size,
                          self._piecelen(i))
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._get_data_gen, i)
        return df

    def _get_data_gen(self, i):
        num_pieces = int(max(1, self.READ_AHEAD_BUFFER_SIZE / self.piece_size))
        if i + num_pieces >= self.numpieces:
            size = self.total_length - (i * self.piece_size)
            num_pieces = self.numpieces - i
        else:
            size = num_pieces * self.piece_size
        self._pieces_in_buf = range(i, i + num_pieces)
        df = self._storage_read(i, size)
        yield df
        try:
            self._piece_buf = df.getResult()
        except BTFailure:  # short read
            self._piece_buf = ''
        p = i - self._pieces_in_buf[0]
        yield buffer(self._piece_buf, p * self.piece_size, self._piecelen(i))
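
    # Note on the two methods above: _get_data serves sequential hashcheck
    # reads from a read-ahead buffer.  _get_data_gen fills
    # READ_AHEAD_BUFFER_SIZE worth of pieces with one storage read, so
    # subsequent pieces come back as zero-copy buffer() slices without
    # touching the disk again.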

    def hashcheck_pieces(self, begin=0, end=None):
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._hashcheck_pieces,
                              begin, end)
        return df

    def _hashcheck_pieces(self, begin=0, end=None):
        # we need a full reverse-lookup of hashes for out-of-order compatibility
        targets = {}
        for i in xrange(self.numpieces):
            targets[self.hashes[i]] = i
        partials = {}
        if end is None:
            end = self.numpieces
        global_logger.debug('Hashcheck from %d to %d' % (begin, end))
        # TODO: make this work with more than one running at a time
        for i in xrange(begin, end):
            # we're shutting down, abort.
            if self.doneflag.isSet():
                yield False

            piece_len = self._piecelen(i)
            global_logger.debug("i=%d, piece_len=%d" % (i, piece_len))
            if not self._waspre(i, piece_len):
                # hole in the file
                continue
            r = self._get_data(i)
            if isinstance(r, Deferred):
                yield r
                data = r.getResult()
            else:
                data = r

            sh = sha(buffer(data, 0, self.lastlen))
            sp = sh.digest()
            sh.update(buffer(data, self.lastlen))
            s = sh.digest()
            # handle out-of-order pieces
            if s in targets and piece_len == self._piecelen(targets[s]):
                # handle one or more pieces with identical hashes properly
                piece_found = i
                if s != self.hashes[i]:
                    piece_found = targets[s]
                self.checked_pieces.add(piece_found)
                self._markgot(piece_found, i)
            # last piece junk. I'm not even sure this is right.
            elif (not self.have[self.numpieces - 1] and
                  sp == self.hashes[-1] and
                  (i == self.numpieces - 1 or
                   not self._waspre(self.numpieces - 1))):
                self.checked_pieces.add(self.numpieces - 1)
                self._markgot(self.numpieces - 1, i)
            else:
                self._check_partial(i, partials, data)
            self.statusfunc(fractionDone = 1 - self.amount_left /
                            self.total_length)

        global_logger.debug('Hashcheck from %d to %d complete.' % (begin, end))
        self._realize_partials(partials)
        self.fastresume_dirty = True
        yield True

    def hashcheck_piece(self, index, data=None):
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._hashcheck_piece,
                              index, data=data)
        return df

    def _hashcheck_piece(self, index, data=None):
        if not data:
            df = self._storage_read(index, self._piecelen(index))
            yield df
            data = df.getResult()
        if sha(data).digest() != self.hashes[index]:
            yield False
        self.checked_pieces.add(index)
        yield True

    ############################################################################
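
The incremental-hash pattern in _hashcheck_pieces is subtle enough to be worth
isolating: it hashes the first lastlen bytes, snapshots that digest (sp) to
recognize a misplaced final piece, then updates the same hash object with the
rest of the data, so the full-piece digest (s) still costs only one pass over
the data. A minimal standalone sketch of the same idea (the function name and
sample lengths are made up for illustration, and hashlib stands in for the
module's sha import):

    import hashlib

    def prefix_and_full_digest(data, lastlen):
        # One pass over the data yields both digests: what a short final
        # piece would hash to, and the hash of the whole piece.
        h = hashlib.sha1(data[:lastlen])
        prefix_digest = h.digest()   # compare against hashes[-1] (last piece)
        h.update(data[lastlen:])
        full_digest = h.digest()     # compare against hashes[i] (full piece)
        return prefix_digest, full_digest

    # Hypothetical torrent with 16 KiB pieces and a 10 KiB final piece.
    piece = b'\x00' * 16384
    prefix, full = prefix_and_full_digest(piece, 10240)

This works because hashlib objects are not finalized by digest(): calling
update() afterwards simply continues the running hash.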
