⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 storage_iocp.py

📁 BitTorrent source code in Python. Please enjoy.
💻 PY
📖 第 1 页 / 共 2 页
字号:
        # NOTE(review): this is the tail of a FilePool acquire method whose
        # `def` line is outside this chunk — presumably
        # acquire_handle(self, filename, for_write, length); confirm upstream.
        df = Deferred()
        # abort disk ops on unregistered files
        if filename not in self.file_to_torrent:
            df.callback(None)
            return df

        # All handle slots busy: queue the request; otherwise satisfy it now.
        if self.active_file_to_handles.total_length() == self.max_files_open:
            self.waiting_ops.append((df, filename, for_write, length))
        else:
            self._produce_handle(df, filename, for_write, length)

        return df

    def _produce_handle(self, df, filename, for_write, length):
        """Fire df with an open handle for filename, reusing a cached one
        when possible.

        Reuses an idle handle from open_file_to_handles if present,
        reopening it read/write when a write is requested on a read-only
        handle. Otherwise opens the file fresh, first evicting an arbitrary
        idle handle if the open-file limit has been reached. The handle is
        recorded as active before df.callback(handle) is invoked.
        """
        if filename in self.open_file_to_handles:
            handle = self.open_file_to_handles.pop_from_row(filename)
            # upgrade a read-only cached handle when a write is needed
            if for_write and not is_open_for_write(handle.mode):
                handle.close()
                handle = open_sparse_file(filename, 'rb+', length=length)
            #elif not for_write and is_open_for_write(handle.mode):
            #    handle.close()
            #    handle = file(filename, 'rb', 0)
        else:
            # at the limit: evict an arbitrary idle handle to make room
            if self.get_open_file_count() == self.max_files_open:
                oldfname, oldhandle = self.open_file_to_handles.popitem()
                oldhandle.close()
            self._ensure_exists(filename, length)
            if for_write:
                handle = open_sparse_file(filename, 'rb+', length=length)
            else:
                handle = open_sparse_file(filename, 'rb', length=length)
        self.active_file_to_handles.push_to_row(filename, handle)
        df.callback(handle)

    def release_handle(self, filename, handle):
        """Return a handle to the idle pool and wake any queued acquirers."""
        # NOTE(review): 'remove_fom_row' looks misspelled but presumably
        # matches the (misspelled) method name on the row-dict helper class
        # defined elsewhere — do not "fix" without checking that class.
        self.active_file_to_handles.remove_fom_row(filename, handle)
        self.open_file_to_handles.push_to_row(filename, handle)
        self.free_handle_notify()

class Storage(object):
    """Maps a torrent's piece-space byte ranges onto on-disk files and
    performs batched, Deferred-based reads and writes through a FilePool."""

    def __init__(self, config, filepool, save_path, files, add_task,
                 external_add_task, doneflag):
        # files: iterable of (filename, length) pairs in torrent order.
        # doneflag: threading.Event-like shutdown signal checked during init.
        self.filepool = filepool
        self.config = config
        self.doneflag = doneflag
        self.add_task = add_task
        self.external_add_task = external_add_task
        self.initialize(save_path, files)
    def initialize(self, save_path, files):
        """(Re)build the byte-range bookkeeping for files; returns a
        Deferred (startup_df) that fires when the threaded scan completes."""
        # a list of bytes ranges and filenames for window-based IO
        self.ranges = []
        # a dict of filename-to-ranges for piece priorities and filename lookup
        self.range_by_name = {}
        # a sparse set for smart allocation detection
        self.allocated_regions = SparseSet()
        # dict of filename-to-length on disk (for % complete in the file view)
        self.undownloaded = {}
        self.save_path = save_path

        # Rather implement this as an ugly hack here than change all the
        # individual calls. Affects all torrent instances using this module.
        if self.config['bad_libc_workaround']:
            bad_libc_workaround()

        self.initialized = False
        # scan the disk off-thread; _build_file_structs flips self.initialized
        self.startup_df = ThreadedDeferred(_wrap_task(self.external_add_task),
                                           self._build_file_structs,
                                           self.filepool, files)
        return self.startup_df

    def _build_file_structs(self, filepool, files):
        """Walk (filename, length) pairs, assigning each file its absolute
        byte range and recording already-allocated regions.

        Returns False if shutdown was signalled mid-scan, True otherwise.
        Raises BTFailure if a target path exists but is not a regular file.
        """
        total = 0
        for filename, length in files:
            # we're shutting down, abort.
            if self.doneflag.isSet():
                return False

            self.undownloaded[filename] = length
            if length > 0:
                self.ranges.append((total, total + length, filename))
            # zero-length files still get a (possibly empty) named range
            self.range_by_name[filename] = (total, total + length)

            if os.path.exists(filename):
                if not os.path.isfile(filename):
                    raise BTFailure(_("File %s already exists, but is not a "
                                      "regular file") % filename)
                l = os.path.getsize(filename)
                if l > length:
                    # This is the truncation Bram was talking about that no one
                    # else thinks is a good idea.
                    #h = file(filename, 'rb+')
                    #make_file_sparse(filename, h, length)
                    #h.truncate(length)
                    #h.close()
                    l = length

                # prefer the OS's sparse-file allocation map when available;
                # otherwise assume the first l on-disk bytes are allocated
                a = get_allocated_regions(filename, begin=0, length=l)
                if a is not None:
                    a.offset(total)
                else:
                    a = SparseSet()
                    if l > 0:
                        a.add(total, total + l)
                self.allocated_regions += a
            total += length
        self.total_length = total
        self.initialized = True
        return True

    def get_byte_range_for_filename(self, filename):
        """Return the (begin, end) byte range for filename, resolving
        relative names against save_path on a cache miss."""
        if filename not in self.range_by_name:
            filename = os.path.normpath(filename)
            filename = os.path.join(self.save_path, filename)
        return self.range_by_name[filename]

    def was_preallocated(self, pos, length):
        """True if [pos, pos+length) was already allocated on disk."""
        return self.allocated_regions.is_range_in(pos, pos+length)

    def get_total_length(self):
        """Total size in bytes across all files."""
        return self.total_length

    def _intervals(self, pos, amount):
        """Map the absolute span [pos, pos+amount) to a list of
        (filename, file_relative_begin, file_relative_end) pieces."""
        r = []
        stop = pos + amount
        # bisect with a huge sentinel end finds the first range that could
        # contain pos; back up one entry in case pos falls mid-range
        p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
        for begin, end, filename in self.ranges[p:]:
            if begin >= stop:
                break
            r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
        return r

    def _file_op(self, filename, pos, param, write):
        """Acquire a pool handle for filename and perform one seek+read or
        seek+write at file-relative pos; returns a Deferred firing with the
        operation's result (or 0 if the handle could not be acquired)."""
        begin, end = self.get_byte_range_for_filename(filename)
        length = end - begin
        final = Deferred()
        hdf = self.filepool.acquire_handle(filename, for_write=write, length=length)
        def handle_error(f=None):
            # report 0 bytes transferred rather than propagating the failure
            final.callback(0)
        # error acquiring handle
        if hdf is None:
            handle_error()
            return final
        def op(h):
            h.seek(pos)
            if write:
                # param is the data to write
                odf = h.write(param)
            else:
                # param is the byte count to read
                odf = h.read(param)
            def complete(r):
                # hand the handle back before surfacing the result
                self.filepool.release_handle(filename, h)
                final.callback(r)
            odf.addCallback(complete)
            odf.addErrback(final.errback)
        hdf.addCallback(op)
        hdf.addErrback(handle_error)
        return final

    def _batch_read(self, pos, amount):
        """Coroutine: read [pos, pos+amount) across file boundaries and
        yield the joined bytes; raises BTFailure on a short read."""
        dfs = []
        r = []

        # queue all the reads
        for filename, pos, end in self._intervals(pos, amount):
            df = self._file_op(filename, pos, end - pos, write=False)
            dfs.append(df)

        # yield on all the reads in order - they complete in any order
        for df in dfs:
            yield df
            r.append(df.getResult())

        r = ''.join(r)

        if len(r) != amount:
            raise BTFailure(_("Short read (%d of %d) - something truncated files?") %
                            (len(r), amount))

        yield r

    def read(self, pos, amount):
        """Return a Deferred firing with amount bytes read from pos."""
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._batch_read, pos, amount)
        return df

    def _batch_write(self, pos, s):
        """Coroutine: write string s at absolute pos, splitting it across
        file boundaries; yields the total byte count written."""
        dfs = []

        total = 0
        amount = len(s)

        # queue all the writes
        for filename, begin, end in self._intervals(pos, amount):
            length = end - begin
            assert length > 0, '%s %s' % (pos, amount)
            # buffer() slices s without copying (Python 2)
            d = buffer(s, total, length)
            total += length
            df = self._file_op(filename, begin, d, write=True)
            dfs.append(df)
        assert total == amount, '%s and %s' % (total, amount)

        written = 0
        # yield on all the writes - they complete in any order
        for df in dfs:
            yield df
            written += df.getResult()
        assert total == written, '%s and %s' % (total, written)

        yield total

    def write(self, pos, s):
        """Return a Deferred firing with the number of bytes written."""
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._batch_write, pos, s)
        return df

    def close(self):
        """Close all pool handles for this torrent's files; if startup has
        not finished, defer the close until it completes."""
        if not self.initialized:
            end = Deferred()
            def post_init(r):
                df = self.filepool.close_files(self.range_by_name)
                df.addCallback(end.callback)
            self.startup_df.addCallback(post_init)
            return end
        df = self.filepool.close_files(self.range_by_name)
        return df

    def downloaded(self, pos, length):
        """Credit [pos, pos+length) against each file's undownloaded count."""
        for filename, begin, end in self._intervals(pos, length):
            self.undownloaded[filename] -= end - begin

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -