📄 convertedmetainfo.py
# Fragment from the body of the ConvertedMetainfo class; the module-level
# imports and the "def" line of the error-reporting method that opens this
# excerpt are not part of the listing.

        self.reported_errors = True
        if self.bad_torrent_unsolvable:
            errorfunc(logging.ERROR,
                      _("This .torrent file has been created with a broken "
                        "tool and has incorrectly encoded filenames. Some or "
                        "all of the filenames may appear different from what "
                        "the creator of the .torrent file intended."))
        elif self.bad_torrent_noncharacter:
            errorfunc(logging.ERROR,
                      _("This .torrent file has been created with a broken "
                        "tool and has bad character values that do not "
                        "correspond to any real character. Some or all of the "
                        "filenames may appear different from what the creator "
                        "of the .torrent file intended."))
        elif self.bad_torrent_wrongfield:
            errorfunc(logging.ERROR,
                      _("This .torrent file has been created with a broken "
                        "tool and has incorrectly encoded filenames. The "
                        "names used may still be correct."))
        elif self.bad_conversion:
            errorfunc(logging.WARNING,
                      _('The character set used on the local filesystem ("%s") '
                        'cannot represent all characters used in the '
                        'filename(s) of this torrent. Filenames have been '
                        'changed from the original.') % get_filesystem_encoding())
        elif self.bad_windows:
            errorfunc(logging.WARNING,
                      _("The Windows filesystem cannot handle some "
                        "characters used in the filename(s) of this torrent. "
                        "Filenames have been changed from the original."))
        elif self.bad_path:
            errorfunc(logging.WARNING,
                      _("This .torrent file has been created with a broken "
                        "tool and has at least 1 file with an invalid file "
                        "or directory name. However since all such files "
                        "were marked as having length 0 those files are "
                        "just ignored."))

    # At least BitComet seems to make bad .torrent files that have
    # fields in an unspecified non-utf8 encoding. Some of those have separate
    # 'field.utf-8' attributes. Less broken .torrent files have an integer
    # 'codepage' key or a string 'encoding' key at the root level.
    def _get_attr(self, d, attrib):
        def _decode(o, encoding):
            if encoding is None:
                encoding = 'utf8'
            if isinstance(o, str):
                try:
                    s = o.decode(encoding)
                except:
                    self.bad_torrent_wrongfield = True
                    s = o.decode(encoding, 'replace')
                t = s.translate(noncharacter_translate)
                if t != s:
                    self.bad_torrent_noncharacter = True
                return t
            if isinstance(o, dict):
                # sub-keys spelled 'field.utf-8' carry a utf-8 copy of the
                # value, so decode those as utf-8 regardless of the declared
                # encoding (passing None makes _decode default to 'utf8')
                return dict([(k, _decode(v, None if k.endswith('.utf-8')
                                         else encoding))
                             for k, v in o.iteritems()])
            if isinstance(o, list):
                return [_decode(i, encoding) for i in o]
            return o

        # we prefer utf8 if we can find it. at least it declares its encoding
        v = _decode(d.get(attrib + '.utf-8'), 'utf8')
        if v is None:
            v = _decode(d[attrib], self.encoding)
        return v

    def _fix_windows(self, name, t=windows_translate):
        bad = False
        r = name.translate(t)
        # for some reason name cannot end with '.' or space
        if r[-1] in '. ':
            r = r + '-'
        if r != name:
            self.bad_windows = True
            bad = True
        return (r, bad)

    def _to_fs(self, name):
        return self._to_fs_2(name)[1]

    def _to_fs_2(self, name):
        if sys.platform.startswith('win'):
            name, bad = self._fix_windows(name)
        r, bad = encode_for_filesystem(name)
        self.bad_conversion = bad
        return (bad, r)

    def to_data(self):
        return bencode(self.metainfo)

    def check_for_resume(self, path):
        """Determine whether this torrent was previously downloaded to path.

           Returns:
           -1: STOP!  gross mismatch of files
            0: MAYBE a resume, maybe not
            1: almost definitely a RESUME - file contents, sizes, and count
               match exactly
        """
        STOP = -1
        MAYBE = 0
        RESUME = 1

        if self.is_batch != os.path.isdir(path):
            return STOP

        disk_files = {}
        if self.is_batch:
            metainfo_files = dict(zip(self.files_fs, self.sizes))
            metainfo_dirs = set()
            for f in self.files_fs:
                metainfo_dirs.add(os.path.split(f)[0])
            # BUG: do this in a thread, so it doesn't block the UI
            for (dirname, dirs, files) in os.walk(path):
                here = dirname[len(path)+1:]
                for f in files:
                    p = os.path.join(here, f)
                    if p in metainfo_files:
                        disk_files[p] = os.stat(os.path.join(dirname, f))[6]
                        if disk_files[p] > metainfo_files[p]:
                            # file on disk that's bigger than the
                            # corresponding one in the torrent
                            return STOP
                    else:
                        # file on disk that's not in the torrent
                        return STOP
                for i, d in enumerate(dirs):
                    if d not in metainfo_dirs:
                        # directory on disk that's not in the torrent
                        return STOP
        else:
            if os.access(path, os.F_OK):
                disk_files[self.name_fs] = os.stat(path)[6]
            metainfo_files = {self.name_fs: self.sizes[0]}

        if len(disk_files) == 0:
            # no files on disk, definitely not a resume
            return STOP

        if set(disk_files.keys()) != set(metainfo_files.keys()):
            # check files
            if len(metainfo_files) > len(disk_files):
                # file in the torrent that's not on disk
                return MAYBE
        else:
            # check sizes
            ret = RESUME
            for f, s in disk_files.iteritems():
                if disk_files[f] < metainfo_files[f]:
                    # file on disk that's smaller than the
                    # corresponding one in the torrent
                    ret = MAYBE
                else:
                    # file sizes match exactly
                    continue
            return ret

    def get_tracker_ips(self, wrap_task):
        """Returns the list of tracker IP addresses or the empty list if the
           torrent is trackerless.

           This extracts the tracker ip addresses from the urls in the
           announce or announce list."""
        df = ThreadedDeferred(wrap_task, self._get_tracker_ips, daemon=True)
        return df

    def _get_tracker_ips(self):
        if hasattr(self, "_tracker_ips"):
            # cache result.
            return self._tracker_ips

        if self.announce is not None:
            urls = [self.announce]
        elif self.announce_list is not None:
            # list of lists.
            urls = []
            for ulst in self.announce_list:
                urls.extend(ulst)
        else:
            # trackerless
            assert self.is_trackerless
            return []

        tracker_ports = [urlparse.urlparse(url)[1] for url in urls]
        trackers = [tp.split(':')[0] for tp in tracker_ports]
        self._tracker_ips = []
        for t in trackers:
            try:
                ip_list = socket.gethostbyname_ex(t)[2]
                self._tracker_ips.extend(ip_list)
            except socket.gaierror:
                global_logger.error(
                    _("Cannot find tracker with name %s") % t)
        return self._tracker_ips
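
For reference, the STOP / MAYBE / RESUME decision made by check_for_resume can be sketched independently of the class: compare the file sizes the metainfo promises against the sizes actually found on disk. The snippet below is a minimal, self-contained illustration of that comparison, not part of convertedmetainfo.py; the function name classify_resume and the plain-dict inputs (relative path -> size in bytes) are hypothetical and chosen only for the example.

STOP, MAYBE, RESUME = -1, 0, 1

def classify_resume(metainfo_files, disk_files):
    # metainfo_files and disk_files both map relative path -> size in bytes
    if not disk_files:
        # nothing on disk at all: treated as a mismatch, as in the original
        return STOP
    for p, size in disk_files.items():
        if p not in metainfo_files:
            # a file on disk that the torrent does not describe
            return STOP
        if size > metainfo_files[p]:
            # a file on disk larger than the torrent says it should be
            return STOP
    if len(metainfo_files) > len(disk_files):
        # files named in the torrent are missing from disk: maybe a partial resume
        return MAYBE
    if any(disk_files[p] < metainfo_files[p] for p in disk_files):
        # every file is present but at least one is short: maybe a partial resume
        return MAYBE
    # identical file set with identical sizes
    return RESUME

# Example: one of two files is only partly downloaded, so the answer is MAYBE.
meta = {'a.txt': 100, 'b.txt': 200}
disk = {'a.txt': 100, 'b.txt': 120}
assert classify_resume(meta, disk) == MAYBE

The comparison is deliberately asymmetric: a file that is larger than expected, or not described by the torrent at all, is a hard mismatch, while a file that is merely shorter than expected is exactly what an interrupted download looks like, so it only downgrades the answer to MAYBE.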