# feedparser.py
# Provenance (web-scrape header, translated): from "Harvestman — latest
# version" · Python code · 1,630 lines total · page 1 of 5.
# NOTE(review): this file is a scrape of Universal Feed Parser 4.1.
#!/usr/bin/env python"""Universal feed parserHandles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feedsVisit http://feedparser.org/ for the latest versionVisit http://feedparser.org/docs/ for the latest documentationRequired: Python 2.1 or laterRecommended: Python 2.3 or laterRecommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>"""__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.Redistribution and use in source and binary forms, with or without modification,are permitted provided that the following conditions are met:* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THEIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSEARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BELIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, ORCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OFSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESSINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER INCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THEPOSSIBILITY OF SUCH DAMAGE."""__author__ = "Mark Pilgrim <http://diveintomark.org/>"__contributors__ = ["Jason Diamond <http://injektilo.org/>", "John Beimler <http://john.beimler.org/>", "Fazal Majid <http://www.majid.info/mylos/weblog/>", "Aaron Swartz <http://aaronsw.com/>", "Kevin Marks <http://epeus.blogspot.com/>"]_debug = 0# HTTP "User-Agent" header to send to servers when downloading feeds.# If you are embedding feedparser in a larger application, you should# change this to your application name and URL.USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__# HTTP "Accept" header to send to servers when downloading feeds. If you don't# want to send an Accept header, set this to None.ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"# List of preferred XML parsers, by SAX driver name. These will be tried first,# but if they're not installed, Python will keep searching through its own list# of pre-installed parsers until it finds one that supports everything we need.PREFERRED_XML_PARSERS = ["drv_libxml2"]# If you want feedparser to automatically run HTML markup through HTML Tidy, set# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html># or utidylib <http://utidylib.berlios.de/>.TIDY_MARKUP = 0# List of Python interfaces for HTML Tidy, in order of preference. 
Only useful# if TIDY_MARKUP = 1PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]# ---------- required modules (should come with any Python distribution) ----------import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2try: from cStringIO import StringIO as _StringIOexcept: from StringIO import StringIO as _StringIO# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------# gzip is included with most Python distributions, but may not be available if you compiled your owntry: import gzipexcept: gzip = Nonetry: import zlibexcept: zlib = None# If a real XML parser is available, feedparser will attempt to use it. feedparser has# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.try: import xml.sax xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers from xml.sax.saxutils import escape as _xmlescape _XML_AVAILABLE = 1except: _XML_AVAILABLE = 0 def _xmlescape(data): data = data.replace('&', '&') data = data.replace('>', '>') data = data.replace('<', '<') return data# base64 support for Atom feeds that contain embedded binary datatry: import base64, binasciiexcept: base64 = binascii = None# cjkcodecs and iconv_codec provide support for more character encodings.# Both are available from http://cjkpython.i18n.org/try: import cjkcodecs.aliasesexcept: passtry: import iconv_codecexcept: pass# chardet library auto-detects character encodings# Download from http://chardet.feedparser.org/try: import chardet if _debug: import chardet.constants chardet.constants._debug = 1except: chardet = None# ---------- don't touch these ----------class ThingsNobodyCaresAboutButMe(Exception): passclass CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): passclass 
CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): passclass NonXMLContentType(ThingsNobodyCaresAboutButMe): passclass UndeclaredNamespace(Exception): passsgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')sgmllib.special = re.compile('<!')sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')SUPPORTED_VERSIONS = {'': 'unknown', 'rss090': 'RSS 0.90', 'rss091n': 'RSS 0.91 (Netscape)', 'rss091u': 'RSS 0.91 (Userland)', 'rss092': 'RSS 0.92', 'rss093': 'RSS 0.93', 'rss094': 'RSS 0.94', 'rss20': 'RSS 2.0', 'rss10': 'RSS 1.0', 'rss': 'RSS (unknown version)', 'atom01': 'Atom 0.1', 'atom02': 'Atom 0.2', 'atom03': 'Atom 0.3', 'atom10': 'Atom 1.0', 'atom': 'Atom (unknown version)', 'cdf': 'CDF', 'hotrss': 'Hot RSS' }try: UserDict = dictexcept NameError: # Python 2.1 does not have dict from UserDict import UserDict def dict(aList): rc = {} for k, v in aList: rc[k] = v return rcclass FeedParserDict(UserDict): keymap = {'channel': 'feed', 'items': 'entries', 'guid': 'id', 'date': 'updated', 'date_parsed': 'updated_parsed', 'description': ['subtitle', 'summary'], 'url': ['href'], 'modified': 'updated', 'modified_parsed': 'updated_parsed', 'issued': 'published', 'issued_parsed': 'published_parsed', 'copyright': 'rights', 'copyright_detail': 'rights_detail', 'tagline': 'subtitle', 'tagline_detail': 'subtitle_detail'} def __getitem__(self, key): if key == 'category': return UserDict.__getitem__(self, 'tags')[0]['term'] if key == 'categories': return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')] realkey = self.keymap.get(key, key) if type(realkey) == types.ListType: for k in realkey: if UserDict.has_key(self, k): return UserDict.__getitem__(self, k) if UserDict.has_key(self, key): return UserDict.__getitem__(self, key) return UserDict.__getitem__(self, realkey) def __setitem__(self, key, value): for k in self.keymap.keys(): if key == k: key = self.keymap[k] if type(key) == types.ListType: key = key[0] return UserDict.__setitem__(self, 
key, value) def get(self, key, default=None): if self.has_key(key): return self[key] else: return default def setdefault(self, key, value): if not self.has_key(key): self[key] = value return self[key] def has_key(self, key): try: return hasattr(self, key) or UserDict.has_key(self, key) except AttributeError: return False def __getattr__(self, key): try: return self.__dict__[key] except KeyError: pass try: assert not key.startswith('_') return self.__getitem__(key) except: raise AttributeError, "object has no attribute '%s'" % key def __setattr__(self, key, value): if key.startswith('_') or key == 'data': self.__dict__[key] = value else: return self.__setitem__(key, value) def __contains__(self, key): return self.has_key(key)def zopeCompatibilityHack(): global FeedParserDict del FeedParserDict def FeedParserDict(aDict=None): rc = {} if aDict: rc.update(aDict) return rc_ebcdic_to_ascii_map = Nonedef _ebcdic_to_ascii(s): global _ebcdic_to_ascii_map if not _ebcdic_to_ascii_map: emap = ( 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 ) import string _ebcdic_to_ascii_map = string.maketrans( \ 
''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(_ebcdic_to_ascii_map)_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) return urlparse.urljoin(base, uri)class _FeedParserMixin: namespaces = {'': '', 'http://backend.userland.com/rss': '', 'http://blogs.law.harvard.edu/tech/rss': '', 'http://purl.org/rss/1.0/': '', 'http://my.netscape.com/rdf/simple/0.9/': '', 'http://example.com/newformat#': '', 'http://example.com/necho': '', 'http://purl.org/echo/': '', 'uri/of/echo/namespace#': '', 'http://purl.org/pie/': '', 'http://purl.org/atom/ns#': '', 'http://www.w3.org/2005/Atom': '', 'http://purl.org/rss/1.0/modules/rss091#': '', 'http://webns.net/mvcb/': 'admin', 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', 'http://media.tangent.org/rss/1.0/': 'audio', 'http://backend.userland.com/blogChannelModule': 'blogChannel', 'http://web.resource.org/cc/': 'cc', 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', 'http://purl.org/rss/1.0/modules/company': 'co', 'http://purl.org/rss/1.0/modules/content/': 'content', 'http://my.theinfo.org/changed/1.0/rss/': 'cp', 'http://purl.org/dc/elements/1.1/': 'dc', 'http://purl.org/dc/terms/': 'dcterms', 'http://purl.org/rss/1.0/modules/email/': 'email', 'http://purl.org/rss/1.0/modules/event/': 'ev', 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', 'http://freshmeat.net/rss/fm/': 'fm', 'http://xmlns.com/foaf/0.1/': 'foaf', 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', 'http://postneo.com/icbm/': 'icbm', 'http://purl.org/rss/1.0/modules/image/': 'image', 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', 'http://purl.org/rss/1.0/modules/link/': 'l', 'http://search.yahoo.com/mrss': 'media',
# ---- web-viewer UI text (scrape artifact, not part of the source), translated ----
# Keyboard shortcut help: Copy code Ctrl+C · Search code Ctrl+F ·
# Full-screen mode F11 · Increase font size Ctrl+= · Decrease font size Ctrl+- ·
# Show shortcuts ?