# HG changeset patch
# User cmlenz
# Date 1258025774 0
# Node ID 1e2be9fb3348d6fe22d4cf04486dc1dd285793d8
# Parent 9598353ed630e95172132a64b268792339b3b491
Add a couple of fallback imports for Python 3.0.

diff --git a/genshi/core.py b/genshi/core.py
--- a/genshi/core.py
+++ b/genshi/core.py
@@ -14,9 +14,9 @@
 """Core classes for markup processing."""

 try:
+    reduce # builtin in Python < 3
+except NameError:
     from functools import reduce
-except ImportError:
-    pass # builtin in Python <= 2.5
 from itertools import chain
 import operator

diff --git a/genshi/filters/html.py b/genshi/filters/html.py
--- a/genshi/filters/html.py
+++ b/genshi/filters/html.py
@@ -13,6 +13,10 @@
 """Implementation of a number of stream filters."""

+try:
+    any
+except NameError:
+    from genshi.util import any
 import re

 from genshi.core import Attrs, QName, stripentities
@@ -100,7 +104,7 @@
                     checked = declval in [unicode(v) for v in value]
                 else:
-                    checked = bool([v for v in value if v])
+                    checked = any(value)
             else:
                 if declval:
                     checked = declval == unicode(value)
@@ -414,7 +418,7 @@
         """
         decls = []
         text = self._strip_css_comments(self._replace_unicode_escapes(text))
-        for decl in [d for d in text.split(';') if d]:
+        for decl in text.split(';'):
             decl = decl.strip()
             if not decl:
                 continue
diff --git a/genshi/filters/i18n.py b/genshi/filters/i18n.py
--- a/genshi/filters/i18n.py
+++ b/genshi/filters/i18n.py
@@ -18,6 +18,10 @@
 :note: Directives support added since version 0.6
 """

+try:
+    any
+except NameError:
+    from genshi.util import any
 from gettext import NullTranslations
 import os
 import re
@@ -657,9 +661,10 @@
                 # If this is an i18n directive, no need to translate text
                 # nodes here
-                is_i18n_directive = [b for b in
-                                     [isinstance(d, ExtractableI18NDirective)
-                                      for d in directives] if b]
+                is_i18n_directive = any([
+                    isinstance(d, ExtractableI18NDirective)
+                    for d in directives
+                ])
                 substream = list(self(substream, ctxt,
                                       search_text=not is_i18n_directive))
                 yield kind, (directives, substream), pos
diff --git a/genshi/input.py b/genshi/input.py
--- a/genshi/input.py
+++ b/genshi/input.py
@@ -16,10 +16,18 @@
 """

 from itertools import chain
+try:
+    import htmlentitydefs as entities
+    import HTMLParser as html
+except ImportError:
+    from html import entities
+    from html import parser as html
+try:
+    from StringIO import StringIO
+    BytesIO = StringIO
+except ImportError:
+    from io import BytesIO, StringIO
 from xml.parsers import expat
-import HTMLParser as html
-import htmlentitydefs
-from StringIO import StringIO

 from genshi.core import Attrs, QName, Stream, stripentities
 from genshi.core import START, END, XML_DECL, DOCTYPE, TEXT, START_NS, END_NS, \
@@ -88,7 +96,7 @@
     """

     _entitydefs = ['<!ENTITY %s "&#%d;">' % (name, value) for name, value in
-                   htmlentitydefs.name2codepoint.items()]
+                   entities.name2codepoint.items()]
     _external_dtd = '\n'.join(_entitydefs)

     def __init__(self, source, filename=None, encoding=None):
@@ -169,7 +177,7 @@

     def _build_foreign(self, context, base, sysid, pubid):
         parser = self.expat.ExternalEntityParserCreate(context)
-        parser.ParseFile(StringIO(self._external_dtd))
+        parser.ParseFile(BytesIO(self._external_dtd))
         return 1

     def _enqueue(self, kind, data=None, pos=None):
@@ -237,7 +245,7 @@
         if text.startswith('&'):
             # deal with undefined entities
             try:
-                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
+                text = unichr(entities.name2codepoint[text[1:-1]])
                 self._enqueue(TEXT, text)
             except KeyError:
                 filename, lineno, offset = self._getpos()
@@ -267,7 +275,7 @@
     :return: the parsed XML event stream
     :raises ParseError: if the XML text is not well-formed
     """
-    return Stream(list(XMLParser(StringIO(text))))
+    return Stream(list(XMLParser(BytesIO(text))))


 class HTMLParser(html.HTMLParser, object):
@@ -387,7 +395,7 @@

     def handle_entityref(self, name):
         try:
-            text = unichr(htmlentitydefs.name2codepoint[name])
+            text = unichr(entities.name2codepoint[name])
         except KeyError:
             text = '&%s;' % name
         self._enqueue(TEXT, text)
@@ -421,7 +429,7 @@
     :raises ParseError: if the HTML text is not well-formed, and error recovery fails
     """
-    return Stream(list(HTMLParser(StringIO(text), encoding=encoding)))
+    return Stream(list(HTMLParser(BytesIO(text), encoding=encoding)))


 def _coalesce(stream):
     """Coalesces adjacent TEXT events into a single event."""
diff --git a/genshi/path.py b/genshi/path.py
--- a/genshi/path.py
+++ b/genshi/path.py
@@ -40,9 +40,9 @@
 from collections import deque
 try:
+    reduce # builtin in Python < 3
+except NameError:
     from functools import reduce
-except ImportError:
-    pass # builtin in Python <= 2.5
 from math import ceil, floor
 import operator
 import re
diff --git a/genshi/util.py b/genshi/util.py
--- a/genshi/util.py
+++ b/genshi/util.py
@@ -13,7 +13,10 @@
 """Various utility classes and functions."""

-import htmlentitydefs
+try:
+    import htmlentitydefs as entities
+except ImportError:
+    from html import entities
 import re

 __docformat__ = 'restructuredtext en'
@@ -160,8 +163,7 @@


 def plaintext(text, keeplinebreaks=True):
-    """Returns the text as a `unicode` string with all entities and tags
-    removed.
+    """Return the text with all entities and tags removed.

     >>> plaintext('<b>1 &lt; 2</b>')
     u'1 < 2'
@@ -217,7 +219,7 @@
             if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
                 return '&%s;' % ref
             try:
-                return unichr(htmlentitydefs.name2codepoint[ref])
+                return unichr(entities.name2codepoint[ref])
             except KeyError:
                 if keepxmlentities:
                     return '&%s;' % ref
@@ -246,3 +248,22 @@
     :return: the text with tags removed
     """
     return _STRIPTAGS_RE.sub('', text)
+
+
+# Compatibility fallback implementations for older Python versions
+
+try:
+    all = all
+    any = any
+except NameError:
+    def any(S):
+        for x in S:
+            if x:
+                return True
+        return False
+
+    def all(S):
+        for x in S:
+            if not x:
+                return False
+        return True
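
The hunks above all rely on the same feature-probing idiom: reference a name (or
attempt a Python 2 style import) and fall back only when it raises NameError or
ImportError. The following minimal standalone sketch, separate from the patch
itself, shows how that idiom leaves one consistent set of names usable on both
Python 2 and Python 3:

    # Illustrative sketch of the fallback idiom used in this changeset;
    # not part of the patch itself.
    try:
        reduce                          # builtin on Python 2, removed in Python 3
    except NameError:
        from functools import reduce    # Python 3: only available via functools

    try:
        from StringIO import StringIO   # Python 2 module (byte strings)
        BytesIO = StringIO
    except ImportError:
        from io import BytesIO, StringIO  # Python 3: separate bytes/text buffers

    # Either branch defines the same names, so the code below needs no
    # version checks.
    print(reduce(lambda a, b: a + b, [1, 2, 3]))   # prints 6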