# HG changeset patch
# User aronacher
# Date 1213470035 0
# Node ID f7269b43236da841323d47883a2792d8e22e7035
# Parent  6104e7967a12fe933f9631d5a350a640b9b04a1f
added some newlines to extract and jslexer to stay consistent with the rest of the source code.

diff --git a/babel/messages/extract.py b/babel/messages/extract.py
--- a/babel/messages/extract.py
+++ b/babel/messages/extract.py
@@ -66,6 +66,7 @@
         return line
     comments[:] = map(_strip, comments)
 
+
 def extract_from_dir(dirname=os.getcwd(), method_map=DEFAULT_MAPPING,
                      options_map=None, keywords=DEFAULT_KEYWORDS,
                      comment_tags=(), callback=None, strip_comment_tags=False):
@@ -171,6 +172,7 @@
                         yield filename, lineno, message, comments
                     break
 
+
 def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
                       comment_tags=(), options=None, strip_comment_tags=False):
     """Extract messages from a specific file.
@@ -200,6 +202,7 @@
     finally:
         fileobj.close()
 
+
 def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
             options=None, strip_comment_tags=False):
     """Extract messages from the given file-like object using the specified
@@ -319,12 +322,14 @@
 
             yield lineno, messages, comments
 
+
 def extract_nothing(fileobj, keywords, comment_tags, options):
     """Pseudo extractor that does not actually extract anything, but simply
     returns an empty list.
     """
     return []
 
+
 def extract_python(fileobj, keywords, comment_tags, options):
     """Extract messages from Python source code.
 
@@ -429,6 +434,7 @@
         elif tok == NAME and value in keywords:
             funcname = value
 
+
 def extract_javascript(fileobj, keywords, comment_tags, options):
     """Extract messages from JavaScript source code.
 
diff --git a/babel/messages/jslexer.py b/babel/messages/jslexer.py
--- a/babel/messages/jslexer.py
+++ b/babel/messages/jslexer.py
@@ -58,6 +58,7 @@
 class TokenError(ValueError):
     """Raised if the tokenizer stumbled upon invalid tokens."""
 
+
 class Token(tuple):
     """Represents a token as returned by `tokenize`."""
     __slots__ = ()
@@ -69,6 +70,7 @@
     value = property(itemgetter(1))
     lineno = property(itemgetter(2))
 
+
 def indicates_division(token):
     """A helper function that helps the tokenizer to decide if the current
     token may be followed by a division operator.
@@ -77,6 +79,7 @@
         return token.value in (')', ']', '}', '++', '--')
     return token.type in ('name', 'number', 'string', 'regexp')
 
+
 def unquote_string(string):
     """Unquote a string with JavaScript rules. The string has to start with
     string delimiters (``'`` or ``"``.)
@@ -134,6 +137,7 @@
 
     return u''.join(result)
 
+
 def tokenize(source):
     """Tokenize a JavaScript source.
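
For context, a minimal usage sketch of the extract() function touched above. It is not part of the changeset; it assumes the Python 2 / Babel 0.9-era API of this revision, and the sample source string is invented purely for illustration.

    from StringIO import StringIO
    from babel.messages.extract import extract

    # Invented sample module to scan for translatable strings.
    source = StringIO(
        "def run(argv):\n"
        "    print _('Hello, world!')\n"
    )

    # At this revision extract() yields (lineno, messages, comments)
    # tuples, matching the ``yield lineno, messages, comments`` line in
    # the hunk around line 322 above.
    for message in extract('python', source):
        print message          # e.g. (2, u'Hello, world!', [])

The jslexer changes are equally cosmetic; its tokenize() generator yields the Token objects whose type, value and lineno properties appear in the hunks above.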