Mercurial > babel > old > mirror
annotate babel/messages/jslexer.py @ 342:603192024857
Added some newlines to extract and jslexer to stay consistent with the rest of the source code.
author | aronacher |
---|---|
date | Sat, 14 Jun 2008 19:00:35 +0000 |
parents | 672b6b8e945d |
children | 6dae6a9e1096 |
rev | line source |
---|---|
341 | 1 # -*- coding: utf-8 -*- |
2 # | |
3 # Copyright (C) 2008 Edgewall Software | |
4 # All rights reserved. | |
5 # | |
6 # This software is licensed as described in the file COPYING, which | |
7 # you should have received as part of this distribution. The terms | |
8 # are also available at http://babel.edgewall.org/wiki/License. | |
9 # | |
10 # This software consists of voluntary contributions made by many | |
11 # individuals. For the exact contribution history, see the revision | |
12 # history and logs, available at http://babel.edgewall.org/log/. | |
13 | |
14 """A simple JavaScript 1.5 lexer which is used for the JavaScript | |
15 extractor. | |
16 """ | |
17 | |
18 import re | |
19 from operator import itemgetter | |
20 | |
21 | |
operators = [
    '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
    '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
    '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
    '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.'
]
# Longest operators first so the alternation in the 'operator' rule
# matches e.g. '>>>=' before '>'.  ``key=len`` replaces the removed
# Python 2 ``cmp``-based comparator; the sort is stable, so equal-length
# operators keep their original relative order.
operators.sort(key=len, reverse=True)

# single-character control escapes recognized inside string literals
escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}

# Lexer rules, tried in order.  A token type of ``None`` means the match
# is consumed but discarded (whitespace, HTML comment openers).  Inline
# flags are placed at the start of each pattern as Python 3.11+ requires.
rules = [
    (None, re.compile(r'(?u)\s+')),
    (None, re.compile(r'<!--.*')),
    ('linecomment', re.compile(r'//.*')),
    ('multilinecomment', re.compile(r'(?us)/\*.*?\*/')),
    ('name', re.compile(r'(?u)(\$+\w*|[^\W\d]\w*)')),
    ('number', re.compile(r'''(?x)(
        (?:0|[1-9]\d*)
        (\.\d+)?
        ([eE][-+]?\d+)? |
        (0x[a-fA-F0-9]+)
    )''')),
    ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
    ('string', re.compile(r'''(?xs)(
        '(?:[^'\\]*(?:\\.[^'\\]*)*)' |
        "(?:[^"\\]*(?:\\.[^"\\]*)*)"
    )'''))
]

# a slash optionally followed by '=' (division / in-place division)
division_re = re.compile(r'/=?')
# a regular expression literal with optional trailing flag letters
regex_re = re.compile(r'(?s)/.+?/[a-zA-Z]*')
# the three common line-ending conventions
line_re = re.compile(r'(\r\n|\n|\r)')
# a backslash directly followed by a line ending (line continuation)
line_join_re = re.compile(r'\\' + line_re.pattern)
# up to four hex digits of a \uXXXX escape
uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
56 | |
57 | |
class TokenError(ValueError):
    """Raised when the tokenizer encounters input that cannot be turned
    into a valid token.
    """
60 | |
342
603192024857
added some newlines to extract and jslexer to stay consistent with the rest of the sourcecode.
aronacher
parents:
341
diff
changeset
|
61 |
class Token(tuple):
    """An immutable ``(type, value, lineno)`` triple as produced by
    `tokenize`, with named read-only accessors for each field.
    """
    __slots__ = ()

    def __new__(cls, type, value, lineno):
        # tokens are plain tuples under the hood; build one directly
        return tuple.__new__(cls, (type, value, lineno))

    # read-only views onto the three tuple slots
    type = property(itemgetter(0))
    value = property(itemgetter(1))
    lineno = property(itemgetter(2))
72 | |
342
603192024857
added some newlines to extract and jslexer to stay consistent with the rest of the sourcecode.
aronacher
parents:
341
diff
changeset
|
73 |
def indicates_division(token):
    """Decide whether the token just processed may legally be followed
    by a division operator rather than a regular expression literal.
    """
    if token.type != 'operator':
        # identifiers and literals produce a value, so a '/' divides
        return token.type in ('name', 'number', 'string', 'regexp')
    # only closing brackets and postfix ++/-- leave a value behind
    return token.value in (')', ']', '}', '++', '--')
81 | |
342
603192024857
added some newlines to extract and jslexer to stay consistent with the rest of the sourcecode.
aronacher
parents:
341
diff
changeset
|
82 |
def unquote_string(string):
    """Unquote a string with JavaScript rules.  The string has to start with
    string delimiters (``'`` or ``"``.)

    :param string: the complete string literal including its delimiters
    :return: the unescaped contents as a unicode string
    """
    assert string and string[0] == string[-1] and string[0] in '"\'', \
        'string provided is not properly delimited'
    # strip the delimiters; the sub keeps the line ending itself while
    # removing the escaping backslash of a line continuation
    string = line_join_re.sub('\\1', string[1:-1])
    result = []
    add = result.append
    pos = 0

    while 1:
        # scan for the next escape
        escape_pos = string.find('\\', pos)
        if escape_pos < 0:
            break
        # everything up to the backslash is copied through verbatim
        add(string[pos:escape_pos])

        # check which character is escaped
        next_char = string[escape_pos + 1]
        if next_char in escapes:
            add(escapes[next_char])

        # unicode escapes. try to consume up to four characters of
        # hexadecimal characters and try to interpret them as unicode
        # character point. If there is no such character point, put
        # all the consumed characters into the string.
        elif next_char in 'uU':
            escaped = uni_escape_re.match(string, escape_pos + 2)
            if escaped is not None:
                escaped_value = escaped.group()
                if len(escaped_value) == 4:
                    try:
                        add(unichr(int(escaped_value, 16)))
                    except ValueError:
                        pass
                    else:
                        # full four-digit escape consumed successfully
                        pos = escape_pos + 6
                        continue
                # NOTE(review): on this fallback the leading backslash is
                # dropped -- only the escape letter and the consumed hex
                # digits are kept; presumably intentional, confirm.
                add(next_char + escaped_value)
                pos = escaped.end()
                continue
            else:
                # \u with no hex digits at all: keep the bare letter
                add(next_char)

        # bogus escape. Just remove the backslash.
        else:
            add(next_char)
        # skip past the backslash and the single escaped character
        pos = escape_pos + 2

    # copy any trailing run with no further escapes
    if pos < len(string):
        add(string[pos:])

    return u''.join(result)
139 | |
342
603192024857
added some newlines to extract and jslexer to stay consistent with the rest of the sourcecode.
aronacher
parents:
341
diff
changeset
|
140 |
def tokenize(source):
    """Tokenize the given JavaScript *source* string.

    :return: generator of `Token` objects
    """
    may_divide = False
    pos = 0
    lineno = 1
    length = len(source)

    while pos < length:
        token_type = match = None
        # try each of the ordinary lexer rules in order
        for rule_type, pattern in rules:
            found = pattern.match(source, pos)
            if found is not None:
                token_type, match = rule_type, found
                break

        if match is None:
            # No ordinary rule matched.  Depending on the last significant
            # token (tracked via `may_divide` / `indicates_division`) the
            # slash here starts either a division operator or a regular
            # expression literal.
            if may_divide:
                token_type = 'operator'
                match = division_re.match(source, pos)
            else:
                token_type = 'regexp'
                match = regex_re.match(source, pos)
            if match is None:
                raise TokenError('invalid syntax around line %d' % lineno)

        text = match.group()
        if token_type is not None:
            token = Token(token_type, text, lineno)
            may_divide = indicates_division(token)
            yield token
        # count newlines inside the matched text to keep lineno accurate
        lineno += len(line_re.findall(text))
        pos = match.end()