Mercurial > babel > old > mirror
annotate 0.9.x/scripts/import_cldr.py @ 557:3c590f6f5dfa stable tip
Merged revisions 607 via svnmerge from
http://svn.edgewall.org/repos/babel/trunk
........
r607 | pjenvey | 2011-04-24 21:41:23 -0700 (Sun, 24 Apr 2011) | 3 lines
keywords only support space separated values, not comma separated
thanks agronholm
........
author | pjenvey |
---|---|
date | Mon, 25 Apr 2011 04:44:13 +0000 |
parents | f29b2dadd9fc |
children |
rev | line source |
---|---|
263 | 1 #!/usr/bin/env python |
2 # -*- coding: utf-8 -*- | |
3 # | |
4 # Copyright (C) 2007 Edgewall Software | |
5 # All rights reserved. | |
6 # | |
7 # This software is licensed as described in the file COPYING, which | |
8 # you should have received as part of this distribution. The terms | |
9 # are also available at http://babel.edgewall.org/wiki/License. | |
10 # | |
11 # This software consists of voluntary contributions made by many | |
12 # individuals. For the exact contribution history, see the revision | |
13 # history and logs, available at http://babel.edgewall.org/log/. | |
14 | |
15 import copy | |
16 from optparse import OptionParser | |
17 import os | |
18 import pickle | |
381 | 19 import re |
263 | 20 import sys |
21 try: | |
22 from xml.etree.ElementTree import parse | |
23 except ImportError: | |
24 from elementtree.ElementTree import parse | |
25 | |
26 # Make sure we're using Babel source, and not some previously installed version | |
27 sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) | |
28 | |
29 from babel import dates, numbers | |
381 | 30 from babel.localedata import Alias |
511 | 31 from babel.util import set |
263 | 32 |
#: CLDR weekday abbreviations mapped to Babel's numeric weekday indices
#: (0 = Monday ... 6 = Sunday).
weekdays = dict(zip(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'],
                    range(7)))
35 | |
# ``any`` only became a builtin in Python 2.5; provide a fallback for
# older interpreters.  Unlike the previous fallback (which returned the
# *list* produced by ``filter(None, ...)`` and was therefore only
# truthiness-equivalent to the builtin), this one short-circuits and
# returns a real bool, matching the builtin's contract exactly.
try:
    any
except NameError:
    def any(iterable):
        """Return True if at least one element of *iterable* is truthy."""
        for item in iterable:
            if item:
                return True
        return False
41 | |
381 | 42 |
263 | 43 def _text(elem): |
44 buf = [elem.text or ''] | |
45 for child in elem: | |
46 buf.append(_text(child)) | |
47 buf.append(elem.tail or '') | |
48 return u''.join(filter(None, buf)).strip() | |
49 | |
381 | 50 |
51 NAME_RE = re.compile(r"^\w+$") | |
52 TYPE_ATTR_RE = re.compile(r"^\w+\[@type='(.*?)'\]$") | |
53 | |
54 NAME_MAP = { | |
55 'dateFormats': 'date_formats', | |
56 'dateTimeFormats': 'datetime_formats', | |
57 'eraAbbr': 'abbreviated', | |
58 'eraNames': 'wide', | |
59 'eraNarrow': 'narrow', | |
60 'timeFormats': 'time_formats' | |
61 } | |
62 | |
63 def _translate_alias(ctxt, path): | |
64 parts = path.split('/') | |
65 keys = ctxt[:] | |
66 for part in parts: | |
67 if part == '..': | |
68 keys.pop() | |
69 else: | |
70 match = TYPE_ATTR_RE.match(part) | |
71 if match: | |
72 keys.append(match.group(1)) | |
73 else: | |
74 assert NAME_RE.match(part) | |
75 keys.append(NAME_MAP.get(part, part)) | |
76 return keys | |
77 | |
78 | |
263 | 79 def main(): |
80 parser = OptionParser(usage='%prog path/to/cldr') | |
81 options, args = parser.parse_args() | |
82 if len(args) != 1: | |
83 parser.error('incorrect number of arguments') | |
84 | |
85 srcdir = args[0] | |
86 destdir = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), | |
87 '..', 'babel') | |
88 | |
89 sup = parse(os.path.join(srcdir, 'supplemental', 'supplementalData.xml')) | |
90 | |
348
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
91 # Import global data from the supplemental files |
263 | 92 global_data = {} |
93 | |
94 territory_zones = global_data.setdefault('territory_zones', {}) | |
95 zone_aliases = global_data.setdefault('zone_aliases', {}) | |
96 zone_territories = global_data.setdefault('zone_territories', {}) | |
471 | 97 for elem in sup.findall('.//timezoneData/zoneFormatting/zoneItem'): |
263 | 98 tzid = elem.attrib['type'] |
99 territory_zones.setdefault(elem.attrib['territory'], []).append(tzid) | |
100 zone_territories[tzid] = elem.attrib['territory'] | |
101 if 'aliases' in elem.attrib: | |
102 for alias in elem.attrib['aliases'].split(): | |
103 zone_aliases[alias] = tzid | |
104 | |
348
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
105 # Import Metazone mapping |
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
106 meta_zones = global_data.setdefault('meta_zones', {}) |
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
107 tzsup = parse(os.path.join(srcdir, 'supplemental', 'metazoneInfo.xml')) |
471 | 108 for elem in tzsup.findall('.//timezone'): |
348
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
109 for child in elem.findall('usesMetazone'): |
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
110 if 'to' not in child.attrib: # FIXME: support old mappings |
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
111 meta_zones[elem.attrib['type']] = child.attrib['mzone'] |
05975a0e7021
Merged revisions [358:360], [364:370], [373:378], [380:382] from [source:trunk].
cmlenz
parents:
263
diff
changeset
|
112 |
263 | 113 outfile = open(os.path.join(destdir, 'global.dat'), 'wb') |
114 try: | |
115 pickle.dump(global_data, outfile, 2) | |
116 finally: | |
117 outfile.close() | |
118 | |
119 # build a territory containment mapping for inheritance | |
120 regions = {} | |
471 | 121 for elem in sup.findall('.//territoryContainment/group'): |
263 | 122 regions[elem.attrib['type']] = elem.attrib['contains'].split() |
123 | |
124 # Resolve territory containment | |
125 territory_containment = {} | |
126 region_items = regions.items() | |
127 region_items.sort() | |
128 for group, territory_list in region_items: | |
129 for territory in territory_list: | |
130 containers = territory_containment.setdefault(territory, set([])) | |
131 if group in territory_containment: | |
132 containers |= territory_containment[group] | |
133 containers.add(group) | |
134 | |
135 filenames = os.listdir(os.path.join(srcdir, 'main')) | |
136 filenames.remove('root.xml') | |
137 filenames.sort(lambda a,b: len(a)-len(b)) | |
138 filenames.insert(0, 'root.xml') | |
139 | |
140 for filename in filenames: | |
141 stem, ext = os.path.splitext(filename) | |
142 if ext != '.xml': | |
143 continue | |
144 | |
391
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
145 print>>sys.stderr, 'Processing input file %r' % filename |
263 | 146 tree = parse(os.path.join(srcdir, 'main', filename)) |
147 data = {} | |
148 | |
149 language = None | |
471 | 150 elem = tree.find('.//identity/language') |
263 | 151 if elem is not None: |
152 language = elem.attrib['type'] | |
153 print>>sys.stderr, ' Language: %r' % language | |
154 | |
155 territory = None | |
471 | 156 elem = tree.find('.//identity/territory') |
263 | 157 if elem is not None: |
158 territory = elem.attrib['type'] | |
159 else: | |
160 territory = '001' # world | |
161 print>>sys.stderr, ' Territory: %r' % territory | |
162 regions = territory_containment.get(territory, []) | |
163 print>>sys.stderr, ' Regions: %r' % regions | |
164 | |
165 # <localeDisplayNames> | |
166 | |
167 territories = data.setdefault('territories', {}) | |
471 | 168 for elem in tree.findall('.//territories/territory'): |
381 | 169 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
170 and elem.attrib['type'] in territories: | |
263 | 171 continue |
172 territories[elem.attrib['type']] = _text(elem) | |
173 | |
174 languages = data.setdefault('languages', {}) | |
471 | 175 for elem in tree.findall('.//languages/language'): |
381 | 176 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
177 and elem.attrib['type'] in languages: | |
263 | 178 continue |
179 languages[elem.attrib['type']] = _text(elem) | |
180 | |
181 variants = data.setdefault('variants', {}) | |
471 | 182 for elem in tree.findall('.//variants/variant'): |
381 | 183 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
184 and elem.attrib['type'] in variants: | |
263 | 185 continue |
186 variants[elem.attrib['type']] = _text(elem) | |
187 | |
188 scripts = data.setdefault('scripts', {}) | |
471 | 189 for elem in tree.findall('.//scripts/script'): |
381 | 190 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
191 and elem.attrib['type'] in scripts: | |
263 | 192 continue |
193 scripts[elem.attrib['type']] = _text(elem) | |
194 | |
195 # <dates> | |
196 | |
197 week_data = data.setdefault('week_data', {}) | |
471 | 198 supelem = sup.find('.//weekData') |
263 | 199 |
200 for elem in supelem.findall('minDays'): | |
201 territories = elem.attrib['territories'].split() | |
202 if territory in territories or any([r in territories for r in regions]): | |
203 week_data['min_days'] = int(elem.attrib['count']) | |
204 | |
205 for elem in supelem.findall('firstDay'): | |
206 territories = elem.attrib['territories'].split() | |
207 if territory in territories or any([r in territories for r in regions]): | |
208 week_data['first_day'] = weekdays[elem.attrib['day']] | |
209 | |
210 for elem in supelem.findall('weekendStart'): | |
211 territories = elem.attrib['territories'].split() | |
212 if territory in territories or any([r in territories for r in regions]): | |
213 week_data['weekend_start'] = weekdays[elem.attrib['day']] | |
214 | |
215 for elem in supelem.findall('weekendEnd'): | |
216 territories = elem.attrib['territories'].split() | |
217 if territory in territories or any([r in territories for r in regions]): | |
218 week_data['weekend_end'] = weekdays[elem.attrib['day']] | |
219 | |
220 zone_formats = data.setdefault('zone_formats', {}) | |
471 | 221 for elem in tree.findall('.//timeZoneNames/gmtFormat'): |
381 | 222 if 'draft' not in elem.attrib and 'alt' not in elem.attrib: |
263 | 223 zone_formats['gmt'] = unicode(elem.text).replace('{0}', '%s') |
224 break | |
471 | 225 for elem in tree.findall('.//timeZoneNames/regionFormat'): |
381 | 226 if 'draft' not in elem.attrib and 'alt' not in elem.attrib: |
263 | 227 zone_formats['region'] = unicode(elem.text).replace('{0}', '%s') |
228 break | |
471 | 229 for elem in tree.findall('.//timeZoneNames/fallbackFormat'): |
381 | 230 if 'draft' not in elem.attrib and 'alt' not in elem.attrib: |
263 | 231 zone_formats['fallback'] = unicode(elem.text) \ |
232 .replace('{0}', '%(0)s').replace('{1}', '%(1)s') | |
233 break | |
234 | |
235 time_zones = data.setdefault('time_zones', {}) | |
471 | 236 for elem in tree.findall('.//timeZoneNames/zone'): |
263 | 237 info = {} |
238 city = elem.findtext('exemplarCity') | |
239 if city: | |
240 info['city'] = unicode(city) | |
241 for child in elem.findall('long/*'): | |
242 info.setdefault('long', {})[child.tag] = unicode(child.text) | |
243 for child in elem.findall('short/*'): | |
244 info.setdefault('short', {})[child.tag] = unicode(child.text) | |
245 time_zones[elem.attrib['type']] = info | |
246 | |
247 meta_zones = data.setdefault('meta_zones', {}) | |
471 | 248 for elem in tree.findall('.//timeZoneNames/metazone'): |
263 | 249 info = {} |
250 city = elem.findtext('exemplarCity') | |
251 if city: | |
252 info['city'] = unicode(city) | |
253 for child in elem.findall('long/*'): | |
254 info.setdefault('long', {})[child.tag] = unicode(child.text) | |
255 for child in elem.findall('short/*'): | |
256 info.setdefault('short', {})[child.tag] = unicode(child.text) | |
257 info['common'] = elem.findtext('commonlyUsed') == 'true' | |
258 meta_zones[elem.attrib['type']] = info | |
259 | |
471 | 260 for calendar in tree.findall('.//calendars/calendar'): |
263 | 261 if calendar.attrib['type'] != 'gregorian': |
262 # TODO: support other calendar types | |
263 continue | |
264 | |
265 months = data.setdefault('months', {}) | |
266 for ctxt in calendar.findall('months/monthContext'): | |
381 | 267 ctxt_type = ctxt.attrib['type'] |
268 ctxts = months.setdefault(ctxt_type, {}) | |
263 | 269 for width in ctxt.findall('monthWidth'): |
381 | 270 width_type = width.attrib['type'] |
271 widths = ctxts.setdefault(width_type, {}) | |
272 for elem in width.getiterator(): | |
273 if elem.tag == 'month': | |
274 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ | |
275 and int(elem.attrib['type']) in widths: | |
276 continue | |
277 widths[int(elem.attrib.get('type'))] = unicode(elem.text) | |
278 elif elem.tag == 'alias': | |
279 ctxts[width_type] = Alias( | |
280 _translate_alias(['months', ctxt_type, width_type], | |
281 elem.attrib['path']) | |
282 ) | |
263 | 283 |
284 days = data.setdefault('days', {}) | |
285 for ctxt in calendar.findall('days/dayContext'): | |
381 | 286 ctxt_type = ctxt.attrib['type'] |
287 ctxts = days.setdefault(ctxt_type, {}) | |
263 | 288 for width in ctxt.findall('dayWidth'): |
381 | 289 width_type = width.attrib['type'] |
290 widths = ctxts.setdefault(width_type, {}) | |
291 for elem in width.getiterator(): | |
292 if elem.tag == 'day': | |
293 dtype = weekdays[elem.attrib['type']] | |
294 if ('draft' in elem.attrib or 'alt' not in elem.attrib) \ | |
295 and dtype in widths: | |
296 continue | |
297 widths[dtype] = unicode(elem.text) | |
298 elif elem.tag == 'alias': | |
299 ctxts[width_type] = Alias( | |
300 _translate_alias(['days', ctxt_type, width_type], | |
301 elem.attrib['path']) | |
302 ) | |
263 | 303 |
304 quarters = data.setdefault('quarters', {}) | |
305 for ctxt in calendar.findall('quarters/quarterContext'): | |
381 | 306 ctxt_type = ctxt.attrib['type'] |
263 | 307 ctxts = quarters.setdefault(ctxt.attrib['type'], {}) |
308 for width in ctxt.findall('quarterWidth'): | |
381 | 309 width_type = width.attrib['type'] |
310 widths = ctxts.setdefault(width_type, {}) | |
311 for elem in width.getiterator(): | |
312 if elem.tag == 'quarter': | |
313 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ | |
314 and int(elem.attrib['type']) in widths: | |
315 continue | |
316 widths[int(elem.attrib['type'])] = unicode(elem.text) | |
317 elif elem.tag == 'alias': | |
318 ctxts[width_type] = Alias( | |
319 _translate_alias(['quarters', ctxt_type, width_type], | |
320 elem.attrib['path']) | |
321 ) | |
263 | 322 |
323 eras = data.setdefault('eras', {}) | |
324 for width in calendar.findall('eras/*'): | |
381 | 325 width_type = NAME_MAP[width.tag] |
326 widths = eras.setdefault(width_type, {}) | |
327 for elem in width.getiterator(): | |
328 if elem.tag == 'era': | |
329 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ | |
330 and int(elem.attrib['type']) in widths: | |
331 continue | |
332 widths[int(elem.attrib.get('type'))] = unicode(elem.text) | |
333 elif elem.tag == 'alias': | |
334 eras[width_type] = Alias( | |
335 _translate_alias(['eras', width_type], | |
336 elem.attrib['path']) | |
337 ) | |
263 | 338 |
339 # AM/PM | |
340 periods = data.setdefault('periods', {}) | |
341 for elem in calendar.findall('am'): | |
381 | 342 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
343 and elem.tag in periods: | |
263 | 344 continue |
345 periods[elem.tag] = unicode(elem.text) | |
346 for elem in calendar.findall('pm'): | |
381 | 347 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
348 and elem.tag in periods: | |
263 | 349 continue |
350 periods[elem.tag] = unicode(elem.text) | |
351 | |
352 date_formats = data.setdefault('date_formats', {}) | |
381 | 353 for format in calendar.findall('dateFormats'): |
354 for elem in format.getiterator(): | |
355 if elem.tag == 'dateFormatLength': | |
356 if 'draft' in elem.attrib and \ | |
357 elem.attrib.get('type') in date_formats: | |
358 continue | |
359 try: | |
360 date_formats[elem.attrib.get('type')] = \ | |
361 dates.parse_pattern(unicode(elem.findtext('dateFormat/pattern'))) | |
362 except ValueError, e: | |
363 print>>sys.stderr, 'ERROR: %s' % e | |
364 elif elem.tag == 'alias': | |
365 date_formats = Alias(_translate_alias( | |
366 ['date_formats'], elem.attrib['path']) | |
367 ) | |
263 | 368 |
369 time_formats = data.setdefault('time_formats', {}) | |
381 | 370 for format in calendar.findall('timeFormats'): |
371 for elem in format.getiterator(): | |
372 if elem.tag == 'timeFormatLength': | |
373 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ | |
374 and elem.attrib.get('type') in time_formats: | |
375 continue | |
376 try: | |
377 time_formats[elem.attrib.get('type')] = \ | |
378 dates.parse_pattern(unicode(elem.findtext('timeFormat/pattern'))) | |
379 except ValueError, e: | |
380 print>>sys.stderr, 'ERROR: %s' % e | |
381 elif elem.tag == 'alias': | |
382 time_formats = Alias(_translate_alias( | |
383 ['time_formats'], elem.attrib['path']) | |
384 ) | |
263 | 385 |
386 datetime_formats = data.setdefault('datetime_formats', {}) | |
381 | 387 for format in calendar.findall('dateTimeFormats'): |
388 for elem in format.getiterator(): | |
389 if elem.tag == 'dateTimeFormatLength': | |
390 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ | |
391 and elem.attrib.get('type') in datetime_formats: | |
392 continue | |
393 try: | |
394 datetime_formats[elem.attrib.get('type')] = \ | |
395 unicode(elem.findtext('dateTimeFormat/pattern')) | |
396 except ValueError, e: | |
397 print>>sys.stderr, 'ERROR: %s' % e | |
398 elif elem.tag == 'alias': | |
399 datetime_formats = Alias(_translate_alias( | |
400 ['datetime_formats'], elem.attrib['path']) | |
401 ) | |
263 | 402 |
403 # <numbers> | |
404 | |
405 number_symbols = data.setdefault('number_symbols', {}) | |
471 | 406 for elem in tree.findall('.//numbers/symbols/*'): |
512
f29b2dadd9fc
merge r478 from trunk: Fix the import script to skip alt or draft items in the numbers/symbols subtree of a locale (ticket #217)
fschwarz
parents:
511
diff
changeset
|
407 if ('draft' in elem.attrib or 'alt' in elem.attrib): |
f29b2dadd9fc
merge r478 from trunk: Fix the import script to skip alt or draft items in the numbers/symbols subtree of a locale (ticket #217)
fschwarz
parents:
511
diff
changeset
|
408 continue |
263 | 409 number_symbols[elem.tag] = unicode(elem.text) |
410 | |
411 decimal_formats = data.setdefault('decimal_formats', {}) | |
471 | 412 for elem in tree.findall('.//decimalFormats/decimalFormatLength'): |
381 | 413 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
414 and elem.attrib.get('type') in decimal_formats: | |
263 | 415 continue |
416 pattern = unicode(elem.findtext('decimalFormat/pattern')) | |
417 decimal_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern) | |
418 | |
419 scientific_formats = data.setdefault('scientific_formats', {}) | |
471 | 420 for elem in tree.findall('.//scientificFormats/scientificFormatLength'): |
381 | 421 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
422 and elem.attrib.get('type') in scientific_formats: | |
263 | 423 continue |
424 pattern = unicode(elem.findtext('scientificFormat/pattern')) | |
425 scientific_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern) | |
426 | |
427 currency_formats = data.setdefault('currency_formats', {}) | |
471 | 428 for elem in tree.findall('.//currencyFormats/currencyFormatLength'): |
381 | 429 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
430 and elem.attrib.get('type') in currency_formats: | |
263 | 431 continue |
432 pattern = unicode(elem.findtext('currencyFormat/pattern')) | |
433 currency_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern) | |
434 | |
435 percent_formats = data.setdefault('percent_formats', {}) | |
471 | 436 for elem in tree.findall('.//percentFormats/percentFormatLength'): |
381 | 437 if ('draft' in elem.attrib or 'alt' in elem.attrib) \ |
438 and elem.attrib.get('type') in percent_formats: | |
263 | 439 continue |
440 pattern = unicode(elem.findtext('percentFormat/pattern')) | |
441 percent_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern) | |
442 | |
443 currency_names = data.setdefault('currency_names', {}) | |
444 currency_symbols = data.setdefault('currency_symbols', {}) | |
471 | 445 for elem in tree.findall('.//currencies/currency'): |
391
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
446 code = elem.attrib['type'] |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
447 # TODO: support plural rules for currency name selection |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
448 for name in elem.findall('displayName'): |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
449 if ('draft' in name.attrib or 'count' in name.attrib) \ |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
450 and code in currency_names: |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
451 continue |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
452 currency_names[code] = unicode(name.text) |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
453 # TODO: support choice patterns for currency symbol selection |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
454 symbol = elem.find('symbol') |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
455 if symbol is not None and 'draft' not in symbol.attrib \ |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
456 and 'choice' not in symbol.attrib: |
e69a068990f0
Ported [424], [425], and [428] back to 0.9.x branch.
cmlenz
parents:
381
diff
changeset
|
457 currency_symbols[code] = unicode(symbol.text) |
263 | 458 |
459 outfile = open(os.path.join(destdir, 'localedata', stem + '.dat'), 'wb') | |
460 try: | |
461 pickle.dump(data, outfile, 2) | |
462 finally: | |
463 outfile.close() | |
464 | |
381 | 465 |
# Run the CLDR importer when this file is executed as a script.
if __name__ == '__main__':
    main()