
Source Code for Module translate.tools.poterminology

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""reads a set of .po or .pot files to produce a pootle-terminology.pot

See: http://translate.sourceforge.net/wiki/toolkit/poterminology for examples
and usage instructions
"""

import os
import re
import sys
import logging

from translate.lang import factory as lang_factory
from translate.misc import optrecurse
from translate.storage import po
from translate.storage import factory
from translate.misc import file_discovery

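# Example invocation (a sketch; the option names are defined in main() below,
# and the installed command name may differ):
#
#   poterminology --inputs-needed=2 -o pootle-terminology.pot po/
#
# This scans the PO files under po/ and writes a terminology file containing
# the terms that appear in at least two of the input files.
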
def create_termunit(term, unit, targets, locations, sourcenotes, transnotes, filecounts):
    termunit = po.pounit(term)
    if unit is not None:
        termunit.merge(unit, overwrite=False, comments=False)
    if len(targets.keys()) > 1:
        txt = '; '.join(["%s {%s}" % (target, ', '.join(files))
                         for target, files in targets.iteritems()])
        if termunit.target.find('};') < 0:
            termunit.target = txt
            termunit.markfuzzy()
        else:
            # a multi-target annotation is already present; keep it as-is
            termunit.addnote(txt, "translator")
    for location in locations:
        termunit.addlocation(location)
    for sourcenote in sourcenotes:
        termunit.addnote(sourcenote, "developer")
    for transnote in transnotes:
        termunit.addnote(transnote, "translator")
    for filename, count in filecounts.iteritems():
        termunit.addnote("(poterminology) %s (%d)\n" % (filename, count), 'translator')
    return termunit

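# A minimal sketch (using only the API defined below) of driving the extractor
# from Python rather than from the command line; the file paths are
# hypothetical:
#
#   extractor = TerminologyExtractor(sourcelanguage="en")
#   for path in ("po/browser.po", "po/mail.po"):
#       store = factory.getobject(open(path))
#       extractor.processunits(store.units, path)
#   terms = extractor.extract_terms()
#   termitems = extractor.filter_terms(terms)
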
class TerminologyExtractor(object):

    def __init__(self, foldtitle=True, ignorecase=False, accelchars="", termlength=3,
                 sourcelanguage="en", invert=False, stopfile=None):
        self.foldtitle = foldtitle
        self.ignorecase = ignorecase
        self.accelchars = accelchars
        self.termlength = termlength

        self.sourcelanguage = sourcelanguage
        self.invert = invert

        self.stopwords = {}
        self.stoprelist = []
        self.stopfoldtitle = True
        self.stopignorecase = False

        if stopfile is None:
            try:
                stopfile = file_discovery.get_abs_data_filename('stoplist-%s' % self.sourcelanguage)
            except Exception:
                # no bundled stoplist for this language; proceed without one
                pass
        self.stopfile = stopfile
        if self.stopfile:
            self.parse_stopword_file()

        # handles c-format and python-format
        self.formatpat = re.compile(r"%(?:\([^)]+\)|[0-9]+\$)?[-+#0]*[0-9.*]*(?:[hlLzjt][hl])?[EFGXc-ginoprsux]")
        # handles XML/HTML elements (<foo>text</foo> => text)
        self.xmlelpat = re.compile(r"<(?:![[-]|[/?]?[A-Za-z_:])[^>]*>")
        # handles XML/HTML entities (&#32; &#x20; &amp; &my_entity;)
        # note: '-' is placed last in the character class so it is literal
        self.xmlentpat = re.compile(r"&(?:#(?:[0-9]+|x[0-9a-f]+)|[a-z_:][\w.:-]*);",
                                    flags=re.UNICODE | re.IGNORECASE)

        self.units = 0
        self.glossary = {}

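    # Stopword file format, as interpreted by the parser below -- the first
    # character of each line determines its meaning:
    #   #      comment (empty lines are skipped too)
    #   !C     preserve case when matching stopwords
    #   !F     fold "Title Case" stopwords to lowercase (the default)
    #   !I     ignore case of stopwords entirely
    #   /re    words matching the regex re are excluded as terms and phrases
    #   +word  word is a normal term (overrides any /re exclusion)
    #   :word  word is a term, but is a "skip" word inside phrases (it cannot
    #          start or end a phrase and doesn't count toward phrase length)
    #   <word  word is a term, but interrupts phrase accumulation
    #   =word  word is not a term by itself, but may appear in phrases
    #   >word  word is not a term by itself and is a "skip" word in phrases
    #   @word  word is neither a term nor allowed in phrases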
    def parse_stopword_file(self):

        actions = {'+': frozenset(), ':': frozenset(['skip']),
                   '<': frozenset(['phrase']), '=': frozenset(['word']),
                   '>': frozenset(['word', 'skip']),
                   '@': frozenset(['word', 'phrase'])}

        stopfile = open(self.stopfile, "r")
        line = 0
        try:
            for stopline in stopfile:
                line += 1
                stoptype = stopline[0]
                if stoptype == '#' or stoptype == "\n":
                    continue
                elif stoptype == '!':
                    if stopline[1] == 'C':
                        self.stopfoldtitle = False
                        self.stopignorecase = False
                    elif stopline[1] == 'F':
                        self.stopfoldtitle = True
                        self.stopignorecase = False
                    elif stopline[1] == 'I':
                        self.stopignorecase = True
                    else:
                        logging.warning("%s line %d - bad case mapping directive",
                                        self.stopfile, line)
                elif stoptype == '/':
                    self.stoprelist.append(re.compile(stopline[1:-1] + '$'))
                else:
                    self.stopwords[stopline[1:-1]] = actions[stoptype]
        except KeyError, character:
            logging.warning("%s line %d - bad stopword entry starts with '%s'",
                            self.stopfile, line, str(character))
            logging.warning("%s line %d - all lines after error ignored",
                            self.stopfile, line + 1)
        stopfile.close()

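    # Worked example for clean() below, assuming accelchars="&" (a
    # hypothetical setting):
    #   clean("&Save %s as <b>HTML</b>")
    # removes the "&" accelerator, then substitutes a space for the c-format
    # directive and for each XML element, yielding "Save   as  HTML" (the
    # runs of spaces left by substitution are not collapsed).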
    def clean(self, string):
        """returns the cleaned string that contains the text to be matched"""
        for accelerator in self.accelchars:
            string = string.replace(accelerator, "")
        string = self.formatpat.sub(" ", string)
        string = self.xmlelpat.sub(" ", string)
        string = self.xmlentpat.sub(" ", string)
        string = string.strip()
        return string

    def stopmap(self, word):
        """return case-mapped stopword for input word"""
        if self.stopignorecase or (self.stopfoldtitle and word.istitle()):
            word = word.lower()
        return word

    def stopword(self, word, defaultset=frozenset()):
        """return stoplist frozenset for input word"""
        return self.stopwords.get(self.stopmap(word), defaultset)

    def addphrases(self, words, skips, translation, partials=True):
        """adds (sub)phrases with non-skipwords and more than one word"""
        if (len(words) > skips + 1 and
            'skip' not in self.stopword(words[0]) and
            'skip' not in self.stopword(words[-1])):
            self.glossary.setdefault(' '.join(words), []).append(translation)
        if partials:
            part = list(words)
            while len(part) > 2:
                if 'skip' in self.stopword(part.pop()):
                    skips -= 1
                if (len(part) > skips + 1 and
                    'skip' not in self.stopword(part[0]) and
                    'skip' not in self.stopword(part[-1])):
                    self.glossary.setdefault(' '.join(part), []).append(translation)

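    # processunits() populates self.glossary, mapping each candidate term (a
    # single word or a phrase) to a list of (source, target, unit, filename)
    # tuples, one per occurrence; extract_terms() thresholds and scores these.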
    def processunits(self, units, fullinputpath):
        sourcelang = lang_factory.getlanguage(self.sourcelanguage)
        rematchignore = frozenset(('word', 'phrase'))
        defaultignore = frozenset()
        for unit in units:
            self.units += 1
            if unit.isheader():
                continue
            if unit.hasplural():
                continue
            if not self.invert:
                source = self.clean(unit.source)
                target = self.clean(unit.target)
            else:
                target = self.clean(unit.source)
                source = self.clean(unit.target)
            if len(source) <= 1:
                continue
            for sentence in sourcelang.sentences(source):
                words = []
                skips = 0
                for word in sourcelang.words(sentence):
                    stword = self.stopmap(word)
                    if self.ignorecase or (self.foldtitle and word.istitle()):
                        word = word.lower()
                    ignore = defaultignore
                    if stword in self.stopwords:
                        ignore = self.stopwords[stword]
                    else:
                        for stopre in self.stoprelist:
                            if stopre.match(stword) is not None:
                                ignore = rematchignore
                                break
                    translation = (source, target, unit, fullinputpath)
                    if 'word' not in ignore:
                        # reduce plurals
                        root = word
                        if len(word) > 3 and word[-1] == 's' and word[0:-1] in self.glossary:
                            root = word[0:-1]
                        elif len(root) > 2 and root + 's' in self.glossary:
                            self.glossary[root] = self.glossary.pop(root + 's')
                        self.glossary.setdefault(root, []).append(translation)
                    if self.termlength > 1:
                        if 'phrase' in ignore:
                            # add trailing phrases in previous words
                            while len(words) > 2:
                                if 'skip' in self.stopword(words.pop(0)):
                                    skips -= 1
                                self.addphrases(words, skips, translation)
                            words = []
                            skips = 0
                        else:
                            words.append(word)
                            if 'skip' in ignore:
                                skips += 1
                            if len(words) > self.termlength + skips:
                                while len(words) > self.termlength + skips:
                                    if 'skip' in self.stopword(words.pop(0)):
                                        skips -= 1
                                self.addphrases(words, skips, translation)
                            else:
                                self.addphrases(words, skips, translation, partials=False)
                if self.termlength > 1:
                    # add trailing phrases in sentence after reaching end
                    while self.termlength > 1 and len(words) > 2:
                        if 'skip' in self.stopword(words.pop(0)):
                            skips -= 1
                        self.addphrases(words, skips, translation)

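    # extract_terms() keeps a term only if it occurs in enough input files
    # (inputmin) and enough distinct source locations (locmin), and in enough
    # different messages (fullmsgmin for whole-message terms, substrmin for
    # substring-only terms); each surviving term is scored as
    # 10 * number-of-files + number-of-distinct-source-texts.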
    def extract_terms(self, create_termunit=create_termunit, inputmin=1, fullmsgmin=1, substrmin=2, locmin=2):
        terms = {}
        locre = re.compile(r":[0-9]+$")
        print >> sys.stderr, ("%d terms from %d units" %
                              (len(self.glossary), self.units))
        for term, translations in self.glossary.iteritems():
            if len(translations) <= 1:
                continue
            filecounts = {}
            sources = set()
            locations = set()
            sourcenotes = set()
            transnotes = set()
            targets = {}
            fullmsg = False
            bestunit = None
            for source, target, unit, filename in translations:
                sources.add(source)
                filecounts[filename] = filecounts.setdefault(filename, 0) + 1
                #FIXME: why reclean source and target?!
                if term.lower() == self.clean(unit.source).lower():
                    fullmsg = True
                    target = self.clean(unit.target)
                    if self.ignorecase or (self.foldtitle and target.istitle()):
                        target = target.lower()
                    unit.target = target
                    if target != "":
                        targets.setdefault(target, []).append(filename)
                    if term.lower() == unit.source.strip().lower():
                        sourcenotes.add(unit.getnotes("source code"))
                        transnotes.add(unit.getnotes("translator"))
                    unit.source = term
                    bestunit = unit
                    #FIXME: figure out why we did a merge to begin with
                    #termunit.merge(unit, overwrite=False, comments=False)
                for loc in unit.getlocations():
                    locations.add(locre.sub("", loc))

            numsources = len(sources)
            numfiles = len(filecounts)
            numlocs = len(locations)
            if numfiles < inputmin or numlocs < locmin:
                continue
            if fullmsg:
                if numsources < fullmsgmin:
                    continue
            elif numsources < substrmin:
                continue

            locmax = 2 * locmin
            if numlocs > locmax:
                locations = list(locations)[0:locmax]
                locations.append("(poterminology) %d more locations"
                                 % (numlocs - locmax))

            termunit = create_termunit(term, bestunit, targets, locations, sourcenotes, transnotes, filecounts)
            terms[term] = ((10 * numfiles) + numsources, termunit)
        return terms

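    # Subphrase reduction example: if "new folder" ends up with exactly the
    # same score as "create new folder" (hypothetical phrases), filter_terms()
    # below assumes the shorter phrase only ever occurs inside the longer one
    # and deletes it from the output.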
    def filter_terms(self, terms, sortorders=["frequency", "dictionary", "length"]):
        """reduce subphrases from extracted terms"""
        # reduce subphrase
        termlist = terms.keys()
        print >> sys.stderr, "%d terms after thresholding" % len(termlist)
        termlist.sort(lambda x, y: cmp(len(x), len(y)))
        for term in termlist:
            words = term.split()
            if len(words) <= 2:
                continue
            while len(words) > 2:
                words.pop()
                if terms[term][0] == terms.get(' '.join(words), [0])[0]:
                    del terms[' '.join(words)]
            words = term.split()
            while len(words) > 2:
                words.pop(0)
                if terms[term][0] == terms.get(' '.join(words), [0])[0]:
                    del terms[' '.join(words)]
        print >> sys.stderr, "%d terms after subphrase reduction" % len(terms.keys())
        termitems = terms.values()
        # copy the list: pop() below would otherwise mutate the caller's list
        # (and the shared default argument)
        sortorders = list(sortorders)
        while len(sortorders) > 0:
            order = sortorders.pop()
            if order == "frequency":
                termitems.sort(lambda x, y: cmp(y[0], x[0]))
            elif order == "dictionary":
                termitems.sort(lambda x, y: cmp(x[1].source.lower(), y[1].source.lower()))
            elif order == "length":
                termitems.sort(lambda x, y: cmp(len(x[1].source), len(y[1].source)))
            else:
                logging.warning("unknown sort order %s", order)
        return termitems

class TerminologyOptionParser(optrecurse.RecursiveOptionParser):
    """a specialized Option Parser for the terminology tool..."""

    def parse_args(self, args=None, values=None):
        """parses the command line options, handling implicit input/output args"""
        (options, args) = optrecurse.optparse.OptionParser.parse_args(self, args, values)
        # some intelligence as to what reasonable people might give on the command line
        if args and not options.input:
            if not options.output and not options.update and len(args) > 1:
                options.input = args[:-1]
                args = args[-1:]
            else:
                options.input = args
                args = []
        # don't overwrite last freestanding argument file, to avoid accidents
        # due to shell wildcard expansion
        if args and not options.output and not options.update:
            if os.path.lexists(args[-1]) and not os.path.isdir(args[-1]):
                self.error("To overwrite %s, specify it with -o/--output or -u/--update" % (args[-1]))
            options.output = args[-1]
            args = args[:-1]
        if options.output and options.update:
            self.error("You cannot use both -u/--update and -o/--output")
        if args:
            self.error("You have used an invalid combination of -i/--input, -o/--output, -u/--update and freestanding args")
        if not options.input:
            self.error("No input file or directory was specified")
        if isinstance(options.input, list) and len(options.input) == 1:
            options.input = options.input[0]
            if options.inputmin is None:
                options.inputmin = 1
        elif not isinstance(options.input, list) and not os.path.isdir(options.input):
            if options.inputmin is None:
                options.inputmin = 1
        elif options.inputmin is None:
            options.inputmin = 2
        if options.update:
            options.output = options.update
            if isinstance(options.input, list):
                options.input.append(options.update)
            elif options.input:
                options.input = [options.input, options.update]
            else:
                options.input = options.update
        if not options.output:
            options.output = "pootle-terminology.pot"
        return (options, args)

    def set_usage(self, usage=None):
        """sets the usage string - if usage not given, uses getusagestring for each option"""
        if usage is None:
            self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list]) + \
                         "\n input directory is searched for PO files, terminology PO file is output file"
        else:
            super(TerminologyOptionParser, self).set_usage(usage)

    def run(self):
        """parses the arguments, and runs recursiveprocess with the resulting options"""
        self.files = 0
        (options, args) = self.parse_args()
        options.inputformats = self.inputformats
        options.outputoptions = self.outputoptions
        self.usepsyco(options)
        self.extractor = TerminologyExtractor(foldtitle=options.foldtitle, ignorecase=options.ignorecase,
                                              accelchars=options.accelchars, termlength=options.termlength,
                                              sourcelanguage=options.sourcelanguage,
                                              invert=options.invert, stopfile=options.stopfile)
        self.recursiveprocess(options)

    def recursiveprocess(self, options):
        """recurse through directories and process files"""
        if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
            if isinstance(options.input, list):
                inputfiles = self.recurseinputfilelist(options)
            else:
                inputfiles = self.recurseinputfiles(options)
        else:
            if options.input:
                inputfiles = [os.path.basename(options.input)]
                options.input = os.path.dirname(options.input)
            else:
                inputfiles = [options.input]
        if os.path.isdir(options.output):
            options.output = os.path.join(options.output, "pootle-terminology.pot")

        self.initprogressbar(inputfiles, options)
        for inputpath in inputfiles:
            self.files += 1
            fullinputpath = self.getfullinputpath(options, inputpath)
            success = True
            try:
                self.processfile(None, options, fullinputpath)
            except Exception, error:
                if isinstance(error, KeyboardInterrupt):
                    raise
                self.warning("Error processing: input %s" % (fullinputpath), options, sys.exc_info())
                success = False
            self.reportprogress(inputpath, success)
        del self.progressbar
        self.outputterminology(options)

    def processfile(self, fileprocessor, options, fullinputpath):
        """process an individual file"""
        inputfile = self.openinputfile(options, fullinputpath)
        inputfile = factory.getobject(inputfile)
        self.extractor.processunits(inputfile.units, fullinputpath)

    def outputterminology(self, options):
        """saves the generated terminology glossary"""
        termfile = po.pofile()
        print >> sys.stderr, ("scanned %d files" % self.files)
        terms = self.extractor.extract_terms(inputmin=options.inputmin, fullmsgmin=options.fullmsgmin,
                                             substrmin=options.substrmin, locmin=options.locmin)
        termitems = self.extractor.filter_terms(terms, sortorders=options.sortorders)
        for count, unit in termitems:
            termfile.units.append(unit)
        outputfile = open(options.output, "w")
        outputfile.write(str(termfile))
        outputfile.close()

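# optparse callbacks for the mutually exclusive case-handling options
# registered in main() below: -F/--fold-titlecase and -C/--preserve-case
# (the third option, -I/--ignore-case, is a plain store_true flag).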
def fold_case_option(option, opt_str, value, parser):
    parser.values.ignorecase = False
    parser.values.foldtitle = True


def preserve_case_option(option, opt_str, value, parser):
    parser.values.ignorecase = parser.values.foldtitle = False

def main():
    formats = {"po": ("po", None), "pot": ("pot", None), None: ("po", None)}
    parser = TerminologyOptionParser(formats)

    parser.add_option("-u", "--update", type="string", dest="update",
                      metavar="UPDATEFILE", help="update terminology in UPDATEFILE")

    parser.add_option("-S", "--stopword-list", type="string", metavar="STOPFILE", dest="stopfile",
                      help="read stopword (term exclusion) list from STOPFILE (default %s)" %
                           file_discovery.get_abs_data_filename('stoplist-en'))

    parser.set_defaults(foldtitle=True, ignorecase=False)
    parser.add_option("-F", "--fold-titlecase", callback=fold_case_option,
                      action="callback", help="fold \"Title Case\" to lowercase (default)")
    parser.add_option("-C", "--preserve-case", callback=preserve_case_option,
                      action="callback", help="preserve all uppercase/lowercase")
    parser.add_option("-I", "--ignore-case", dest="ignorecase",
                      action="store_true", help="make all terms lowercase")

    parser.add_option("", "--accelerator", dest="accelchars", default="",
                      metavar="ACCELERATORS", help="ignore the given accelerator characters when matching")

    # note: optparse does not type-convert default values, so numeric
    # defaults must be ints, not strings
    parser.add_option("-t", "--term-words", type="int", dest="termlength", default=3,
                      help="generate terms of up to LENGTH words (default 3)", metavar="LENGTH")
    parser.add_option("", "--inputs-needed", type="int", dest="inputmin",
                      help="omit terms appearing in fewer than MIN input files (default 2, or 1 if only one input file)", metavar="MIN")
    parser.add_option("", "--fullmsg-needed", type="int", dest="fullmsgmin", default=1,
                      help="omit full message terms appearing in fewer than MIN different messages (default 1)", metavar="MIN")
    parser.add_option("", "--substr-needed", type="int", dest="substrmin", default=2,
                      help="omit substring-only terms appearing in fewer than MIN different messages (default 2)", metavar="MIN")
    parser.add_option("", "--locs-needed", type="int", dest="locmin", default=2,
                      help="omit terms appearing in fewer than MIN different original source files (default 2)", metavar="MIN")

    sortorders_default = ["frequency", "dictionary", "length"]
    parser.add_option("", "--sort", dest="sortorders", action="append",
                      type="choice", choices=sortorders_default, metavar="ORDER", default=sortorders_default,
                      help="output sort order(s): %s (default is all orders in the above priority)" % ', '.join(sortorders_default))

    parser.add_option("", "--source-language", dest="sourcelanguage", default="en",
                      help="the source language code (default 'en')", metavar="LANG")
    parser.add_option("-v", "--invert", dest="invert",
                      action="store_true", default=False, help="invert the source and target languages for terminology")
    parser.set_usage()
    parser.description = __doc__
    parser.run()


if __name__ == '__main__':
    main()