
Source Code for Module s3.s3translate

# -*- coding: utf-8 -*-

""" Translation API

    @copyright: 2012-2019 (c) Sahana Software Foundation
    @license: MIT

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.
"""

import os
import parser
import token

from gluon import current
from gluon.languages import read_dict, write_dict
from gluon.storage import Storage

from s3fields import S3ReusableField

"""
    List of classes with description:

    TranslateAPI           : API class to retrieve strings and files by module

    TranslateGetFiles      : Class to traverse the eden directory and
                             categorize files based on module

    TranslateParseFiles    : Class to extract strings to translate from code files

    TranslateReadFiles     : Class to open a file, read its contents and build
                             a parse tree (for .py files) or use regex
                             (for html/js files) to obtain a list of strings
                             by calling methods from TranslateParseFiles

    Strings                : Class to manipulate strings and their files

    Pootle                 : Class to synchronise a Pootle server's translation
                             with the local one

    TranslateReportStatus  : Class to report the translated percentage of each
                             language file for each module. It also updates
                             these percentages as and when required
"""
# =============================================================================
class TranslateAPI(object):
    """
        API class for the Translation module to get
        files, modules and strings individually
    """

    core_modules = ("auth", "default", "errors", "appadmin")

    def __init__(self):

        self.grp = TranslateGetFiles()
        self.grp.group_files(current.request.folder)

    # ---------------------------------------------------------------------
    @staticmethod
    def get_langcodes():
        """ Return a list of language codes """

        lang_list = []
        langdir = os.path.join(current.request.folder, "languages")
        files = os.listdir(langdir)

        for f in files:
            lang_list.append(f[:-3])

        return lang_list

    # ---------------------------------------------------------------------
    def get_modules(self):
        """ Return a list of modules """

        return self.grp.modlist

    # ---------------------------------------------------------------------
    def get_strings_by_module(self, module):
        """ Return a list of strings corresponding to a module """

        grp = self.grp
        d = grp.d
        if module in d.keys():
            fileList = d[module]
        else:
            current.log.warning("Module '%s' doesn't exist!" % module)
            return []

        modlist = grp.modlist
        strings = []
        sappend = strings.append

        R = TranslateReadFiles()
        findstr = R.findstr

        for f in fileList:
            if f.endswith(".py") == True:
                tmpstr = findstr(f, "ALL", modlist)
            elif f.endswith(".html") == True or \
                 f.endswith(".js") == True:
                tmpstr = R.read_html_js(f)
            else:
                tmpstr = []
            for s in tmpstr:
                sappend(("%s:%s" % (f, str(s[0])), s[1]))

        # Handle "special" files separately
        fileList = d["special"]
        for f in fileList:
            if f.endswith(".py") == True:
                tmpstr = findstr(f, module, modlist)
                for s in tmpstr:
                    sappend(("%s:%s" % (f, str(s[0])), s[1]))

        return strings

    # ---------------------------------------------------------------------
    def get_strings_by_file(self, filename):
        """ Return a list of strings in a given file """

        if os.path.isfile(filename):
            filename = os.path.abspath(filename)
        else:
            current.log.warning("'%s' is not a valid file path!" % filename)
            return []

        R = TranslateReadFiles()
        strings = []
        sappend = strings.append
        tmpstr = []

        if filename.endswith(".py") == True:
            tmpstr = R.findstr(filename, "ALL", self.grp.modlist)
        elif filename.endswith(".html") == True or \
             filename.endswith(".js") == True:
            tmpstr = R.read_html_js(filename)
        else:
            current.log.warning("Please enter a '.py', '.js' or '.html' file path")
            return []

        for s in tmpstr:
            sappend(("%s:%s" % (filename, str(s[0])), s[1]))
        return strings

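# Illustrative sketch (not part of the original module): typical read-only use
# of TranslateAPI. Assumes a running web2py/Eden environment; the module name
# "inv" is an example.
def _example_translate_api():
    api = TranslateAPI()
    print(api.get_langcodes())    # available language files, e.g. ["de", "fr"]
    print(api.get_modules())      # module list derived from /controllers/
    for location, string in api.get_strings_by_module("inv"):
        # location is "path/to/file:linenumber"; string still carries quotes
        print(location, string)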
# =============================================================================
class TranslateGetFiles(object):
    """ Class to group files by modules """

    def __init__(self):
        """
            Set up a dictionary to hold files belonging to a particular
            module with the module name as the key. Files which contain
            strings belonging to more than one module are grouped under
            the "special" key.
        """

        # Initialize to an empty list for each module
        d = {}
        modlist = self.get_module_list(current.request.folder)
        for m in modlist:
            d[m] = []

        # List of files belonging to 'core' module
        d["core"] = []

        # 'special' files which contain strings from more than one module
        d["special"] = []

        self.d = d
        self.modlist = modlist

    # ---------------------------------------------------------------------
    @staticmethod
    def get_module_list(dir):
        """
            Returns a list of modules using files in /controllers/
            as point of reference
        """

        mod = []
        mappend = mod.append
        cont_dir = os.path.join(dir, "controllers")
        mod_files = os.listdir(cont_dir)

        for f in mod_files:
            if f[0] != ".":
                # Strip extension
                mappend(f[:-3])

        # Add Modules which aren't in controllers
        mod += ["support",
                "translate",
                ]

        return mod

    # ---------------------------------------------------------------------
    def group_files(self, currentDir, curmod="", vflag=0):
        """
            Recursive function to group Eden files into respective modules
        """

        path = os.path
        currentDir = path.abspath(currentDir)
        base_dir = path.basename(currentDir)

        if base_dir in (".git",
                        "docs",
                        "languages",
                        "private",
                        "templates", # Added separately
                        "tests",
                        "uploads",
                        ):
            # Skip
            return

        # If current directory is '/views', set vflag
        if base_dir == "views":
            vflag = 1

        d = self.d
        files = os.listdir(currentDir)

        for f in files:
            if f.startswith(".") or f.endswith(".pyc") or f in ("test.py", "tests.py"):
                continue

            curFile = path.join(currentDir, f)
            if path.isdir(curFile):
                # If the current directory is /views,
                # categorize files based on the directory name
                if vflag:
                    self.group_files(curFile, f, vflag)
                else:
                    self.group_files(curFile, curmod, vflag)

            else:
                # If in /appname/views, categorize by parent directory name
                if vflag:
                    base = curmod

                # Categorize file as "special" as it contains strings
                # belonging to various modules
                elif f in ("s3menus.py",
                           "s3cfg.py",
                           "000_config.py",
                           "config.py",
                           "menus.py"):
                    base = "special"
                else:
                    # Remove extension ('.py')
                    base = path.splitext(f)[0]

                    # If file has "s3" as prefix, remove "s3" to get module name
                    if "s3" in base:
                        base = base[2:]

                    # If file is inside /models and file name is
                    # of the form var_module.py, remove the "var_" prefix
                    #elif base_dir == "models" and "_" in base:
                    #    base = base.split("_")[1]

                # If base refers to a module, append to corresponding list
                if base in d.keys():
                    d[base].append(curFile)
                else:
                    # Append it to "core" files list
                    d["core"].append(curFile)

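# Illustrative sketch (not part of the original module): what the grouping
# produced by TranslateGetFiles looks like. Assumes a running web2py/Eden
# environment; the keys shown in the comment are examples.
def _example_group_files():
    grp = TranslateGetFiles()
    grp.group_files(current.request.folder)
    # grp.d maps module names to lists of file paths, e.g.
    #   {"inv": [".../controllers/inv.py", ".../views/inv/index.html", ...],
    #    "core": [...], "special": [".../models/000_config.py", ...]}
    for module, files in grp.d.items():
        print(module, len(files))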
# =============================================================================
class TranslateParseFiles(object):
    """
        Class to extract strings to translate from code files
    """

    def __init__(self):
        """ Initializes all object variables """

        self.cflag = 0       # To indicate if next element is a class
        self.fflag = 0       # To indicate if next element is a function
        self.sflag = 0       # To indicate 'T' has just been found
        self.tflag = 0       # To indicate we are currently inside T(...)
        self.mflag = 0       # To indicate we are currently inside M(...)
        self.bracket = 0     # Acts as a counter for parenthesis in T(...)
        self.outstr = ""     # Collects all the data inside T(...)
        self.class_name = "" # Stores the current class name
        self.func_name = ""  # Stores the current function name
        self.mod_name = ""   # Stores module that the string may belong to
        self.findent = -1    # Stores indentation level in menus.py

    # ---------------------------------------------------------------------
    def parseList(self, entry, tmpstr):
        """ Recursive function to extract strings from a parse tree """

        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                parseList = self.parseList
                for element in entry:
                    parseList(element, tmpstr)
            else:
                if token.tok_name[id] == "STRING":
                    tmpstr.append(value)

    # ---------------------------------------------------------------------
    def parseConfig(self, spmod, strings, entry, modlist):
        """ Function to extract strings from config.py / 000_config.py """

        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]

            # If the element is not a root node,
            # go deeper into the tree using dfs
            if isinstance(value, list):
                parseConfig = self.parseConfig
                for element in entry:
                    parseConfig(spmod, strings, element, modlist)
            else:
                if self.fflag == 1 and token.tok_name[id] == "NAME":
                    # Here, func_name stores the module_name of the form
                    # deployment.settings.module_name.variable
                    self.func_name = value
                    self.fflag = 0

                # Set flag to store the module name from
                # deployment_settings.module_name
                elif token.tok_name[id] == "NAME" and \
                     (value == "deployment_settings" or \
                      value == "settings"):
                    self.fflag = 1

                # Get module name from deployment_setting.modules list
                elif self.tflag == 0 and self.func_name == "modules" and \
                     token.tok_name[id] == "STRING":
                    if value[1:-1] in modlist:
                        self.mod_name = value[1:-1]

                # If 'T' is encountered, set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1

                # If sflag is set and '(' is found, set tflag
                elif self.sflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0

                # Check if inside 'T()'
                elif self.tflag == 1:
                    # If '(' is encountered, append it to outstr
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("

                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        # If it's not the last ')' of 'T()',
                        # append to outstr
                        if self.bracket > 0:
                            self.outstr += ")"

                        # If it's the last ')', add string to list
                        else:
                            if spmod == "core":
                                if self.func_name != "modules" and \
                                   self.func_name not in modlist:
                                    strings.append((entry[2], self.outstr))
                            elif (self.func_name == "modules" and \
                                  self.mod_name == spmod) or \
                                 (self.func_name == spmod):
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0

                    # If we are inside 'T()', append value to outstr
                    elif self.bracket > 0:
                        self.outstr += value

    # ---------------------------------------------------------------------
    def parseS3cfg(self, spmod, strings, entry, modlist):
        """ Function to extract the strings from s3cfg.py """

        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                parseS3cfg = self.parseS3cfg
                for element in entry:
                    parseS3cfg(spmod, strings, element, modlist)
            else:

                # If value is a function name, store it in func_name
                if self.fflag == 1:
                    self.func_name = value
                    self.fflag = 0

                # If value is 'def', set fflag to store func_name next
                elif token.tok_name[id] == "NAME" and value == "def":
                    self.fflag = 1

                # If 'T' is encountered, set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1

                elif self.sflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0

                elif self.tflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:
                            # If core module is requested
                            if spmod == "core":
                                # If extracted data doesn't belong
                                # to any other module, append to list
                                if "_" not in self.func_name or \
                                   self.func_name.split("_")[1] not in modlist:
                                    strings.append((entry[2], self.outstr))

                            # If 'module' in 'get_module_variable()'
                            # is the requested module, append to list
                            elif "_" in self.func_name and \
                                 self.func_name.split("_")[1] == spmod:
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    elif self.bracket > 0:
                        self.outstr += value

    # ---------------------------------------------------------------------
    def parseMenu(self, spmod, strings, entry, level):
        """ Function to extract the strings from menus.py """

        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                parseMenu = self.parseMenu
                for element in entry:
                    parseMenu(spmod, strings, element, level + 1)
            else:

                # If value is a class name, store it in class_name
                if self.cflag == 1:
                    self.class_name = value
                    self.cflag = 0

                # If value is 'class', set cflag to store class name next
                elif token.tok_name[id] == "NAME" and value == "class":
                    self.cflag = 1

                elif self.fflag == 1:
                    # Here func_name is used to store the function names
                    # which are in 'S3OptionsMenu' class
                    self.func_name = value
                    self.fflag = 0

                # If value is "def" and it's the first function in the
                # S3OptionsMenu class or its indentation level is equal
                # to the first function in 'S3OptionsMenu class', then
                # set fflag and store the indentation level in findent
                elif token.tok_name[id] == "NAME" and value == "def" and \
                     (self.findent == -1 or level == self.findent):
                    if self.class_name == "S3OptionsMenu":
                        self.findent = level
                        self.fflag = 1
                    else:
                        self.func_name = ""

                # If current element is 'T', set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1

                elif self.sflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0

                # If inside 'T()', extract the data accordingly
                elif self.tflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:

                            # If the requested module is 'core' and
                            # extracted data doesn't lie inside the
                            # S3OptionsMenu class, append it to list
                            if spmod == "core":
                                if self.func_name == "":
                                    strings.append((entry[2], self.outstr))

                            # If the function name (in S3OptionsMenu class)
                            # is equal to the module requested,
                            # then append it to list
                            elif self.func_name == spmod:
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    elif self.bracket > 0:
                        self.outstr += value

                else:
                    # Get strings inside 'M()'
                    # If value is 'M', set mflag
                    if token.tok_name[id] == "NAME" and value == "M":
                        self.mflag = 1

                    elif self.mflag == 1:

                        # If mflag is set and argument inside is a string,
                        # append it to list
                        if token.tok_name[id] == "STRING":
                            if spmod == "core":
                                if self.func_name == "":
                                    strings.append((entry[2], value))
                            elif self.func_name == spmod:
                                strings.append((entry[2], value))

                        # If current argument in 'M()' is of type arg = var
                        # or if ')' is found, unset mflag
                        elif token.tok_name[id] == "EQUAL" or \
                             token.tok_name[id] == "RPAR":
                            self.mflag = 0

    # ---------------------------------------------------------------------
    def parseAll(self, strings, entry):
        """ Function to extract all the strings from a file """

        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                parseAll = self.parseAll
                for element in entry:
                    parseAll(strings, element)
            else:
                # If current element is 'T', set sflag
                if token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1

                elif self.sflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0

                # If inside 'T', extract data accordingly
                elif self.tflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:
                            strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0

                    elif self.bracket > 0:
                        self.outstr += value

                else:
                    # If current element is 'M', set mflag
                    if token.tok_name[id] == "NAME" and value == "M":
                        self.mflag = 1

                    elif self.mflag == 1:
                        # If inside 'M()', extract string accordingly
                        if token.tok_name[id] == "STRING":
                            strings.append((entry[2], value))

                        elif token.tok_name[id] == "EQUAL" or \
                             token.tok_name[id] == "RPAR":
                            self.mflag = 0

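# Illustrative sketch (not part of the original module): feeding a small code
# fragment through the stdlib parser module and parseAll(), the same way
# findstr() below does for whole files. Self-contained under Python 2, where
# the parser module imported above is available.
def _example_parse_fragment():
    source = 'title = T("Hello World")\nlabel = M("Warehouse")\n'
    tree = parser.st2list(parser.suite(source), line_info=1)
    found = []
    walker = TranslateParseFiles()
    for element in tree:
        walker.parseAll(found, element)
    # found is roughly [(1, '"Hello World"'), (2, '"Warehouse"')]
    # (line number, string literal still carrying its quotes)
    return found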
# =============================================================================
class TranslateReadFiles(object):
    """ Class to read code files """

    # ---------------------------------------------------------------------
    @staticmethod
    def findstr(fileName, spmod, modlist):
        """
            Using the methods in TranslateParseFiles to extract the strings
            fileName -> the file to be used for extraction
            spmod -> the required module
            modlist -> a list of all modules in Eden
        """

        try:
            f = open(fileName)
        except:
            path = os.path.split(__file__)[0]
            fileName = os.path.join(path, fileName)
            try:
                f = open(fileName)
            except:
                return

        # Read all contents of file
        fileContent = f.read()
        f.close()

        # Remove CR-LF and NOEOL characters
        fileContent = "%s\n" % fileContent.replace("\r", "")

        try:
            st = parser.suite(fileContent)
        except:
            return []

        # Create a parse tree list for traversal
        stList = parser.st2list(st, line_info=1)

        P = TranslateParseFiles()

        # List which holds the extracted strings
        strings = []

        if spmod == "ALL":
            # If all strings are to be extracted, call ParseAll()
            parseAll = P.parseAll
            for element in stList:
                parseAll(strings, element)
        else:
            # Handle cases for special files which contain
            # strings belonging to different modules
            fileName = os.path.basename(fileName)
            if fileName == "s3menus.py":
                parseMenu = P.parseMenu
                for element in stList:
                    parseMenu(spmod, strings, element, 0)

            elif fileName == "s3cfg.py":
                parseS3cfg = P.parseS3cfg
                for element in stList:
                    parseS3cfg(spmod, strings, element, modlist)

            elif fileName in ("000_config.py", "config.py"):
                parseConfig = P.parseConfig
                for element in stList:
                    parseConfig(spmod, strings, element, modlist)

        # Extract strings from deployment_settings.variable() calls
        final_strings = []
        fsappend = final_strings.append
        settings = current.deployment_settings
        for (loc, s) in strings:

            if s[0] != '"' and s[0] != "'":

                # This is a variable
                if "settings." in s:
                    # Convert the call to a standard form
                    s = s.replace("current.deployment_settings", "settings")
                    s = s.replace("()", "")
                    l = s.split(".")
                    obj = settings

                    # Get the actual value
                    for atr in l[1:]:
                        try:
                            obj = getattr(obj, atr)()
                        except:
                            current.log.warning("Can't find this deployment_setting, maybe a crud.settings", atr)
                        else:
                            s = obj
                            fsappend((loc, s))
                else:
                    # @ToDo: Get the value of non-settings variables
                    pass

            else:
                fsappend((loc, s))

        return final_strings

    # ---------------------------------------------------------------------
    @staticmethod
    def read_html_js(filename):
        """
            Function to read and extract strings from html/js files
            using regular expressions
        """

        import re

        PY_STRING_LITERAL_RE = r'(?<=[^\w]T\()(?P<name>'\
                             + r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\
                             + r"(?:'(?:[^'\\]|\\.)*')|"\
                             + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\
                             + r'(?:"(?:[^"\\]|\\.)*"))'
        regex_trans = re.compile(PY_STRING_LITERAL_RE, re.DOTALL)
        findall = regex_trans.findall

        html_js_file = open(filename)
        linecount = 0
        strings = []
        sappend = strings.append

        for line in html_js_file:
            linecount += 1
            occur = findall(line)
            for s in occur:
                sappend((linecount, s))

        html_js_file.close()
        return strings

    # ---------------------------------------------------------------------
    @staticmethod
    def get_user_strings():
        """
            Function to return the list of user-supplied strings
        """

        user_file = os.path.join(current.request.folder, "uploads",
                                 "user_strings.txt")

        strings = []
        COMMENT = "User supplied"

        if os.path.exists(user_file):
            f = open(user_file, "r")
            for line in f:
                line = line.replace("\n", "").replace("\r", "")
                strings.append((COMMENT, line))
            f.close()

        return strings

    # ---------------------------------------------------------------------
    @staticmethod
    def merge_user_strings_file(newstrings):
        """
            Function to merge the existing file of user-supplied strings
            with newly uploaded strings
        """

        user_file = os.path.join(current.request.folder, "uploads",
                                 "user_strings.txt")

        oldstrings = []
        oappend = oldstrings.append

        if os.path.exists(user_file):
            f = open(user_file, "r")
            for line in f:
                oappend(line)
            f.close()

        # Append user strings if not already present
        f = open(user_file, "a")
        for s in newstrings:
            if s not in oldstrings:
                f.write(s)

        f.close()

    # ---------------------------------------------------------------------
    @staticmethod
    def get_database_strings(all_template_flag):
        """
            Function to get database strings from csv files
            which are to be considered for translation.
        """

        from s3import import S3BulkImporter

        # List of database strings
        database_strings = []
        dappend = database_strings.append
        template_list = []
        base_dir = current.request.folder
        path = os.path
        # If all templates flag is set we look in all templates' tasks.cfg file
        if all_template_flag:
            template_dir = path.join(base_dir, "modules", "templates")
            files = os.listdir(template_dir)
            # template_list will have the list of all templates
            tappend = template_list.append
            for f in files:
                curFile = path.join(template_dir, f)
                baseFile = path.basename(curFile)
                if path.isdir(curFile):
                    tappend(baseFile)
        else:
            # Set current template.
            template_list.append(current.deployment_settings.base.template)

        # List of fields which don't have an S3ReusableField defined but we
        # know we wish to translate
        # @ToDo: Extend to dict if we need to support some which don't just translate the name
        always_translate = ("project_beneficiary_type_id",
                            "stats_demographic_id",
                            )

        # Use bulk importer class to parse tasks.cfg in template folder
        bi = S3BulkImporter()
        S = Strings()
        read_csv = S.read_csv
        for template in template_list:
            pth = path.join(base_dir, "modules", "templates", template)
            if path.exists(path.join(pth, "tasks.cfg")) == False:
                continue
            bi.load_descriptor(pth)

            s3db = current.s3db
            for csv in bi.tasks:
                # Ignore special import files
                if csv[0] != 1:
                    continue

                # csv is in format: prefix, tablename, path of csv file
                # assuming represent.translate is always on primary key id
                translate = False
                fieldname = "%s_%s_id" % (csv[1], csv[2])
                if fieldname in always_translate:
                    translate = True
                    represent = Storage(fields = ["name"])
                elif hasattr(s3db, fieldname) is False:
                    continue
                else:
                    reusable_field = s3db.get(fieldname)
                    # Excludes lambdas which are in defaults()
                    # i.e. reusable fields in disabled modules
                    if reusable_field and isinstance(reusable_field, S3ReusableField):
                        represent = reusable_field.attr.represent
                        if hasattr(represent, "translate"):
                            translate = represent.translate

                # If translate attribute is set to True
                if translate:
                    if hasattr(represent, "fields") is False:
                        # Only name field is considered
                        fields = ["name"]
                    else:
                        # List of fields is retrieved from represent.fields
                        fields = represent.fields

                    # Consider it for translation (csv[3])
                    csv_path = csv[3]
                    try:
                        data = read_csv(csv_path)
                    except IOError:
                        # Phantom
                        continue
                    title_row = data[0]
                    idx = 0
                    idxlist = []
                    idxappend = idxlist.append
                    for e in title_row:
                        if e.lower() in fields:
                            idxappend(idx)
                        idx += 1

                    if idxlist:
                        # Line number of string retrieved.
                        line_number = 1
                        for row in data[1:]:
                            line_number += 1
                            # If string is not empty
                            for idx in idxlist:
                                try:
                                    s = row[idx]
                                except:
                                    current.log.error("CSV row incomplete", csv_path)
                                if s != "":
                                    loc = "%s:%s" % (csv_path, line_number)
                                    dappend((loc, s))

        return database_strings

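# Illustrative sketch (not part of the original module): read_html_js() above
# extracts T(...) literals from views/scripts with a regular expression. A
# simplified two-alternative version of that pattern (no triple-quoted or
# prefixed literals), applied to a single invented line of JavaScript:
def _example_html_js_regex():
    import re
    pattern = r'(?<=[^\w]T\()(?P<name>' \
              r"(?:'(?:[^'\\]|\\.)*')|" \
              r'(?:"(?:[^"\\]|\\.)*"))'
    line = ' var msg = T("Are you sure you want to delete this record?");'
    # Returns the quoted literal(s) passed to T() on this line
    return re.findall(pattern, line)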
# =============================================================================
class Strings(object):
    """ Class to manipulate strings and their files """

    # ---------------------------------------------------------------------
    @staticmethod
    def remove_quotes(Strings):
        """
            Function to remove single or double quotes around the strings
        """

        l = []
        lappend = l.append

        for (d1, d2) in Strings:
            if (d1[0] == '"' and d1[-1] == '"') or \
               (d1[0] == "'" and d1[-1] == "'"):
                d1 = d1[1:-1]
            if (d2[0] == '"' and d2[-1] == '"') or \
               (d2[0] == "'" and d2[-1] == "'"):
                d2 = d2[1:-1]
            lappend((d1, d2))

        return l

    # ---------------------------------------------------------------------
    @staticmethod
    def remove_duplicates(Strings):
        """
            Function to club all the duplicate strings into one row
            with ";" separated locations
        """

        uniq = {}
        appname = current.request.application

        for (loc, data) in Strings:
            uniq[data] = ""

        for (loc, data) in Strings:

            # Remove the prefix from the filename
            loc = loc.split(appname, 1)[1]
            if uniq[data] != "":
                uniq[data] = uniq[data] + ";" + loc
            else:
                uniq[data] = loc

        l = []
        lappend = l.append

        for data in uniq.keys():
            lappend((uniq[data], data))

        return l

    # ---------------------------------------------------------------------
    @staticmethod
    def remove_untranslated(lang_code):
        """
            Function to remove all untranslated strings from a lang_code.py
        """

        w2pfilename = os.path.join(current.request.folder, "languages",
                                   "%s.py" % lang_code)

        data = read_dict(w2pfilename)
        #try:
        #    # Python 2.7
        #    # - won't even compile
        #    data = {k: v for k, v in data.iteritems() if k != v}
        #except:
        # Python 2.6
        new_data = {}
        for k, v in data.iteritems():
            if k != v:
                new_data[k] = v
        data = new_data

        write_dict(w2pfilename, data)

    # ---------------------------------------------------------------------
    def export_file(self, langfile, modlist, filelist, filetype, all_template_flag):
        """
            Function to get the strings by module(s)/file(s), merge with
            those strings from existing w2p language file which are already
            translated and call the "write_xls()" method if the
            default filetype "xls" is chosen. If "po" is chosen, then the
            "write_po()" method is called.
        """

        request = current.request
        settings = current.deployment_settings

        folder = request.folder
        join = os.path.join

        langcode = langfile[:-3]
        langfile = join(folder, "languages", langfile)

        # If the language file doesn't exist, create it
        if not os.path.exists(langfile):
            f = open(langfile, "w")
            f.write("")
            f.close()

        NewStrings = []
        A = TranslateAPI()

        if all_template_flag == 1:
            # Select All Templates
            A.grp.group_files(join(folder, "modules", "templates"))
        else:
            # Specific template(s) is selected
            templates = settings.get_template()
            if not isinstance(templates, (tuple, list)):
                templates = (templates,)
            group_files = A.grp.group_files
            for template in templates:
                template_folder = join(folder, "modules", "templates", template)
                group_files(template_folder)

        R = TranslateReadFiles()

        ## Select Modules

        # Core Modules are always included
        core_modules = ("auth", "default")
        for mod in core_modules:
            modlist.append(mod)

        # appadmin and error are part of admin
        if "admin" in modlist:
            modlist.append("appadmin")
            modlist.append("error")

        # Select dependent modules
        models = current.models
        for mod in modlist:
            if hasattr(models, mod):
                obj = getattr(models, mod)
                # Currently only inv module has a depends list
                if hasattr(obj, "depends"):
                    for element in obj.depends:
                        if element not in modlist:
                            modlist.append(element)

        get_strings_by_module = A.get_strings_by_module
        for mod in modlist:
            NewStrings += get_strings_by_module(mod)

        # Retrieve strings in a file
        get_strings_by_file = A.get_strings_by_file
        for f in filelist:
            NewStrings += get_strings_by_file(f)

        # Remove quotes
        NewStrings = self.remove_quotes(NewStrings)
        # Add database strings
        NewStrings += R.get_database_strings(all_template_flag)
        # Add user-supplied strings
        NewStrings += R.get_user_strings()
        # Remove duplicates
        NewStrings = self.remove_duplicates(NewStrings)
        NewStrings.sort(key=lambda tup: tup[1])

        # Retrieve strings from existing w2p language file
        OldStrings = self.read_w2p(langfile)
        OldStrings.sort(key=lambda tup: tup[0])

        # Merge those strings which were already translated earlier
        Strings = []
        sappend = Strings.append
        i = 0
        lim = len(OldStrings)

        for (l, s) in NewStrings:

            while i < lim and OldStrings[i][0] < s:
                i += 1

            if i != lim and OldStrings[i][0] == s and \
               OldStrings[i][1].startswith("*** ") == False:
                sappend((l, s, OldStrings[i][1]))
            else:
                sappend((l, s, ""))

        if filetype == "xls":
            # Create excel file
            return self.write_xls(Strings, langcode)
        elif filetype == "po":
            # Create pootle file
            return self.write_po(Strings)

    # ---------------------------------------------------------------------
    @staticmethod
    def read_csv(fileName):
        """ Function to read a CSV file and return a list of rows """

        import csv
        csv.field_size_limit(2**20)  # 1 Mb

        data = []
        dappend = data.append
        f = open(fileName, "rb")
        transReader = csv.reader(f)
        for row in transReader:
            dappend(row)
        f.close()
        return data

    # ---------------------------------------------------------------------
    @staticmethod
    def read_w2p(fileName):
        """
            Function to read a web2py language file and
            return a list of translation string pairs
        """

        data = read_dict(fileName)

        # Convert to list of tuples
        # @ToDo: Why?
        strings = []
        sappend = strings.append
        for s in data:
            sappend((s, data[s]))
        return strings

    # ---------------------------------------------------------------------
    @staticmethod
    def write_csv(fileName, data):
        """ Function to write a list of rows into a csv file """

        import csv

        f = open(fileName, "wb")

        # Quote all the elements while writing
        transWriter = csv.writer(f, delimiter=" ",
                                 quotechar='"', quoting = csv.QUOTE_ALL)
        transWriter.writerow(("location", "source", "target"))
        for row in data:
            transWriter.writerow(row)

        f.close()

    # ---------------------------------------------------------------------
    def write_po(self, data):
        """ Returns a ".po" file constructed from given strings """

        from subprocess import call
        from tempfile import NamedTemporaryFile
        from gluon.contenttype import contenttype

        f = NamedTemporaryFile(delete=False)
        csvfilename = "%s.csv" % f.name
        self.write_csv(csvfilename, data)

        g = NamedTemporaryFile(delete=False)
        pofilename = "%s.po" % g.name
        # Shell needed on Win32
        # @ToDo: Copy relevant parts of Translate Toolkit internally to avoid external dependencies
        call(["csv2po", "-i", csvfilename, "-o", pofilename], shell=True)

        h = open(pofilename, "r")

        # Modify headers to return the po file for download
        filename = "trans.po"
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".po")
        response.headers["Content-disposition"] = disposition

        h.seek(0)
        return h.read()

    # ---------------------------------------------------------------------
    def write_w2p(self, csvfiles, lang_code, option):
        """
            Function to merge multiple translated csv files into one
            and then merge/overwrite the existing w2p language file
        """

        w2pfilename = os.path.join(current.request.folder, "languages",
                                   "%s.py" % lang_code)

        # Dictionary to store translated strings
        # with untranslated string as the key
        data = {}

        for f in csvfiles:
            newdata = self.read_csv(f)
            # Test: 2 cols or 3?
            cols = len(newdata[0])
            if cols == 1:
                raise SyntaxError("CSV file needs to have at least 2 columns!")
            elif cols == 2:
                # 1st column is source, 2nd is target
                for row in newdata:
                    data[row[0]] = row[1]
            else:
                # 1st column is location, 2nd is source, 3rd is target
                for row in newdata:
                    data[row[1]] = row[2]

        if option == "m":
            # Merge strings with existing .py file
            keys = data.keys()
            olddata = read_dict(w2pfilename)
            for s in olddata:
                if s not in keys:
                    data[s] = olddata[s]

        write_dict(w2pfilename, data)

    # ---------------------------------------------------------------------
    @staticmethod
    def write_xls(Strings, langcode):
        """
            Function to create a spreadsheet (.xls file) of strings with
            location, original string and translated string as columns
        """

        try:
            from cStringIO import StringIO    # Faster, where available
        except:
            from StringIO import StringIO
        import xlwt

        from gluon.contenttype import contenttype

        # Define spreadsheet properties
        wbk = xlwt.Workbook("utf-8")
        sheet = wbk.add_sheet("Translate")
        style = xlwt.XFStyle()
        font = xlwt.Font()
        font.name = "Times New Roman"
        style.font = font

        sheet.write(0, 0, "location", style)
        sheet.write(0, 1, "source", style)
        sheet.write(0, 2, "target", style)

        row_num = 1

        # Write the data to spreadsheet
        for (loc, d1, d2) in Strings:
            d2 = d2.decode("string-escape").decode("utf-8")
            sheet.write(row_num, 0, loc, style)
            try:
                sheet.write(row_num, 1, d1, style)
            except:
                current.log.warning("Invalid source string!", loc)
                sheet.write(row_num, 1, "", style)
            sheet.write(row_num, 2, d2, style)
            row_num += 1

        # Set column width
        for colx in range(0, 3):
            sheet.col(colx).width = 15000

        # Initialize output
        output = StringIO()

        # Save the spreadsheet
        wbk.save(output)

        # Modify headers to return the xls file for download
        filename = "%s.xls" % langcode
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition

        output.seek(0)
        return output.read()

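# Illustrative sketch (not part of the original module): applying translated
# spreadsheets back to a web2py language file with the Strings class above.
# Assumes a running web2py/Eden environment; the file names are examples.
def _example_import_translations():
    S = Strings()
    # "m" merges with the existing languages/de.py, "o" overwrites it
    S.write_w2p(["uploads/de_translated.csv"], "de", "m")
    # Optionally drop strings that are still untranslated
    S.remove_untranslated("de")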
# =============================================================================
class Pootle(object):
    """
        Class to synchronise a Pootle server's translation with the local
        one

        @ToDo: Before uploading file to Pootle, ensure all relevant
               untranslated strings are present.
    """

    # ---------------------------------------------------------------------
    def upload(self, lang_code, filename):
        """
            Upload a file to Pootle
        """

        # @ToDo try/except error
        import mechanize
        import re

        br = mechanize.Browser()
        br.addheaders = [("User-agent", "Firefox")]

        br.set_handle_equiv(False)
        # Ignore robots.txt
        br.set_handle_robots(False)
        # Don't add Referer (sic) header
        br.set_handle_referer(False)

        settings = current.deployment_settings

        username = settings.get_L10n_pootle_username()
        if username is False:
            current.log.error("No login information found")
            return

        pootle_url = settings.get_L10n_pootle_url()
        login_url = "%saccounts/login" % pootle_url
        try:
            br.open(login_url)
        except:
            current.log.error("Connection Error")
            return

        br.select_form("loginform")

        br.form["username"] = username
        br.form["password"] = settings.get_L10n_pootle_password()
        br.submit()

        current_url = br.geturl()
        if current_url.endswith("login/"):
            current.log.error("Login Error")
            return

        pattern = "<option value=(.+?)>%s.po" % lang_code

        # Process lang_code (if of form ab_cd --> convert to ab_CD)
        if len(lang_code) > 2:
            lang_code = "%s_%s" % (lang_code[:2], lang_code[-2:].upper())

        link = "%s%s/eden/" % (pootle_url, lang_code)

        page_source = br.open(link).read()
        # Use Regex to extract the value for field : "upload to"
        regex = re.search(pattern, page_source)
        result = regex.group(0)
        result = re.split(r'[="]', result)
        upload_code = result[2]

        try:
            br.select_form("uploadform")
            # If user is not admin then overwrite option is not there
            br.form.find_control(name="overwrite").value = ["overwrite"]
            br.form.find_control(name="upload_to").value = [upload_code]
            br.form.add_file(open(filename), "text/plain", filename)
            br.submit()
        except:
            current.log.error("Error in Uploading form")
            return

    # ---------------------------------------------------------------------
    def download(self, lang_code):
        """
            Download a file from Pootle

            @ToDo: Allow selection between different variants of language files
        """

        import requests
        import zipfile
        try:
            from cStringIO import StringIO    # Faster, where available
        except:
            from StringIO import StringIO
        from subprocess import call
        from tempfile import NamedTemporaryFile

        code = lang_code
        if len(lang_code) > 2:
            code = "%s_%s" % (lang_code[:2], lang_code[-2:].upper())

        pootle_url = current.deployment_settings.get_L10n_pootle_url()
        link = "%s%s/eden/export/zip" % (pootle_url, code)
        try:
            r = requests.get(link)
        except:
            current.log.error("Connection Error")
            return False

        zipf = zipfile.ZipFile(StringIO(r.content))
        zipf.extractall()
        file_name_po = "%s.po" % lang_code
        file_name_py = "%s.py" % lang_code

        f = NamedTemporaryFile(delete=False)
        w2pfilename = "%s.py" % f.name

        call(["po2web2py", "-i", file_name_po, "-o", w2pfilename])

        S = Strings()
        path = os.path.join(current.request.folder, "languages", file_name_py)
        pystrings = S.read_w2p(path)
        pystrings.sort(key=lambda tup: tup[0])

        postrings = S.read_w2p(w2pfilename)
        # Remove untranslated strings
        postrings = [tup for tup in postrings if tup[0] != tup[1]]
        postrings.sort(key=lambda tup: tup[0])

        os.unlink(file_name_po)
        os.unlink(w2pfilename)
        return (postrings, pystrings)

    # ---------------------------------------------------------------------
    def merge_strings(self, postrings, pystrings, preference):
        """
            Merge strings from a PO file and a Py file
        """

        lim_po = len(postrings)
        lim_py = len(pystrings)
        i = 0
        j = 0

        # Store strings which are missing from pootle
        extra = []
        eappend = extra.append

        while i < lim_py and j < lim_po:
            if pystrings[i][0] < postrings[j][0]:
                if preference == False:
                    eappend(pystrings[i])
                i += 1
            elif pystrings[i][0] > postrings[j][0]:
                j += 1

            # pystrings[i] == postrings[j]
            else:
                # Pootle is being given preference
                if preference:
                    # Check if string is not empty
                    if postrings[j][1] and not postrings[j][1].startswith("***"):
                        pystrings[i] = postrings[j]
                # Py is being given preference
                else:
                    if pystrings[i][1] and not pystrings[i][1].startswith("***"):
                        postrings[j] = pystrings[i]
                i += 1
                j += 1

        if preference:
            return pystrings

        else:
            # Add strings which were left
            while i < lim_py:
                extra.append(pystrings[i])
                i += 1
            # Add extra strings to Pootle list
            for st in extra:
                postrings.append(st)

            postrings.sort(key=lambda tup: tup[0])
            return postrings

    # ---------------------------------------------------------------------
    def merge_pootle(self, preference, lang_code):

        # returns a tuple (postrings, pystrings)
        ret = self.download(lang_code)
        if not ret:
            return

        from subprocess import call
        from tempfile import NamedTemporaryFile

        # returns pystrings if preference was True else returns postrings
        ret = self.merge_strings(ret[0], ret[1], preference)

        S = Strings()

        data = []
        dappend = data.append

        temp_csv = NamedTemporaryFile(delete=False)
        csvfilename = "%s.csv" % temp_csv.name

        if preference:
            # Only python file has been changed
            for i in ret:
                dappend(("", i[0], i[1].decode("string-escape")))

            S.write_csv(csvfilename, data)
            # overwrite option
            S.write_w2p([csvfilename], lang_code, "o")

            os.unlink(csvfilename)

        else:
            # Only Pootle file has been changed
            for i in ret:
                dappend(("", i[0], i[1].decode("string-escape")))

            S.write_csv(csvfilename, data)

            temp_po = NamedTemporaryFile(delete=False)
            pofilename = "%s.po" % temp_po.name

            # Shell needed on Win32
            # @ToDo: Copy relevant parts of Translate Toolkit internally to avoid external dependencies
            call(["csv2po", "-i", csvfilename, "-o", pofilename], shell=True)
            self.upload(lang_code, pofilename)

            # Clean up extra created files
            os.unlink(csvfilename)
            os.unlink(pofilename)

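# Illustrative sketch (not part of the original module): merge_strings() above
# only needs two sorted (source, target) lists, so it can be exercised without
# a Pootle server. Self-contained; the sample strings are invented.
def _example_merge_strings():
    postrings = [("Cancel", "Abbrechen"), ("Save", "")]
    pystrings = [("Cancel", ""), ("Open", "Oeffnen"), ("Save", "Speichern")]
    merged = Pootle().merge_strings(postrings, pystrings, preference=True)
    # With preference=True the Pootle strings win where non-empty, so "Cancel"
    # becomes "Abbrechen" while "Save" keeps the local "Speichern".
    return merged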
# =============================================================================
class TranslateReportStatus(object):
    """
        Class to report the percentage of translated strings for
        each module for a given language.
    """

    # -------------------------------------------------------------------------
    @classmethod
    def create_master_file(cls):
        """
            Create master file of strings and their distribution in modules
        """

        try:
            import cPickle as pickle
        except:
            import pickle

        # Instantiate the translateAPI
        api = TranslateAPI()

        # Generate list of modules
        modules = api.get_modules()
        modules.append("core")

        # The list of all strings
        all_strings = []
        addstring = all_strings.append

        # Dictionary of {module: indices of strings used in this module}
        indices = {}

        # Helper dict for fast lookups
        string_indices = {}

        index = 0
        get_strings_by_module = api.get_strings_by_module
        for module in modules:

            module_indices = []
            addindex = module_indices.append

            strings = get_strings_by_module(module)
            for (origin, string) in strings:

                # Remove outermost quotes around the string
                if (string[0] == '"' and string[-1] == '"') or\
                   (string[0] == "'" and string[-1] == "'"):
                    string = string[1:-1]

                string_index = string_indices.get(string)
                if string_index is None:
                    string_indices[string] = index
                    addstring(string)
                    addindex(index)
                    index += 1
                else:
                    addindex(string_index)

            indices[module] = module_indices

        # Save all_strings and string_dict as pickle objects in a file
        data_file = os.path.join(current.request.folder,
                                 "uploads",
                                 "temp.pkl")
        f = open(data_file, "wb")
        pickle.dump(all_strings, f)
        pickle.dump(indices, f)
        f.close()

        # Mark all string counts as dirty
        ptable = current.s3db.translate_percentage
        current.db(ptable.id > 0).update(dirty=True)

    # -------------------------------------------------------------------------
    @classmethod
    def update_string_counts(cls, lang_code):
        """
            Update the translation percentages for all modules for a given
            language.

            @ToDo: Generate fresh .py files with all relevant strings for this
                   (since we don't store untranslated strings)
        """

        try:
            import cPickle as pickle
        except:
            import pickle

        base_dir = current.request.folder

        # Read the language file
        langfile = "%s.py" % lang_code
        langfile = os.path.join(base_dir, "languages", langfile)
        lang_strings = read_dict(langfile)

        # Retrieve the data stored in master file
        data_file = os.path.join(base_dir, "uploads", "temp.pkl")
        f = open(data_file, "rb")
        all_strings = pickle.load(f)
        string_dict = pickle.load(f)
        f.close()

        db = current.db
        ptable = current.s3db.translate_percentage

        translated = set()
        addindex = translated.add
        for index, string in enumerate(all_strings):
            translation = lang_strings.get(string)
            if translation is None or translation[:4] == "*** ":
                continue
            elif translation != string or lang_code == "en-gb":
                addindex(index)

        for module, indices in string_dict.items():
            all_indices = set(indices)
            num_untranslated = len(all_indices - translated)
            num_translated = len(all_indices) - num_untranslated

            data = dict(code = lang_code,
                        module = module,
                        translated = num_translated,
                        untranslated = num_untranslated,
                        dirty=False)

            query = (ptable.code == lang_code) & \
                    (ptable.module == module)
            record = db(query).select(ptable._id, limitby=(0, 1)).first()
            if record:
                record.update_record(**data)
            else:
                ptable.insert(**data)

        return

    # -------------------------------------------------------------------------
    @classmethod
    def get_translation_percentages(cls, lang_code):
        """
            Get the percentages of translated strings per module for
            the given language code.

            @param lang_code: the language code
        """

        pickle_file = os.path.join(current.request.folder,
                                   "uploads",
                                   "temp.pkl")
        # If master file doesn't exist, create it
        if not os.path.exists(pickle_file):
            cls.create_master_file()

        db = current.db
        ptable = current.s3db.translate_percentage

        query = (ptable.code == lang_code)
        fields = ("dirty", "translated", "untranslated", "module")

        rows = db(query).select(*fields)
        if not rows or rows.first().dirty:
            # Update the string counts
            cls.update_string_counts(lang_code)
            rows = db(query).select(*fields)

        percentage = {}
        total_strings = 0
        total_translated = 0
        total_untranslated = 0
        for row in rows:

            num_translated = row.translated
            num_untranslated = row.untranslated

            total_strings += num_translated + num_untranslated

            if not num_untranslated:
                percentage[row.module] = 100
            else:
                total = num_translated + num_untranslated
                total_translated += num_translated
                total_untranslated += num_untranslated
                percentage[row.module] = \
                    round((float(num_translated) / total) * 100, 2)

        if not total_untranslated:
            percentage["complete_file"] = 100
        else:
            percentage["complete_file"] = \
                round((float(total_translated) / (total_strings)) * 100, 2)
        return percentage

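# Illustrative sketch (not part of the original module): how the status report
# above is typically consumed. Assumes a running web2py/Eden environment with
# the translate_percentage table defined; "de" is an example language code.
def _example_translation_status():
    percentages = TranslateReportStatus.get_translation_percentages("de")
    # e.g. {"inv": 72.5, "org": 81.25, ..., "complete_file": 64.8}
    return percentages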
# END =========================================================================