Package s3 :: Module s3resource
[frames] | [no frames]

Source Code for Module s3.s3resource

   1  # -*- coding: utf-8 -*- 
   2   
   3  """ S3 Resources 
   4   
   5      @copyright: 2009-2019 (c) Sahana Software Foundation 
   6      @license: MIT 
   7   
   8      Permission is hereby granted, free of charge, to any person 
   9      obtaining a copy of this software and associated documentation 
  10      files (the "Software"), to deal in the Software without 
  11      restriction, including without limitation the rights to use, 
  12      copy, modify, merge, publish, distribute, sublicense, and/or sell 
  13      copies of the Software, and to permit persons to whom the 
  14      Software is furnished to do so, subject to the following 
  15      conditions: 
  16   
  17      The above copyright notice and this permission notice shall be 
  18      included in all copies or substantial portions of the Software. 
  19   
  20      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
  21      EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
  22      OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  23      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
  24      HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
  25      WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
  26      FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 
  27      OTHER DEALINGS IN THE SOFTWARE. 
  28   
  29      @group Resource API: S3Resource, 
  30      @group Filter API: S3ResourceFilter 
  31      @group Helper Classes: S3AxisFilter, S3ResourceData 
  32  """ 
  33   
  34  __all__ = ("S3AxisFilter", 
  35             "S3Resource", 
  36             "S3ResourceFilter", 
  37             ) 
  38   
  39  import json 
  40  import sys 
  41   
  42  from itertools import chain 
  43   
  44  try: 
  45      from cStringIO import StringIO # Faster, where available 
  46  except ImportError: 
  47      from StringIO import StringIO 
  48   
  49  try: 
  50      from lxml import etree 
  51  except ImportError: 
  52      sys.stderr.write("ERROR: lxml module needed for XML handling\n") 
  53      raise 
  54   
  55  from gluon import current 
  56  from gluon.html import A, TAG 
  57  from gluon.validators import IS_EMPTY_OR 
  58  from gluon.storage import Storage 
  59  from gluon.tools import callback 
  60   
  61  from s3dal import Expression, Field, Row, Rows, Table, S3DAL, VirtualCommand 
  62  from s3data import S3DataTable, S3DataList 
  63  from s3datetime import s3_format_datetime 
  64  from s3fields import S3Represent, s3_all_meta_field_names 
  65  from s3query import FS, S3ResourceField, S3ResourceQuery, S3Joins, S3URLQuery 
  66  from s3utils import s3_get_foreign_key, s3_get_last_record_id, s3_has_foreign_key, s3_remove_last_record_id, s3_str, s3_unicode 
  67  from s3validators import IS_ONE_OF 
  68  from s3xml import S3XMLFormat 
  69   
  70  osetattr = object.__setattr__ 
  71  ogetattr = object.__getattribute__ 
  72   
  73  MAXDEPTH = 10 
  74  DEFAULT = lambda: None 
# Compact JSON encoding
#SEPARATORS = (",", ":")

# =============================================================================
class S3Resource(object):
    """
    API for resources.

    A "resource" is a set of records in a database table including their
    references in certain related resources (components). A resource can
    be defined like:

        resource = S3Resource(table)

    A resource defined like this would include all records in the table.
    Further parameters for the resource constructor as well as methods
    of the resource instance can be used to filter for particular subsets.

    This API provides extended standard methods to access and manipulate
    data in resources while respecting current authorization and other
    S3 framework rules.
    """
    def __init__(self, tablename,
                 id=None,
                 prefix=None,
                 uid=None,
                 filter=None,
                 vars=None,
                 parent=None,
                 linked=None,
                 linktable=None,
                 alias=None,
                 components=None,
                 filter_component=None,
                 include_deleted=False,
                 approved=True,
                 unapproved=False,
                 context=False,
                 extra_filters=None):
        """
        Constructor

        @param tablename: tablename, Table, or an S3Resource instance
        @param prefix: prefix to use for the tablename

        @param id: record ID (or list of record IDs)
        @param uid: record UID (or list of record UIDs)

        @param filter: filter query
        @param vars: dictionary of URL query variables

        @param components: list of component aliases
                           to load for this resource
        @param filter_component: alias of the component the URL filters
                                 apply for (filters for this component
                                 must be handled separately)

        @param alias: the alias for this resource (internal use only)
        @param parent: the parent resource (internal use only)
        @param linked: the linked resource (internal use only)
        @param linktable: the link table (internal use only)

        @param include_deleted: include deleted records (used for
                                synchronization)

        @param approved: include approved records
        @param unapproved: include unapproved records
        @param context: apply context filters
        @param extra_filters: extra filters (to be applied on
                              pre-filtered subsets), as list of
                              tuples (method, expression)

        @raise SyntaxError: if tablename is neither a string, a Table,
                            nor an S3Resource, or has no module prefix
        """

        s3db = current.s3db
        auth = current.auth

        # Names ---------------------------------------------------------------

        table = None
        table_alias = None

        if prefix is None:
            # Derive prefix/name from the tablename (or Table/S3Resource)
            if not isinstance(tablename, basestring):
                if isinstance(tablename, Table):
                    table = tablename
                    table_alias = table._tablename
                    tablename = table_alias
                elif isinstance(tablename, S3Resource):
                    table = tablename.table
                    table_alias = table._tablename
                    tablename = tablename.tablename
                else:
                    error = "%s is not a valid type for a tablename" % tablename
                    raise SyntaxError(error)
            if "_" in tablename:
                prefix, name = tablename.split("_", 1)
            else:
                raise SyntaxError("invalid tablename: %s" % tablename)
        else:
            # Explicit prefix given => tablename is just the resource name
            name = tablename
            tablename = "%s_%s" % (prefix, name)

        self.tablename = tablename

        # Module prefix and resource name
        self.prefix = prefix
        self.name = name

        # Resource alias defaults to tablename without module prefix
        if not alias:
            alias = name
        self.alias = alias

        # Table ---------------------------------------------------------------

        if table is None:
            table = s3db[tablename]

        # Set default approver
        auth.permission.set_default_approver(table)

        if parent is not None:
            if parent.tablename == self.tablename:
                # Component table same as parent table => must use table alias
                table_alias = "%s_%s_%s" % (prefix, alias, name)
                table = s3db.get_aliased(table, table_alias)

        self.table = table
        self._alias = table_alias or tablename

        self.fields = table.fields
        self._id = table._id

        self.defaults = None

        # Hooks ---------------------------------------------------------------

        # Authorization hooks
        self.accessible_query = auth.s3_accessible_query

        # Filter --------------------------------------------------------------

        # Default query options
        self.include_deleted = include_deleted
        self._approved = approved
        self._unapproved = unapproved

        # Component Filter
        self.filter = None

        # Resource Filter
        self.rfilter = None

        # Rows ----------------------------------------------------------------

        self._rows = None
        self._rowindex = None
        self.rfields = None
        self.dfields = None
        self._ids = []
        self._uids = []
        self._length = None

        # Request attributes --------------------------------------------------

        self.vars = None # set during build_query
        self.lastid = None
        self.files = Storage()

        # Components ----------------------------------------------------------

        # Initialize component properties (will be set during _attach)
        self.link = None
        self.linktable = None
        self.actuate = None
        self.lkey = None
        self.rkey = None
        self.pkey = None
        self.fkey = None
        self.multiple = True

        self.parent = parent # the parent resource
        self.linked = linked # the linked resource

        self.components = S3Components(self, components)
        self.links = self.components.links

        if parent is None:
            # Master resource => build the effective query now
            self.build_query(id = id,
                             uid = uid,
                             filter = filter,
                             vars = vars,
                             extra_filters = extra_filters,
                             filter_component = filter_component,
                             )
            if context:
                self.add_filter(s3db.context)

        # Component - attach link table
        elif linktable is not None:
            # This is a link-table component - attach the link table
            link_alias = "%s__link" % self.alias
            self.link = S3Resource(linktable,
                                   alias = link_alias,
                                   parent = self.parent,
                                   linked = self,
                                   include_deleted = self.include_deleted,
                                   approved = self._approved,
                                   unapproved = self._unapproved,
                                   )

        # Export and Import ---------------------------------------------------

        # Pending Imports
        self.skip_import = False
        self.job = None
        self.mtime = None
        self.error = None
        self.error_tree = None
        self.import_count = 0
        self.import_created = []
        self.import_updated = []
        self.import_deleted = []

        # Export meta data
        self.muntil = None # latest mtime of the exported records
        self.results = None # number of exported records

        # Standard methods ----------------------------------------------------

        # CRUD
        # NOTE(review): late import — presumably to avoid a circular
        # module dependency with s3crud; confirm before moving to top
        from s3crud import S3CRUD
        self.crud = S3CRUD()
        self.crud.resource = self
320 321 # ------------------------------------------------------------------------- 322 # Query handling 323 # -------------------------------------------------------------------------
324 - def build_query(self, 325 id=None, 326 uid=None, 327 filter=None, 328 vars=None, 329 extra_filters=None, 330 filter_component=None):
331 """ 332 Query builder 333 334 @param id: record ID or list of record IDs to include 335 @param uid: record UID or list of record UIDs to include 336 @param filter: filtering query (DAL only) 337 @param vars: dict of URL query variables 338 @param extra_filters: extra filters (to be applied on 339 pre-filtered subsets), as list of 340 tuples (method, expression) 341 @param filter_component: the alias of the component the URL 342 filters apply for (filters for this 343 component must be handled separately) 344 """ 345 346 # Reset the rows counter 347 self._length = None 348 349 self.rfilter = S3ResourceFilter(self, 350 id = id, 351 uid = uid, 352 filter = filter, 353 vars = vars, 354 extra_filters = extra_filters, 355 filter_component = filter_component, 356 ) 357 return self.rfilter
358 359 # -------------------------------------------------------------------------
360 - def add_filter(self, f=None, c=None):
361 """ 362 Extend the current resource filter 363 364 @param f: a Query or a S3ResourceQuery instance 365 @param c: alias of the component this filter concerns, 366 automatically adds the respective component join 367 (not needed for S3ResourceQuery instances) 368 """ 369 370 if f is None: 371 return 372 373 self.clear() 374 375 if self.rfilter is None: 376 self.rfilter = S3ResourceFilter(self) 377 378 self.rfilter.add_filter(f, component=c)
379 380 # -------------------------------------------------------------------------
381 - def add_component_filter(self, alias, f=None):
382 """ 383 Extend the resource filter of a particular component, does 384 not affect the master resource filter (as opposed to add_filter) 385 386 @param alias: the alias of the component 387 @param f: a Query or a S3ResourceQuery instance 388 """ 389 390 if f is None: 391 return 392 393 if self.rfilter is None: 394 self.rfilter = S3ResourceFilter(self) 395 396 self.rfilter.add_filter(f, component=alias, master=False)
397 398 # -------------------------------------------------------------------------
399 - def add_extra_filter(self, method, expression):
400 """ 401 And an extra filter (to be applied on pre-filtered subsets) 402 403 @param method: a name of a known filter method, or a 404 callable filter method 405 @param expression: the filter expression (string) 406 """ 407 408 self.clear() 409 410 if self.rfilter is None: 411 self.rfilter = S3ResourceFilter(self) 412 413 self.rfilter.add_extra_filter(method, expression)
414 415 # -------------------------------------------------------------------------
416 - def set_extra_filters(self, filters):
417 """ 418 Replace the current extra filters 419 420 @param filters: list of tuples (method, expression), or None 421 to remove all extra filters 422 """ 423 424 self.clear() 425 426 if self.rfilter is None: 427 self.rfilter = S3ResourceFilter(self) 428 429 self.rfilter.set_extra_filters(filters)
430 431 # -------------------------------------------------------------------------
432 - def get_query(self):
433 """ 434 Get the effective query 435 436 @return: Query 437 """ 438 439 if self.rfilter is None: 440 self.build_query() 441 442 return self.rfilter.get_query()
443 444 # -------------------------------------------------------------------------
445 - def get_filter(self):
446 """ 447 Get the effective virtual filter 448 449 @return: S3ResourceQuery 450 """ 451 452 if self.rfilter is None: 453 self.build_query() 454 455 return self.rfilter.get_filter()
456 457 # -------------------------------------------------------------------------
458 - def clear_query(self):
459 """ 460 Remove the current query (does not remove the set!) 461 """ 462 463 self.rfilter = None 464 465 for component in self.components.loaded.values(): 466 component.clear_query()
467 468 # ------------------------------------------------------------------------- 469 # Data access (new API) 470 # -------------------------------------------------------------------------
471 - def count(self, left=None, distinct=False):
472 """ 473 Get the total number of available records in this resource 474 475 @param left: left outer joins, if required 476 @param distinct: only count distinct rows 477 """ 478 479 if self.rfilter is None: 480 self.build_query() 481 if self._length is None: 482 self._length = self.rfilter.count(left=left, 483 distinct=distinct) 484 return self._length
485 486 # -------------------------------------------------------------------------
487 - def select(self, 488 fields, 489 start=0, 490 limit=None, 491 left=None, 492 orderby=None, 493 groupby=None, 494 distinct=False, 495 virtual=True, 496 count=False, 497 getids=False, 498 as_rows=False, 499 represent=False, 500 show_links=True, 501 raw_data=False):
502 """ 503 Extract data from this resource 504 505 @param fields: the fields to extract (selector strings) 506 @param start: index of the first record 507 @param limit: maximum number of records 508 @param left: additional left joins required for filters 509 @param orderby: orderby-expression for DAL 510 @param groupby: fields to group by (overrides fields!) 511 @param distinct: select distinct rows 512 @param virtual: include mandatory virtual fields 513 @param count: include the total number of matching records 514 @param getids: include the IDs of all matching records 515 @param as_rows: return the rows (don't extract) 516 @param represent: render field value representations 517 @param raw_data: include raw data in the result 518 """ 519 520 data = S3ResourceData(self, 521 fields, 522 start=start, 523 limit=limit, 524 left=left, 525 orderby=orderby, 526 groupby=groupby, 527 distinct=distinct, 528 virtual=virtual, 529 count=count, 530 getids=getids, 531 as_rows=as_rows, 532 represent=represent, 533 show_links=show_links, 534 raw_data=raw_data) 535 if as_rows: 536 return data.rows 537 else: 538 return data
539 540 # -------------------------------------------------------------------------
541 - def insert(self, **fields):
542 """ 543 Insert a record into this resource 544 545 @param fields: dict of field/value pairs to insert 546 """ 547 548 table = self.table 549 tablename = self.tablename 550 551 # Check permission 552 authorised = current.auth.s3_has_permission("create", tablename) 553 if not authorised: 554 from s3error import S3PermissionError 555 raise S3PermissionError("Operation not permitted: INSERT INTO %s" % 556 tablename) 557 558 # Insert new record 559 record_id = self.table.insert(**fields) 560 561 # Post-process create 562 if record_id: 563 564 # Audit 565 current.audit("create", self.prefix, self.name, record=record_id) 566 567 record = Storage(fields) 568 record.id = record_id 569 570 # Update super 571 s3db = current.s3db 572 s3db.update_super(table, record) 573 574 # Record owner 575 auth = current.auth 576 auth.s3_set_record_owner(table, record_id) 577 auth.s3_make_session_owner(table, record_id) 578 579 # Execute onaccept 580 s3db.onaccept(tablename, record, method="create") 581 582 return record_id
583 584 # -------------------------------------------------------------------------
585 - def update(self):
586 """ 587 Bulk updater, @todo 588 """ 589 590 raise NotImplementedError
591 592 # -------------------------------------------------------------------------
593 - def delete(self, 594 format=None, 595 cascade=False, 596 replaced_by=None, 597 log_errors=False, 598 ):
599 """ 600 Delete all records in this resource 601 602 @param format: the representation format of the request (optional) 603 @param cascade: this is a cascade delete (prevents commits) 604 @param replaced_by: used by record merger 605 @param log_errors: log errors even when cascade=True 606 607 @return: number of records deleted 608 609 NB skipping undeletable rows is no longer the default behavior, 610 process will now fail immediately for any error; use S3Delete 611 directly if skipping of undeletable rows is desired 612 """ 613 614 from s3delete import S3Delete 615 616 delete = S3Delete(self, representation=format) 617 result = delete(cascade = cascade, 618 replaced_by = replaced_by, 619 #skip_undeletable = False, 620 ) 621 622 if log_errors and cascade: 623 # Call log_errors explicitly if suppressed by cascade 624 delete.log_errors() 625 626 return result
627 628 # -------------------------------------------------------------------------
    def approve(self, components=(), approve=True, approved_by=None):
        """
        Approve all records in this resource

        @param components: list of component aliases to include, None
                           for no components, empty list or tuple to
                           approve all components (default)
        @param approve: set to approved (False to reset to unapproved)
        @param approved_by: set approver explicitly, a valid auth_user.id
                            or 0 for approval by system authority

        @return: True if successful, False otherwise (e.g. no logged-in
                 user, or DB error during update)
        """

        if "approved_by" not in self.fields:
            # No approved_by field => treat as approved by default
            return True

        # Determine the approver user ID
        auth = current.auth
        if approve:
            if approved_by is None:
                user = auth.user
                if user:
                    user_id = user.id
                else:
                    # Not logged-in => cannot approve
                    return False
            else:
                user_id = approved_by
        else:
            # Reset to unapproved
            user_id = None

        db = current.db
        # NOTE(review): self._table — __init__ only sets self.table and
        # self._alias; verify this attribute exists (property elsewhere?)
        table = self._table

        # Get all record_ids in the resource
        pkey = self._id.name
        rows = self.select([pkey], limit=None, as_rows=True)
        if not rows:
            # No records to approve => exit early
            return True

        # Collect record_ids and clear cached permissions
        record_ids = set()
        add = record_ids.add
        forget_permissions = auth.permission.forget
        for record in rows:
            record_id = record[pkey]
            forget_permissions(table, record_id)
            add(record_id)

        # Set approved_by for each record in the set
        dbset = db(table._id.belongs(record_ids))
        try:
            success = dbset.update(approved_by = user_id)
        except:
            # DB error => raise in debug mode to produce a proper ticket
            if current.response.s3.debug:
                raise
            success = False
        if not success:
            db.rollback()
            return False

        # Invoke onapprove-callback for each updated record
        onapprove = self.get_config("onapprove", None)
        if onapprove:
            rows = dbset.select(limitby=(0, len(record_ids)))
            for row in rows:
                callback(onapprove, row, tablename=self.tablename)

        # Return early if no components to approve
        if components is None:
            return True

        # Determine which components to approve
        # NB: Components are pre-filtered with the master filter, too
        if components:
            # FIXME this is probably wrong => should load
            #       the components which are to be approved
            cdict = self.components.exposed
            components = [cdict[k] for k in cdict if k in components]
        else:
            # Approve all currently attached components
            # FIXME use exposed.values()
            components = self.components.values()

        # Recursively approve all selected components (without their
        # own sub-components: components=None)
        for component in components:
            success = component.approve(components = None,
                                        approve = approve,
                                        approved_by = approved_by,
                                        )
            if not success:
                return False

        return True
723 724 # -------------------------------------------------------------------------
    def reject(self, cascade=False):
        """
        Reject (delete) all records in this resource

        @param cascade: this is a cascade from a parent reject
                        (retained in the signature for parity with
                        delete; not read in the visible body)

        @return: True if successful

        @raise RuntimeError: if a record could not be rejected
                             (transaction is rolled back first)
        """

        db = current.db
        s3db = current.s3db

        define_resource = s3db.resource
        DELETED = current.xml.DELETED

        INTEGRITY_ERROR = current.ERROR.INTEGRITY_ERROR
        tablename = self.tablename
        table = self.table
        pkey = table._id.name

        # Get hooks configuration
        get_config = s3db.get_config
        ondelete = get_config(tablename, "ondelete")
        onreject = get_config(tablename, "onreject")
        ondelete_cascade = get_config(tablename, "ondelete_cascade")

        # Get all rows
        if "uuid" in table.fields:
            rows = self.select([table._id.name, "uuid"], as_rows=True)
        else:
            rows = self.select([table._id.name], as_rows=True)
        if not rows:
            # Nothing to reject
            return True

        delete_super = s3db.delete_super

        if DELETED in table:
            # Table supports soft-delete => mark rows as deleted

            references = table._referenced_by

            for row in rows:

                # Preserve the current error status, reset for this row
                error = self.error
                self.error = None

                # On-delete-cascade
                if ondelete_cascade:
                    callback(ondelete_cascade, row, tablename=tablename)

                # Automatic cascade to all referencing tables
                for ref in references:
                    tn, fn = ref.tablename, ref.name
                    rtable = db[tn]
                    rfield = rtable[fn]
                    query = (rfield == row[pkey])
                    # Ignore RESTRICTs => reject anyway
                    if rfield.ondelete in ("CASCADE", "RESTRICT"):
                        rresource = define_resource(tn, filter=query, unapproved=True)
                        rresource.reject(cascade=True)
                        if rresource.error:
                            break
                    elif rfield.ondelete == "SET NULL":
                        try:
                            db(query).update(**{fn:None})
                        except:
                            self.error = INTEGRITY_ERROR
                            break
                    elif rfield.ondelete == "SET DEFAULT":
                        try:
                            db(query).update(**{fn:rfield.default})
                        except:
                            self.error = INTEGRITY_ERROR
                            break

                # Remove super-entity links
                if not self.error and not delete_super(table, row):
                    self.error = INTEGRITY_ERROR

                if self.error:
                    # Could not reject this row => roll back and abort
                    db.rollback()
                    raise RuntimeError("Reject failed for %s.%s" %
                                       (tablename, row[table._id]))
                else:
                    # Pull back prior error status
                    self.error = error
                    error = None

                    # On-reject hook
                    if onreject:
                        callback(onreject, row, tablename=tablename)

                    # Park foreign keys (so that the record can be restored)
                    fields = {"deleted": True}
                    if "deleted_fk" in table:
                        record = table[row[pkey]]
                        fk = {}
                        for f in table.fields:
                            if record[f] is not None and \
                               s3_has_foreign_key(table[f]):
                                fk[f] = record[f]
                                fields[f] = None
                            else:
                                continue
                        if fk:
                            fields.update(deleted_fk=json.dumps(fk))

                    # Update the row, finally
                    db(table._id == row[pkey]).update(**fields)

                    # Clear session
                    if s3_get_last_record_id(tablename) == row[pkey]:
                        s3_remove_last_record_id(tablename)

                    # On-delete hook
                    if ondelete:
                        callback(ondelete, row, tablename=tablename)

        else:
            # Hard delete
            for row in rows:

                # On-delete-cascade
                if ondelete_cascade:
                    callback(ondelete_cascade, row, tablename=tablename)

                # On-reject
                if onreject:
                    callback(onreject, row, tablename=tablename)

                try:
                    del table[row[pkey]]
                except:
                    # Row is not deletable
                    self.error = INTEGRITY_ERROR
                    db.rollback()
                    raise
                else:
                    # Clear session
                    if s3_get_last_record_id(tablename) == row[pkey]:
                        s3_remove_last_record_id(tablename)

                    # Delete super-entity
                    delete_super(table, row)

                    # On-delete
                    if ondelete:
                        callback(ondelete, row, tablename=tablename)

        return True
867 868 # -------------------------------------------------------------------------
869 - def merge(self, 870 original_id, 871 duplicate_id, 872 replace=None, 873 update=None, 874 main=True):
875 """ Merge two records, see also S3RecordMerger.merge """ 876 877 from s3merge import S3RecordMerger 878 return S3RecordMerger(self).merge(original_id, 879 duplicate_id, 880 replace=replace, 881 update=update, 882 main=main)
883 884 # ------------------------------------------------------------------------- 885 # Exports 886 # -------------------------------------------------------------------------
    def datatable(self,
                  fields=None,
                  start=0,
                  limit=None,
                  left=None,
                  orderby=None,
                  distinct=False,
                  ):
        """
        Generate a data table of this resource

        @param fields: list of fields to include (field selector strings)
        @param start: index of the first record to include
        @param limit: maximum number of records to include
        @param left: additional left joins for DB query
        @param orderby: orderby for DB query
        @param distinct: distinct-flag for DB query

        @return: tuple (S3DataTable, numrows), where numrows represents
                 the total number of rows in the table that match the query
        """

        # Choose fields
        if fields is None:
            fields = [f.name for f in self.readable_fields()]
        selectors = list(fields)

        table = self.table

        # Automatically include the record ID
        table_id = table._id
        pkey = table_id.name
        if pkey not in selectors:
            # NOTE(review): this also mutates the caller's fields list,
            # not only the local selectors copy — confirm whether any
            # caller relies on this side effect
            fields.insert(0, pkey)
            selectors.insert(0, pkey)

        # Skip representation of IDs in data tables
        id_repr = table_id.represent
        table_id.represent = None

        # Extract the data
        data = self.select(selectors,
                           start = start,
                           limit = limit,
                           orderby = orderby,
                           left = left,
                           distinct = distinct,
                           count = True,
                           getids = False,
                           represent = True,
                           )

        rows = data.rows

        # Restore ID representation
        table_id.represent = id_repr

        # Empty table - or just no match?
        # (distinguishes "no records at all" from "filter matches nothing")
        empty = False
        if not rows:
            DELETED = current.xml.DELETED
            if DELETED in table:
                query = (table[DELETED] != True)
            else:
                query = (table_id > 0)
            row = current.db(query).select(table_id, limitby=(0, 1)).first()
            if not row:
                empty = True

        # Generate the data table
        rfields = data.rfields
        dt = S3DataTable(rfields, rows, orderby=orderby, empty=empty)

        return dt, data.numrows
961 962 # -------------------------------------------------------------------------
    def datalist(self,
                 fields=None,
                 start=0,
                 limit=None,
                 left=None,
                 orderby=None,
                 distinct=False,
                 list_id=None,
                 layout=None):
        """
        Generate a data list of this resource

        @param fields: list of fields to include (field selector strings)
        @param start: index of the first record to include
        @param limit: maximum number of records to include
        @param left: additional left joins for DB query
        @param orderby: orderby for DB query
        @param distinct: distinct-flag for DB query
        @param list_id: the list identifier
        @param layout: custom renderer function (see S3DataList.render)

        @return: tuple (S3DataList, numrows), where numrows represents
                 the total number of rows in the table that match the query
        """

        # Choose fields
        if fields is None:
            fields = [f.name for f in self.readable_fields()]
        selectors = list(fields)

        table = self.table

        # Automatically include the record ID
        # (inserted into both lists: fields is passed on to S3DataList)
        pkey = table._id.name
        if pkey not in selectors:
            fields.insert(0, pkey)
            selectors.insert(0, pkey)

        # Extract the data
        data = self.select(selectors,
                           start = start,
                           limit = limit,
                           orderby = orderby,
                           left = left,
                           distinct = distinct,
                           count = True,
                           getids = False,
                           raw_data = True,
                           represent = True,
                           )

        # Generate the data list
        numrows = data.numrows
        dl = S3DataList(self,
                        fields,
                        data.rows,
                        list_id = list_id,
                        start = start,
                        limit = limit,
                        total = numrows,
                        layout = layout,
                        )

        return dl, numrows
1027 1028 # -------------------------------------------------------------------------
1029 - def json(self, 1030 fields=None, 1031 start=0, 1032 limit=None, 1033 left=None, 1034 distinct=False, 1035 orderby=None):
1036 """ 1037 Export a JSON representation of the resource. 1038 1039 @param fields: list of field selector strings 1040 @param start: index of the first record 1041 @param limit: maximum number of records 1042 @param left: list of (additional) left joins 1043 @param distinct: select only distinct rows 1044 @param orderby: Orderby-expression for the query 1045 1046 @return: the JSON (as string), representing a list of 1047 dicts with {"tablename.fieldname":"value"} 1048 """ 1049 1050 data = self.select(fields=fields, 1051 start=start, 1052 limit=limit, 1053 orderby=orderby, 1054 left=left, 1055 distinct=distinct)["rows"] 1056 1057 return json.dumps(data)
1058 1059 # ------------------------------------------------------------------------- 1060 # Data Object API 1061 # -------------------------------------------------------------------------
    def load(self,
             fields=None,
             skip=None,
             start=None,
             limit=None,
             orderby=None,
             virtual=True,
             cacheable=False):
        """
        Loads records from the resource, applying the current filters,
        and stores them in the instance.

        @param fields: list of field names to include
        @param skip: list of field names to skip
        @param start: the index of the first record to load
        @param limit: the maximum number of records to load
        @param orderby: orderby-expression for the query
        @param virtual: whether to load virtual fields or not
        @param cacheable: don't define Row actions like update_record
                          or delete_record (faster, and the record can
                          be cached)

        @return: the records as list of Rows

        NOTE(review): cacheable is not read anywhere in this method
        body — confirm whether it is still meant to be passed through
        """

        table = self.table
        tablename = self.tablename

        UID = current.xml.UID
        load_uids = hasattr(table, UID)

        if not skip:
            skip = ()

        if fields or skip:
            # Cache the meta-field names per request
            s3 = current.response.s3
            if "all_meta_fields" in s3:
                meta_fields = s3.all_meta_fields
            else:
                meta_fields = s3.all_meta_fields = s3_all_meta_field_names()
            s3db = current.s3db

        # Field selection
        qfields = ([table._id.name, UID])
        append = qfields.append
        for f in table.fields:

            if f in ("wkt", "the_geom"):
                if tablename == "gis_location":
                    if f == "the_geom":
                        # Filter out bulky Polygons
                        continue
                    else:
                        fmt = current.auth.permission.format
                        if fmt == "cap":
                            # Include WKT
                            pass
                        elif fmt == "xml" and current.deployment_settings.get_gis_xml_wkt():
                            # Include WKT
                            pass
                        else:
                            # Filter out bulky Polygons
                            continue
                elif tablename.startswith("gis_layer_shapefile_"):
                    # Filter out bulky Polygons
                    continue

            if fields or skip:

                # Must include all meta-fields
                if f in meta_fields:
                    append(f)
                    continue

                # Must include the fkey if component
                if self.parent and not self.link and f == self.fkey:
                    append(f)
                    continue

                # Must include all super-keys
                ktablename = s3_get_foreign_key(table[f], m2m=False)[0]
                if ktablename:
                    ktable = s3db.table(ktablename)
                    if ktable and hasattr(ktable, "instance_type"):
                        append(f)
                        continue

            if f in skip:
                continue
            if not fields or f in fields:
                qfields.append(f)

        # De-duplicate, and drop selectors without a matching table field
        fields = list(set(fn for fn in qfields if hasattr(table, fn)))

        if self._rows is not None:
            self.clear()

        pagination = limit is not None or start

        rfilter = self.rfilter
        multiple = rfilter.multiple if rfilter is not None else True
        if not multiple and self.parent and self.parent.count() == 1:
            # Single-record component of a single-record master
            start = 0
            limit = 1

        rows = self.select(fields,
                           start=start,
                           limit=limit,
                           orderby=orderby,
                           virtual=virtual,
                           as_rows=True)

        ids = self._ids = []
        new_id = ids.append

        self._uids = []
        self._rows = []

        if rows:
            new_uid = self._uids.append
            new_row = self._rows.append
            pkey = table._id.name
            # Store each row once (joins can produce duplicates)
            for row in rows:
                if hasattr(row, tablename):
                    _row = ogetattr(row, tablename)
                    if type(_row) is Row:
                        row = _row
                record_id = ogetattr(row, pkey)
                if record_id not in ids:
                    new_id(record_id)
                    new_row(row)
                    if load_uids:
                        new_uid(ogetattr(row, UID))

        # If this is an unlimited load, or the first page with no
        # rows, then the result length is equal to the total number
        # of matching records => store length for subsequent count()s
        length = len(self._rows)
        if not pagination or not start and not length:
            self._length = length

        return self._rows
1205 1206 # -------------------------------------------------------------------------
1207 - def clear(self):
1208 """ Removes the records currently stored in this instance """ 1209 1210 self._rows = None 1211 self._rowindex = None 1212 self._length = None 1213 self._ids = None 1214 self._uids = None 1215 self.files = Storage() 1216 1217 for component in self.components.loaded.values(): 1218 component.clear()
1219 1220 # -------------------------------------------------------------------------
1221 - def records(self, fields=None):
1222 """ 1223 Get the current set as Rows instance 1224 1225 @param fields: the fields to include (list of Fields) 1226 """ 1227 1228 if fields is None: 1229 if self.tablename == "gis_location": 1230 fields = [f for f in self.table 1231 if f.name not in ("wkt", "the_geom")] 1232 else: 1233 fields = [f for f in self.table] 1234 1235 if self._rows is None: 1236 return Rows(current.db) 1237 else: 1238 colnames = map(str, fields) 1239 return Rows(current.db, self._rows, colnames=colnames)
1240 1241 # -------------------------------------------------------------------------
1242 - def __getitem__(self, key):
1243 """ 1244 Find a record currently stored in this instance by its record ID 1245 1246 @param key: the record ID 1247 @return: a Row 1248 1249 @raises: IndexError if the record is not currently loaded 1250 """ 1251 1252 index = self._rowindex 1253 if index is None: 1254 _id = self._id.name 1255 rows = self._rows 1256 if rows: 1257 index = Storage([(str(row[_id]), row) for row in rows]) 1258 else: 1259 index = Storage() 1260 self._rowindex = index 1261 key = str(key) 1262 if key in index: 1263 return index[key] 1264 raise IndexError
1265 1266 # -------------------------------------------------------------------------
1267 - def __iter__(self):
1268 """ 1269 Iterate over the records currently stored in this instance 1270 """ 1271 1272 if self._rows is None: 1273 self.load() 1274 rows = self._rows 1275 for i in xrange(len(rows)): 1276 yield rows[i] 1277 return
1278 1279 # -------------------------------------------------------------------------
    def get(self, key, component=None, link=None):
        """
            Get component records for a record currently stored in this
            instance.

            @param key: the record ID
            @param component: the name of the component
            @param link: the name of the link table

            @return: a Row (if component is None) or a list of rows

            @raises: KeyError if the master record is not found
            @raises: AttributeError if the component or link is undefined
        """

        if not key:
            raise KeyError("Record not found")
        if self._rows is None:
            self.load()
        try:
            master = self[key]
        except IndexError:
            raise KeyError("Record not found")

        if not component and not link:
            # Neither component nor link requested => master record itself
            return master
        elif link:
            # Resolve the link table resource, either directly by name
            # or via the component alias
            if link in self.links:
                c = self.links[link]
            else:
                calias = current.s3db.get_alias(self.tablename, link)
                if calias:
                    c = self.components[calias].link
                else:
                    raise AttributeError("Undefined link %s" % link)
        else:
            try:
                c = self.components[component]
            except KeyError:
                raise AttributeError("Undefined component %s" % component)

        rows = c._rows
        if rows is None:
            rows = c.load()
        if not rows:
            return []
        pkey, fkey = c.pkey, c.fkey
        if pkey in master:
            master_id = master[pkey]
            if c.link:
                # Many-to-many: select component records via the link
                # table entries pointing at this master record
                lkey, rkey = c.lkey, c.rkey
                lids = [r[rkey] for r in c.link if master_id == r[lkey]]
                rows = [record for record in rows if record[fkey] in lids]
            else:
                # Direct component: match the foreign key
                try:
                    rows = [record for record in rows if master_id == record[fkey]]
                except AttributeError:
                    # Most likely need to tweak static/formats/geoson/export.xsl
                    raise AttributeError("Component %s records are missing fkey %s" % (component, fkey))
        else:
            # Master record does not expose the pkey => no match possible
            rows = []
        return rows
1339 1340 # -------------------------------------------------------------------------
1341 - def get_id(self):
1342 """ Get the IDs of all records currently stored in this instance """ 1343 1344 if self._ids is None: 1345 self.__load_ids() 1346 1347 if not self._ids: 1348 return None 1349 elif len(self._ids) == 1: 1350 return self._ids[0] 1351 else: 1352 return self._ids
1353 1354 # -------------------------------------------------------------------------
1355 - def get_uid(self):
1356 """ Get the UUIDs of all records currently stored in this instance """ 1357 1358 if current.xml.UID not in self.table.fields: 1359 return None 1360 if self._ids is None: 1361 self.__load_ids() 1362 1363 if not self._uids: 1364 return None 1365 elif len(self._uids) == 1: 1366 return self._uids[0] 1367 else: 1368 return self._uids
1369 1370 # -------------------------------------------------------------------------
1371 - def __len__(self):
1372 """ 1373 The number of currently loaded rows 1374 """ 1375 1376 if self._rows is not None: 1377 return len(self._rows) 1378 else: 1379 return 0
1380 1381 # -------------------------------------------------------------------------
    def __load_ids(self):
        """ Loads the IDs/UIDs of all records matching the current filter """

        table = self.table
        UID = current.xml.UID

        pkey = table._id.name

        # Select UIDs only if the table has a UID field
        if UID in table.fields:
            has_uid = True
            fields = (pkey, UID)
        else:
            has_uid = False
            fields = (pkey, )

        # Single-component of a single master record => at most one match
        rfilter = self.rfilter
        multiple = rfilter.multiple if rfilter is not None else True
        if not multiple and self.parent and self.parent.count() == 1:
            start = 0
            limit = 1
        else:
            start = limit = None

        rows = self.select(fields,
                           start=start,
                           limit=limit)["rows"]

        if rows:
            # Rows are keyed by the full column name ("tablename.field")
            ID = str(table._id)
            self._ids = [row[ID] for row in rows]
            if has_uid:
                uid = str(table[UID])
                self._uids = [row[uid] for row in rows]
        else:
            self._ids = []

        return
1419 1420 # ------------------------------------------------------------------------- 1421 # Representation 1422 # -------------------------------------------------------------------------
1423 - def __repr__(self):
1424 """ 1425 String representation of this resource 1426 """ 1427 1428 pkey = self.table._id.name 1429 1430 if self._rows: 1431 ids = [r[pkey] for r in self] 1432 return "<S3Resource %s %s>" % (self.tablename, ids) 1433 else: 1434 return "<S3Resource %s>" % self.tablename
1435 1436 # -------------------------------------------------------------------------
1437 - def __contains__(self, item):
1438 """ 1439 Tests whether this resource contains a (real) field. 1440 1441 @param item: the field selector or Field instance 1442 """ 1443 1444 fn = str(item) 1445 if "." in fn: 1446 tn, fn = fn.split(".", 1) 1447 if tn == self.tablename: 1448 item = fn 1449 try: 1450 rf = self.resolve_selector(str(item)) 1451 except (SyntaxError, AttributeError): 1452 return 0 1453 if rf.field is not None: 1454 return 1 1455 else: 1456 return 0
1457 1458 # -------------------------------------------------------------------------
    def __nonzero__(self):
        """
            Boolean test of this resource

            NB: always returns True (a bound instance is never None), i.e.
                a resource is truthy regardless of whether any records are
                loaded - use len()/count() to check for records.
                Py2-only hook; Py3 would require __bool__.
        """

        return self is not None
1465 1466 # ------------------------------------------------------------------------- 1467 # XML Export 1468 # -------------------------------------------------------------------------
    def export_xml(self,
                   start=None,
                   limit=None,
                   msince=None,
                   fields=None,
                   dereference=True,
                   maxdepth=MAXDEPTH,
                   mcomponents=DEFAULT,
                   rcomponents=None,
                   references=None,
                   mdata=False,
                   stylesheet=None,
                   as_tree=False,
                   as_json=False,
                   maxbounds=False,
                   filters=None,
                   pretty_print=False,
                   location_data=None,
                   map_data=None,
                   target=None,
                   **args):
        """
            Export this resource as S3XML

            @param start: index of the first record to export (slicing)
            @param limit: maximum number of records to export (slicing)

            @param msince: export only records which have been modified
                           after this datetime

            @param fields: data fields to include (default: all)

            @param dereference: include referenced resources
            @param maxdepth: maximum depth for reference exports

            @param mcomponents: components of the master resource to
                                include (list of aliases), empty list
                                for all available components
            @param rcomponents: components of referenced resources to
                                include (list of "tablename:alias")

            @param references: foreign keys to include (default: all)
            @param mdata: mobile data export
                          (=>reduced field set, lookup-only option)
            @param stylesheet: path to the XSLT stylesheet (if required)
            @param as_tree: return the ElementTree (do not convert into string)
            @param as_json: represent the XML tree as JSON
            @param maxbounds: include lat/lon boundaries in the top
                              level element (off by default)
            @param filters: additional URL filters (Sync), as dict
                            {tablename: {url_var: string}}
            @param pretty_print: insert newlines/indentation in the output
            @param location_data: dictionary of location data which has been
                                  looked-up in bulk ready for xml.gis_encode()
            @param map_data: dictionary of options which can be read by the map
            @param target: alias of component targetted (or None to target master resource)
            @param args: dict of arguments to pass to the XSLT stylesheet

            @return: the ElementTree (as_tree), a JSON string (as_json),
                     or the XML as string (default); None if nothing
                     was exported
        """

        xml = current.xml

        output = None
        args = Storage(args)

        xmlformat = S3XMLFormat(stylesheet) if stylesheet else None

        if mcomponents is DEFAULT:
            # Default: all available components (empty list)
            mcomponents = []

        # Export as element tree
        tree = self.export_tree(start = start,
                                limit = limit,
                                msince = msince,
                                fields = fields,
                                dereference = dereference,
                                maxdepth = maxdepth,
                                mcomponents = mcomponents,
                                rcomponents = rcomponents,
                                references = references,
                                filters = filters,
                                mdata = mdata,
                                maxbounds = maxbounds,
                                xmlformat = xmlformat,
                                location_data = location_data,
                                map_data = map_data,
                                target = target,
                                )

        # XSLT transformation
        if tree and xmlformat is not None:
            import uuid
            args.update(domain = xml.domain,
                        base_url = current.response.s3.base_url,
                        prefix = self.prefix,
                        name = self.name,
                        utcnow = s3_format_datetime(),
                        msguid = uuid.uuid4().urn,
                        )
            tree = xmlformat.transform(tree, **args)

        # Convert into the requested format
        # NB Content-Type headers are to be set by caller
        if tree:
            if as_tree:
                output = tree
            elif as_json:
                output = xml.tree2json(tree, pretty_print=pretty_print)
            else:
                output = xml.tostring(tree, pretty_print=pretty_print)

        return output
1580 1581 # -------------------------------------------------------------------------
    def export_tree(self,
                    start=0,
                    limit=None,
                    msince=None,
                    fields=None,
                    references=None,
                    dereference=True,
                    maxdepth=MAXDEPTH,
                    mcomponents=None,
                    rcomponents=None,
                    filters=None,
                    mdata=False,
                    maxbounds=False,
                    xmlformat=None,
                    location_data=None,
                    map_data=None,
                    target=None,
                    ):
        """
            Export the resource as element tree

            @param start: index of the first record to export
            @param limit: maximum number of records to export
            @param msince: minimum modification date of the records
            @param fields: data fields to include (default: all)
            @param references: foreign keys to include (default: all)
            @param dereference: also export referenced records
            @param maxdepth: maximum depth for reference exports
            @param mcomponents: components of the master resource to
                                include (list of tablenames), empty list
                                for all
            @param rcomponents: components of referenced resources to
                                include (list of tablenames), empty list
                                for all
            @param filters: additional URL filters (Sync), as dict
                            {tablename: {url_var: string}}
            @param mdata: export is intended for mobile offline client
                          (=>reduced field set, lookup-only option),
                          overrides fields/references
            @param maxbounds: include lat/lon boundaries in the top
                              level element (off by default)
            @param xmlformat: S3XMLFormat instance, used to determine
                              which fields to include/exclude per table
            @param location_data: dictionary of location data which has been
                                  looked-up in bulk ready for xml.gis_encode()
            @param target: alias of component targetted (or None to target master resource)
            @param map_data: dictionary of options which can be read by the map

            @return: the element tree
        """

        xml = current.xml

        # Base URL
        if xml.show_urls:
            base_url = current.response.s3.base_url
        else:
            base_url = None

        # Initialize export metadata
        self.muntil = None
        self.results = 0

        # Use lazy representations
        lazy = []
        current.auth_user_represent = S3Represent(lookup = "auth_user",
                                                  fields = ["email"],
                                                  )

        # Filter for MCI >= 0 (setting)
        table = self.table
        if xml.filter_mci and "mci" in table.fields:
            mci_filter = (table.mci >= 0)
            self.add_filter(mci_filter)

        # Sync filters
        tablename = self.tablename
        if filters and tablename in filters:
            queries = S3URLQuery.parse(self, filters[tablename])
            add_filter = self.add_filter
            # NOTE(review): list comprehension used for its side effects only
            [add_filter(q) for a in queries for q in queries[a]]

        # Order by modified_on if msince is requested
        if msince is not None and "modified_on" in table.fields:
            orderby = "%s ASC" % table["modified_on"]
        else:
            orderby = None

        # Construct the record base URL
        prefix = self.prefix
        name = self.name
        if base_url:
            url = "%s/%s/%s" % (base_url, prefix, name)
        else:
            url = "/%s/%s" % (prefix, name)

        # Mobile data settings
        llrepr = None
        if mdata:
            from s3mobile import S3MobileSchema
            ms = S3MobileSchema(self)
            if ms.lookup_only:
                # Override fields/references (only meta fields)
                llrepr = ms.llrepr
                fields, references = [], []
            else:
                # Determine fields/references from mobile schema
                fields = references = [f.name for f in ms.fields()]

        # Determine which components to export for this table
        components_to_export = self.components_to_export
        if llrepr is None:
            components = components_to_export(self.tablename, mcomponents)
        else:
            # Lookup-only export => no components
            components = None

        # Separate references and data fields
        (rfields, dfields) = self.split_fields(data = fields,
                                               references = references,
                                               )

        # Fields to load
        if xmlformat:
            include, exclude = xmlformat.get_fields(target or tablename)
        else:
            include, exclude = None, None

        # Load the master records
        self.load(fields = include,
                  skip = exclude,
                  start = start,
                  limit = limit,
                  orderby = orderby,
                  virtual = False,
                  cacheable = True,
                  )

        # Total number of results
        results = self.count()

        # Get location data for records (if not passed-in from caller)
        if not target and not location_data:
            location_data = current.gis.get_location_data(self, count=results) or {}

        # Create root element
        root = etree.Element(xml.TAG.root)

        # Add map data to root element
        if map_data:
            # Gets loaded before re-dumping, so no need to compact
            # or avoid double-encoding
            # NB Ensure we don't double-encode unicode!
            #root.set("map", json.dumps(map_data, separators=SEPARATORS,
            #                           ensure_ascii=False))
            root.set("map", json.dumps(map_data))

        # Initialize export map (=already exported records)
        export_map = Storage()
        get_exported = export_map.get

        # Initialize reference lists
        reference_map = []
        all_references = []

        # Is this master resource?
        master = not target

        # Export the master records
        export_resource = self.__export_resource
        for record in self._rows:
            element = export_resource(record,
                                      rfields = rfields,
                                      dfields = dfields,
                                      parent = root,
                                      base_url = url,
                                      reference_map = reference_map,
                                      export_map = export_map,
                                      lazy = lazy,
                                      components = components,
                                      filters = filters,
                                      master = master,
                                      target = target,
                                      msince = msince,
                                      llrepr = llrepr,
                                      location_data = location_data,
                                      xmlformat = xmlformat,
                                      )
            if element is None:
                # Record was suppressed (e.g. msince) => reduce count
                results -= 1

        if reference_map:
            all_references.extend(reference_map)

        # Determine components to export for each referenced table
        ref_components = {}
        if rcomponents:
            for key in rcomponents:
                if ":" in key:
                    tn, alias = key.rsplit(":", 1)
                    if tn in ref_components:
                        ref_components[tn].append(alias)
                    else:
                        ref_components[tn] = [alias]

        # Iteratively resolve all references, breadth-first up to maxdepth
        define_resource = current.s3db.resource
        REF = xml.ATTRIBUTE.ref
        depth = maxdepth if dereference else 0
        while reference_map and depth:

            depth -= 1
            load_map = {}

            for ref in reference_map:

                if "table" in ref and "id" in ref:

                    # Get tablename and IDs
                    tname = ref["table"]
                    ids = ref["id"]
                    if not isinstance(ids, list):
                        ids = [ids]

                    # Exclude records which are already in the tree
                    exported = get_exported(tname, [])
                    ids = [x for x in ids if x not in exported]
                    if not ids:
                        continue

                    # Append the new ids to load_map[tname]
                    if tname in load_map:
                        ids = [x for x in ids if x not in load_map[tname]]
                        load_map[tname] += ids
                    else:
                        load_map[tname] = ids

            # Collect all references from the referenced records
            reference_map = []

            for tablename in load_map:

                # Get the list of record IDs to export for this table
                load_list = load_map[tablename]

                # Sync filters
                if filters:
                    filter_vars = filters.get(tablename)
                else:
                    filter_vars = None

                # Determine which components to export for this table
                components = components_to_export(
                                tablename,
                                ref_components.get(tablename),
                                )

                # Instantiate the referenced resource
                prefix, name = tablename.split("_", 1)
                rresource = define_resource(tablename,
                                            components = components,
                                            id = load_list,
                                            vars = filter_vars,
                                            )

                # Fields and references to export
                # @todo: applying the same fields/references as for master
                #        means no differentiation by table => not the best
                #        solution, but what else is without adding complexity?
                fields_, references_ = fields, references

                # Mobile data settings
                if mdata:
                    ms = S3MobileSchema(rresource)
                    if ms.lookup_only:
                        # Override fields/references (only meta fields)
                        fields_, references_ = [], []
                        components = None
                        llrepr = ms.llrepr
                    else:
                        # Determine fields/references from mobile schema
                        fields_ = references_ = [f.name for f in ms.fields()]

                # Construct the URL for the resource
                # @todo: don't do this for link tables
                table = rresource.table
                if base_url:
                    url = "%s/%s/%s" % (base_url, prefix, name)
                else:
                    url = "/%s/%s" % (prefix, name)

                # Separate references and data fields
                rfields, dfields = rresource.split_fields(
                                        data = fields_,
                                        references = references_,
                                        )

                # Fields to load
                if xmlformat:
                    include, exclude = xmlformat.get_fields(rresource.tablename)
                else:
                    include, exclude = None, None

                # Load the records for the referenced resource
                rresource.load(fields = include,
                               skip = exclude,
                               limit = None,
                               virtual = False,
                               cacheable = True,
                               )

                # Export the referenced records
                export_resource = rresource.__export_resource
                for record in rresource:
                    element = export_resource(record,
                                              rfields = rfields,
                                              dfields = dfields,
                                              parent = root,
                                              base_url = url,
                                              reference_map = reference_map,
                                              export_map = export_map,
                                              components = components,
                                              lazy = lazy,
                                              filters = filters,
                                              master = False,
                                              target = target,
                                              llrepr = llrepr,
                                              location_data = location_data,
                                              xmlformat = xmlformat,
                                              )

                    # Mark as referenced element (for XSLT)
                    if element is not None:
                        element.set(REF, "True")

            # Extend the reference map with the references of
            # the referenced records
            if reference_map:
                all_references.extend(reference_map)

        # Render all pending lazy representations
        if lazy:
            for renderer, element, attr, f in lazy:
                renderer.render_node(element, attr, f)

        # Add Lat/Lon attributes to all location references
        if all_references:
            xml.latlon(all_references)

        # Complete the tree
        tree = xml.tree(None,
                        root = root,
                        domain = xml.domain,
                        url = base_url,
                        results = results,
                        start = start,
                        limit = limit,
                        maxbounds = maxbounds,
                        )

        # Store number of results
        self.results = results

        return tree
1942 1943 # -------------------------------------------------------------------------
    def __export_resource(self,
                          record,
                          rfields=None,
                          dfields=None,
                          parent=None,
                          base_url=None,
                          reference_map=None,
                          export_map=None,
                          lazy=None,
                          components=None,
                          filters=None,
                          msince=None,
                          master=True,
                          target=None,
                          llrepr=None,
                          location_data=None,
                          xmlformat=None,
                          ):
        """
            Add a <resource> to the element tree

            @param record: the record
            @param rfields: list of reference fields to export
            @param dfields: list of data fields to export
            @param parent: the parent element
            @param base_url: the base URL of the resource
            @param reference_map: the reference map of the request
            @param export_map: the export map of the request
            @param lazy: list collecting pending lazy representations
            @param components: list of components to include from referenced
                               resources (tablenames)
            @param filters: additional URL filters (Sync), as dict
                            {tablename: {url_var: string}}
            @param msince: the minimum update datetime for exported records
            @param master: True of this is the master resource
            @param target: alias of component targetted (or None to target master resource)
            @param llrepr: lookup list representation method
                           (suppresses component export if set)
            @param location_data: the location_data for GIS encoding
            @param xmlformat: S3XMLFormat, determines fields to load per table

            @return: the <resource> element, or None if the record was
                     suppressed (already exported, or older than msince
                     with no modified components)
        """

        pkey = self.table._id

        # Construct the record URL
        if base_url:
            record_url = "%s/%s" % (base_url, record[pkey])
        else:
            record_url = None

        xml = current.xml
        MTIME = xml.MTIME
        MCI = xml.MCI

        # Export the record
        # NOTE(review): "export" is re-bound per-component further down;
        #               this initial binding is unused
        export = self._export_record
        element, rmap = self._export_record(record,
                                            rfields = rfields,
                                            dfields = dfields,
                                            parent = parent,
                                            export_map = export_map,
                                            lazy = lazy,
                                            url = record_url,
                                            master = master,
                                            llrepr = llrepr,
                                            location_data = location_data
                                            )

        if element is None:
            # Record was already exported
            return None

        # Preliminary msince-check: older records are only kept if
        # any of their components was modified after msince (see below)
        add = True
        if msince:
            mtime = record.get(MTIME)
            if mtime and mtime < msince:
                add = False

        # Export components
        if components and llrepr is None:

            get_hierarchy_link = current.s3db.hierarchy_link

            resource_components = self.components
            for alias in components:

                component = resource_components.get(alias)
                if not component:
                    # Invalid alias
                    continue

                hierarchy_link = get_hierarchy_link(component.tablename)

                # For link-table components, export the link table rather
                # than the linked table itself
                if component.link is not None:
                    c = component.link
                    calias = None
                    lalias = c.alias
                else:
                    c = component
                    calias = c.alias
                    lalias = None

                # If msince is requested (sync), skip components
                # without modified_on timestamp:
                if msince and MTIME not in c.fields:
                    continue

                ctablename = c.tablename
                cpkey = c.table._id

                # Before loading the component: add filters
                if c._rows is None:

                    ctable = c.table
                    cfields = ctable.fields

                    # MCI filter
                    if xml.filter_mci and xml.MCI in cfields:
                        mci_filter = FS(MCI) >= 0
                        c.add_filter(mci_filter)

                    # Sync filters
                    if filters and ctablename in filters:
                        queries = S3URLQuery.parse(self, filters[ctablename])
                        [c.add_filter(q) for a in queries for q in queries[a]]

                    # Msince filter
                    if msince and (alias != hierarchy_link or add) and \
                       MTIME in cfields:
                        query = FS(MTIME) >= msince
                        c.add_filter(query)

                    # Load only records which have not been exported yet
                    if export_map:
                        export_list = export_map.get(ctablename)
                        if export_list:
                            query = ~(FS(cpkey.name).belongs(export_list))
                            c.add_filter(query)

                    # Fields to load
                    if xmlformat:
                        include, exclude = xmlformat.get_fields(ctablename)
                    else:
                        include, exclude = None, None

                    # Load the records
                    # NB this is only done once for all master records,
                    #    subset per master record is selected by self.get
                    c.load(fields = include,
                           skip = exclude,
                           limit = None,
                           virtual = False,
                           cacheable = True,
                           )

                # Find the component records for the current master record
                crecords = self.get(record[pkey],
                                    component = calias,
                                    link = lalias,
                                    )
                if not crecords:
                    continue
                else:
                    # Modified component found => keep the master record
                    # even if it failed the msince-check above
                    add = True

                # Separate references and data fields
                crfields, cdfields = c.split_fields(skip=[c.fkey])

                # Limit single-components to the first match
                # @todo: load() should limit this automatically:
                if not c.multiple and len(crecords):
                    crecords = [crecords[0]]

                # Get location data for the component
                if target == ctablename:
                    master = True
                    if not location_data:
                        count = c.count()
                        location_data = current.gis.get_location_data(c, count=count) or {}
                else:
                    master = False

                # Construct the component base URL
                if record_url:
                    cname = c.name if c.linked else c.alias
                    component_url = "%s/%s" % (record_url, cname)
                else:
                    component_url = None

                # Export the component records
                export = c._export_record
                map_record = c.__map_record
                for crecord in crecords:

                    # Construct the component record URL
                    # @todo: don't do this for link tables as they usually
                    #        don't have controllers of their own?
                    if component_url:
                        crecord_url = "%s/%s" % (component_url, crecord[cpkey])
                    else:
                        crecord_url = None

                    # Export the component record
                    crmap = export(crecord,
                                   rfields = crfields,
                                   dfields = cdfields,
                                   parent = element,
                                   lazy = lazy,
                                   url = crecord_url,
                                   master = master,
                                   location_data = location_data,
                                   )[1]

                    # Update "modified until" from component
                    if not self.muntil or \
                       c.muntil and c.muntil > self.muntil:
                        self.muntil = c.muntil

                    # Extend reference map and export map
                    map_record(crecord,
                               crmap,
                               reference_map,
                               export_map,
                               )

        if add:

            # Update reference_map and export_map
            self.__map_record(record, rmap, reference_map, export_map)

        elif parent is not None and element is not None:

            # msince-check failed and no modified components:
            # remove the element from the parent again
            try:
                parent.remove(element)
            except ValueError:
                pass
            element = None

        return element
2187 2188 # -------------------------------------------------------------------------
    def _export_record(self,
                       record,
                       rfields=None,
                       dfields=None,
                       parent=None,
                       export_map=None,
                       lazy=None,
                       url=None,
                       master=True,
                       llrepr=None,
                       location_data=None):
        """
            Exports a single record to the element tree.

            @param record: the record
            @param rfields: list of foreign key fields to export
            @param dfields: list of data fields to export
            @param parent: the parent element
            @param export_map: the export map of the current request
            @param lazy: list collecting pending lazy representations
            @param url: URL of the record
            @param master: True if this is a record in the master resource
            @param llrepr: lookup list representation method
            @param location_data: the location_data for GIS encoding

            @return: tuple (element, rmap); (None, None) if the record
                     was already exported
        """

        xml = current.xml

        tablename = self.tablename
        table = self.table

        # Replace user ID representation by lazy method
        # (restored again at the end of this method)
        auth_user_represent = Storage()
        if hasattr(current, "auth_user_represent"):
            user_ids = ("created_by", "modified_by", "owned_by_user")
            for fn in user_ids:
                if hasattr(table, fn):
                    f = ogetattr(table, fn)
                    auth_user_represent[fn] = f.represent
                    f.represent = current.auth_user_represent

        # Do not export the record if it already is in the export map
        if export_map and tablename in export_map and \
           record[table._id] in export_map[tablename]:
            return (None, None)

        # Audit read
        current.audit("read", self.prefix, self.name,
                      record = record[table._id],
                      representation = "xml",
                      )

        # Map the references of this record
        rmap = xml.rmap(table, record, rfields)

        # Add alias-attribute if distinct from resource name
        linked = self.linked
        if self.parent is not None and linked is not None:
            alias = linked.alias
            name = linked.name
        else:
            alias = self.alias
            name = self.name
        if alias == name:
            alias = None

        # Generate the <resource> element
        postprocess = self.get_config("xml_post_render")
        element = xml.resource(parent,
                               table,
                               record,
                               fields = dfields,
                               alias = alias,
                               lazy = lazy,
                               url = url,
                               llrepr = llrepr,
                               postprocess = postprocess,
                               )

        # Add the <reference> elements
        xml.add_references(element,
                           rmap,
                           show_ids = xml.show_ids,
                           lazy = lazy,
                           )

        if master:
            # GIS-encode the element
            # @ToDo: Do this 1/tree not 1/record
            xml.gis_encode(self, record, element, location_data=location_data)

        # Restore normal user_id representations
        for fn in auth_user_represent:
            ogetattr(table, fn).represent = auth_user_represent[fn]

        # Update muntil date if record is younger
        # NOTE(review): precedence makes this
        #               (muntil and not self.muntil) or (muntil > self.muntil)
        #               - relies on Py2 None-comparison when muntil is None;
        #               confirm intent before touching
        if "modified_on" in record:
            muntil = record.modified_on
            if muntil and not self.muntil or muntil > self.muntil:
                self.muntil = muntil

        return (element, rmap)
2290 2291 # ------------------------------------------------------------------------- 2292 @staticmethod
def components_to_export(tablename, aliases):
    """
    Get a list of aliases of components that shall be exported
    together with the master resource

    @param tablename: the tablename of the master resource
    @param aliases: the list of required component aliases, or an
                    empty list for all defined components, or None
                    for no components at all

    @returns: a list of component aliases, with the hierarchy
              parent link (if any) always last
    """

    s3db = current.s3db

    # aliases=None means "no components"; an empty list means
    # "all components" (get_components interprets names=None as all)
    if aliases is not None:
        names = aliases if aliases else None
        hooks = s3db.get_components(tablename, names=names)
    else:
        hooks = {}
    hierarchy_link = s3db.hierarchy_link(tablename)

    # Split the components into filtered and unfiltered ones,
    # keyed by their join signature so duplicates can be detected
    filtered, unfiltered = {}, {}
    for alias, hook in hooks.items():
        key = (hook.tablename,
               hook.pkey,
               hook.lkey,
               hook.rkey,
               hook.fkey,
               )
        if hook.filterby:
            filtered[alias] = key
        else:
            unfiltered[key] = alias

    components = []

    for alias, key in filtered.items():
        if key in unfiltered:
            # Skip the filtered subset if the unfiltered set will be exported
            if alias == hierarchy_link:
                # The unfiltered alias takes over the role of
                # hierarchy parent link
                hierarchy_link = unfiltered[key]
            continue
        if alias != hierarchy_link:
            components.append(alias)

    for key, alias in unfiltered.items():
        if alias != hierarchy_link:
            components.append(alias)

    # Hierarchy parent link must be last in the list
    if hierarchy_link:
        components.append(hierarchy_link)

    return components
2346 2347 # -------------------------------------------------------------------------
def __map_record(self, record, rmap, reference_map, export_map):
    """
    Register an exported record: append its references to the
    reference map and its record ID to the export map

    @param record: the record
    @param rmap: the reference map of the record
    @param reference_map: the reference map of the request
    @param export_map: the export map of the request
    """

    record_id = record[self.table._id]

    if rmap:
        reference_map.extend(rmap)

    # Record the ID under this tablename (creating the list on first use)
    export_map.setdefault(self.tablename, []).append(record_id)
2369 2370 # ------------------------------------------------------------------------- 2371 # XML Import 2372 # -------------------------------------------------------------------------
def import_xml(self, source,
               files=None,
               id=None,
               format="xml",
               stylesheet=None,
               extra_data=None,
               ignore_errors=False,
               job_id=None,
               commit_job=True,
               delete_job=False,
               strategy=None,
               update_policy=None,
               conflict_policy=None,
               last_sync=None,
               onconflict=None,
               **args):
    """
    XML Importer

    @param source: the data source, accepts source=xxx, source=[xxx, yyy, zzz] or
                   source=[(resourcename1, xxx), (resourcename2, yyy)], where the
                   xxx has to be either an ElementTree or a file-like object
    @param files: attached files (None to read in the HTTP request)
    @param id: ID (or list of IDs) of the record(s) to update (performs only update)
    @param format: type of source = "xml", "json", "csv" or "xls"
    @param stylesheet: stylesheet to use for transformation
    @param extra_data: for CSV imports, dict of extra cols to add to each row
    @param ignore_errors: skip invalid records silently
    @param job_id: resume from previous import job_id
    @param commit_job: commit the job to the database
    @param delete_job: delete the import job from the queue
    @param strategy: tuple of allowed import methods (create/update/delete)
    @param update_policy: policy for updates (sync)
    @param conflict_policy: policy for conflict resolution (sync)
    @param last_sync: last synchronization datetime (sync)
    @param onconflict: callback hook for conflict resolution (sync)
    @param args: parameters to pass to the transformation stylesheet

    @returns: a JSON message (str)
    @raises IOError: if the user lacks create+update permission
    @raises SyntaxError: if the source cannot be parsed/transformed
    """

    # Check permission for the resource
    has_permission = current.auth.s3_has_permission
    authorised = has_permission("create", self.table) and \
                 has_permission("update", self.table)
    if not authorised:
        raise IOError("Insufficient permissions")

    xml = current.xml
    tree = None
    self.job = None

    if not job_id:

        # Additional stylesheet parameters
        args.update(domain=xml.domain,
                    base_url=current.response.s3.base_url,
                    prefix=self.prefix,
                    name=self.name,
                    utcnow=s3_format_datetime())

        # Build import tree
        if not isinstance(source, (list, tuple)):
            source = [source]
        for item in source:
            if isinstance(item, (list, tuple)):
                resourcename, s = item[:2]
            else:
                resourcename, s = None, item
            if isinstance(s, etree._ElementTree):
                t = s
            elif format == "json":
                # Fix: json2tree accepts both strings and file-like
                # objects directly; the previous code created an
                # unused StringIO here and, worse, rebound the name
                # "source" (the list being iterated) in the process
                t = xml.json2tree(s)
            elif format == "csv":
                t = xml.csv2tree(s,
                                 resourcename=resourcename,
                                 extra_data=extra_data)
            elif format == "xls":
                t = xml.xls2tree(s,
                                 resourcename=resourcename,
                                 extra_data=extra_data)
            else:
                t = xml.parse(s)
            if not t:
                if xml.error:
                    raise SyntaxError(xml.error)
                else:
                    raise SyntaxError("Invalid source")

            if stylesheet is not None:
                t = xml.transform(t, stylesheet, **args)
                if not t:
                    raise SyntaxError(xml.error)

            # Merge all source trees into a single import tree
            if not tree:
                tree = t.getroot()
            else:
                tree.extend(list(t.getroot()))

        if files is not None and isinstance(files, dict):
            self.files = Storage(files)

    else:
        # job ID given => import_tree will restore the job (tree stays None)
        pass

    response = current.response
    # Flag to let onvalidation/onaccept know this is coming from a Bulk Import
    response.s3.bulk = True
    success = self.import_tree(id, tree,
                               ignore_errors=ignore_errors,
                               job_id=job_id,
                               commit_job=commit_job,
                               delete_job=delete_job,
                               strategy=strategy,
                               update_policy=update_policy,
                               conflict_policy=conflict_policy,
                               last_sync=last_sync,
                               onconflict=onconflict)
    response.s3.bulk = False

    self.files = Storage()

    # Response message
    if format == "json":
        # Whilst all Responses are JSON, it's easier to debug by having the
        # response appear in the browser than launching a text editor
        response.headers["Content-Type"] = "application/json"
    if self.error_tree is not None:
        tree = xml.tree2json(self.error_tree)
    else:
        tree = None

    # Import Summary Info
    import_info = {"records": self.import_count}
    created = list(set(self.import_created))
    if created:
        import_info["created"] = created
    updated = list(set(self.import_updated))
    if updated:
        import_info["updated"] = updated
    deleted = list(set(self.import_deleted))
    if deleted:
        import_info["deleted"] = deleted

    if success is True:
        # 2nd phase of 2-phase import
        # Execute postimport if-defined
        postimport = self.get_config("postimport")
        if postimport:
            callback(postimport, import_info, tablename=self.tablename)
        return xml.json_message(message=self.error, tree=tree,
                                **import_info)

    elif success and hasattr(success, "job_id"):
        # 1st phase of 2-phase import
        # NB import_info is meaningless here as IDs have been rolled-back
        self.job = success
        return xml.json_message(message=self.error, tree=tree,
                                **import_info)

    # Failure
    return xml.json_message(False, 400,
                            message=self.error, tree=tree)
2545 2546 # -------------------------------------------------------------------------
def import_tree(self, record_id, tree,
                job_id=None,
                ignore_errors=False,
                delete_job=False,
                commit_job=True,
                strategy=None,
                update_policy=None,
                conflict_policy=None,
                last_sync=None,
                onconflict=None):
    """
    Import data from an S3XML element tree.

    @param record_id: record ID or list of record IDs to update
                      (restricts the import to matching elements)
    @param tree: the element tree (ignored when restoring a job)
    @param job_id: restore a job from the job table (ID or UID)
    @param ignore_errors: continue at errors (=skip invalid elements)
    @param delete_job: delete the import job from the job table
    @param commit_job: commit the job (default); if False, the job
                       is stored for a later 2nd phase and returned
    @param strategy: tuple of allowed import methods (sync)
    @param update_policy: policy for updates (sync)
    @param conflict_policy: policy for conflict resolution (sync)
    @param last_sync: last synchronization datetime (sync)
    @param onconflict: callback hook for conflict resolution (sync)

    @returns: True/False for a committed import, or the S3ImportJob
              instance when commit_job=False (1st phase)

    @todo: update for link table support
    """

    from s3import import S3ImportJob

    db = current.db
    xml = current.xml
    auth = current.auth
    tablename = self.tablename
    table = self.table

    if job_id is not None:

        # Restore a job from the job table
        self.error = None
        self.error_tree = None
        try:
            import_job = S3ImportJob(table,
                                     job_id=job_id,
                                     strategy=strategy,
                                     update_policy=update_policy,
                                     conflict_policy=conflict_policy,
                                     last_sync=last_sync,
                                     onconflict=onconflict)
        except:
            # Job could not be restored => treat as bad source
            self.error = current.ERROR.BAD_SOURCE
            return False

        # Delete the job?
        if delete_job:
            import_job.delete()
            return True

        # Load all items
        job_id = import_job.job_id
        item_table = import_job.item_table
        items = db(item_table.job_id == job_id).select()
        load_item = import_job.load_item
        for item in items:
            success = load_item(item)
            if not success:
                self.error = import_job.error
                self.error_tree = import_job.error_tree
        import_job.restore_references()

        # this is only relevant for commit_job=True
        if commit_job:
            if self.error and not ignore_errors:
                return False
        else:
            return import_job

        # Call the import pre-processor to prepare tables
        # and cleanup the tree as necessary
        # NB For 2-phase imports this gets called twice!
        # can't use commit_job to differentiate since we need it to run on the trial import
        import_prep = current.response.s3.import_prep
        if import_prep:
            tree = import_job.get_tree()
            callback(import_prep,
                     # takes tuple (resource, tree) as argument
                     (self, tree),
                     tablename=tablename)
            # Skip import?
            if self.skip_import:
                current.log.debug("Skipping import to %s" % self.tablename)
                self.skip_import = False
                return True

    else:
        # Create a new job from an element tree
        # Do not import into tables without "id" field
        if "id" not in table.fields:
            self.error = current.ERROR.BAD_RESOURCE
            return False

        # Reset error and error tree
        self.error = None
        self.error_tree = None

        # Call the import pre-processor to prepare tables
        # and cleanup the tree as necessary
        # NB For 2-phase imports this gets called twice!
        # can't use commit_job to differentiate since we need it to run on the trial import
        import_prep = current.response.s3.import_prep
        if import_prep:
            if not isinstance(tree, etree._ElementTree):
                tree = etree.ElementTree(tree)
            callback(import_prep,
                     # takes tuple (resource, tree) as argument
                     (self, tree),
                     tablename=tablename)
            # Skip import?
            if self.skip_import:
                current.log.debug("Skipping import to %s" % self.tablename)
                self.skip_import = False
                return True

        # Select the elements for this table
        elements = xml.select_resources(tree, tablename)
        if not elements:
            # nothing to import => still ok
            return True

        # Find matching elements, if a target record ID is given
        UID = xml.UID
        if record_id and UID in table:
            if not isinstance(record_id, (tuple, list)):
                query = (table._id == record_id)
            else:
                query = (table._id.belongs(record_id))
            originals = db(query).select(table[UID])
            uids = [row[UID] for row in originals]
            matches = []
            import_uid = xml.import_uid
            append = matches.append
            for element in elements:
                element_uid = import_uid(element.get(UID, None))
                if not element_uid:
                    continue
                if element_uid in uids:
                    append(element)
            if not matches:
                # No UID match => fall back to updating the first
                # target record with the first UID-less element
                first = elements[0]
                if len(elements) and not first.get(UID, None):
                    first.set(UID, uids[0])
                    matches = [first]
            if not matches:
                self.error = current.ERROR.NO_MATCH
                return False
            else:
                elements = matches

        # Import all matching elements
        import_job = S3ImportJob(table,
                                 tree=tree,
                                 files=self.files,
                                 strategy=strategy,
                                 update_policy=update_policy,
                                 conflict_policy=conflict_policy,
                                 last_sync=last_sync,
                                 onconflict=onconflict)
        add_item = import_job.add_item
        exposed_aliases = self.components.exposed_aliases
        for element in elements:
            success = add_item(element = element,
                               components = exposed_aliases,
                               )
            if not success:
                self.error = import_job.error
                self.error_tree = import_job.error_tree
        if self.error and not ignore_errors:
            return False

    # Commit the import job
    # NB auth.rollback tells the commit to roll back DB writes in
    #    the trial phase (commit_job=False)
    auth.rollback = not commit_job
    success = import_job.commit(ignore_errors=ignore_errors,
                                log_items = self.get_config("oncommit_import_item"))
    auth.rollback = False
    self.error = import_job.error
    self.import_count += import_job.count
    self.import_created += import_job.created
    self.import_updated += import_job.updated
    self.import_deleted += import_job.deleted
    job_mtime = import_job.mtime
    if self.mtime is None or \
       job_mtime and job_mtime > self.mtime:
        self.mtime = job_mtime
    if self.error:
        if ignore_errors:
            self.error = "%s - invalid items ignored" % self.error
        self.error_tree = import_job.error_tree
    elif not success:
        # Oops - how could this happen? We can have an error
        # without failure, but not a failure without error!
        # If we ever get here, then there's a bug without a
        # chance to recover - hence let it crash:
        raise RuntimeError("Import failed without error message")
    if not success or not commit_job:
        db.rollback()
    if not commit_job:
        import_job.store()
        return import_job
    else:
        # Remove the job when committed
        if job_id is not None:
            import_job.delete()

    return self.error is None or ignore_errors
2757 2758 # ------------------------------------------------------------------------- 2759 # XML introspection 2760 # -------------------------------------------------------------------------
def export_options(self,
                   component=None,
                   fields=None,
                   only_last=False,
                   show_uids=False,
                   hierarchy=False,
                   as_json=False):
    """
    Export field options of this resource as element tree

    @param component: name of the component which the options are
                      requested of, None for the primary table
    @param fields: list of names of fields for which the options
                   are requested, None for all fields (which have
                   options)
    @param only_last: obtain only the latest record (fast Ajax path,
                      requires as_json and a single field)
    @param show_uids: include UUIDs in the output
    @param hierarchy: include hierarchy information if available
    @param as_json: convert the output into JSON

    @returns: the options as element tree, XML string or JSON string
    @raises AttributeError: if an undefined component is requested
    """

    if component is not None:
        # Delegate to the component resource
        c = self.components.get(component)
        if c:
            tree = c.export_options(fields=fields,
                                    only_last=only_last,
                                    show_uids=show_uids,
                                    hierarchy=hierarchy,
                                    as_json=as_json)
            return tree
        else:
            # If we get here, we've been called from the back-end,
            # otherwise the request would have failed during parse.
            # So it's safe to raise an exception:
            raise AttributeError
    else:
        # Fix: guard against fields=None (len(None) raised TypeError)
        if as_json and only_last and fields and len(fields) == 1:
            # Identify the field
            default = {"option":[]}
            try:
                field = self.table[fields[0]]
            except AttributeError:
                # Can't raise an exception here as this goes
                # directly to the client
                return json.dumps(default)

            # Check that the validator has a lookup table
            requires = field.requires
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            requires = requires[0]
            if isinstance(requires, IS_EMPTY_OR):
                requires = requires.other
            from s3validators import IS_LOCATION
            if not isinstance(requires, (IS_ONE_OF, IS_LOCATION)):
                # Can't raise an exception here as this goes
                # directly to the client
                return json.dumps(default)

            # Identify the lookup table
            db = current.db
            lookuptable = requires.ktable
            lookupfield = db[lookuptable][requires.kfield]

            # Fields to extract
            fields = [lookupfield]
            h = None
            if hierarchy:
                from s3hierarchy import S3Hierarchy
                h = S3Hierarchy(lookuptable)
                if not h.config:
                    h = None
                elif h.pkey.name != lookupfield.name:
                    # Also extract the node key for the hierarchy
                    fields.append(h.pkey)

            # Get the latest record
            # NB: this assumes that the lookupfield is auto-incremented
            row = db().select(orderby=~lookupfield,
                              limitby=(0, 1),
                              *fields).first()

            # Represent the value and generate the output JSON
            if row:
                value = row[lookupfield]
                widget = field.widget
                if hasattr(widget, "represent") and widget.represent:
                    # Prefer the widget's represent as options.json
                    # is usually called to Ajax-update the widget
                    represent = widget.represent(value)
                elif field.represent:
                    represent = field.represent(value)
                else:
                    represent = s3_str(value)
                if isinstance(represent, A):
                    # Strip the anchor, keep only the label
                    represent = represent.components[0]

                item = {"@value": value, "$": represent}
                if h:
                    parent = h.parent(row[h.pkey])
                    if parent:
                        item["@parent"] = str(parent)
                result = [item]
            else:
                result = []
            return json.dumps({'option': result})

        xml = current.xml
        tree = xml.get_options(self.table,
                               fields=fields,
                               show_uids=show_uids,
                               hierarchy=hierarchy)

        if as_json:
            return xml.tree2json(tree, pretty_print=False,
                                 native=True)
        else:
            return xml.tostring(tree, pretty_print=False)
2877 2878 # -------------------------------------------------------------------------
def export_fields(self, component=None, as_json=False):
    """
    Export a list of fields in the resource as element tree

    @param component: name of the component to lookup the fields
                     (None for primary table)
    @param as_json: convert the output XML into JSON

    @raises AttributeError: if the component is not defined
    """

    if component is None:
        # Primary table: generate the field list directly
        xml = current.xml
        tree = xml.get_fields(self.prefix, self.name)
        serializer = xml.tree2json if as_json else xml.tostring
        return serializer(tree, pretty_print=True)

    # Delegate to the component resource
    try:
        sub = self.components[component]
    except KeyError:
        raise AttributeError("Undefined component %s" % component)
    return sub.export_fields(as_json=as_json)
2901 2902 # -------------------------------------------------------------------------
def export_struct(self,
                  meta=False,
                  options=False,
                  references=False,
                  stylesheet=None,
                  as_json=False,
                  as_tree=False):
    """
    Get the structure of the resource

    @param meta: include meta-fields
    @param options: include option lists in option fields
    @param references: include option lists even for reference fields
    @param stylesheet: the stylesheet to use for transformation
    @param as_json: convert into JSON after transformation
    @param as_tree: return the element tree as-is

    @returns: the structure as tree, JSON or XML string, or None
              if the transformation failed
    """

    xml = current.xml

    # Common keyword arguments for all get_struct calls
    struct_opts = {"meta": meta,
                   "options": options,
                   "references": references,
                   }

    # Structure of the master resource, attached to a new root element
    root = etree.Element(xml.TAG.root)
    master = xml.get_struct(self.prefix, self.name,
                            alias = self.alias,
                            parent = root,
                            **struct_opts)

    # Append the structure of each exposed component
    for c in self.components.exposed.values():
        xml.get_struct(c.prefix, c.name,
                       alias = c.alias,
                       parent = master,
                       **struct_opts)

    tree = etree.ElementTree(root)

    # Optional XSLT transformation
    if stylesheet is not None:
        tree = xml.transform(tree, stylesheet,
                             domain = xml.domain,
                             base_url = current.response.s3.base_url,
                             prefix = self.prefix,
                             name = self.name,
                             utcnow = s3_format_datetime(),
                             )
        if tree is None:
            return None

    if as_tree:
        return tree
    if as_json:
        return xml.tree2json(tree, pretty_print=True)
    return xml.tostring(tree, pretty_print=True)
2966 2967 # ------------------------------------------------------------------------- 2968 # Data Model Helpers 2969 # ------------------------------------------------------------------------- 2970 @classmethod
def original(cls, table, record, mandatory=None):
    """
    Find the original record for a possible duplicate:
       - if the record contains a UUID, then only that UUID is used
         to match the record with an existing DB record
       - otherwise, if the record contains some values for unique
         fields, all of them must match the same existing DB record

    @param table: the table
    @param record: the record as dict or S3XML Element
    @param mandatory: list of additional field names to include in
                      the returned Row

    @returns: the original Row, or None if there is no unambiguous
              match
    @raises TypeError: if record is neither dict nor Element
    """

    db = current.db
    xml = current.xml
    xml_decode = xml.xml_decode

    VALUE = xml.ATTRIBUTE["value"]
    UID = xml.UID
    ATTRIBUTES_TO_FIELDS = xml.ATTRIBUTES_TO_FIELDS

    # Get primary keys (here: all unique fields of the table)
    pkeys = [f for f in table.fields if table[f].unique]
    pvalues = Storage()

    # Get the values from record
    get = record.get
    if type(record) is etree._Element: #isinstance(record, etree._Element):
        # S3XML Element: UID and meta-fields are attributes, other
        # field values are in <data field="..."> child elements
        xpath = record.xpath
        xexpr = "%s[@%s='%%s']" % (xml.TAG["data"],
                                   xml.ATTRIBUTE["field"])
        for f in pkeys:
            v = None
            if f == UID or f in ATTRIBUTES_TO_FIELDS:
                v = get(f, None)
            else:
                child = xpath(xexpr % f)
                if child:
                    child = child[0]
                    # Prefer the raw @value attribute over the
                    # represented element text
                    v = child.get(VALUE, xml_decode(child.text))
            if v:
                pvalues[f] = v
    elif isinstance(record, dict):
        for f in pkeys:
            v = get(f, None)
            if v:
                pvalues[f] = v
    else:
        raise TypeError

    # Build match query: OR of all non-UID unique-field matches
    query = None
    for f in pvalues:
        if f == UID:
            continue
        _query = (table[f] == pvalues[f])
        if query is not None:
            query = query | _query
        else:
            query = _query

    # Fields to retrieve for the original record
    fields = cls.import_fields(table, pvalues, mandatory=mandatory)

    # Try to find exactly one match by non-UID unique keys
    # (limitby=(0, 2) so that multiple matches can be detected)
    if query is not None:
        original = db(query).select(limitby=(0, 2), *fields)
        if len(original) == 1:
            return original.first()

    # If no match, then try to find a UID-match
    if UID in pvalues:
        uid = xml.import_uid(pvalues[UID])
        query = (table[UID] == uid)
        original = db(query).select(limitby=(0, 1), *fields).first()
        if original:
            return original

    # No match or multiple matches
    return None
3049 3050 # ------------------------------------------------------------------------- 3051 @staticmethod
def import_fields(table, data, mandatory=None):
    """
    Determine the Fields to retrieve when looking up the original
    of an imported record: all meta-fields, the primary key, any
    mandatory fields, and the keys present in data

    @param table: the table
    @param data: dict of field name => value (only keys are used)
    @param mandatory: additional field names to include

    @returns: list of Fields existing in the table
    """

    required = set(s3_all_meta_field_names())
    required.add(table._id.name)
    if mandatory:
        required.update(mandatory)
    required.update(data)

    available = table.fields
    return [table[fn] for fn in required if fn in available]
3061 3062 # -------------------------------------------------------------------------
def readable_fields(self, subset=None):
    """
    Get a list of all readable fields in the resource table

    @param subset: list of fieldnames to limit the selection to

    @returns: list of Fields (the component foreign key, if any,
              is always excluded)
    """

    table = self.table

    # Determine the foreign key of this component (hidden from lists)
    fkey = None
    parent = self.parent
    linked = self.linked
    if parent and linked is None:
        hook = parent.components.get(self.alias)
        if hook:
            fkey = hook.fkey
    elif linked is not None:
        if linked:
            fkey = linked.lkey

    # Candidate field names: the subset (restricted to actual table
    # fields), or all fields of the table
    if subset:
        candidates = [fn for fn in subset if fn in table.fields]
    else:
        candidates = table.fields

    return [ogetattr(table, fn) for fn in candidates
            if ogetattr(table, fn).readable and fn != fkey]
3092 3093 # -------------------------------------------------------------------------
def resolve_selectors(self, selectors,
                      skip_components=False,
                      extra_fields=True,
                      show=True):
    """
    Resolve a list of field selectors against this resource

    @param selectors: the field selectors (str, FS, S3ResourceField,
                      or (label, selector) tuples)
    @param skip_components: skip fields in components
    @param extra_fields: automatically add extra_fields of all virtual
                         fields in this table
    @param show: default for S3ResourceField.show

    @return: tuple of (fields, joins, left, distinct)
    """

    # Qualify a selector with the master prefix ("~.") if it has no
    # explicit prefix of its own
    prefix = lambda s: "~.%s" % s \
             if "." not in s.split("$", 1)[0] else s

    display_fields = set()
    add = display_fields.add

    # Store field selectors
    # (these are the fields requested for display, as opposed to
    # extra_fields which are only needed for virtual field lookups)
    for item in selectors:
        if not item:
            continue
        elif type(item) is tuple:
            item = item[-1]
        if isinstance(item, str):
            selector = item
        elif isinstance(item, S3ResourceField):
            selector = item.selector
        elif isinstance(item, FS):
            selector = item.name
        else:
            continue
        add(prefix(selector))

    slist = list(selectors)

    # Collect extra fields from virtual tables
    if extra_fields:
        extra = self.get_config("extra_fields")
        if extra:
            append = slist.append
            for selector in extra:
                s = prefix(selector)
                if s not in display_fields:
                    append(s)

    joins = {}
    left = {}

    distinct = False

    # Track column names to de-duplicate selectors that resolve to
    # the same column
    columns = set()
    add_column = columns.add

    rfields = []
    append = rfields.append

    for s in slist:

        # Allow to override the field label
        if type(s) is tuple:
            label, selector = s
        else:
            label, selector = None, s

        # Resolve the selector (unresolvable selectors are silently
        # skipped rather than raising)
        if isinstance(selector, str):
            selector = prefix(selector)
            try:
                rfield = S3ResourceField(self, selector, label=label)
            except (AttributeError, SyntaxError):
                continue
        elif isinstance(selector, FS):
            try:
                rfield = selector.resolve(self)
            except (AttributeError, SyntaxError):
                continue
        elif isinstance(selector, S3ResourceField):
            rfield = selector
        else:
            continue

        # Unresolvable selector?
        if rfield.field is None and not rfield.virtual:
            continue

        # De-duplicate columns
        colname = rfield.colname
        if colname in columns:
            continue
        else:
            add_column(colname)

        # Replace default label
        if label is not None:
            rfield.label = label

        # Skip components
        if skip_components:
            head = rfield.selector.split("$", 1)[0]
            if "." in head and head.split(".")[0] not in ("~", self.alias):
                continue

        # Resolve the joins
        # (distinct fields require left joins and a DISTINCT query)
        if rfield.distinct:
            left.update(rfield._joins)
            distinct = True
        elif rfield.join:
            joins.update(rfield._joins)

        # Fields added only via extra_fields are resolved but hidden
        rfield.show = show and rfield.selector in display_fields
        append(rfield)

    return (rfields, joins, left, distinct)
3212 3213 # -------------------------------------------------------------------------
def resolve_selector(self, selector):
    """
    Resolve a single field selector against this resource;
    wrapper for S3ResourceField, retained for backward compatibility

    @param selector: the field selector (str)

    @return: the S3ResourceField
    """

    rfield = S3ResourceField(self, selector)
    return rfield
3220 3221 # -------------------------------------------------------------------------
def split_fields(self, skip=DEFAULT, data=None, references=None):
    """
    Split the readable fields in the resource table into
    reference and non-reference fields.

    @param skip: list of field names to skip
    @param data: data fields to include (None for all)
    @param references: foreign key fields to include (None for all)

    @returns: tuple (rfields, dfields) of lists of field names;
              also cached on self.rfields/self.dfields
    """

    if skip is DEFAULT:
        skip = []
    else:
        # Fix: work on a copy so the caller's list is not mutated
        # as a side-effect (also accepts tuples/sets now)
        skip = list(skip)

    rfields = self.rfields
    dfields = self.dfields

    # Only compute once, then use the cached split
    if rfields is None or dfields is None:
        if self.tablename == "gis_location":
            settings = current.deployment_settings
            if "wkt" not in skip:
                fmt = current.auth.permission.format
                if fmt == "cap":
                    # Include WKT
                    pass
                elif fmt == "xml" and settings.get_gis_xml_wkt():
                    # Include WKT
                    pass
                else:
                    # Skip bulky WKT fields
                    skip.append("wkt")
            if "the_geom" not in skip and settings.get_gis_spatialdb():
                skip.append("the_geom")

        xml = current.xml
        UID = xml.UID
        IGNORE_FIELDS = xml.IGNORE_FIELDS
        FIELDS_TO_ATTRIBUTES = xml.FIELDS_TO_ATTRIBUTES

        show_ids = current.xml.show_ids
        rfields = []
        dfields = []
        table = self.table
        pkey = table._id.name
        for f in table.fields:

            if f == UID or f in skip or f in IGNORE_FIELDS:
                # Skip (show_ids=True overrides this for pkey)
                if f != pkey or not show_ids:
                    continue

            # Meta-field? => always include (in dfields)
            meta = f in FIELDS_TO_ATTRIBUTES

            if s3_has_foreign_key(table[f]) and not meta:
                # Foreign key => add to rfields unless excluded
                if references is None or f in references:
                    rfields.append(f)

            elif data is None or f in data or meta:
                # Data field => add to dfields
                dfields.append(f)

        self.rfields = rfields
        self.dfields = dfields

    return (rfields, dfields)
3288 3289 # ------------------------------------------------------------------------- 3290 # Utility functions 3291 # -------------------------------------------------------------------------
def configure(self, **settings):
    """
    Update configuration settings for this resource

    @param settings: configuration settings for this resource
                     as keyword arguments
    """

    s3db = current.s3db
    s3db.configure(self.tablename, **settings)
3301 3302 # -------------------------------------------------------------------------
def get_config(self, key, default=None):
    """
    Look up a configuration setting for this resource

    @param key: the setting key
    @param default: fallback value to return if the setting
                    is not configured for this resource

    @return: the setting value, or default
    """

    s3db = current.s3db
    return s3db.get_config(self.tablename, key, default=default)
3313 3314 # -------------------------------------------------------------------------
def clear_config(self, *keys):
    """
    Remove configuration settings for this resource

    @param keys: keys to remove (can be multiple)

    @note: no keys specified removes all settings for this resource
    """

    s3db = current.s3db
    s3db.clear_config(self.tablename, *keys)
3325 3326 # -------------------------------------------------------------------------
def limitby(self, start=0, limit=0):
    """
    Convert start+limit parameters into a limitby tuple
        - limit without start => start = 0
        - start without limit => limit = ROWSPERPAGE
        - limit 0 (or less) => limit = 1
        - start less than 0 => start = 0

    @param start: index of the first record to select
    @param limit: maximum number of records to select

    @return: (start, start + limit) tuple, or None for unlimited
    """

    if limit is None:
        # Unlimited
        return None

    # Normalize the start index
    first = 0 if start is None or start < 0 else start

    # limit=0 means the configured page size
    if limit == 0:
        limit = current.response.s3.ROWSPERPAGE
    count = limit if limit > 0 else 1

    return (first, first + count)
3353 3354 # -------------------------------------------------------------------------
def _join(self, implicit=False, reverse=False):
    """
    Get a join for this component

    @param implicit: return a subquery with an implicit join rather
                     than an explicit join
    @param reverse: get the reverse join (joining master to component)

    @return: a Query if implicit=True, otherwise a list of joins;
             None if this resource is not a component
    """

    if self.parent is None:
        # This isn't a component
        return None
    else:
        ltable = self.parent.table

    rtable = self.table
    pkey = self.pkey
    fkey = self.fkey

    DELETED = current.xml.DELETED

    if self.linked:
        # This is the link table of a link-table component
        # => use the join of the linked component instead
        return self.linked._join(implicit=implicit, reverse=reverse)

    elif self.linktable:
        # Link-table component: master <-> link <-> component
        linktable = self.linktable
        lkey = self.lkey
        rkey = self.rkey
        lquery = (ltable[pkey] == linktable[lkey])
        if DELETED in linktable:
            # Exclude deleted links
            lquery &= (linktable[DELETED] != True)
        if self.filter is not None and not reverse:
            rquery = (linktable[rkey] == rtable[fkey]) & self.filter
        else:
            rquery = (linktable[rkey] == rtable[fkey])
        if reverse:
            join = [linktable.on(rquery), ltable.on(lquery)]
        else:
            join = [linktable.on(lquery), rtable.on(rquery)]

    else:
        # Simple component: master <-> component
        lquery = (ltable[pkey] == rtable[fkey])
        if DELETED in rtable and not reverse:
            # Exclude deleted component records
            lquery &= (rtable[DELETED] != True)
        if self.filter is not None:
            lquery &= self.filter
        if reverse:
            join = [ltable.on(lquery)]
        else:
            join = [rtable.on(lquery)]

    if implicit:
        # Combine all ON-conditions into a single query
        # (expression.second is the ON-condition of the join)
        query = None
        for expression in join:
            if query is None:
                query = expression.second
            else:
                query &= expression.second
        return query
    else:
        return join
3418 3419 # -------------------------------------------------------------------------
3420 - def get_join(self):
3421 """ Get join for this component """ 3422 3423 return self._join(implicit=True)
3424 3425 # -------------------------------------------------------------------------
3426 - def get_left_join(self):
3427 """ Get a left join for this component """ 3428 3429 return self._join()
3430 3431 # ------------------------------------------------------------------------- 3456 3457 # -------------------------------------------------------------------------
    def component_id(self, master_id, link_id):
        """
            Helper method to find the component record ID for
            a particular link of a particular master record

            @param master_id: the ID of the master record (optional,
                              used to check negatives)
            @param link_id: the ID of the link table entry

            @return: the component record ID, or None if this resource
                     is not a link table or no match was found
        """

        if self.parent is None or self.linked is None:
            # Not a link-table resource
            return None

        join = self.get_join()
        ltable = self.table
        mtable = self.parent.table
        ctable = self.linked.table
        query = join & (ltable._id == link_id)
        if master_id is not None:
            # master ID is redundant, but can be used to check negatives
            query &= (mtable._id == master_id)
        row = current.db(query).select(ctable._id, limitby=(0, 1)).first()
        if row:
            return row[ctable._id.name]
        else:
            return None
3484 3485 # ------------------------------------------------------------------------- 3536 3537 # -------------------------------------------------------------------------
    def datatable_filter(self, fields, get_vars):
        """
            Parse datatable search/sort vars into a tuple of
            query, orderby and left joins

            @param fields: list of field selectors representing
                           the order of fields in the datatable (list_fields)
            @param get_vars: the datatable GET vars

            @return: tuple of (query, orderby, left joins)
        """

        db = current.db
        get_aliased = current.s3db.get_aliased

        left_joins = S3Joins(self.tablename)

        # Datatables (server-side processing) GET var names
        sSearch = "sSearch"
        iColumns = "iColumns"
        iSortingCols = "iSortingCols"

        parent = self.parent
        fkey = self.fkey

        # Skip joins for linked tables
        if self.linked is not None:
            skip = self.linked.tablename
        else:
            skip = None

        # Resolve the list fields
        rfields = self.resolve_selectors(fields)[0]

        # FILTER --------------------------------------------------------------

        searchq = None
        if sSearch in get_vars and iColumns in get_vars:

            # Build filter
            text = get_vars[sSearch]
            words = [w for w in text.lower().split()]

            if words:
                try:
                    numcols = int(get_vars[iColumns])
                except ValueError:
                    numcols = 0

                # Collect the fields to search through
                flist = []
                for i in xrange(numcols):
                    try:
                        rfield = rfields[i]
                        field = rfield.field
                    except (KeyError, IndexError):
                        continue
                    if field is None:
                        # Virtual
                        if hasattr(rfield, "search_field"):
                            field = db[rfield.tname][rfield.search_field]
                        else:
                            # Cannot search
                            continue
                    ftype = str(field.type)

                    # Add left joins
                    left_joins.extend(rfield.left)

                    if ftype[:9] == "reference" and \
                       hasattr(field, "sortby") and field.sortby:
                        # For foreign keys, we search through their sortby

                        # Get the lookup table
                        tn = ftype[10:]
                        if parent is not None and \
                           parent.tablename == tn and field.name != fkey:
                            # Self-reference to the parent table => alias
                            alias = "%s_%s_%s" % (parent.prefix,
                                                  "linked",
                                                  parent.name)
                            ktable = get_aliased(db[tn], alias)
                            ktable._id = ktable[ktable._id.name]
                            tn = alias
                        elif tn == field.tablename:
                            # Self-reference to the same table => alias
                            prefix, name = field.tablename.split("_", 1)
                            alias = "%s_%s_%s" % (prefix, field.name, name)
                            ktable = get_aliased(db[tn], alias)
                            ktable._id = ktable[ktable._id.name]
                            tn = alias
                        else:
                            ktable = db[tn]

                        # Add left join for lookup table
                        if tn != skip:
                            left_joins.add(ktable.on(field == ktable._id))

                        if isinstance(field.sortby, (list, tuple)):
                            flist.extend([ktable[f] for f in field.sortby
                                                    if f in ktable.fields])
                        else:
                            if field.sortby in ktable.fields:
                                flist.append(ktable[field.sortby])

                    else:
                        # Otherwise, we search through the field itself
                        flist.append(field)

                # Build search query
                # @todo: migrate this to S3ResourceQuery?
                opts = Storage()
                queries = []
                for w in words:

                    wqueries = []
                    for field in flist:
                        ftype = str(field.type)
                        options = None
                        fname = str(field)
                        if fname in opts:
                            # Options already looked up for this field
                            options = opts[fname]
                        elif ftype[:7] in ("integer",
                                           "list:in",
                                           "list:st",
                                           "referen",
                                           "list:re",
                                           "string"):
                            # Check for a fixed set of options (validator)
                            requires = field.requires
                            if not isinstance(requires, (list, tuple)):
                                requires = [requires]
                            if requires:
                                r = requires[0]
                                if isinstance(r, IS_EMPTY_OR):
                                    r = r.other
                                if hasattr(r, "options"):
                                    try:
                                        options = r.options()
                                    except:
                                        options = []
                        if options is None and ftype in ("string", "text"):
                            # Free-text search (case-insensitive LIKE)
                            wqueries.append(field.lower().like("%%%s%%" % w))
                        elif options is not None:
                            # Search through the field's options
                            opts[fname] = options
                            vlist = [v for v, t in options
                                     if s3_unicode(t).lower().find(s3_unicode(w)) != -1]
                            if vlist:
                                wqueries.append(field.belongs(vlist))
                    if len(wqueries):
                        # Any field matching this word (OR)
                        queries.append(reduce(lambda x, y: x | y \
                                              if x is not None else y,
                                              wqueries))
                if len(queries):
                    # All words must match (AND)
                    searchq = reduce(lambda x, y: x & y \
                                     if x is not None else y, queries)

        # ORDERBY -------------------------------------------------------------

        orderby = []
        if iSortingCols in get_vars:

            # Sorting direction
            def direction(i):
                sort_dir = get_vars["sSortDir_%s" % str(i)]
                return sort_dir and " %s" % sort_dir or ""

            # Get the fields to order by
            try:
                numcols = int(get_vars[iSortingCols])
            except:
                numcols = 0

            columns = []
            pkey = str(self._id)
            for i in xrange(numcols):
                try:
                    iSortCol = int(get_vars["iSortCol_%s" % i])
                except (AttributeError, KeyError):
                    # iSortCol_x not present in get_vars => ignore
                    columns.append(Storage(field=None))
                    continue

                # Map sortable-column index to the real list_fields
                # index: for every non-id non-sortable column to the
                # left of sortable column subtract 1
                for j in xrange(iSortCol):
                    if get_vars.get("bSortable_%s" % j, "true") == "false":
                        try:
                            if rfields[j].colname != pkey:
                                iSortCol -= 1
                        except KeyError:
                            break

                try:
                    rfield = rfields[iSortCol]
                except IndexError:
                    # iSortCol specifies a non-existent column, i.e.
                    # iSortCol_x>=numcols => ignore
                    columns.append(Storage(field=None))
                else:
                    columns.append(rfield)

            # Process the orderby-fields
            for i in xrange(len(columns)):
                rfield = columns[i]
                field = rfield.field
                if field is None:
                    # Unsortable column => skip
                    continue
                ftype = str(field.type)

                represent = field.represent
                if not hasattr(represent, "skip_dt_orderby") and \
                   hasattr(represent, "dt_orderby"):
                    # Custom orderby logic in field.represent
                    field.represent.dt_orderby(field,
                                               direction(i),
                                               orderby,
                                               left_joins)

                elif ftype[:9] == "reference" and \
                     hasattr(field, "sortby") and field.sortby:
                    # Foreign keys with sortby will be sorted by sortby

                    # Get the lookup table
                    tn = ftype[10:]
                    if parent is not None and \
                       parent.tablename == tn and field.name != fkey:
                        # Self-reference to the parent table => alias
                        alias = "%s_%s_%s" % (parent.prefix, "linked", parent.name)
                        ktable = get_aliased(db[tn], alias)
                        ktable._id = ktable[ktable._id.name]
                        tn = alias
                    elif tn == field.tablename:
                        # Self-reference to the same table => alias
                        prefix, name = field.tablename.split("_", 1)
                        alias = "%s_%s_%s" % (prefix, field.name, name)
                        ktable = get_aliased(db[tn], alias)
                        ktable._id = ktable[ktable._id.name]
                        tn = alias
                    else:
                        ktable = db[tn]

                    # Add left joins for lookup table
                    if tn != skip:
                        left_joins.extend(rfield.left)
                        left_joins.add(ktable.on(field == ktable._id))

                    # Construct orderby from sortby
                    if not isinstance(field.sortby, (list, tuple)):
                        orderby.append("%s.%s%s" % (tn, field.sortby, direction(i)))
                    else:
                        orderby.append(", ".join(["%s.%s%s" %
                                                  (tn, fn, direction(i))
                                                  for fn in field.sortby]))

                else:
                    # Otherwise, we sort by the field itself
                    orderby.append("%s%s" % (field, direction(i)))

        if orderby:
            orderby = ", ".join(orderby)
        else:
            orderby = None

        left_joins = left_joins.as_list(tablenames=left_joins.joins.keys())
        return (searchq, orderby, left_joins)
3798 3799 # -------------------------------------------------------------------------
    def axisfilter(self, axes):
        """
            Get all values for the given S3ResourceFields (axes) which
            match the resource query, used in pivot tables to filter out
            additional values where dimensions can have multiple values
            per record

            @param axes: the axis fields as list/tuple of S3ResourceFields

            @return: a dict with values per axis, only contains those
                     axes which are affected by the resource filter
        """

        axisfilter = {}

        # Flat dict representation of the current resource query
        qdict = self.get_query().as_dict(flat=True)

        for rfield in axes:
            field = rfield.field

            if field is None:
                # virtual field or unresolvable selector
                continue

            left_joins = S3Joins(self.tablename)
            left_joins.extend(rfield.left)

            tablenames = left_joins.joins.keys()
            tablenames.append(self.tablename)
            # Extract the subqueries relevant for this axis
            af = S3AxisFilter(qdict, tablenames)

            if af.op is not None:
                query = af.query()
                left = left_joins.as_list()

                # @todo: this does not work with virtual fields: need
                # to retrieve all extra_fields for the dimension table
                # and can't groupby (=must deduplicate afterwards)
                rows = current.db(query).select(field,
                                                left=left,
                                                groupby=field)
                colname = rfield.colname
                if rfield.ftype[:5] == "list:":
                    # Multi-value field => flatten the lists, then
                    # filter the values against the axis filter
                    values = []
                    vappend = values.append
                    for row in rows:
                        v = row[colname]
                        vappend(v if v else [None])
                    values = set(chain.from_iterable(values))

                    include, exclude = af.values(rfield)
                    fdict = {}
                    if include:
                        for v in values:
                            vstr = s3_unicode(v) if v is not None else v
                            if vstr in include and vstr not in exclude:
                                fdict[v] = None
                    else:
                        fdict = dict((v, None) for v in values)

                    axisfilter[colname] = fdict

                else:
                    # Single-value field => use the row values directly
                    axisfilter[colname] = dict((row[colname], None)
                                               for row in rows)

        return axisfilter
3867 3868 # -------------------------------------------------------------------------
3869 - def prefix_selector(self, selector):
3870 """ 3871 Helper method to ensure consistent prefixing of field selectors 3872 3873 @param selector: the selector 3874 """ 3875 3876 head = selector.split("$", 1)[0] 3877 if "." in head: 3878 prefix = head.split(".", 1)[0] 3879 if prefix == self.alias: 3880 return selector.replace("%s." % prefix, "~.") 3881 else: 3882 return selector 3883 else: 3884 return "~.%s" % selector
3885 3886 # -------------------------------------------------------------------------
3887 - def list_fields(self, key="list_fields", id_column=0):
3888 """ 3889 Get the list_fields for this resource 3890 3891 @param key: alternative key for the table configuration 3892 @param id_column: - False to exclude the record ID 3893 - True to include it if it is configured 3894 - 0 to make it the first column regardless 3895 whether it is configured or not 3896 """ 3897 3898 list_fields = self.get_config(key, None) 3899 3900 if not list_fields and key != "list_fields": 3901 list_fields = self.get_config("list_fields", None) 3902 if not list_fields: 3903 list_fields = [f.name for f in self.readable_fields()] 3904 3905 id_field = pkey = self._id.name 3906 3907 # Do not include the parent key for components 3908 if self.parent and not self.link and \ 3909 not current.response.s3.component_show_key: 3910 fkey = self.fkey 3911 else: 3912 fkey = None 3913 3914 fields = [] 3915 append = fields.append 3916 selectors = set() 3917 seen = selectors.add 3918 for f in list_fields: 3919 selector = f[1] if type(f) is tuple else f 3920 if fkey and selector == fkey: 3921 continue 3922 if selector == pkey and not id_column: 3923 id_field = f 3924 elif selector not in selectors: 3925 seen(selector) 3926 append(f) 3927 3928 if id_column is 0: 3929 fields.insert(0, id_field) 3930 3931 return fields
3932 3933 # -------------------------------------------------------------------------
    def get_defaults(self, master, defaults=None, data=None):
        """
            Get implicit defaults for new component records

            @param master: the master record
            @param defaults: any explicit defaults
            @param data: any actual values for the new record

            @return: a dict of {fieldname: values} with the defaults;
                     empty if this resource is not a component
        """

        values = {}

        parent = self.parent
        if not parent:
            # Not a component
            return values

        # Implicit defaults from component filters
        # (only scalar filter values can serve as defaults)
        hook = current.s3db.get_component(parent.tablename, self.alias)
        filterby = hook.get("filterby")
        if filterby:
            for (k, v) in filterby.items():
                if not isinstance(v, (tuple, list)):
                    values[k] = v

        # Explicit defaults from component hook
        if self.defaults:
            values.update(self.defaults)

        # Explicit defaults from caller
        if defaults:
            values.update(defaults)

        # Actual record values
        if data:
            values.update(data)

        # Check for values to look up from master record
        lookup = {}
        # Iterate over a copy since values may be modified in the loop
        for (k, v) in list(values.items()):
            # Skip nonexistent fields
            if k not in self.fields:
                del values[k]
                continue
            # Resolve any field selectors
            if isinstance(v, FS):
                try:
                    rfield = v.resolve(parent)
                except (AttributeError, SyntaxError):
                    continue
                # => must be a real field in the master table
                field = rfield.field
                if not field or field.table != parent.table:
                    continue
                if field.name in master:
                    # Take the value from the master record as-given
                    values[k] = master[field.name]
                else:
                    # Not in the master record => schedule a DB lookup
                    del values[k]
                    lookup[field.name] = k

        # Do we need to reload the master record to look up values?
        if lookup:
            row = None
            parent_id = parent._id
            record_id = master.get(parent_id.name)
            if record_id:
                fields = [parent.table[f] for f in lookup]
                row = current.db(parent_id == record_id).select(limitby = (0, 1),
                                                                *fields).first()
            if row:
                for (k, v) in lookup.items():
                    if k in row:
                        values[v] = row[k]

        return values
4009 4010 # ------------------------------------------------------------------------- 4011 @property
4012 - def _table(self):
4013 """ 4014 Get the original Table object (without SQL Alias), this 4015 is required for SQL update (DAL doesn't detect the alias 4016 and uses the wrong tablename). 4017 """ 4018 4019 if self.tablename != self._alias: 4020 return current.s3db[self.tablename] 4021 else: 4022 return self.table
4023
# =============================================================================
class S3Components(object):
    """
        Lazy component loader: loads component resources of a master
        resource on-demand, and caches them for subsequent access
    """

    def __init__(self, master, expose=None):
        """
            Constructor

            @param master: the master resource (S3Resource)
            @param expose: aliases of components to expose, defaults to
                           all configured components
        """

        self.master = master

        if expose is None:
            # Default to all components configured for the master table
            hooks = current.s3db.get_hooks(master.tablename)[1]
            if hooks:
                self.exposed_aliases = set(hooks.keys())
            else:
                self.exposed_aliases = set()
        else:
            self.exposed_aliases = set(expose)

        # Cache of instantiated components {alias: S3Resource}
        self._components = {}
        # Cache of exposed components {alias: S3Resource}
        self._exposed = {}

        # Link-table resources {name|alias: S3Resource}
        self.links = {}

    # -------------------------------------------------------------------------
    def get(self, alias, default=None):
        """
            Access a component resource by its alias; will load the
            component if not loaded yet

            @param alias: the component alias
            @param default: default to return if the alias is not defined

            @return: the component resource (S3Resource)
        """

        components = self._components

        component = components.get(alias)
        if not component:
            # Not loaded yet => attempt to load it now
            self.__load((alias,))
            return components.get(alias, default)
        else:
            return component

    # -------------------------------------------------------------------------
    def __getitem__(self, alias):
        """
            Access a component by its alias in key notation; will load the
            component if not loaded yet

            @param alias: the component alias

            @return: the component resource (S3Resource)

            @raises: KeyError if the component is not defined
        """

        component = self.get(alias)
        if component is None:
            raise KeyError
        else:
            return component

    # -------------------------------------------------------------------------
    def __contains__(self, alias):
        """
            Check if a component is defined for this resource

            @param alias: the alias to check

            @return: True|False whether the component is defined
        """

        if self.get(alias):
            return True
        else:
            return False

    # -------------------------------------------------------------------------
    @property
    def loaded(self):
        """
            Get all currently loaded components

            @return: dict {alias: resource} with loaded components
        """
        return self._components

    # -------------------------------------------------------------------------
    @property
    def exposed(self):
        """
            Get all exposed components (=> will thus load them all)

            @return: dict {alias: resource} with exposed components
        """

        loaded = self._components
        exposed = self._exposed

        # Determine which exposed aliases still need loading
        missing = set()
        for alias in self.exposed_aliases:
            if alias not in exposed:
                if alias in loaded:
                    exposed[alias] = loaded[alias]
                else:
                    missing.add(alias)

        if missing:
            self.__load(missing)

        return exposed

    # -------------------------------------------------------------------------
    # Methods kept for backwards-compatibility
    # - to be deprecated
    # - use-cases should explicitly address either .loaded or .exposed
    #
    def keys(self):
        """
            Get the aliases of all exposed components ([alias])
        """
        return self.exposed.keys()

    def values(self):
        """
            Get all exposed components ([resource])
        """
        return self.exposed.values()

    def items(self):
        """
            Get all exposed components ([(alias, resource)])
        """
        return self.exposed.items()

    # -------------------------------------------------------------------------
    def __load(self, aliases, force=False):
        """
            Instantiate component resources

            @param aliases: iterable of aliases of components to instantiate
            @param force: forced reload of components

            @return: dict of loaded components {alias: resource}
        """

        s3db = current.s3db

        master = self.master

        components = self._components
        exposed = self._exposed
        exposed_aliases = self.exposed_aliases

        links = self.links

        if aliases:
            if force:
                # Forced reload
                new = aliases
            else:
                # Only load components not already loaded
                new = [alias for alias in aliases if alias not in components]
        else:
            new = None

        hooks = s3db.get_components(master.table, names=new)
        if not hooks:
            return

        for alias, hook in hooks.items():

            filterby = hook.filterby
            if alias is not None and filterby is not None:
                # Filtered component => always use an aliased table
                table_alias = "%s_%s_%s" % (hook.prefix,
                                            hook.alias,
                                            hook.name,
                                            )
                table = s3db.get_aliased(hook.table, table_alias)
                hook.table = table
            else:
                table_alias = None
                table = hook.table

            # Instantiate component resource
            component = S3Resource(table,
                                   parent = master,
                                   alias = alias,
                                   linktable = hook.linktable,
                                   include_deleted = master.include_deleted,
                                   approved = master._approved,
                                   unapproved = master._unapproved,
                                   )

            if table_alias:
                component.tablename = hook.tablename
                component._alias = table_alias

            # Copy hook properties to the component resource
            component.pkey = hook.pkey
            component.fkey = hook.fkey

            component.linktable = hook.linktable
            component.lkey = hook.lkey
            component.rkey = hook.rkey
            component.actuate = hook.actuate
            component.autodelete = hook.autodelete
            component.autocomplete = hook.autocomplete

            #component.alias = alias
            component.multiple = hook.multiple
            component.defaults = hook.defaults

            # Component filter
            if not filterby:
                # Can use filterby=False to enforce table aliasing yet
                # suppress component filtering, useful e.g. if the same
                # table is declared as component more than once for the
                # same master table (using different foreign keys)
                component.filter = None

            else:
                # Filter by multiple criteria
                query = None
                for k, v in filterby.items():
                    if isinstance(v, FS):
                        # Match a field in the master table
                        # => identify the field
                        try:
                            rfield = v.resolve(master)
                        except (AttributeError, SyntaxError):
                            if current.response.s3.debug:
                                raise
                            else:
                                current.log.error(sys.exc_info()[1])
                            continue
                        # => must be a real field in the master table
                        field = rfield.field
                        if not field or field.table != master.table:
                            current.log.error("Component filter for %s<=%s: "
                                              "invalid lookup field '%s'" %
                                              (master.tablename, alias, v.name))
                            continue
                        subquery = (table[k] == field)
                    else:
                        # Match a constant value, or a set of values
                        is_list = isinstance(v, (tuple, list))
                        if is_list and len(v) == 1:
                            # Single-element list => treat as scalar
                            filterfor = v[0]
                            is_list = False
                        else:
                            filterfor = v
                        if not is_list:
                            subquery = (table[k] == filterfor)
                        elif filterfor:
                            subquery = (table[k].belongs(set(filterfor)))
                        else:
                            continue
                    if subquery:
                        # Combine all criteria with AND
                        if query is None:
                            query = subquery
                        else:
                            query &= subquery

                component.filter = query

            # Copy component properties to the link resource
            link = component.link
            if link is not None:

                link.pkey = component.pkey
                link.fkey = component.lkey

                link.multiple = component.multiple

                link.actuate = component.actuate
                link.autodelete = component.autodelete

                # Register the link table
                links[link.name] = links[link.alias] = link

            # Register the component
            components[alias] = component

            if alias in exposed_aliases:
                exposed[alias] = component

        return components

    # -------------------------------------------------------------------------
    def reset(self, aliases=None, expose=DEFAULT):
        """
            Detach currently loaded components, e.g. to force a reload

            @param aliases: aliases to remove, None for all
            @param expose: aliases of components to expose (default:
                           keep previously exposed aliases), None for
                           all configured components
        """

        if expose is not DEFAULT:
            # Re-determine the exposed aliases
            if expose is None:
                hooks = current.s3db.get_hooks(self.master.tablename)[1]
                if hooks:
                    self.exposed_aliases = set(hooks.keys())
                else:
                    self.exposed_aliases = set()
            else:
                self.exposed_aliases = set(expose)

        if aliases:

            loaded = self._components
            links = self.links
            exposed = self._exposed

            for alias in aliases:
                component = loaded.pop(alias, None)
                if component:
                    # Also de-register the component's link table
                    link = component.link
                    for k, v in links.items():
                        if v is link:
                            links.pop(k)
                    exposed.pop(alias, None)
        else:
            # Remove all components
            self._components = {}
            self._exposed = {}

            self.links.clear()
4360
# =============================================================================
class S3AxisFilter(object):
    """
        Experimental: helper class to extract filter values for pivot
        table axis fields
    """

    # -------------------------------------------------------------------------
    def __init__(self, qdict, tablenames):
        """
            Constructor, recursively introspect the query dict and extract
            all relevant subqueries.

            @param qdict: the query dict (from Query.as_dict(flat=True))
            @param tablenames: the names of the relevant tables
        """

        # Left and right operands (S3AxisFilter instances or literal values)
        self.l = None
        self.r = None
        # Operator name; None if this subtree is irrelevant for the axis
        self.op = None

        # Table/field name if the left operand is a relevant field
        self.tablename = None
        self.fieldname = None

        if not qdict:
            return

        l = qdict["first"]
        if "second" in qdict:
            r = qdict["second"]
        else:
            r = None

        op = qdict["op"]
        if op:
            # Convert operator name to standard uppercase name
            # without underscore prefix
            op = op.upper().strip("_")

        if "tablename" in l:
            # Left operand is a field
            if l["tablename"] in tablenames:
                self.tablename = l["tablename"]
                self.fieldname = l["fieldname"]
                if isinstance(r, dict):
                    # Right operand is a sub-expression => not a literal
                    # comparison, ignore
                    self.op = None
                else:
                    self.op = op
                    self.r = r

        elif op == "AND":
            # Relevant if either branch is relevant
            self.l = S3AxisFilter(l, tablenames)
            self.r = S3AxisFilter(r, tablenames)
            if self.l.op or self.r.op:
                self.op = op

        elif op == "OR":
            # Relevant only if both branches are relevant
            self.l = S3AxisFilter(l, tablenames)
            self.r = S3AxisFilter(r, tablenames)
            if self.l.op and self.r.op:
                self.op = op

        elif op == "NOT":
            self.l = S3AxisFilter(l, tablenames)
            self.op = op

        else:
            # Other unary operator => relevant if the operand is
            self.l = S3AxisFilter(l, tablenames)
            if self.l.op:
                self.op = op

    # -------------------------------------------------------------------------
    def query(self):
        """
            Reconstruct the query from this filter

            @return: a Query, or None if this filter is not relevant
        """

        op = self.op
        if op is None:
            return None

        # Resolve the left operand
        if self.tablename and self.fieldname:
            l = current.s3db[self.tablename][self.fieldname]
        elif self.l:
            l = self.l.query()
        else:
            l = None

        # Resolve the right operand for logical operators
        r = self.r
        if op in ("AND", "OR", "NOT"):
            r = r.query() if r else True

        if op == "AND":
            if l is not None and r is not None:
                return l & r
            elif r is not None:
                return r
            else:
                return l
        elif op == "OR":
            if l is not None and r is not None:
                return l | r
            else:
                return None
        elif op == "NOT":
            if l is not None:
                return ~l
            else:
                return None
        elif l is None:
            return None

        if isinstance(r, S3AxisFilter):
            r = r.query()
        if r is None:
            return None

        # Comparison operators
        if op == "LOWER":
            return l.lower()
        elif op == "UPPER":
            return l.upper()
        elif op == "EQ":
            return l == r
        elif op == "NE":
            return l != r
        elif op == "LT":
            return l < r
        elif op == "LE":
            return l <= r
        elif op == "GE":
            return l >= r
        elif op == "GT":
            return l > r
        elif op == "BELONGS":
            return l.belongs(r)
        elif op == "CONTAINS":
            return l.contains(r)
        else:
            return None

    # -------------------------------------------------------------------------
    def values(self, rfield):
        """
            Helper method to filter list:type axis values

            @param rfield: the axis field

            @return: pair of value lists [include], [exclude]
        """

        op = self.op
        tablename = self.tablename
        fieldname = self.fieldname

        if tablename == rfield.tname and \
           fieldname == rfield.fname:
            # This filter applies directly to the axis field
            value = self.r
            if isinstance(value, (list, tuple)):
                value = [s3_unicode(v) for v in value]
                if not value:
                    value = [None]
            else:
                value = [s3_unicode(value)]
            if op == "CONTAINS":
                return value, []
            elif op == "EQ":
                return value, []
            elif op == "NE":
                return [], value
        elif op == "AND":
            # Include the intersection of both branches
            li, le = self.l.values(rfield)
            ri, re = self.r.values(rfield)
            return [v for v in li + ri if v not in le + re], []
        elif op == "OR":
            # Include the union of both branches
            li, le = self.l.values(rfield)
            ri, re = self.r.values(rfield)
            return [v for v in li + ri], []
        if op == "NOT":
            # Exclude what the operand includes
            li, le = self.l.values(rfield)
            return [], li
        return [], []
4539
4540 # ============================================================================= 4541 -class S3ResourceFilter(object):
4542 """ Class representing a resource filter """ 4543
4544 - def __init__(self, 4545 resource, 4546 id=None, 4547 uid=None, 4548 filter=None, 4549 vars=None, 4550 extra_filters=None, 4551 filter_component=None):
4552 """ 4553 Constructor 4554 4555 @param resource: the S3Resource 4556 @param id: the record ID (or list of record IDs) 4557 @param uid: the record UID (or list of record UIDs) 4558 @param filter: a filter query (S3ResourceQuery or Query) 4559 @param vars: the dict of GET vars (URL filters) 4560 @param extra_filters: extra filters (to be applied on 4561 pre-filtered subsets), as list of 4562 tuples (method, expression) 4563 @param filter_component: the alias of the component the URL 4564 filters apply for (filters for this 4565 component must be handled separately) 4566 """ 4567 4568 self.resource = resource 4569 4570 self.queries = [] 4571 self.filters = [] 4572 self.cqueries = {} 4573 self.cfilters = {} 4574 4575 # Extra filters 4576 self._extra_filter_methods = None 4577 if extra_filters: 4578 self.set_extra_filters(extra_filters) 4579 else: 4580 self.efilters = [] 4581 4582 self.query = None 4583 self.rfltr = None 4584 self.vfltr = None 4585 4586 self.transformed = None 4587 4588 self.multiple = True 4589 self.distinct = False 4590 4591 # Joins 4592 self.ijoins = {} 4593 self.ljoins = {} 4594 4595 table = resource.table 4596 4597 # Accessible/available query 4598 if resource.accessible_query is not None: 4599 method = [] 4600 if resource._approved: 4601 method.append("read") 4602 if resource._unapproved: 4603 method.append("review") 4604 mquery = resource.accessible_query(method, table) 4605 else: 4606 mquery = (table._id > 0) 4607 4608 # Deletion status 4609 DELETED = current.xml.DELETED 4610 if DELETED in table.fields and not resource.include_deleted: 4611 remaining = (table[DELETED] != True) 4612 mquery = remaining & mquery 4613 4614 # ID query 4615 if id is not None: 4616 if not isinstance(id, (list, tuple)): 4617 self.multiple = False 4618 mquery = mquery & (table._id == id) 4619 else: 4620 mquery = mquery & (table._id.belongs(id)) 4621 4622 # UID query 4623 UID = current.xml.UID 4624 if uid is not None and UID in table: 4625 if not isinstance(uid, 
(list, tuple)): 4626 self.multiple = False 4627 mquery = mquery & (table[UID] == uid) 4628 else: 4629 mquery = mquery & (table[UID].belongs(uid)) 4630 4631 parent = resource.parent 4632 if not parent: 4633 # Standard master query 4634 self.mquery = mquery 4635 4636 # URL queries 4637 if vars: 4638 resource.vars = Storage(vars) 4639 4640 if not vars.get("track"): 4641 # Apply BBox Filter unless using S3Track to geolocate 4642 bbox, joins = self.parse_bbox_query(resource, vars) 4643 if bbox is not None: 4644 self.queries.append(bbox) 4645 if joins: 4646 self.ljoins.update(joins) 4647 4648 # Filters 4649 add_filter = self.add_filter 4650 4651 # Current concept: 4652 # Interpret all URL filters in the context of master 4653 queries = S3URLQuery.parse(resource, vars) 4654 4655 # @todo: Alternative concept (inconsistent?): 4656 # Interpret all URL filters in the context of filter_component: 4657 #if filter_component: 4658 # context = resource.components.get(filter_component) 4659 # if not context: 4660 # context = resource 4661 #queries = S3URLQuery.parse(context, vars) 4662 4663 for alias in queries: 4664 if filter_component == alias: 4665 for q in queries[alias]: 4666 add_filter(q, component=alias, master=False) 4667 else: 4668 for q in queries[alias]: 4669 add_filter(q) 4670 self.cfilters = queries 4671 else: 4672 # Parent filter 4673 pf = parent.rfilter 4674 if not pf: 4675 pf = parent.build_query() 4676 4677 # Extended master query 4678 self.mquery = mquery & pf.get_query() 4679 4680 # Join the master 4681 self.ijoins[parent._alias] = resource._join(reverse=True) 4682 4683 # Component/link-table specific filters 4684 add_filter = self.add_filter 4685 aliases = [resource.alias] 4686 if resource.link is not None: 4687 aliases.append(resource.link.alias) 4688 elif resource.linked is not None: 4689 aliases.append(resource.linked.alias) 4690 for alias in aliases: 4691 for filter_set in (pf.cqueries, pf.cfilters): 4692 if alias in filter_set: 4693 [add_filter(q) for q in 
filter_set[alias]] 4694 4695 # Additional filters 4696 if filter is not None: 4697 self.add_filter(filter)
4698 4699 # ------------------------------------------------------------------------- 4700 # Properties 4701 # ------------------------------------------------------------------------- 4702 @property
4703 - def extra_filter_methods(self):
4704 """ 4705 Getter for extra filter methods, lazy property so methods 4706 are only imported/initialized when needed 4707 4708 @todo: document the expected signature of filter methods 4709 4710 @return: dict {name: callable} of known named filter methods 4711 """ 4712 4713 methods = self._extra_filter_methods 4714 if methods is None: 4715 4716 # @todo: implement hooks 4717 methods = {} 4718 4719 self._extra_filter_methods = methods 4720 4721 return methods
4722 4723 # ------------------------------------------------------------------------- 4724 # Manipulation 4725 # -------------------------------------------------------------------------
4726 - def add_filter(self, query, component=None, master=True):
4727 """ 4728 Extend this filter 4729 4730 @param query: a Query or S3ResourceQuery object 4731 @param component: alias of the component the filter shall be 4732 added to (None for master) 4733 @param master: False to filter only component 4734 """ 4735 4736 alias = None 4737 if not master: 4738 if not component: 4739 return 4740 if component != self.resource.alias: 4741 alias = component 4742 4743 if isinstance(query, S3ResourceQuery): 4744 self.transformed = None 4745 filters = self.filters 4746 cfilters = self.cfilters 4747 self.distinct |= query._joins(self.resource)[1] 4748 4749 else: 4750 # DAL Query 4751 filters = self.queries 4752 cfilters = self.cqueries 4753 4754 self.query = None 4755 if alias: 4756 if alias in self.cfilters: 4757 cfilters[alias].append(query) 4758 else: 4759 cfilters[alias] = [query] 4760 else: 4761 filters.append(query) 4762 return
4763 4764 # -------------------------------------------------------------------------
4765 - def add_extra_filter(self, method, expression):
4766 """ 4767 Add an extra filter 4768 4769 @param method: a name of a known filter method, or a 4770 callable filter method 4771 @param expression: the filter expression (string) 4772 """ 4773 4774 efilters = self.efilters 4775 efilters.append((method, expression)) 4776 4777 return efilters
4778 4779 # -------------------------------------------------------------------------
4780 - def set_extra_filters(self, filters):
4781 """ 4782 Replace the current extra filters 4783 4784 @param filters: list of tuples (method, expression), or None 4785 to remove all extra filters 4786 """ 4787 4788 self.efilters = [] 4789 if filters: 4790 add = self.add_extra_filter 4791 for method, expression in filters: 4792 add(method, expression) 4793 4794 return self.efilters
4795 4796 # ------------------------------------------------------------------------- 4797 # Getters 4798 # -------------------------------------------------------------------------
    def get_query(self):
        """
        Get the effective DAL query

        Combines the master query with all DAL sub-queries and the
        DAL-translatable part of the S3ResourceQuery filters; the
        result is cached in self.query until invalidated (add_filter
        resets self.query to None).

        @return: the effective Query (DAL)
        """

        if self.query is not None:
            # Use the cached query
            return self.query

        resource = self.resource

        # Conjunction of master query and all plain DAL sub-queries
        query = reduce(lambda x, y: x & y, self.queries, self.mquery)

        if self.filters:
            if self.transformed is None:

                # Combine all filters
                filters = reduce(lambda x, y: x & y, self.filters)

                # Transform with external search engine
                transformed = filters.transform(resource)
                self.transformed = transformed

                # Split DAL and virtual filters
                self.rfltr, self.vfltr = transformed.split(resource)

            # Add to query
            rfltr = self.rfltr
            if isinstance(rfltr, S3ResourceQuery):

                # Resolve query against the resource
                rq = rfltr.query(resource)

                # False indicates that the subquery shall be ignored
                # (e.g. if not supported by platform)
                if rq is not False:
                    query &= rq

            elif rfltr is not None:

                # Combination of virtual field filter and web2py Query
                query &= rfltr

        # Cache the effective query
        self.query = query

        return query
4840 4841 # -------------------------------------------------------------------------
4842 - def get_filter(self):
4843 """ Get the effective virtual filter """ 4844 4845 if self.query is None: 4846 self.get_query() 4847 return self.vfltr
4848 4849 # -------------------------------------------------------------------------
4850 - def get_extra_filters(self):
4851 """ 4852 Get the list of extra filters 4853 4854 @return: list of tuples (method, expression) 4855 """ 4856 4857 return list(self.efilters)
4858 4859 # -------------------------------------------------------------------------
    def get_joins(self, left=False, as_list=True):
        """
        Get the joins required for this filter

        @param left: get the left joins (rather than the inner joins)
        @param as_list: return a flat list rather than a nested dict
                        {tablename: [joins]}
        """

        # Assembling the query also resolves the filter joins
        if self.query is None:
            self.get_query()

        joins = dict(self.ljoins if left else self.ijoins)

        # Add the joins required by each resource filter
        resource = self.resource
        for q in self.filters:
            subjoins = q._joins(resource, left=left)[0]
            joins.update(subjoins)

        # Cross-component left joins
        parent = resource.parent
        if parent:
            pf = parent.rfilter
            if pf is None:
                pf = parent.build_query()

            parent_left = pf.get_joins(left=True, as_list=False)
            if parent_left:
                tablename = resource._alias
                if left:
                    # Inherit the parent's left joins (except any join
                    # of this component itself), plus join the parent
                    for tn in parent_left:
                        if tn not in joins and tn != tablename:
                            joins[tn] = parent_left[tn]
                    joins[parent._alias] = resource._join(reverse=True)
                else:
                    # Parent is already joined in the master query
                    joins.pop(parent._alias, None)

        if as_list:
            # Flatten the dict of per-table join lists
            return [j for tablename in joins for j in joins[tablename]]
        else:
            return joins
4900 4901 # -------------------------------------------------------------------------
4902 - def get_fields(self):
4903 """ Get all field selectors in this filter """ 4904 4905 if self.query is None: 4906 self.get_query() 4907 4908 if self.vfltr: 4909 return self.vfltr.fields() 4910 else: 4911 return []
4912 4913 # ------------------------------------------------------------------------- 4914 # Filtering 4915 # -------------------------------------------------------------------------
    def __call__(self, rows, start=None, limit=None):
        """
        Filter a set of rows by the effective virtual filter

        @param rows: a Rows object
        @param start: index of the first matching record to select
        @param limit: maximum number of records to select

        @return: a new Rows object with the matching subset (or the
                 input unchanged if there is no virtual filter)
        """

        vfltr = self.get_filter()

        if rows is None or vfltr is None:
            # Nothing to filter
            return rows
        resource = self.resource
        if start is None:
            start = 0
        first = start
        if limit is not None:
            last = start + limit
            # Normalize the selection window to 0 <= first <= last
            if last < first:
                first, last = last, first
            if first < 0:
                first = 0
            if last < 0:
                last = 0
        else:
            last = None
        i = 0
        result = []
        append = result.append
        for row in rows:
            if last is not None and i >= last:
                # Window complete
                break
            success = vfltr(resource, row, virtual=True)
            # A None result counts as a match (filter undecidable
            # for this row)
            if success or success is None:
                if i >= first:
                    append(row)
                i += 1
        return Rows(rows.db, result,
                    colnames=rows.colnames, compact=False)
4956 4957 # -------------------------------------------------------------------------
    def apply_extra_filters(self, ids, start=None, limit=None):
        """
        Apply all extra filters on a list of record ids

        @param ids: the pre-filtered set of record IDs
        @param start: index of the first matching ID to include in
                      the result (applied after filtering)
        @param limit: the maximum number of matching IDs to establish,
                      None to find all matching IDs

        @return: a sequence of matching IDs
        """

        # Get the resource
        resource = self.resource

        # Get extra filters
        efilters = self.efilters

        # Resolve filter methods (names => callables)
        methods = self.extra_filter_methods
        filters = []
        append = filters.append
        for method, expression in efilters:
            if callable(method):
                append((method, expression))
            else:
                method = methods.get(method)
                if method:
                    append((method, expression))
                else:
                    current.log.warning("Unknown filter method: %s" % method)
        if not filters:
            # No applicable filters
            return ids

        # Clear extra filters so that apply_extra_filters is not
        # called from inside a filter method (e.g. if the method
        # uses resource.select)
        self.efilters = []

        # Initialize subset
        subset = set()
        tail = ids
        limit_ = limit

        # Process the IDs in chunks of limit_ until either all IDs
        # have been checked or enough matches have been found
        while tail:

            if limit:
                head, tail = tail[:limit_], tail[limit_:]
            else:
                head, tail = tail, None

            match = head
            for method, expression in filters:
                # Apply filter
                match = method(resource, match, expression)
                if not match:
                    break

            if match:
                subset |= set(match)

            found = len(subset)

            if limit:
                if found < limit:
                    # Need more
                    limit_ = limit - found
                else:
                    # Found all
                    tail = None

        # Restore order
        subset = [item for item in ids if item in subset]

        # Select start
        if start:
            subset = subset[start:]

        # Restore extra filters
        self.efilters = efilters

        return subset
5040 5041 # -------------------------------------------------------------------------
5042 - def count(self, left=None, distinct=False):
5043 """ 5044 Get the total number of matching records 5045 5046 @param left: left outer joins 5047 @param distinct: count only distinct rows 5048 """ 5049 5050 distinct |= self.distinct 5051 5052 resource = self.resource 5053 if resource is None: 5054 return 0 5055 5056 table = resource.table 5057 5058 vfltr = self.get_filter() 5059 5060 if vfltr is None and not distinct: 5061 5062 tablename = table._tablename 5063 5064 ijoins = S3Joins(tablename, self.get_joins(left=False)) 5065 ljoins = S3Joins(tablename, self.get_joins(left=True)) 5066 ljoins.add(left) 5067 5068 join = ijoins.as_list(prefer=ljoins) 5069 left = ljoins.as_list() 5070 5071 cnt = table._id.count() 5072 row = current.db(self.query).select(cnt, 5073 join=join, 5074 left=left).first() 5075 if row: 5076 return row[cnt] 5077 else: 5078 return 0 5079 5080 else: 5081 data = resource.select([table._id.name], 5082 # We don't really want to retrieve 5083 # any rows but just count, hence: 5084 limit=1, 5085 count=True) 5086 return data["numrows"]
5087 5088 # ------------------------------------------------------------------------- 5089 # Utility Methods 5090 # -------------------------------------------------------------------------
5091 - def __repr__(self):
5092 """ String representation of the instance """ 5093 5094 resource = self.resource 5095 5096 inner_joins = self.get_joins(left=False) 5097 if inner_joins: 5098 inner = S3Joins(resource.tablename, inner_joins) 5099 ijoins = ", ".join([str(j) for j in inner.as_list()]) 5100 else: 5101 ijoins = None 5102 5103 left_joins = self.get_joins(left=True) 5104 if left_joins: 5105 left = S3Joins(resource.tablename, left_joins) 5106 ljoins = ", ".join([str(j) for j in left.as_list()]) 5107 else: 5108 ljoins = None 5109 5110 vfltr = self.get_filter() 5111 if vfltr: 5112 vfltr = vfltr.represent(resource) 5113 else: 5114 vfltr = None 5115 5116 represent = "<S3ResourceFilter %s, " \ 5117 "query=%s, " \ 5118 "join=[%s], " \ 5119 "left=[%s], " \ 5120 "distinct=%s, " \ 5121 "filter=%s>" % (resource.tablename, 5122 self.get_query(), 5123 ijoins, 5124 ljoins, 5125 self.distinct, 5126 vfltr, 5127 ) 5128 5129 return represent
    # -------------------------------------------------------------------------
    @staticmethod
    def parse_bbox_query(resource, get_vars):
        """
        Generate a Query from a URL boundary box query; supports multiple
        bboxes, but optimised for the usual case of just 1

        @param resource: the resource
        @param get_vars: the URL GET vars

        @return: tuple (query, joins) - the bbox filter query (or None
                 if no valid bbox was found) and a dict of the left
                 joins required by the query
        """

        # Tables which hold lat/lon themselves (no introspection of
        # location references required)
        tablenames = ("gis_location",
                      "gis_feature_query",
                      "gis_layer_shapefile",
                      )

        POLYGON = "POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))"

        query = None
        joins = {}

        if get_vars:

            table = resource.table
            tablename = resource.tablename
            fields = table.fields

            introspect = tablename not in tablenames
            for k, v in get_vars.items():

                if k[:4] == "bbox":

                    if type(v) is list:
                        v = v[-1]
                    try:
                        minLon, minLat, maxLon, maxLat = v.split(",")
                    except ValueError:
                        # Badly-formed bbox - ignore
                        continue

                    # Identify the location reference
                    field = None
                    rfield = None
                    alias = False

                    if k.find(".") != -1:

                        # Field specified in query (e.g. bbox.location_id=...)
                        fname = k.split(".")[1]
                        if fname not in fields:
                            # Field not found - ignore
                            continue
                        field = table[fname]
                        if query is not None or "bbox" in get_vars:
                            # Need alias (multiple bboxes join
                            # gis_location more than once)
                            alias = True

                    elif introspect:

                        # Location context?
                        context = resource.get_config("context")
                        if context and "location" in context:
                            try:
                                rfield = resource.resolve_selector("(location)$lat")
                            except (SyntaxError, AttributeError):
                                rfield = None
                            else:
                                if not rfield.field or rfield.tname != "gis_location":
                                    # Invalid location context
                                    rfield = None

                        # Fall back to location_id (or site_id as last resort)
                        if rfield is None:
                            fname = None
                            for f in fields:
                                ftype = str(table[f].type)
                                if ftype[:22] == "reference gis_location":
                                    fname = f
                                    break
                                elif not fname and \
                                     ftype[:18] == "reference org_site":
                                    fname = f
                            field = table[fname] if fname else None

                    if not rfield and not field:
                        # No location reference could be identified => skip
                        continue

                    # Construct the join to gis_location
                    gtable = current.s3db.gis_location
                    if rfield:
                        # Location context: re-use the resolved joins
                        joins.update(rfield.left)

                    elif field:
                        fname = field.name
                        gtable = current.s3db.gis_location
                        if alias:
                            gtable = gtable.with_alias("gis_%s_location" % fname)
                        tname = str(gtable)
                        ftype = str(field.type)
                        if ftype == "reference gis_location":
                            joins[tname] = [gtable.on(gtable.id == field)]
                        elif ftype == "reference org_site":
                            # Join via org_site to reach the location
                            stable = current.s3db.org_site
                            if alias:
                                stable = stable.with_alias("org_%s_site" % fname)
                            joins[tname] = [stable.on(stable.site_id == field),
                                            gtable.on(gtable.id == stable.location_id)]
                        elif introspect:
                            # => not a location or site reference
                            continue

                    elif tablename in ("gis_location", "gis_feature_query"):
                        # Table has lat/lon itself
                        gtable = table

                    elif tablename == "gis_layer_shapefile":
                        # Find the layer_shapefile_%(layer_id)s component
                        # (added dynamically in gis/layer_shapefile controller)
                        gtable = None
                        hooks = current.s3db.get_hooks("gis_layer_shapefile")[1]
                        for alias in hooks:
                            if alias[:19] == "gis_layer_shapefile":
                                component = resource.components.get(alias)
                                if component:
                                    gtable = component.table
                                    break
                        # Join by layer_id
                        if gtable:
                            joins[str(gtable)] = \
                                [gtable.on(gtable.layer_id == table._id)]
                        else:
                            continue

                    # Construct the bbox filter
                    bbox_filter = None
                    if current.deployment_settings.get_gis_spatialdb():
                        # Use the Spatial Database
                        minLon = float(minLon)
                        maxLon = float(maxLon)
                        minLat = float(minLat)
                        maxLat = float(maxLat)
                        bbox = POLYGON % (minLon, minLat,
                                          minLon, maxLat,
                                          maxLon, maxLat,
                                          maxLon, minLat,
                                          minLon, minLat)
                        try:
                            # Spatial DAL & Database
                            bbox_filter = gtable.the_geom \
                                                .st_intersects(bbox)
                        # NOTE(review): bare except deliberately swallows
                        # everything here; consider narrowing
                        except:
                            # Old DAL or non-spatial database
                            pass

                    if bbox_filter is None:
                        # Standard Query on lat/lon columns
                        bbox_filter = (gtable.lon > float(minLon)) & \
                                      (gtable.lon < float(maxLon)) & \
                                      (gtable.lat > float(minLat)) & \
                                      (gtable.lat < float(maxLat))

                    # Add bbox filter to query
                    if query is None:
                        query = bbox_filter
                    else:
                        # Merge with the previous BBOX
                        query = query & bbox_filter

        return query, joins
5300 5301 # -------------------------------------------------------------------------
5302 - def serialize_url(self):
5303 """ 5304 Serialize this filter as URL query 5305 5306 @return: a Storage of URL GET variables 5307 """ 5308 5309 resource = self.resource 5310 url_vars = Storage() 5311 for f in self.filters: 5312 sub = f.serialize_url(resource=resource) 5313 url_vars.update(sub) 5314 return url_vars
5315
5316 # ============================================================================= 5317 -class S3ResourceData(object):
5318 """ Class representing data in a resource """ 5319
    def __init__(self,
                 resource,
                 fields,
                 start=0,
                 limit=None,
                 left=None,
                 orderby=None,
                 groupby=None,
                 distinct=False,
                 virtual=True,
                 count=False,
                 getids=False,
                 as_rows=False,
                 represent=False,
                 show_links=True,
                 raw_data=False):
        """
        Constructor, extracts (and represents) data from a resource

        @param resource: the resource
        @param fields: the fields to extract (selector strings)
        @param start: index of the first record
        @param limit: maximum number of records
        @param left: additional left joins required for custom filters
        @param orderby: orderby-expression for DAL
        @param groupby: fields to group by (overrides fields!)
        @param distinct: select distinct rows
        @param virtual: include mandatory virtual fields
        @param count: include the total number of matching records
        @param getids: include the IDs of all matching records
        @param as_rows: return the rows (don't extract/represent)
        @param represent: render field value representations
        @param show_links: render representations with links where
                           available
        @param raw_data: include raw data in the result

        @note: as_rows / groupby prevent automatic splitting of
               large multi-table joins, so use with care!
        @note: with groupby, only the groupby fields will be returned
               (i.e. fields will be ignored), because aggregates are
               not supported (yet)
        """

        db = current.db

        # Suppress instantiation of LazySets in rows where we don't need them
        if not as_rows and not groupby:
            rname = db._referee_name
            db._referee_name = None
        else:
            rname = None

        # The resource
        self.resource = resource
        self.table = table = resource.table

        # Dict to collect accessible queries for differential
        # field authorization (each joined table is authorized
        # separately)
        self.aqueries = aqueries = {}

        # Retain the current accessible-context of the parent
        # resource in reverse component joins:
        parent = resource.parent
        if parent and parent.accessible_query is not None:
            method = []
            if parent._approved:
                method.append("read")
            if parent._unapproved:
                method.append("review")
            aqueries[parent.tablename] = parent.accessible_query(method,
                                                                 parent.table,
                                                                 )

        # Joins (inner/left)
        tablename = table._tablename
        self.ijoins = ijoins = S3Joins(tablename)
        self.ljoins = ljoins = S3Joins(tablename)

        # The query
        master_query = query = resource.get_query()

        # Joins from filters
        # @note: in components, rfilter is None until after get_query!
        rfilter = resource.rfilter
        filter_tables = set(ijoins.add(rfilter.get_joins(left=False)))
        filter_tables.update(ljoins.add(rfilter.get_joins(left=True)))

        # Left joins from caller
        master_tables = set(ljoins.add(left))
        filter_tables.update(master_tables)

        resolve = resource.resolve_selectors

        # Virtual fields and extra fields required by filter
        virtual_fields = rfilter.get_fields()
        vfields, vijoins, vljoins, d = resolve(virtual_fields, show=False)
        extra_tables = set(ijoins.extend(vijoins))
        extra_tables.update(ljoins.extend(vljoins))
        distinct |= d

        # Display fields (fields to include in the result)
        if fields is None:
            fields = [f.name for f in resource.readable_fields()]
        dfields, dijoins, dljoins, d = resolve(fields, extra_fields=False)
        ijoins.extend(dijoins)
        ljoins.extend(dljoins)
        distinct |= d

        # Primary key
        pkey = str(table._id)

        # Initialize field data and effort estimates
        if not groupby or as_rows:
            self.init_field_data(dfields)
        else:
            self.field_data = self.effort = None

        # Resolve ORDERBY
        orderby, orderby_aggr, orderby_fields, tables = self.resolve_orderby(orderby)
        if tables:
            filter_tables.update(tables)

        # Joins for filter query
        filter_ijoins = ijoins.as_list(tablenames = filter_tables,
                                       aqueries = aqueries,
                                       prefer = ljoins,
                                       )
        filter_ljoins = ljoins.as_list(tablenames = filter_tables,
                                       aqueries = aqueries,
                                       )

        # Virtual fields filter
        vfilter = resource.get_filter()

        # Extra filters
        efilter = rfilter.get_extra_filters()

        # Is this a paginated request?
        pagination = limit is not None or start

        # Subselect?
        if ljoins or ijoins or \
           efilter or \
           vfilter and pagination:
            subselect = True
        else:
            subselect = False

        # Do we need a filter query?
        fq = count_only = False
        if not groupby:
            end_count = (vfilter or efilter) and not pagination
            if count and not end_count:
                fq = True
                count_only = True
            if subselect or \
               getids and pagination or \
               extra_tables and extra_tables != filter_tables:
                fq = True
                count_only = False

        # Shall we use scalability-optimized strategies?
        bigtable = current.deployment_settings.get_base_bigtable()

        # Filter Query:
        # If we need to determine the number and/or ids of all matching
        # records, but not to extract all records, then we run a
        # separate query here to extract just this information:
        ids = page = totalrows = None
        if fq:
            # Execute the filter query
            if bigtable:
                limitby = resource.limitby(start=start, limit=limit)
            else:
                limitby = None
            totalrows, ids = self.filter_query(query,
                                               join = filter_ijoins,
                                               left = filter_ljoins,
                                               getids = not count_only,
                                               orderby = orderby_aggr,
                                               limitby = limitby,
                                               )

        # Simplify the master query if possible
        empty = False
        limitby = None
        orderby_on_limitby = True

        # If we know all possible record IDs from the filter query,
        # then we can simplify the master query so it doesn't need
        # complex joins
        if ids is not None:
            if not ids:
                # No records matching the filter query, so we
                # can skip the master query too
                empty = True
            else:
                # Which records do we need to extract?
                if pagination and (efilter or vfilter):
                    master_ids = ids
                else:
                    if bigtable:
                        master_ids = page = ids
                    else:
                        limitby = resource.limitby(start=start, limit=limit)
                        if limitby:
                            page = ids[limitby[0]:limitby[1]]
                        else:
                            page = ids
                        master_ids = page

                # Simplify master query
                if page is not None and not page:
                    # Empty page, skip the master query
                    empty = True
                    master_query = None
                elif len(master_ids) == 1:
                    # Single record, don't use belongs (faster)
                    master_query = table._id == master_ids[0]
                else:
                    master_query = table._id.belongs(set(master_ids))

                orderby = None
                # NOTE(review): condition parses as (not ljoins) or ijoins,
                # which contradicts the comment below - verify the intended
                # parenthesization is not "not (ljoins or ijoins)"
                if not ljoins or ijoins:
                    # Without joins, there can only be one row per id,
                    # so we can limit the master query (faster)
                    limitby = (0, len(master_ids))
                    # Prevent automatic ordering
                    orderby_on_limitby = False
                else:
                    # With joins, there could be more than one row per id,
                    # so we can not limit the master query
                    limitby = None

        elif pagination and not (efilter or vfilter or count or getids):

            limitby = resource.limitby(start=start, limit=limit)

        if not empty:
            # If we don't use a simplified master_query, we must include
            # all necessary joins for filter and orderby (=filter_tables) in
            # the master query
            if ids is None and (filter_ijoins or filter_ljoins):
                master_tables = filter_tables

            # Determine fields in master query
            if not groupby:
                master_tables.update(extra_tables)
            tables, qfields, mfields, groupby = self.master_fields(dfields,
                                                                   vfields,
                                                                   master_tables,
                                                                   as_rows = as_rows,
                                                                   groupby = groupby,
                                                                   )
            # Additional tables to join?
            if tables:
                master_tables.update(tables)

            # ORDERBY settings
            if groupby:
                distinct = False
                orderby = orderby_aggr
                has_id = pkey in qfields
            else:
                if distinct and orderby:
                    # With DISTINCT, ORDERBY-fields must appear in SELECT
                    # (required by postgresql?)
                    for orderby_field in orderby_fields:
                        fn = str(orderby_field)
                        if fn not in qfields:
                            qfields[fn] = orderby_field

                # Make sure we have the primary key in SELECT
                if pkey not in qfields:
                    qfields[pkey] = resource._id
                has_id = True

            # Execute master query
            db = current.db

            master_fields = qfields.keys()
            if not groupby and not pagination and \
               has_id and ids and len(master_fields) == 1:
                # We already have the ids, and master query doesn't select
                # anything else => skip the master query, construct Rows from
                # ids instead
                master_id = table._id.name
                rows = Rows(db,
                            [Row({master_id: record_id}) for record_id in ids],
                            colnames = [pkey],
                            compact = False,
                            )
                # Add field methods (some do work from bare ids)
                try:
                    fields_lazy = [(f.name, f) for f in table._virtual_methods]
                except (AttributeError, TypeError):
                    # Incompatible PyDAL version
                    pass
                else:
                    if fields_lazy:
                        for row in rows:
                            for f, v in fields_lazy:
                                try:
                                    row[f] = (v.handler or VirtualCommand)(v.f, row)
                                except (AttributeError, KeyError):
                                    pass
            else:
                # Joins for master query
                master_ijoins = ijoins.as_list(tablenames = master_tables,
                                               aqueries = aqueries,
                                               prefer = ljoins,
                                               )
                master_ljoins = ljoins.as_list(tablenames = master_tables,
                                               aqueries = aqueries,
                                               )

                # Suspend (mandatory) virtual fields if so requested
                if not virtual:
                    vf = table.virtualfields
                    osetattr(table, "virtualfields", [])

                rows = db(master_query).select(join = master_ijoins,
                                               left = master_ljoins,
                                               distinct = distinct,
                                               groupby = groupby,
                                               orderby = orderby,
                                               limitby = limitby,
                                               orderby_on_limitby = orderby_on_limitby,
                                               cacheable = not as_rows,
                                               *qfields.values())

                # Restore virtual fields
                if not virtual:
                    osetattr(table, "virtualfields", vf)

        else:
            # Empty result set, no master query required
            rows = Rows(current.db)

        # Apply any virtual/extra filters, determine the subset
        if not len(rows) and not ids:

            # Empty set => empty subset (no point to filter/count)
            page = []
            ids = []
            totalrows = 0

        elif not groupby:
            if efilter or vfilter:

                # Filter by virtual fields
                shortcut = False
                if vfilter:
                    if pagination and not any((getids, count, efilter)):
                        # Don't need ids or totalrows
                        rows = rfilter(rows, start=start, limit=limit)
                        page = self.getids(rows, pkey)
                        shortcut = True
                    else:
                        rows = rfilter(rows)

                # Extra filter
                if efilter:
                    if vfilter or not ids:
                        ids = self.getids(rows, pkey)
                    if pagination and not (getids or count):
                        limit_ = start + limit
                    else:
                        limit_ = None
                    ids = rfilter.apply_extra_filters(ids, limit = limit_)
                    rows = self.getrows(rows, ids, pkey)

                if pagination:
                    # Subset selection with vfilter/efilter
                    # (=post-filter pagination)
                    if not shortcut:
                        if not efilter:
                            ids = self.getids(rows, pkey)
                        totalrows = len(ids)
                        rows, page = self.subset(rows, ids,
                                                 start = start,
                                                 limit = limit,
                                                 has_id = has_id,
                                                 )
                else:
                    # Unlimited select with vfilter/efilter
                    if not efilter:
                        ids = self.getids(rows, pkey)
                    page = ids
                    totalrows = len(ids)

            elif pagination:

                if page is None:
                    if limitby:
                        # Limited master query without count/getids
                        # (=rows is the subset, only need page IDs)
                        page = self.getids(rows, pkey)
                    else:
                        # Limited select with unlimited master query
                        # (=getids/count without filter query, need subset)
                        if not ids:
                            ids = self.getids(rows, pkey)
                        # Build the subset
                        rows, page = self.subset(rows, ids,
                                                 start = start,
                                                 limit = limit,
                                                 has_id = has_id,
                                                 )
                        totalrows = len(ids)

            elif not ids:
                # Unlimited select without vfilter/efilter
                page = ids = self.getids(rows, pkey)
                totalrows = len(ids)

        # Build the result
        self.rfields = dfields
        self.numrows = 0 if totalrows is None else totalrows
        self.ids = ids

        if groupby or as_rows:
            # Just store the rows, no further queries or extraction
            self.rows = rows

        elif not rows:
            # No rows found => empty list
            self.rows = []

        else:
            # Extract the data from the master rows
            records = self.extract(rows,
                                   pkey,
                                   list(mfields),
                                   join = hasattr(rows[0], tablename),
                                   represent = represent,
                                   )

            # Extract the page record IDs if we don't have them yet
            if page is None:
                if ids is None:
                    self.ids = ids = self.getids(rows, pkey)
                page = ids


            # Execute any joined queries
            joined_fields = self.joined_fields(dfields, qfields)
            joined_query = table._id.belongs(page)

            for jtablename, jfields in joined_fields.items():
                records = self.joined_query(jtablename,
                                            joined_query,
                                            jfields,
                                            records,
                                            represent = represent,
                                            )

            # Re-combine and represent the records
            results = {}

            field_data = self.field_data
            NONE = current.messages["NONE"]

            render = self.render
            for dfield in dfields:

                if represent:
                    # results = {RecordID: {ColumnName: Representation}}
                    results = render(dfield,
                                     results,
                                     none = NONE,
                                     raw_data = raw_data,
                                     show_links = show_links,
                                     )

                else:
                    # results = {RecordID: {ColumnName: Value}}
                    colname = dfield.colname

                    fdata = field_data[colname]
                    frecords = fdata[1]
                    list_type = fdata[3]

                    for record_id in records:
                        if record_id not in results:
                            result = results[record_id] = Storage()
                        else:
                            result = results[record_id]

                        data = frecords[record_id].keys()
                        if len(data) == 1 and not list_type:
                            # Single value (unless list-type field)
                            data = data[0]
                        result[colname] = data

            self.rows = [results[record_id] for record_id in page]

        if rname:
            # Restore referee name
            db._referee_name = rname
5817 5818 # -------------------------------------------------------------------------
def init_field_data(self, rfields):
    """
    Initialize field data and effort estimates for representation

    Field data allow representation per unique value (rather than
    record by record); together with bulk-represent this can reduce
    the total lookup effort per field to a single query.

    Effort estimates are accumulated during data extraction and later
    used to choose between per-value and per-row lookups for
    list:reference representation.

    Per column, field data is a tuple:
        (unique-values, per-record-values,
         is-joined, is-list-type, is-virtual, is-json-type)

    @param rfields: the fields to extract ([S3ResourceField])
    """

    table = self.resource.table
    master = table._tablename
    pkey = str(table._id)

    # The primary key always gets a (non-joined, non-list) slot
    data = {pkey: ({}, {}, False, False, False, False)}
    costs = {pkey: 0}

    for rfield in rfields:
        col = rfield.colname
        prefix = rfield.ftype[:4]
        costs[col] = 0
        data[col] = ({},
                     {},
                     rfield.tname != master,
                     prefix == "list",
                     rfield.virtual,
                     prefix == "json",
                     )

    self.field_data = data
    self.effort = costs

    return
5880 5881 # -------------------------------------------------------------------------
def resolve_orderby(self, orderby):
    """
    Resolve the ORDERBY expression.

    @param orderby: the orderby expression from the caller
    @return: tuple (expr, aggr, fields, tables):
             expr: the orderby expression (resolved into Fields)
             aggr: the orderby expression with aggregations
             fields: the fields in the orderby
             tables: the tables required for the orderby

    @note: for GROUPBY id (e.g. filter query), all ORDERBY fields
           must appear in aggregation functions, otherwise ORDERBY
           can be ambiguous => use aggr instead of expr
    """

    table = self.resource.table
    tablename = table._tablename
    pkey = str(table._id)

    ljoins = self.ljoins
    ijoins = self.ijoins

    tables = set()
    adapter = S3DAL()

    if orderby:

        db = current.db
        items = self.resolve_expression(orderby)

        expr = []
        aggr = []
        fields = []

        for item in items:

            expression = None

            if type(item) is Expression:
                f = item.first
                op = item.op
                if op == adapter.AGGREGATE:
                    # Already an aggregation
                    expression = item
                elif isinstance(f, Field) and op == adapter.INVERT:
                    # Inverted Field (~field) => descending order
                    direction = "desc"
                else:
                    # Other expression - not supported
                    continue
            elif isinstance(item, Field):
                # Plain Field => ascending order
                direction = "asc"
                f = item
            elif isinstance(item, str):
                # String "tablename.fieldname [asc|desc]";
                # direction defaults to "asc" if not given
                fn, direction = (item.strip().split() + ["asc"])[:2]
                # Prepend the master table name if not fully qualified
                tn, fn = ([tablename] + fn.split(".", 1))[-2:]
                try:
                    f = db[tn][fn]
                except (AttributeError, KeyError):
                    # Unknown table or field => skip this item
                    continue
            else:
                continue

            fname = str(f)
            tname = fname.split(".", 1)[0]

            if tname != tablename:
                if tname in ljoins or tname in ijoins:
                    tables.add(tname)
                else:
                    # No join found for this field => skip
                    continue

            fields.append(f)
            if expression is None:
                # NOTE(review): the "asc" comparison here happens BEFORE
                # direction is normalized below, so a string direction
                # like "ASC" would sort descending here — presumably
                # callers pass lowercase; confirm before relying on it
                expression = f if direction == "asc" else ~f
                expr.append(expression)
                # Normalize direction ("ascending" -> "asc" etc.) for
                # the aggregate variant
                direction = direction.strip().lower()[:3]
                if fname != pkey:
                    # Non-pkey fields must be aggregated for the
                    # GROUPBY-id variant (see @note above)
                    expression = f.min() if direction == "asc" else ~(f.max())
            else:
                expr.append(expression)
            aggr.append(expression)

    else:
        # No ORDERBY requested
        expr = None
        aggr = None
        fields = None

    return expr, aggr, fields, tables
5972 5973 # -------------------------------------------------------------------------
def filter_query(self,
                 query,
                 join=None,
                 left=None,
                 getids=False,
                 limitby=None,
                 orderby=None,
                 ):
    """
    Execute a query to determine the number/record IDs of all
    matching rows

    @param query: the filter query
    @param join: the inner joins for the query
    @param left: the left joins for the query
    @param getids: extract the IDs of matching records
    @param limitby: tuple of indices (start, end) to extract only
                    a limited set of IDs
    @param orderby: ORDERBY expression for the query

    @return: tuple of (TotalNumberOfRecords, RecordIDs);
             RecordIDs is None when getids is False
    """

    db = current.db

    table = self.table

    # Temporarily deactivate virtual fields
    # (not needed for ID extraction/counting, avoids their cost)
    vf = table.virtualfields
    osetattr(table, "virtualfields", [])

    if getids and limitby:
        # Large result sets expected on average (settings.base.bigtable)
        # => effort almost independent of result size, much faster
        #    for large and very large filter results
        start = limitby[0]
        limit = limitby[1] - start

        # Don't penalize the smallest filter results (=effective filtering)
        if limit:
            # Over-fetch up to max(limit, 200) IDs so that small result
            # sets can be counted without a separate COUNT query
            maxids = max(limit, 200)
            limitby_ = (start, start + maxids)
        else:
            limitby_ = None

        # Extract record IDs (grouped by ID to de-duplicate join rows)
        field = table._id
        rows = db(query).select(field,
                                join = join,
                                left = left,
                                limitby = limitby_,
                                orderby = orderby,
                                groupby = field,
                                cacheable = True,
                                )
        pkey = str(field)
        results = rows[:limit] if limit else rows
        ids = [row[pkey] for row in results]

        totalids = len(rows)
        # Parses as: (limit and totalids >= maxids)
        #            or (start != 0 and not totalids)
        # i.e. the over-fetch window was filled (there may be more),
        # or we paged past the end (can't infer the total from here)
        if limit and totalids >= maxids or start != 0 and not totalids:
            # Count all matching records
            cnt = table._id.count(distinct=True)
            row = db(query).select(cnt,
                                   join = join,
                                   left = left,
                                   cacheable = True,
                                   ).first()
            totalrows = row[cnt]
        else:
            # We already know how many there are
            totalrows = start + totalids

    elif getids:
        # Extract all matching IDs, then count them in Python
        # => effort proportional to result size, slightly faster
        #    than counting separately for small filter results
        field = table._id
        rows = db(query).select(field,
                                join=join,
                                left=left,
                                orderby = orderby,
                                groupby = field,
                                cacheable = True,
                                )
        pkey = str(field)
        ids = [row[pkey] for row in rows]
        totalrows = len(ids)

    else:
        # Only count, do not extract any IDs (constant effort)
        field = table._id.count(distinct=True)
        rows = db(query).select(field,
                                join = join,
                                left = left,
                                cacheable = True,
                                )
        ids = None
        totalrows = rows.first()[field]

    # Restore the virtual fields
    osetattr(table, "virtualfields", vf)

    return totalrows, ids
6078 6079 # -------------------------------------------------------------------------
def master_fields(self,
                  dfields,
                  vfields,
                  joined_tables,
                  as_rows=False,
                  groupby=None):
    """
    Find all tables and fields to retrieve in the master query

    @param dfields: the requested fields (S3ResourceFields)
    @param vfields: the virtual filter fields
    @param joined_tables: the tables joined in the master query
    @param as_rows: whether to produce web2py Rows
    @param groupby: the GROUPBY expression from the caller

    @return: tuple (tables, fields, extract, groupby):
             tables: the tables required to join
             fields: the fields to retrieve
             extract: the fields to extract from the result
             groupby: the GROUPBY expression (resolved into Fields)
    """

    db = current.db
    tablename = self.resource.table._tablename

    # Names of additional tables to join
    tables = set()

    # Fields to retrieve in the master query, as dict {ColumnName: Field}
    fields = {}

    # Column names of fields to extract from the master rows
    extract = set()

    if groupby:
        # Resolve the groupby into Fields
        items = self.resolve_expression(groupby)

        groupby = []
        groupby_append = groupby.append
        for item in items:

            # Identify the field
            tname = None
            if isinstance(item, Field):
                f = item
            elif isinstance(item, str):
                # "tablename.fieldname", master table prepended
                # when not fully qualified
                fn = item.strip()
                tname, fn = ([tablename] + fn.split(".", 1))[-2:]
                try:
                    f = db[tname][fn]
                except (AttributeError, KeyError):
                    # Unknown table or field => skip
                    continue
            else:
                # Neither Field nor string => unsupported, skip
                continue
            groupby_append(f)

            # Add to fields
            fname = str(f)
            if not tname:
                # Field instance => take the table name from it
                tname = f.tablename
            fields[fname] = f

            # Do we need to join additional tables?
            if tname == tablename:
                # no join required
                continue
            else:
                # Get joins from dfields
                tnames = None
                for dfield in dfields:
                    if dfield.colname == fname:
                        tnames = self.rfield_tables(dfield)
                        break
                if tnames:
                    tables |= tnames
                else:
                    # Join at least the table that holds the fields
                    tables.add(tname)

        # Only extract GROUPBY fields (as we don't support aggregates)
        extract = set(fields.keys())

    else:
        rfields = dfields + vfields
        for rfield in rfields:

            # Is the field in a joined table?
            # (master-table fields count as "joined" here, i.e.
            # already available from the master query)
            tname = rfield.tname
            joined = tname == tablename or tname in joined_tables

            if as_rows or joined:
                colname = rfield.colname
                if rfield.show:
                    # If show => add to extract
                    extract.add(colname)
                if rfield.field:
                    # If real field => add to fields
                    fields[colname] = rfield.field
            if not joined:
                # Not joined yet? => add all required tables
                tables |= self.rfield_tables(rfield)

    return tables, fields, extract, groupby
6184 6185 # -------------------------------------------------------------------------
def joined_fields(self, all_fields, master_fields):
    """
    Determine which fields in joined tables haven't been
    retrieved in the master query

    @param all_fields: all requested fields (list of S3ResourceFields)
    @param master_fields: all fields in the master query, a dict
                          {ColumnName: Field}

    @return: a nested dict {TableName: {ColumnName: Field}},
             additionally required left joins are stored per
             table in the inner dict as "_left"
    """

    resource = self.resource
    table = resource.table
    tablename = table._tablename

    fields = {}
    for rfield in all_fields:

        colname = rfield.colname
        # Skip fields already in the master query, and fields of
        # the master table itself
        if colname in master_fields or rfield.tname == tablename:
            continue
        tname = rfield.tname

        if tname not in fields:
            # First field of this joined table => create the inner
            # dict and collect the left joins required for it
            sfields = fields[tname] = {}
            left = rfield.left
            joins = S3Joins(table)
            if left:
                # Plain loop for the side effect (was a throwaway
                # list comprehension)
                for tn in left:
                    joins.add(left[tn])
            sfields["_left"] = joins
        else:
            sfields = fields[tname]

        if colname not in sfields:
            sfields[colname] = rfield.field

    return fields
6226 6227 # -------------------------------------------------------------------------
def joined_query(self, tablename, query, fields, records, represent=False):
    """
    Extract additional fields from a joined table: if there are
    fields in joined tables which haven't been extracted in the
    master query, then we perform a separate query for each joined
    table (this is faster than building a multi-table-join)

    @param tablename: name of the joined table
    @param query: the Query
    @param fields: the fields to extract, a dict {ColumnName: Field}
                   with the required left joins under the key "_left"
                   (the "_left" entry is removed here)
    @param records: the output dict to update, structure:
                    {RecordID: {ColumnName: RawValues}}
    @param represent: store extracted data (self.field_data) for
                      fast representation, and estimate lookup
                      efforts (self.effort)

    @return: the output dict
    """

    s3db = current.s3db

    ljoins = self.ljoins
    table = self.resource.table
    pkey = str(table._id)

    # Get the extra fields for subtable
    sresource = s3db.resource(tablename)
    efields, ejoins, l, d = sresource.resolve_selectors([])

    # Get all left joins for subtable
    tnames = ljoins.extend(l) + list(fields["_left"].tables)
    sjoins = ljoins.as_list(tablenames = tnames,
                            aqueries = self.aqueries,
                            )
    if not sjoins:
        # No joins found => nothing to retrieve
        return records
    del fields["_left"]

    # Get all fields for subtable query
    # NB take a snapshot of the requested column names BEFORE adding
    #    the extra fields below - under Python 3, a bare keys() view
    #    would grow along with the dict and the extra fields would
    #    wrongly be extracted too
    extract = list(fields.keys())
    for efield in efields:
        fields[efield.colname] = efield.field
    sfields = [f for f in fields.values() if f]
    if not sfields:
        # Ensure the query has at least one field from the subtable
        sfields.append(sresource._id)
    sfields.insert(0, table._id)

    # Retrieve the subtable rows
    rows = current.db(query).select(left = sjoins,
                                    distinct = True,
                                    cacheable = True,
                                    *sfields)

    # Extract and merge the data
    records = self.extract(rows,
                           pkey,
                           extract,
                           records = records,
                           join = True,
                           represent = represent,
                           )

    return records
6291 6292 # -------------------------------------------------------------------------
def extract(self,
            rows,
            pkey,
            columns,
            join=True,
            records=None,
            represent=False):
    """
    Extract the data from rows and store them in self.field_data

    @param rows: the rows
    @param pkey: the primary key
    @param columns: the columns to extract
    @param join: the rows are the result of a join query
    @param records: the records dict to merge the data into
    @param represent: collect unique values per field and estimate
                      representation efforts for list:types

    @return: the records dict {RecordID: {ColumnName: {Value: None}}}

    @note: uses itertools.groupby, which only groups consecutive
           rows with the same key - rows are presumably already
           ordered/grouped by pkey by the query; confirm for any
           new caller
    """

    field_data = self.field_data
    effort = self.effort

    if records is None:
        records = {}

    # Build an accessor for a "tablename.fieldname" key:
    # joined rows are nested (row.tablename.fieldname), otherwise flat
    def get(key):
        t, f = key.split(".", 1)
        if join:
            return lambda row, t=t, f=f: ogetattr(ogetattr(row, t), f)
        else:
            return lambda row, f=f: ogetattr(row, f)

    getkey = get(pkey)
    getval = [get(c) for c in columns]

    from itertools import groupby
    for k, g in groupby(rows, key=getkey):
        group = list(g)
        record = records.get(k, {})
        for idx, col in enumerate(columns):
            fvalues, frecords, joined, list_type, virtual, json_type = field_data[col]
            # Per-record unique values, as dict {Value: None}
            values = record.get(col, {})
            lazy = False
            for row in group:
                try:
                    value = getval[idx](row)
                except AttributeError:
                    current.log.warning("Warning S3Resource.extract: column %s not in row" % col)
                    value = None
                if lazy or callable(value):
                    # Lazy virtual field
                    value = value()
                    lazy = True
                if virtual and not list_type and type(value) is list:
                    # Virtual field that returns a list
                    list_type = True
                if list_type and value is not None:
                    if represent and value:
                        # Effort estimate: 30 base + 1 per list item
                        # (compared against per-value lookup cost in render)
                        effort[col] += 30 + len(value)
                    for v in value:
                        if v not in values:
                            values[v] = None
                        if represent and v not in fvalues:
                            fvalues[v] = None
                elif json_type:
                    # Returns unhashable types
                    value = json.dumps(value)
                    if value not in values:
                        values[value] = None
                    if represent and value not in fvalues:
                        fvalues[value] = None
                else:
                    if value not in values:
                        values[value] = None
                    if represent and value not in fvalues:
                        fvalues[value] = None
            record[col] = values
            if k not in frecords:
                # First occurrence of this record => link the shared
                # per-record value dict into the field data
                frecords[k] = record[col]
        records[k] = record

    return records
6375 6376 # -------------------------------------------------------------------------
def render(self,
           rfield,
           results,
           none="-",
           raw_data=False,
           show_links=True):
    """
    Render the representations of the values for rfield in
    all records in the result

    @param rfield: the field (S3ResourceField)
    @param results: the output dict to update with the representations,
                    structure: {RecordID: {ColumnName: Representation}},
                    the raw data will be a special item "_row" in the
                    inner dict holding a Storage of the raw field values
    @param none: default representation of None
    @param raw_data: retain the raw data in the output dict
    @param show_links: allow representation functions to render
                       links as HTML

    @return: the results dict

    @note: dict key views are materialized with list() throughout,
           so this works under both Python 2 and 3 (keys()[0] is
           Python-2-only)
    """

    colname = rfield.colname

    fvalues, frecords, joined, list_type = self.field_data[colname][:4]

    # Get the renderer
    renderer = rfield.represent
    if not callable(renderer):
        # @ToDo: Don't convert unformatted numbers to strings
        renderer = lambda v: s3_str(v) if v is not None else none

    # Deactivate linkto if so requested
    if not show_links and hasattr(renderer, "show_link"):
        show_link = renderer.show_link
        renderer.show_link = False
    else:
        show_link = None

    # Per-row lookup for list:types without bulk-represent, when the
    # estimated effort (see extract) exceeds the per-value lookup cost
    per_row_lookup = list_type and \
                     self.effort[colname] < len(fvalues) * 30

    # Render all unique values
    if hasattr(renderer, "bulk") and not list_type:
        per_row_lookup = False
        fvalues = renderer.bulk(list(fvalues.keys()), list_type=False)
    elif not per_row_lookup:
        for value in fvalues:
            try:
                text = renderer(value)
            except Exception:
                # Renderer failed => fall back to string representation
                text = s3_str(value)
            fvalues[value] = text

    # Write representations into result
    for record_id in frecords:

        if record_id not in results:
            results[record_id] = Storage() \
                                 if not raw_data \
                                 else Storage(_row=Storage())

        record = frecords[record_id]
        result = results[record_id]

        # List type with per-row lookup?
        if per_row_lookup:
            value = list(record.keys())
            if None in value and len(value) > 1:
                # Drop None from multi-value lists
                value = [v for v in value if v is not None]
            try:
                text = renderer(value)
            except Exception:
                text = s3_str(value)
            result[colname] = text
            if raw_data:
                result["_row"][colname] = value

        # Single value (master record)
        elif len(record) == 1 or \
             not joined and not list_type:
            value = list(record.keys())[0]
            result[colname] = fvalues[value] \
                              if value in fvalues else none
            if raw_data:
                result["_row"][colname] = value
            continue

        # Multiple values (joined or list-type)
        else:
            if hasattr(renderer, "render_list"):
                # Prefer S3Represent's render_list (so it can be customized)
                data = renderer.render_list(list(record.keys()),
                                            fvalues,
                                            show_link = show_links,
                                            )
            else:
                # Build comma-separated list of values
                vlist = []
                for value in record:
                    if value is None and not list_type:
                        continue
                    value = fvalues[value] \
                            if value in fvalues else none
                    vlist.append(value)

                if any(hasattr(v, "xml") for v in vlist):
                    # HTML representations => wrap in a TAG, joined
                    # by ", " separators
                    data = TAG[""](
                               list(
                                   chain.from_iterable(
                                       [(v, ", ") for v in vlist])
                                   )[:-1]
                               )
                else:
                    data = ", ".join([s3_str(v) for v in vlist])

            result[colname] = data
            if raw_data:
                result["_row"][colname] = list(record.keys())

    # Restore linkto
    if show_link is not None:
        renderer.show_link = show_link

    return results
6501 6502 # -------------------------------------------------------------------------
def __getitem__(self, key):
    """
    Helper method to access the results as dict items, for
    backwards-compatibility

    @param key: the key, one of "rfields", "numrows", "ids" or "rows"

    @raise AttributeError: for any other key

    @todo: migrate use-cases to .<key> notation, then deprecate
    """

    # Guard clause: only the four result attributes are accessible
    if key not in ("rfields", "numrows", "ids", "rows"):
        raise AttributeError
    return getattr(self, key)
6517 6518 # -------------------------------------------------------------------------
def getids(self, rows, pkey):
    """
    Extract all unique record IDs from rows, preserving the
    order by first match (order-preserving de-duplication)

    @param rows: the Rows
    @param pkey: the primary key

    @return: list of unique record IDs
    """

    unique = []
    known = set()

    for row in rows:
        record_id = row[pkey]
        if record_id not in known:
            known.add(record_id)
            unique.append(record_id)

    return unique
6541 6542 # -------------------------------------------------------------------------
def getrows(self, rows, ids, pkey):
    """
    Select a subset of rows by their record IDs

    @param rows: the Rows
    @param ids: the record IDs (no IDs => empty subset)
    @param pkey: the primary key

    @return: the subset (Rows)
    """

    if not ids:
        # No IDs => match nothing
        return rows.find(lambda row: False)

    wanted = set(ids)
    return rows.find(lambda row: row[pkey] in wanted)
6560 6561 # -------------------------------------------------------------------------
def subset(self, rows, ids, start=None, limit=None, has_id=True):
    """
    Build a subset [start:limit] from rows and ids

    @param rows: the Rows
    @param ids: all matching record IDs
    @param start: start index of the page
    @param limit: maximum length of the page
    @param has_id: whether the Rows contain the primary key
                   (unused here; kept for interface compatibility)

    @return: tuple (rows, page), with:
             rows = the Rows in the subset, in order
             page = the record IDs in the subset, in order
    """

    # A limit without a start index implies start at 0
    if limit and start is None:
        start = 0

    # Determine the slice window
    if start is None:
        window = None
    elif limit is None:
        window = slice(start, None)
    else:
        window = slice(start, start + limit)

    if window is None:
        # No paging => everything
        return rows, ids
    return rows[window], ids[window]
6592 6593 # ------------------------------------------------------------------------- 6594 @staticmethod
6595 - def rfield_tables(rfield):
6596 """ 6597 Get the names of all tables that need to be joined for a field 6598 6599 @param rfield: the field (S3ResourceField) 6600 6601 @return: a set of tablenames 6602 """ 6603 6604 left = rfield.left 6605 if left: 6606 # => add all left joins required for that table 6607 tablenames = set(j.first._tablename 6608 for tn in left for j in left[tn]) 6609 else: 6610 # => we don't know any further left joins, 6611 # but as a minimum we need to add this table 6612 tablenames = set([rfield.tname]) 6613 6614 return tablenames
6615 6616 # ------------------------------------------------------------------------- 6617 @staticmethod
6618 - def resolve_expression(expr):
6619 """ 6620 Resolve an orderby or groupby expression into its items 6621 6622 @param expr: the orderby/groupby expression 6623 """ 6624 6625 if isinstance(expr, str): 6626 items = expr.split(",") 6627 elif not isinstance(expr, (list, tuple)): 6628 items = [expr] 6629 else: 6630 items = expr 6631 return items
6632 6633 # END ========================================================================= 6634