diff --git a/CHANGELOG.md b/CHANGELOG.md
index a1141a971..a8614565f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@
 * Add - implement multiprocessing in populate (#695) PR #704, #969
 * Bugfix - Dependencies not properly loaded on populate. (#902) PR #919
 * Bugfix - Replace use of numpy aliases of built-in types with built-in type. (#938) PR #939
+* Bugfix - Deletes and drops must include the master of each part. (#151, #374) PR #957
 * Bugfix - `ExternalTable.delete` should not remove row on error (#953) PR #956
 * Bugfix - Fix error handling of remove_object function in `s3.py` (#952) PR #955
 * Bugfix - Fix regression issue with `DISTINCT` clause and `GROUP_BY` (#914) PR #963
@@ -134,7 +135,7 @@
 * Fix #628 - incompatibility with pyparsing 2.4.1

 ### 0.11.1 -- Nov 15, 2018
-* Fix ordering of attributes in proj (#483 and #516)
+* Fix ordering of attributes in proj (#483, #516)
 * Prohibit direct insert into auto-populated tables (#511)

 ### 0.11.0 -- Oct 25, 2018
diff --git a/datajoint/autopopulate.py b/datajoint/autopopulate.py
index 3aa9e78a8..baf2284ff 100644
--- a/datajoint/autopopulate.py
+++ b/datajoint/autopopulate.py
@@ -63,7 +63,8 @@ def _rename_attributes(table, props):
                     if props['aliased'] else table.proj())
         if self._key_source is None:
-            parents = self.target.parents(primary=True, as_objects=True, foreign_key_info=True)
+            parents = self.target.parents(
+                primary=True, as_objects=True, foreign_key_info=True)
             if not parents:
                 raise DataJointError('A table must have dependencies '
                                      'from its primary key for auto-populate to work')
@@ -74,17 +75,19 @@ def _rename_attributes(table, props):

     def make(self, key):
         """
-        Derived classes must implement method `make` that fetches data from tables that are
-        above them in the dependency hierarchy, restricting by the given key, computes dependent
-        attributes, and inserts the new tuples into self.
+        Derived classes must implement method `make` that fetches data from tables
+        above them in the dependency hierarchy, restricting by the given key,
+        computes secondary attributes, and inserts the new tuples into self.
         """
-        raise NotImplementedError('Subclasses of AutoPopulate must implement the method `make`')
+        raise NotImplementedError(
+            'Subclasses of AutoPopulate must implement the method `make`')

     @property
     def target(self):
         """
         :return: table to be populated.
-        In the typical case, dj.AutoPopulate is mixed into a dj.Table class by inheritance and the target is self.
+        In the typical case, dj.AutoPopulate is mixed into a dj.Table class by
+        inheritance and the target is self.
""" return self @@ -111,11 +114,14 @@ def _jobs_to_do(self, restrictions): if not isinstance(todo, QueryExpression): raise DataJointError('Invalid key_source value') - # check if target lacks any attributes from the primary key of key_source + try: + # check if target lacks any attributes from the primary key of key_source raise DataJointError( - 'The populate target lacks attribute %s from the primary key of key_source' % next( - name for name in todo.heading.primary_key if name not in self.target.heading)) + 'The populate target lacks attribute %s ' + 'from the primary key of key_source' % next( + name for name in todo.heading.primary_key + if name not in self.target.heading)) except StopIteration: pass return (todo & AndList(restrictions)).proj() @@ -126,7 +132,8 @@ def populate(self, *restrictions, suppress_errors=False, return_exception_object """ table.populate() calls table.make(key) for every primary key in self.key_source for which there is not already a tuple in table. - :param restrictions: a list of restrictions each restrict (table.key_source - target.proj()) + :param restrictions: a list of restrictions each restrict + (table.key_source - target.proj()) :param suppress_errors: if True, do not terminate execution. :param return_exception_objects: return error objects instead of just error messages :param reserve_jobs: if True, reserve jobs to populate in asynchronous fashion @@ -259,5 +266,6 @@ def progress(self, *restrictions, display=True): print('%-20s' % self.__class__.__name__, 'Completed %d of %d (%2.1f%%) %s' % ( total - remaining, total, 100 - 100 * remaining / (total+1e-12), - datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')), flush=True) + datetime.datetime.strftime(datetime.datetime.now(), + '%Y-%m-%d %H:%M:%S')), flush=True) return remaining, total diff --git a/datajoint/table.py b/datajoint/table.py index 20a07c32f..587de6aef 100644 --- a/datajoint/table.py +++ b/datajoint/table.py @@ -14,7 +14,7 @@ from .condition import make_condition from .expression import QueryExpression from . import blob -from .utils import user_choice +from .utils import user_choice, get_master from .heading import Heading from .errors import (DuplicateError, AccessError, DataJointError, UnknownAttributeError, IntegrityError) @@ -46,10 +46,10 @@ class _RenameMap(tuple): class Table(QueryExpression): """ - Table is an abstract class that represents a base relation, i.e. a table in the schema. + Table is an abstract class that represents a table in the schema. + It implements insert and delete methods and inherits query functionality. To make it a concrete class, override the abstract properties specifying the connection, table name, database, and definition. - A Relation implements insert and delete methods in addition to inherited relational operators. """ _table_name = None # must be defined in subclass @@ -66,7 +66,8 @@ def table_name(self): @property def definition(self): - raise NotImplementedError('Subclasses of Table must implement the `definition` property') + raise NotImplementedError( + 'Subclasses of Table must implement the `definition` property') def declare(self, context=None): """ @@ -96,7 +97,8 @@ def alter(self, prompt=True, context=None): """ if self.connection.in_transaction: raise DataJointError( - 'Cannot update table declaration inside a transaction, e.g. from inside a populate/make call') + 'Cannot update table declaration inside a transaction, ' + 'e.g. 
from inside a populate/make call') if context is None: frame = inspect.currentframe().f_back context = dict(frame.f_globals, **frame.f_locals) @@ -118,7 +120,8 @@ def alter(self, prompt=True, context=None): # skip if no create privilege pass else: - self.__class__._heading = Heading(table_info=self.heading.table_info) # reset heading + # reset heading + self.__class__._heading = Heading(table_info=self.heading.table_info) if prompt: print('Table altered') self._log('Altered ' + self.full_table_name) @@ -227,9 +230,12 @@ def external(self): def update1(self, row): """ update1 updates one existing entry in the table. - Caution: Updates are not part of the DataJoint data manipulation model. For strict data integrity, - use delete and insert. - :param row: a dict containing the primary key and the attributes to update. + Caution: In DataJoint the primary modes for data manipulation is to ``insert`` and ``delete`` + entire records since referential integrity works on the level of records, not fields. + Therefore, updates are reserved for corrective operations outside of main workflow. + Use UPDATE methods sparingly with full awareness of potential violations of assumptions. + + :param row: a ``dict`` containing the primary key values and the attributes to update. Setting an attribute value to None will reset it to the default value (if any) The primary key attributes must always be provided. Examples: @@ -242,7 +248,8 @@ def update1(self, row): if not set(row).issuperset(self.primary_key): raise DataJointError('The argument of update1 must supply all primary key values.') try: - raise DataJointError('Attribute `%s` not found.' % next(k for k in row if k not in self.heading.names)) + raise DataJointError('Attribute `%s` not found.' % + next(k for k in row if k not in self.heading.names)) except StopIteration: pass # ok if len(self.restriction): @@ -251,7 +258,8 @@ def update1(self, row): if len(self & key) != 1: raise DataJointError('Update entry must exist.') # UPDATE query - row = [self.__make_placeholder(k, v) for k, v in row.items() if k not in self.primary_key] + row = [self.__make_placeholder(k, v) for k, v in row.items() + if k not in self.primary_key] query = "UPDATE {table} SET {assignments} WHERE {where}".format( table=self.full_table_name, assignments=",".join('`%s`=%s' % r[:2] for r in row), @@ -260,22 +268,24 @@ def update1(self, row): def insert1(self, row, **kwargs): """ - Insert one data record or one Mapping (like a dict). - :param row: a numpy record, a dict-like object, or an ordered sequence to be inserted as one row. - For kwargs, see insert() + Insert one data record into the table. For ``kwargs``, see ``insert()``. + + :param row: a numpy record, a dict-like object, or an ordered sequence to be inserted + as one row. """ self.insert((row,), **kwargs) - def insert(self, rows, replace=False, skip_duplicates=False, ignore_extra_fields=False, allow_direct_insert=None): + def insert(self, rows, replace=False, skip_duplicates=False, ignore_extra_fields=False, + allow_direct_insert=None): """ Insert a collection of rows. - :param rows: An iterable where an element is a numpy record, a dict-like object, a pandas.DataFrame, a sequence, - or a query expression with the same heading as table self. + :param rows: An iterable where an element is a numpy record, a dict-like object, a + pandas.DataFrame, a sequence, or a query expression with the same heading as self. :param replace: If True, replaces the existing tuple. :param skip_duplicates: If True, silently skip duplicate inserts. 
         :param ignore_extra_fields: If False, fields that are not in the heading raise error.
         :param allow_direct_insert: applies only in auto-populated tables.
-            If False (default), insert are allowed only from inside the make callback.
+            If False (default), inserts are allowed only from inside the make callback.
         Example::
         >>> relation.insert([
         >>>     dict(subject_id=7, species="mouse", date_of_birth="2014-09-01"),
@@ -289,20 +299,22 @@ def insert(self, rows, replace=False, skip_duplicates=False, ignore_extra_fields
                 ).to_records(index=False)

         # prohibit direct inserts into auto-populated tables
-        if not allow_direct_insert and not getattr(self, '_allow_insert', True):  # allow_insert is only used in AutoPopulate
+        if not allow_direct_insert and not getattr(self, '_allow_insert', True):
             raise DataJointError(
-                'Inserts into an auto-populated table can only done inside its make method during a populate call.'
+                'Inserts into an auto-populated table can only be done inside '
+                'its make method during a populate call.'
                 ' To override, set keyword argument allow_direct_insert=True.')

-        if inspect.isclass(rows) and issubclass(rows, QueryExpression):   # instantiate if a class
-            rows = rows()
+        if inspect.isclass(rows) and issubclass(rows, QueryExpression):
+            rows = rows()   # instantiate if a class
         if isinstance(rows, QueryExpression):
             # insert from select
             if not ignore_extra_fields:
                 try:
                     raise DataJointError(
-                        "Attribute %s not found. To ignore extra attributes in insert, set ignore_extra_fields=True." %
-                        next(name for name in rows.heading if name not in self.heading))
+                        "Attribute %s not found. To ignore extra attributes in insert, "
+                        "set ignore_extra_fields=True." % next(
+                            name for name in rows.heading if name not in self.heading))
                 except StopIteration:
                     pass
             fields = list(name for name in rows.heading if name in self.heading)
@@ -349,58 +361,65 @@ def delete_quick(self, get_count=False):
         self._log(query[:255])
         return count

-    def _delete_cascade(self):
-        """service function to perform cascading deletes recursively."""
-        max_attempts = 50
-        for _ in range(max_attempts):
-            try:
-                delete_count = self.delete_quick(get_count=True)
-            except IntegrityError as error:
-                match = foreign_key_error_regexp.match(error.args[0]).groupdict()
-                if "`.`" not in match['child']:  # if schema name missing, use self
-                    match['child'] = '{}.{}'.format(self.full_table_name.split(".")[0],
-                                                    match['child'])
-                if match['pk_attrs'] is not None:  # fully matched, adjusting the keys
-                    match['fk_attrs'] = [k.strip('`') for k in match['fk_attrs'].split(',')]
-                    match['pk_attrs'] = [k.strip('`') for k in match['pk_attrs'].split(',')]
-                else:  # only partially matched, querying with constraint to determine keys
-                    match['fk_attrs'], match['parent'], match['pk_attrs'] = list(map(
-                        list, zip(*self.connection.query(constraint_info_query, args=(
-                            match['name'].strip('`'),
-                            *[_.strip('`') for _ in match['child'].split('`.`')]
-                        )).fetchall())))
-                    match['parent'] = match['parent'][0]
-                # restrict child by self if
-                #   1. if self's restriction attributes are not in child's primary key
-                #   2. if child renames any attributes
-                # otherwise restrict child by self's restriction.
-                child = FreeTable(self.connection, match['child'])
-                if set(self.restriction_attributes) <= set(child.primary_key) and \
-                        match['fk_attrs'] == match['pk_attrs']:
-                    child._restriction = self._restriction
-                elif match['fk_attrs'] != match['pk_attrs']:
-                    child &= self.proj(**dict(zip(match['fk_attrs'],
-                                                  match['pk_attrs'])))
-                else:
-                    child &= self.proj()
-                child._delete_cascade()
-            else:
-                print("Deleting {count} rows from {table}".format(
-                    count=delete_count, table=self.full_table_name))
-                break
-        else:
-            raise DataJointError('Exceeded maximum number of delete attempts.')
-        return delete_count
-
-    def delete(self, transaction=True, safemode=None):
+    def delete(self, transaction=True, safemode=None, force_parts=False):
         """
         Deletes the contents of the table and its dependent tables, recursively.
         :param transaction: if True, use the entire delete becomes an atomic transaction.
+            This is the default and recommended behavior. Set to False if this delete is nested
+            within another transaction.
         :param safemode: If True, prohibit nested transactions and prompt to confirm.
             Default is dj.config['safemode'].
+        :param force_parts: Delete from parts even when not deleting from their masters.
         :return: number of deleted rows (excluding those from dependent tables)
         """
+        deleted = set()
+
+        def cascade(table):
+            """service function to perform cascading deletes recursively."""
+            max_attempts = 50
+            for _ in range(max_attempts):
+                try:
+                    delete_count = table.delete_quick(get_count=True)
+                except IntegrityError as error:
+                    match = foreign_key_error_regexp.match(error.args[0]).groupdict()
+                    if "`.`" not in match['child']:  # if schema name missing, use table
+                        match['child'] = '{}.{}'.format(table.full_table_name.split(".")[0],
+                                                        match['child'])
+                    if match['pk_attrs'] is not None:  # fully matched, adjusting the keys
+                        match['fk_attrs'] = [k.strip('`') for k in match['fk_attrs'].split(',')]
+                        match['pk_attrs'] = [k.strip('`') for k in match['pk_attrs'].split(',')]
+                    else:  # only partially matched, querying with constraint to determine keys
+                        match['fk_attrs'], match['parent'], match['pk_attrs'] = list(map(
+                            list, zip(*table.connection.query(constraint_info_query, args=(
+                                match['name'].strip('`'),
+                                *[_.strip('`') for _ in match['child'].split('`.`')]
+                            )).fetchall())))
+                        match['parent'] = match['parent'][0]
+
+                    # Restrict child by table if
+                    #   1. if table's restriction attributes are not in child's primary key
+                    #   2. if child renames any attributes
+                    # Otherwise restrict child by table's restriction.
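+                    # Example (hypothetical attribute names): a child whose foreign key
+                    # renames the parent's `id` to `parent_id` is restricted below via
+                    # table.proj(parent_id='id'), i.e. proj(**dict(zip(fk_attrs, pk_attrs))).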
+                    child = FreeTable(table.connection, match['child'])
+                    if set(table.restriction_attributes) <= set(child.primary_key) and \
+                            match['fk_attrs'] == match['pk_attrs']:
+                        child._restriction = table._restriction
+                    elif match['fk_attrs'] != match['pk_attrs']:
+                        child &= table.proj(**dict(zip(match['fk_attrs'],
+                                                       match['pk_attrs'])))
+                    else:
+                        child &= table.proj()
+                    cascade(child)
+                else:
+                    deleted.add(table.full_table_name)
+                    print("Deleting {count} rows from {table}".format(
+                        count=delete_count, table=table.full_table_name))
+                    break
+            else:
+                raise DataJointError('Exceeded maximum number of delete attempts.')
+            return delete_count
+
         safemode = config['safemode'] if safemode is None else safemode

         # Start transaction
@@ -417,12 +436,23 @@ def delete(self, transaction=True, safemode=None):

         # Cascading delete
         try:
-            delete_count = self._delete_cascade()
+            delete_count = cascade(self)
         except:
             if transaction:
                 self.connection.cancel_transaction()
             raise

+        if not force_parts:
+            # Avoid deleting from child before master (See issue #151)
+            for part in deleted:
+                master = get_master(part)
+                if master and master not in deleted:
+                    if transaction:
+                        self.connection.cancel_transaction()
+                    raise DataJointError(
+                        'Attempt to delete part table {part} before deleting from '
+                        'its master. Delete from {master} first.'.format(part=part, master=master))
+
         # Confirm and commit
         if delete_count == 0:
             if safemode:
@@ -461,12 +491,21 @@ def drop(self):
        User is prompted for confirmation if config['safemode'] is set to True.
        """
        if self.restriction:
-            raise DataJointError('A relation with an applied restriction condition cannot be dropped.'
+            raise DataJointError('A table with an applied restriction cannot be dropped.'
                                  ' Call drop() on the unrestricted Table.')
        self.connection.dependencies.load()
        do_drop = True
-        tables = [table for table in self.connection.dependencies.descendants(self.full_table_name)
-                  if not table.isdigit()]
+        tables = [table for table in self.connection.dependencies.descendants(
+            self.full_table_name) if not table.isdigit()]
+
+        # avoid dropping part tables without their masters: See issue #374
+        for part in tables:
+            master = get_master(part)
+            if master and master not in tables:
+                raise DataJointError(
+                    'Attempt to drop part table {part} before dropping '
+                    'its master. Drop {master} first.'.format(part=part, master=master))
+
        if config['safemode']:
            for table in tables:
                print(table, '(%d tuples)' % len(FreeTable(self.connection, table)))
@@ -556,12 +595,14 @@ def describe(self, context=None, printout=True):

     def _update(self, attrname, value=None):
         """
-        This is a deprecated function to be removed in datajoint 0.14. Use .update1 instead.
+        This is a deprecated function to be removed in datajoint 0.14.
+        Use ``.update1`` instead.

-        Updates a field in an existing tuple. This is not a datajoyous operation and should not be used
-        routinely. Relational database maintain referential integrity on the level of a tuple. Therefore,
-        the UPDATE operator can violate referential integrity. The datajoyous way to update information is
-        to delete the entire tuple and insert the entire update tuple.
+        Updates a field in one existing tuple. self must be restricted to exactly one entry.
+        In DataJoint the principal way of updating data is to delete and re-insert the
+        entire record; updates are reserved for corrective actions.
+        This is because referential integrity is observed on the level of entire
+        records rather than individual attributes.

         Safety constraints:
            1. self must be restricted to exactly one tuple
@@ -665,7 +706,7 @@ def check_fields(fields):
                 if field not in self.heading:
                     raise KeyError(u'`{0:s}` is not in the table heading'.format(field))
             elif set(field_list) != set(fields).intersection(self.heading.names):
-                raise DataJointError('Attempt to insert rows with different fields')
+                raise DataJointError('Attempt to insert rows with different fields.')

         if isinstance(row, np.void):    # np.array
             check_fields(row.dtype.fields)
diff --git a/datajoint/user_tables.py b/datajoint/user_tables.py
index b7bfd9db2..76cea5fbf 100644
--- a/datajoint/user_tables.py
+++ b/datajoint/user_tables.py
@@ -173,9 +173,10 @@ def delete(self, force=False):
         unless force is True, prohibits direct deletes from parts.
         """
         if force:
-            super().delete()
+            super().delete(force_parts=True)
         else:
-            raise DataJointError('Cannot delete from a Part directly. Delete from master instead')
+            raise DataJointError(
+                'Cannot delete from a Part directly. Delete from master instead')

     def drop(self, force=False):
         """
diff --git a/datajoint/utils.py b/datajoint/utils.py
index 42b18222b..057e7e820 100644
--- a/datajoint/utils.py
+++ b/datajoint/utils.py
@@ -31,6 +31,25 @@ def user_choice(prompt, choices=("yes", "no"), default=None):
     return response


+def get_master(full_table_name: str) -> str:
+    """
+    If the table name is that of a part table, then return what the master table name would be.
+    This follows DataJoint's table naming convention where a master and a part must be in the
+    same schema and the part table name is prefixed with the master table name followed by
+    ``__``.
+
+    Example:
+        `ephys`.`session` -- master
+        `ephys`.`session__recording` -- part
+
+    :param full_table_name: Full table name including part.
+    :type full_table_name: str
+    :return: Supposed master full table name or empty string if not a part table name.
+    :rtype: str
+    """
+    match = re.match(r'(?P<master>`\w+`.`\w+)__(?P<part>\w+)`', full_table_name)
+    return match['master'] + '`' if match else ''
+
+
 def to_camel_case(s):
     """
     Convert names with under score (_) separation into camel case names.
diff --git a/docs-parts/intro/Releases_lang1.rst b/docs-parts/intro/Releases_lang1.rst
index c87234aab..0b0c6c66a 100644
--- a/docs-parts/intro/Releases_lang1.rst
+++ b/docs-parts/intro/Releases_lang1.rst
@@ -7,6 +7,7 @@
 * Add - implement multiprocessing in populate (#695) PR #704, #969
 * Bugfix - Dependencies not properly loaded on populate. (#902) PR #919
 * Bugfix - Replace use of numpy aliases of built-in types with built-in type. (#938) PR #939
+* Bugfix - Deletes and drops must include the master of each part. (#151, #374) PR #957
 * Bugfix - `ExternalTable.delete` should not remove row on error (#953) PR #956
 * Bugfix - Fix error handling of remove_object function in `s3.py` (#952) PR #955
 * Bugfix - Fix sql code generation to comply with sql mode ``ONLY_FULL_GROUP_BY`` (#916) PR #965
@@ -141,7 +142,7 @@
 0.11.1 -- Nov 15, 2018
 ----------------------
-* Fix ordering of attributes in proj (#483 and #516)
+* Fix ordering of attributes in proj (#483, #516)
 * Prohibit direct insert into auto-populated tables (#511)

 0.11.0 -- Oct 25, 2018
diff --git a/test_requirements.txt b/test_requirements.txt
index 6f13c7c6d..0ed24a620 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,4 +1,4 @@
 nose
 nose-cov
 coveralls
-Faker
+faker
diff --git a/tests/schema_simple.py b/tests/schema_simple.py
index c4ec45e00..9263fa01a 100644
--- a/tests/schema_simple.py
+++ b/tests/schema_simple.py
@@ -4,6 +4,10 @@
 import random
 import datajoint as dj
 import itertools
+import hashlib
+import uuid
+import faker
+
 from . import PREFIX, CONN_INFO
 import numpy as np
@@ -151,6 +155,55 @@ class DataB(dj.Lookup):
     contents = list(zip(range(5), range(5, 10)))


+@schema
+class Website(dj.Lookup):
+    definition = """
+    url_hash : uuid
+    ---
+    url : varchar(1000)
+    """
+
+    def insert1_url(self, url):
+        hashed = hashlib.sha1()
+        hashed.update(url.encode())
+        url_hash = uuid.UUID(bytes=hashed.digest()[:16])
+        self.insert1(dict(url=url, url_hash=url_hash), skip_duplicates=True)
+        return url_hash
+
+
+@schema
+class Profile(dj.Manual):
+    definition = """
+    ssn : char(11)
+    ---
+    name : varchar(70)
+    residence : varchar(255)
+    blood_group : enum('A+', 'A-', 'AB+', 'AB-', 'B+', 'B-', 'O+', 'O-')
+    username : varchar(120)
+    birthdate : date
+    job : varchar(120)
+    sex : enum('M', 'F')
+    """
+
+    class Website(dj.Part):
+        definition = """
+        -> master
+        -> Website
+        """
+
+    def populate_random(self, n=10):
+        fake = faker.Faker()
+        faker.Faker.seed(0)  # make tests deterministic
+        for _ in range(n):
+            profile = fake.profile()
+            with self.connection.transaction:
+                self.insert1(profile, ignore_extra_fields=True)
+                for url in profile['website']:
+                    self.Website().insert1(
+                        dict(ssn=profile['ssn'], url_hash=Website().insert1_url(url)))
+
+
 @schema
 class TTestUpdate(dj.Lookup):
     definition = """
diff --git a/tests/test_cascading_delete.py b/tests/test_cascading_delete.py
index 6988d432a..6e730590a 100644
--- a/tests/test_cascading_delete.py
+++ b/tests/test_cascading_delete.py
@@ -1,6 +1,6 @@
-from nose.tools import assert_false, assert_true, assert_equal
+from nose.tools import assert_false, assert_true, assert_equal, raises
 import datajoint as dj
-from .schema_simple import A, B, D, E, L
+from .schema_simple import A, B, D, E, L, Website, Profile
 from .schema import ComplexChild, ComplexParent


@@ -27,17 +27,19 @@ def test_delete_tree():

     @staticmethod
     def test_stepwise_delete():
-        assert_false(dj.config['safemode'], 'safemode must be off for testing')  #TODO: just turn it off instead of warning
-        assert_true(L() and A() and B() and B.C(), 'schema population failed as a precondition to test')
+        assert not dj.config['safemode'], 'safemode must be off for testing'
+        assert L() and A() and B() and B.C(), 'schema population failed'
         B.C().delete(force=True)
-        assert_false(B.C(), 'failed to delete child tables')
+        assert not B.C(), 'failed to delete child tables'
         B().delete()
-        assert_false(B(), 'failed to delete the parent table following child table deletion')
+        assert not B(), \
+            'failed to delete from the parent table following child table deletion'

     @staticmethod
     def test_delete_tree_restricted():
-        assert_false(dj.config['safemode'], 'safemode must be off for testing')
-        assert_true(L() and A() and B() and B.C() and D() and E() and E.F(), 'schema is not populated')
+        assert not dj.config['safemode'], 'safemode must be off for testing'
+        assert L() and A() and B() and B.C() and D() and E() and E.F(), \
+            'schema is not populated'
         cond = 'cond_in_a'
         rel = A() & cond
         rest = dict(
@@ -48,19 +50,14 @@ def test_delete_tree_restricted():
             E=len(E()-rel),
             F=len(E.F()-rel))
         rel.delete()
-        assert_false(rel or
-                     (B() & rel) or
-                     (B.C() & rel) or
-                     (D() & rel) or
-                     (E() & rel) or
-                     (E.F() & rel),
-                     'incomplete delete')
-        assert_equal(len(A()), rest['A'], 'invalid delete restriction')
-        assert_equal(len(B()), rest['B'], 'invalid delete restriction')
-        assert_equal(len(B.C()), rest['C'], 'invalid delete restriction')
-        assert_equal(len(D()), rest['D'], 'invalid delete restriction')
-        assert_equal(len(E()), rest['E'], 'invalid delete restriction')
-        assert_equal(len(E.F()), rest['F'], 'invalid delete restriction')
+        assert not (rel or B() & rel or B.C() & rel or D() & rel or E() & rel
+                    or (E.F() & rel)), 'incomplete delete'
+        assert len(A()) == rest['A'], 'invalid delete restriction'
+        assert len(B()) == rest['B'], 'invalid delete restriction'
+        assert len(B.C()) == rest['C'], 'invalid delete restriction'
+        assert len(D()) == rest['D'], 'invalid delete restriction'
+        assert len(E()) == rest['E'], 'invalid delete restriction'
+        assert len(E.F()) == rest['F'], 'invalid delete restriction'

     @staticmethod
     def test_delete_lookup():
@@ -96,3 +93,18 @@ def test_delete_complex_keys():
         (ComplexParent & restriction).delete()
         assert len(ComplexParent & restriction) == 0, 'Parent record was not deleted'
         assert len(ComplexChild & restriction) == 0, 'Child record was not deleted'
+
+    def test_delete_master(self):
+        Profile().populate_random()
+        Profile().delete()
+
+    @raises(dj.DataJointError)
+    def test_delete_parts(self):
+        """test issue #151"""
+        Profile().populate_random()
+        Website().delete()
+
+    @raises(dj.DataJointError)
+    def test_drop_part(self):
+        """test issue #374"""
+        Website().drop()
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 42d4e0c0e..1ff41efdf 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -124,7 +124,8 @@ def test_list_tables():
     # https://github.com/datajoint/datajoint-python/issues/838
     assert(set(['reserved_word', '#l', '#a', '__d', '__b', '__b__c', '__e', '__e__f',
                 '#outfit_launch', '#outfit_launch__outfit_piece', '#i_j', '#j_i',
-                '#t_test_update', '#data_a', '#data_b', 'f', '#argmax_test'
+                '#t_test_update', '#data_a', '#data_b', 'f', '#argmax_test',
+                '#website', 'profile', 'profile__website'
                ]) == set(schema_simple.list_tables()))
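
Note: the part-table naming convention that the new `get_master` helper (and hence the
master/part checks in `delete` and `drop`) relies on can be exercised with a minimal
standalone sketch; the `ephys` table names come from the helper's docstring and are
illustrative only:

    import re

    def get_master(full_table_name: str) -> str:
        # A part table embeds its master's name: `schema`.`master__part`
        match = re.match(r'(?P<master>`\w+`.`\w+)__(?P<part>\w+)`', full_table_name)
        return match['master'] + '`' if match else ''

    assert get_master('`ephys`.`session__recording`') == '`ephys`.`session`'
    assert get_master('`ephys`.`session`') == ''  # not a part table, no master

Because the convention is purely lexical, both checks reduce to string matching on full
table names: a part may only be deleted or dropped if its computed master name is also in
the set being deleted or dropped.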