LiveSync: Track undeletions/restores

This commit is contained in:
Adrian Moennich 2021-06-02 17:30:08 +02:00
parent 787afea576
commit 5a9f9b7661
6 changed files with 127 additions and 8 deletions

View File

@ -35,6 +35,7 @@ def connect_signals(plugin):
plugin.connect(signals.event.moved, _moved)
# created
plugin.connect(signals.event.created, _created)
plugin.connect(signals.event.restored, _restored)
plugin.connect(signals.event.contribution_created, _created)
plugin.connect(signals.event.subcontribution_created, _created)
# deleted
@ -69,6 +70,7 @@ def connect_signals(plugin):
plugin.connect(signals.acl.entry_changed, _acl_entry_changed, sender=Contribution)
# notes
plugin.connect(signals.event.notes.note_added, _created)
plugin.connect(signals.event.notes.note_restored, _restored)
plugin.connect(signals.event.notes.note_deleted, _deleted)
plugin.connect(signals.event.notes.note_modified, _updated)
# attachments
@ -108,6 +110,10 @@ def _created(obj, **kwargs):
_register_change(obj, ChangeType.created)
def _restored(obj, **kwargs):
    """Record an ``undeleted`` change when a soft-deleted object is restored.

    Connected to the ``restored``/``note_restored`` signals; *obj* is the
    restored object emitted by the signal.
    """
    _register_change(obj, ChangeType.undeleted)
def _deleted(obj, **kwargs):
    """Record a deletion queue entry for *obj*.

    Uses ``_register_deletion`` (not ``_register_change``) since deletions
    need extra bookkeeping beyond a plain change entry.
    """
    _register_deletion(obj)

View File

@ -0,0 +1,30 @@
"""Add undeleted change type
Revision ID: 330e32d26232
Revises: 02a78555cdcb
Create Date: 2021-06-02 13:07:48.837833
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '330e32d26232'
down_revision = '02a78555cdcb'
branch_labels = None
depends_on = None
def upgrade():
    # Recreate the CHECK constraint on `queues.change` so it also accepts the
    # new `undeleted` ChangeType value (7) added to the enum.
    op.execute('''
ALTER TABLE plugin_livesync.queues DROP CONSTRAINT "ck_queues_valid_enum_change";
ALTER TABLE plugin_livesync.queues ADD CONSTRAINT "ck_queues_valid_enum_change" CHECK ((change = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7])));
''')
def downgrade():
    # Rows using the new `undeleted` change type (7) cannot satisfy the old
    # constraint, so they are dropped before the stricter CHECK is restored.
    op.execute('DELETE FROM plugin_livesync.queues WHERE change = 7')
    op.execute('''
ALTER TABLE plugin_livesync.queues DROP CONSTRAINT "ck_queues_valid_enum_change";
ALTER TABLE plugin_livesync.queues ADD CONSTRAINT "ck_queues_valid_enum_change" CHECK ((change = ANY (ARRAY[1, 2, 3, 4, 5, 6])));
''')

View File

@ -25,6 +25,7 @@ class ChangeType(int, IndicoEnum):
data_changed = 4
protection_changed = 5
location_changed = 6
undeleted = 7
class EntryType(int, IndicoEnum):

View File

@ -52,6 +52,7 @@ def process_records(records):
"""
changes = defaultdict(int)
cascaded_create_records = set()
cascaded_undelete_records = set()
cascaded_update_records = set()
cascaded_delete_records = set()
cascaded_location_changes = set()
@ -64,6 +65,9 @@ def process_records(records):
if record.change == ChangeType.created:
assert record.type != EntryType.category
cascaded_create_records.add(record)
elif record.change == ChangeType.undeleted:
assert record.type != EntryType.category
cascaded_undelete_records.add(record)
elif record.change == ChangeType.deleted:
assert record.type != EntryType.category
cascaded_delete_records.add(record)
@ -93,6 +97,14 @@ def process_records(records):
for obj in _process_cascaded_locations(cascaded_location_changes):
changes[obj] |= SimpleChange.updated
for obj in _process_cascaded_event_contents(cascaded_undelete_records, skip_all_deleted=True):
# This may result in a create for an object which is already created - in the (somewhat rare)
# case of a deletion being followed by a restore in the same set of records.
# However, since we expect backends to either convert those operations to an update or skip
# them altogether this shouldn't be a problem
changes[obj] |= SimpleChange.created
changes[obj] &= ~SimpleChange.deleted
created_and_deleted = {obj for obj, flags in changes.items() if (flags & CREATED_DELETED) == CREATED_DELETED}
for obj in created_and_deleted:
# discard any change where the object was both created and deleted
@ -142,7 +154,7 @@ def _process_cascaded_category_contents(records):
yield from _process_cascaded_event_contents(records, additional_events=changed_events)
def _process_cascaded_event_contents(records, additional_events=None, *, include_deleted=False):
def _process_cascaded_event_contents(records, additional_events=None, *, include_deleted=False, skip_all_deleted=False):
"""
Flatten a series of records into its most basic elements (subcontribution level).
@ -152,6 +164,7 @@ def _process_cascaded_event_contents(records, additional_events=None, *, include
:param additional_events: events whose content will be included in addition to those
found in records
:param include_deleted: whether to include soft-deleted objects as well
:param skip_all_deleted: whether to skip soft-deleted objects even if explicitly queued
"""
changed_events = additional_events or set()
changed_sessions = set()
@ -163,12 +176,27 @@ def _process_cascaded_event_contents(records, additional_events=None, *, include
def _deleted_cond(cond):
return True if include_deleted else cond
note_records = {rec.note_id for rec in records if rec.type == EntryType.note}
attachment_records = {rec.attachment_id for rec in records if rec.type == EntryType.attachment}
session_records = {rec.session_id for rec in records if rec.type == EntryType.session}
contribution_records = {rec.contrib_id for rec in records if rec.type == EntryType.contribution}
subcontribution_records = {rec.subcontrib_id for rec in records if rec.type == EntryType.subcontribution}
event_records = {rec.event_id for rec in records if rec.type == EntryType.event}
def _check_deleted(rec):
return not skip_all_deleted or not rec.object.is_deleted
note_records = {
rec.note_id for rec in records if rec.type == EntryType.note and _check_deleted(rec)
}
attachment_records = {
rec.attachment_id for rec in records if rec.type == EntryType.attachment and _check_deleted(rec)
}
session_records = {
rec.session_id for rec in records if rec.type == EntryType.session and _check_deleted(rec)
}
contribution_records = {
rec.contrib_id for rec in records if rec.type == EntryType.contribution and _check_deleted(rec)
}
subcontribution_records = {
rec.subcontrib_id for rec in records if rec.type == EntryType.subcontribution and _check_deleted(rec)
}
event_records = {
rec.event_id for rec in records if rec.type == EntryType.event and _check_deleted(rec)
}
if attachment_records:
changed_attachments.update(Attachment.query.filter(Attachment.id.in_(attachment_records)))

View File

@ -139,3 +139,56 @@ def test_process_records_simplify_created_deleted_child(db, create_event, create
event: SimpleChange.created,
contrib1: SimpleChange.created,
}
def test_process_records_simplify_created_deleted(db, create_event, dummy_agent):
    """A created+deleted pair for the same object is simplified away entirely."""
    db.session.add(dummy_agent)
    event = create_event()
    entries = [
        LiveSyncQueueEntry(change=change, agent=dummy_agent, type=EntryType.event, event=event)
        for change in (ChangeType.created, ChangeType.data_changed, ChangeType.deleted)
    ]
    db.session.flush()
    # creation + deletion should cancel each other out
    assert process_records(entries) == {}
def test_process_records_simplify_created_deleted_undeleted(db, create_event, dummy_agent):
    """A restore yields a creation even when the earlier create/delete cancelled out."""
    db.session.add(dummy_agent)
    event = create_event()
    change_sequence = (
        ChangeType.created,
        ChangeType.data_changed,
        ChangeType.deleted,
        ChangeType.undeleted,
    )
    entries = [
        LiveSyncQueueEntry(change=change, agent=dummy_agent, type=EntryType.event, event=event)
        for change in change_sequence
    ]
    db.session.flush()
    # a restore always results in a creation event, even if the other changes
    # cancelled each other
    assert process_records(entries) == {event: SimpleChange.created}
def test_process_records_simplify_deleted_undeleted(db, create_event, dummy_agent):
    """A delete followed by a restore currently results in a creation."""
    db.session.add(dummy_agent)
    event = create_event()
    entries = [
        LiveSyncQueueEntry(change=change, agent=dummy_agent, type=EntryType.event, event=event)
        for change in (ChangeType.deleted, ChangeType.undeleted)
    ]
    db.session.flush()
    # Not ideal: an empty dict would be better here, since a deletion followed
    # by a restore could cancel each other out. However, there is no good way
    # to achieve that without losing the more important behavior covered by
    # the test above, unless the order of changes were taken into account -
    # and with the cascading logic that is not really possible without
    # cascading each queue entry separately, which would likely hurt
    # performance. See the comment in `process_records` for details.
    assert process_records(entries) == {event: SimpleChange.created}

View File

@ -56,7 +56,8 @@ def test_run_initial(mocker):
assert not uploader.processed_records.called
def _sorted_process_cascaded_event_contents(records, additional_events=None, *, include_deleted=False):
def _sorted_process_cascaded_event_contents(records, additional_events=None, *, include_deleted=False,
                                            skip_all_deleted=False):
    """Run `_process_cascaded_event_contents` and return its results sorted by id.

    Fix: the ``include_deleted`` and ``skip_all_deleted`` keyword-only
    arguments were accepted but never forwarded to the wrapped function, so
    any test passing them was silently exercising the default behavior
    instead. They are now passed through.
    """
    results = _process_cascaded_event_contents(records, additional_events,
                                               include_deleted=include_deleted,
                                               skip_all_deleted=skip_all_deleted)
    return sorted(results, key=attrgetter('id'))