- # -*- coding: utf-8 -*-
- # Copyright 2014-2016 OpenMarket Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- from ._base import SQLBaseStore, _RollbackButIsFineException
- from twisted.internet import defer, reactor
- from synapse.events import FrozenEvent, USE_FROZEN_DICTS
- from synapse.events.utils import prune_event
- from synapse.util.async import ObservableDeferred
- from synapse.util.logcontext import (
- preserve_fn, PreserveLoggingContext, preserve_context_over_deferred
- )
- from synapse.util.logutils import log_function
- from synapse.util.metrics import Measure
- from synapse.api.constants import EventTypes
- from synapse.api.errors import SynapseError
- from canonicaljson import encode_canonical_json
- from collections import deque, namedtuple, OrderedDict
- from functools import wraps
- import synapse
- import synapse.metrics
- import logging
- import math
- import ujson as json
- logger = logging.getLogger(__name__)
- metrics = synapse.metrics.get_metrics_for(__name__)
- persist_event_counter = metrics.register_counter("persisted_events")
- def encode_json(json_object):
- if USE_FROZEN_DICTS:
- # ujson doesn't like frozen_dicts
- return encode_canonical_json(json_object)
- else:
- return json.dumps(json_object, ensure_ascii=False)
- # These values are used in the `_enqueue_events` and `_do_fetch` methods to
- # control how we batch/bulk fetch events from the database.
- # The values are plucked out of thin air to make initial sync run faster
- # on jki.re
- # TODO: Make these configurable.
- EVENT_QUEUE_THREADS = 3 # Max number of threads that will fetch events
- EVENT_QUEUE_ITERATIONS = 3 # No. times we block waiting for requests for events
- EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events
- class _EventPersistenceQueue(object):
- """Queues up events so that they can be persisted in bulk with only one
- concurrent transaction per room.
- """
- _EventPersistQueueItem = namedtuple("_EventPersistQueueItem", (
- "events_and_contexts", "current_state", "backfilled", "deferred",
- ))
- def __init__(self):
- self._event_persist_queues = {}
- self._currently_persisting_rooms = set()
- def add_to_queue(self, room_id, events_and_contexts, backfilled, current_state):
- """Add events to the queue, with the given persist_event options.
- """
- queue = self._event_persist_queues.setdefault(room_id, deque())
- if queue:
- end_item = queue[-1]
- if end_item.current_state or current_state:
- # We persist events with current_state set to True one at a time
- pass
- if end_item.backfilled == backfilled:
- end_item.events_and_contexts.extend(events_and_contexts)
- return end_item.deferred.observe()
- deferred = ObservableDeferred(defer.Deferred())
- queue.append(self._EventPersistQueueItem(
- events_and_contexts=events_and_contexts,
- backfilled=backfilled,
- current_state=current_state,
- deferred=deferred,
- ))
- return deferred.observe()
- def handle_queue(self, room_id, per_item_callback):
- """Attempts to handle the queue for a room if not already being handled.
- The given callback will be invoked for each item in the queue,
- of type _EventPersistQueueItem. The per_item_callback will continuously
- be called with new items, unless the queue becomes empty. The return
- value of the function will be given to the deferreds waiting on the item;
- exceptions will be passed to the deferreds as well.
- This function should therefore be called whenever anything is added
- to the queue.
- If another callback is currently handling the queue then it will not be
- invoked.
- """
- if room_id in self._currently_persisting_rooms:
- return
- self._currently_persisting_rooms.add(room_id)
- @defer.inlineCallbacks
- def handle_queue_loop():
- try:
- queue = self._get_draining_queue(room_id)
- for item in queue:
- try:
- ret = yield per_item_callback(item)
- item.deferred.callback(ret)
- except Exception as e:
- item.deferred.errback(e)
- finally:
- queue = self._event_persist_queues.pop(room_id, None)
- if queue:
- self._event_persist_queues[room_id] = queue
- self._currently_persisting_rooms.discard(room_id)
- preserve_fn(handle_queue_loop)()
- def _get_draining_queue(self, room_id):
- queue = self._event_persist_queues.setdefault(room_id, deque())
- try:
- while True:
- yield queue.popleft()
- except IndexError:
- # Queue has been drained.
- pass
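- # A minimal usage sketch (illustrative only; `store` stands for the
- # EventsStore defined below, which owns the real queue instance):
- #
- #     d = store._event_persist_queue.add_to_queue(
- #         room_id, events_and_contexts,
- #         backfilled=False, current_state=None,
- #     )
- #     store._event_persist_queue.handle_queue(room_id, per_item_callback)
- #     # `d` fires with per_item_callback's return value for the queue item
- #     # that absorbed our events, or errbacks if the callback raised.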
- _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
- def _retry_on_integrity_error(func):
- """Wraps a database function so that it gets retried on IntegrityError,
- with `delete_existing=True` passed in.
- Args:
- func: function that returns a Deferred and accepts a `delete_existing` arg
- """
- @wraps(func)
- @defer.inlineCallbacks
- def f(self, *args, **kwargs):
- try:
- res = yield func(self, *args, **kwargs)
- except self.database_engine.module.IntegrityError:
- logger.exception("IntegrityError, retrying.")
- res = yield func(self, *args, delete_existing=True, **kwargs)
- defer.returnValue(res)
- return f
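- # Illustrative application of _retry_on_integrity_error (this mirrors how
- # _persist_events below is decorated; the wrapped function must accept a
- # `delete_existing` keyword argument):
- #
- #     @_retry_on_integrity_error
- #     @defer.inlineCallbacks
- #     def _persist_events(self, events_and_contexts, backfilled=False,
- #                         delete_existing=False):
- #         ...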
- class EventsStore(SQLBaseStore):
- EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
- EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
- def __init__(self, hs):
- super(EventsStore, self).__init__(hs)
- self._clock = hs.get_clock()
- self.register_background_update_handler(
- self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
- )
- self.register_background_update_handler(
- self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
- self._background_reindex_fields_sender,
- )
- self.register_background_index_update(
- "event_contains_url_index",
- index_name="event_contains_url_index",
- table="events",
- columns=["room_id", "topological_ordering", "stream_ordering"],
- where_clause="contains_url = true AND outlier = false",
- )
- self._event_persist_queue = _EventPersistenceQueue()
- def persist_events(self, events_and_contexts, backfilled=False):
- """
- Write events to the database
- Args:
- events_and_contexts: list of tuples of (event, context)
- backfilled (bool): True if the events are historical events fetched
- via backfill rather than new events being persisted.
- """
- partitioned = {}
- for event, ctx in events_and_contexts:
- partitioned.setdefault(event.room_id, []).append((event, ctx))
- deferreds = []
- for room_id, evs_ctxs in partitioned.items():
- d = preserve_fn(self._event_persist_queue.add_to_queue)(
- room_id, evs_ctxs,
- backfilled=backfilled,
- current_state=None,
- )
- deferreds.append(d)
- for room_id in partitioned.keys():
- self._maybe_start_persisting(room_id)
- return preserve_context_over_deferred(
- defer.gatherResults(deferreds, consumeErrors=True)
- )
- @defer.inlineCallbacks
- @log_function
- def persist_event(self, event, context, current_state=None, backfilled=False):
- deferred = self._event_persist_queue.add_to_queue(
- event.room_id, [(event, context)],
- backfilled=backfilled,
- current_state=current_state,
- )
- self._maybe_start_persisting(event.room_id)
- yield preserve_context_over_deferred(deferred)
- max_persisted_id = yield self._stream_id_gen.get_current_token()
- defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
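- # Illustrative caller sketch for persist_event (hedged; `store` is an
- # EventsStore instance):
- #
- #     stream_id, max_id = yield store.persist_event(event, context)
- #
- # where `stream_id` is the stream ordering assigned to this event and
- # `max_id` is the highest stream ordering persisted so far.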
- def _maybe_start_persisting(self, room_id):
- @defer.inlineCallbacks
- def persisting_queue(item):
- if item.current_state:
- for event, context in item.events_and_contexts:
- # There should only ever be one item in
- # events_and_contexts when current_state is
- # not None
- yield self._persist_event(
- event, context,
- current_state=item.current_state,
- backfilled=item.backfilled,
- )
- else:
- yield self._persist_events(
- item.events_and_contexts,
- backfilled=item.backfilled,
- )
- self._event_persist_queue.handle_queue(room_id, persisting_queue)
- @_retry_on_integrity_error
- @defer.inlineCallbacks
- def _persist_events(self, events_and_contexts, backfilled=False,
- delete_existing=False):
- if not events_and_contexts:
- return
- if backfilled:
- stream_ordering_manager = self._backfill_id_gen.get_next_mult(
- len(events_and_contexts)
- )
- else:
- stream_ordering_manager = self._stream_id_gen.get_next_mult(
- len(events_and_contexts)
- )
- with stream_ordering_manager as stream_orderings:
- for (event, context), stream in zip(
- events_and_contexts, stream_orderings
- ):
- event.internal_metadata.stream_ordering = stream
- chunks = [
- events_and_contexts[x:x + 100]
- for x in xrange(0, len(events_and_contexts), 100)
- ]
- for chunk in chunks:
- # We can't easily parallelize these since different chunks
- # might contain the same event. :(
- yield self.runInteraction(
- "persist_events",
- self._persist_events_txn,
- events_and_contexts=chunk,
- backfilled=backfilled,
- delete_existing=delete_existing,
- )
- persist_event_counter.inc_by(len(chunk))
- @_retry_on_integrity_error
- @defer.inlineCallbacks
- @log_function
- def _persist_event(self, event, context, current_state=None, backfilled=False,
- delete_existing=False):
- try:
- with self._stream_id_gen.get_next() as stream_ordering:
- event.internal_metadata.stream_ordering = stream_ordering
- yield self.runInteraction(
- "persist_event",
- self._persist_event_txn,
- event=event,
- context=context,
- current_state=current_state,
- backfilled=backfilled,
- delete_existing=delete_existing,
- )
- persist_event_counter.inc()
- except _RollbackButIsFineException:
- pass
- @defer.inlineCallbacks
- def get_event(self, event_id, check_redacted=True,
- get_prev_content=False, allow_rejected=False,
- allow_none=False):
- """Get an event from the database by event_id.
- Args:
- event_id (str): The event_id of the event to fetch
- check_redacted (bool): If True, check if event has been redacted
- and redact it.
- get_prev_content (bool): If True and event is a state event,
- include the previous state's content in the unsigned field.
- allow_rejected (bool): If True return rejected events.
- allow_none (bool): If True, return None if no event found, if
- False throw an exception.
- Returns:
- Deferred : A FrozenEvent.
- """
- events = yield self._get_events(
- [event_id],
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- allow_rejected=allow_rejected,
- )
- if not events and not allow_none:
- raise SynapseError(404, "Could not find event %s" % (event_id,))
- defer.returnValue(events[0] if events else None)
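- # Illustrative usage of get_event (a sketch):
- #
- #     event = yield store.get_event(event_id, allow_none=True)
- #     if event is None:
- #         pass  # unknown event; no SynapseError because allow_none=True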
- @defer.inlineCallbacks
- def get_events(self, event_ids, check_redacted=True,
- get_prev_content=False, allow_rejected=False):
- """Get events from the database
- Args:
- event_ids (list): The event_ids of the events to fetch
- check_redacted (bool): If True, check if event has been redacted
- and redact it.
- get_prev_content (bool): If True and event is a state event,
- include the previous state's content in the unsigned field.
- allow_rejected (bool): If True return rejected events.
- Returns:
- Deferred : Dict from event_id to event.
- """
- events = yield self._get_events(
- event_ids,
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- allow_rejected=allow_rejected,
- )
- defer.returnValue({e.event_id: e for e in events})
- @log_function
- def _persist_event_txn(self, txn, event, context, current_state, backfilled=False,
- delete_existing=False):
- # We purposefully do this first since if we include a `current_state`
- # key, we *want* to update the `current_state_events` table
- if current_state:
- txn.call_after(self._get_current_state_for_key.invalidate_all)
- txn.call_after(self.get_rooms_for_user.invalidate_all)
- txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
- # Add an entry to the current_state_resets table to record the point
- # where we clobbered the current state
- stream_order = event.internal_metadata.stream_ordering
- self._simple_insert_txn(
- txn,
- table="current_state_resets",
- values={"event_stream_ordering": stream_order}
- )
- self._simple_delete_txn(
- txn,
- table="current_state_events",
- keyvalues={"room_id": event.room_id},
- )
- for s in current_state:
- self._simple_insert_txn(
- txn,
- "current_state_events",
- {
- "event_id": s.event_id,
- "room_id": s.room_id,
- "type": s.type,
- "state_key": s.state_key,
- }
- )
- return self._persist_events_txn(
- txn,
- [(event, context)],
- backfilled=backfilled,
- delete_existing=delete_existing,
- )
- @log_function
- def _persist_events_txn(self, txn, events_and_contexts, backfilled,
- delete_existing=False):
- """Insert some number of room events into the necessary database tables.
- Rejected events are only inserted into the events table, the event_json table,
- and the rejections table. Things reading from those tables will need to check
- whether the event was rejected.
- If delete_existing is True then existing events will be purged from the
- database before insertion. This is useful when retrying due to IntegrityError.
- """
- # Ensure that we don't have the same event twice.
- # Pick the earliest non-outlier if there is one, else the earliest one.
- new_events_and_contexts = OrderedDict()
- for event, context in events_and_contexts:
- prev_event_context = new_events_and_contexts.get(event.event_id)
- if prev_event_context:
- if not event.internal_metadata.is_outlier():
- if prev_event_context[0].internal_metadata.is_outlier():
- # To ensure correct ordering we pop, as OrderedDict is
- # ordered by first insertion.
- new_events_and_contexts.pop(event.event_id, None)
- new_events_and_contexts[event.event_id] = (event, context)
- else:
- new_events_and_contexts[event.event_id] = (event, context)
- events_and_contexts = new_events_and_contexts.values()
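- # Worked example of the dedup above (illustrative): given the input
- # [(A_outlier, c1), (B, c2), (A_full, c3)], where A_outlier and A_full
- # share an event_id and only A_full is a non-outlier, the result is
- # [(B, c2), (A_full, c3)]: the non-outlier copy wins and, because of the
- # pop-and-reinsert, takes the later position.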
- depth_updates = {}
- for event, context in events_and_contexts:
- # Remove any existing cache entries for the event_ids
- txn.call_after(self._invalidate_get_event_cache, event.event_id)
- if not backfilled:
- txn.call_after(
- self._events_stream_cache.entity_has_changed,
- event.room_id, event.internal_metadata.stream_ordering,
- )
- if not event.internal_metadata.is_outlier() and not context.rejected:
- depth_updates[event.room_id] = max(
- event.depth, depth_updates.get(event.room_id, event.depth)
- )
- for room_id, depth in depth_updates.items():
- self._update_min_depth_for_room_txn(txn, room_id, depth)
- txn.execute(
- "SELECT event_id, outlier FROM events WHERE event_id in (%s)" % (
- ",".join(["?"] * len(events_and_contexts)),
- ),
- [event.event_id for event, _ in events_and_contexts]
- )
- have_persisted = {
- event_id: outlier
- for event_id, outlier in txn.fetchall()
- }
- to_remove = set()
- for event, context in events_and_contexts:
- if context.rejected:
- # If the event is rejected then we don't care if the event
- # was an outlier or not.
- if event.event_id in have_persisted:
- # If we have already seen the event then ignore it.
- to_remove.add(event)
- continue
- if event.event_id not in have_persisted:
- continue
- to_remove.add(event)
- outlier_persisted = have_persisted[event.event_id]
- if not event.internal_metadata.is_outlier() and outlier_persisted:
- # We received a copy of an event that we had already stored as
- # an outlier in the database. We now have some state at that event,
- # so we need to update the state_groups table with that state: we
- # insert into the state_groups, state_groups_state and
- # event_to_state_groups tables.
- try:
- self._store_mult_state_groups_txn(txn, ((event, context),))
- except Exception:
- logger.exception("")
- raise
- metadata_json = encode_json(
- event.internal_metadata.get_dict()
- ).decode("UTF-8")
- sql = (
- "UPDATE event_json SET internal_metadata = ?"
- " WHERE event_id = ?"
- )
- txn.execute(
- sql,
- (metadata_json, event.event_id,)
- )
- # Add an entry to the ex_outlier_stream table to replicate the
- # change in outlier status to our workers.
- stream_order = event.internal_metadata.stream_ordering
- state_group_id = context.state_group
- self._simple_insert_txn(
- txn,
- table="ex_outlier_stream",
- values={
- "event_stream_ordering": stream_order,
- "event_id": event.event_id,
- "state_group": state_group_id,
- }
- )
- sql = (
- "UPDATE events SET outlier = ?"
- " WHERE event_id = ?"
- )
- txn.execute(
- sql,
- (False, event.event_id,)
- )
- # Update the event_backward_extremities table now that this
- # event isn't an outlier any more.
- self._update_extremeties(txn, [event])
- events_and_contexts = [
- ec for ec in events_and_contexts if ec[0] not in to_remove
- ]
- if not events_and_contexts:
- # Make sure we don't pass an empty list to functions that expect to
- # be storing at least one element.
- return
- # From this point onwards the events are only events that we haven't
- # seen before.
- def event_dict(event):
- return {
- k: v
- for k, v in event.get_dict().items()
- if k not in [
- "redacted",
- "redacted_because",
- ]
- }
- if delete_existing:
- # For paranoia reasons, we go and delete all the existing entries
- # for these events so we can reinsert them.
- # This gets around any problems with some tables already having
- # entries.
- logger.info("Deleting existing")
- for table in (
- "events",
- "event_auth",
- "event_json",
- "event_content_hashes",
- "event_destinations",
- "event_edge_hashes",
- "event_edges",
- "event_forward_extremities",
- "event_push_actions",
- "event_reference_hashes",
- "event_search",
- "event_signatures",
- "event_to_state_groups",
- "guest_access",
- "history_visibility",
- "local_invites",
- "room_names",
- "state_events",
- "rejections",
- "redactions",
- "room_memberships",
- "topics"
- ):
- txn.executemany(
- "DELETE FROM %s WHERE event_id = ?" % (table,),
- [(ev.event_id,) for ev, _ in events_and_contexts]
- )
- self._simple_insert_many_txn(
- txn,
- table="event_json",
- values=[
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "internal_metadata": encode_json(
- event.internal_metadata.get_dict()
- ).decode("UTF-8"),
- "json": encode_json(event_dict(event)).decode("UTF-8"),
- }
- for event, _ in events_and_contexts
- ],
- )
- self._simple_insert_many_txn(
- txn,
- table="events",
- values=[
- {
- "stream_ordering": event.internal_metadata.stream_ordering,
- "topological_ordering": event.depth,
- "depth": event.depth,
- "event_id": event.event_id,
- "room_id": event.room_id,
- "type": event.type,
- "processed": True,
- "outlier": event.internal_metadata.is_outlier(),
- "content": encode_json(event.content).decode("UTF-8"),
- "origin_server_ts": int(event.origin_server_ts),
- "received_ts": self._clock.time_msec(),
- "sender": event.sender,
- "contains_url": (
- "url" in event.content
- and isinstance(event.content["url"], basestring)
- ),
- }
- for event, _ in events_and_contexts
- ],
- )
- # Remove the rejected events from the list now that we've added them
- # to the events table and the event_json table.
- to_remove = set()
- for event, context in events_and_contexts:
- if context.rejected:
- # Insert the event_id into the rejections table
- self._store_rejections_txn(
- txn, event.event_id, context.rejected
- )
- to_remove.add(event)
- events_and_contexts = [
- ec for ec in events_and_contexts if ec[0] not in to_remove
- ]
- if not events_and_contexts:
- # Make sure we don't pass an empty list to functions that expect to
- # be storing at least one element.
- return
- # From this point onwards the events are only ones that weren't rejected.
- for event, context in events_and_contexts:
- # Insert all the push actions into the event_push_actions table.
- if context.push_actions:
- self._set_push_actions_for_event_and_users_txn(
- txn, event, context.push_actions
- )
- if event.type == EventTypes.Redaction and event.redacts is not None:
- # Remove the entries in the event_push_actions table for the
- # redacted event.
- self._remove_push_actions_for_event_id_txn(
- txn, event.room_id, event.redacts
- )
- self._simple_insert_many_txn(
- txn,
- table="event_auth",
- values=[
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "auth_id": auth_id,
- }
- for event, _ in events_and_contexts
- for auth_id, _ in event.auth_events
- ],
- )
- # Insert into the state_groups, state_groups_state, and
- # event_to_state_groups tables.
- self._store_mult_state_groups_txn(txn, events_and_contexts)
- # Update the event_forward_extremities, event_backward_extremities and
- # event_edges tables.
- self._handle_mult_prev_events(
- txn,
- events=[event for event, _ in events_and_contexts],
- )
- for event, _ in events_and_contexts:
- if event.type == EventTypes.Name:
- # Insert into the room_names and event_search tables.
- self._store_room_name_txn(txn, event)
- elif event.type == EventTypes.Topic:
- # Insert into the topics table and event_search table.
- self._store_room_topic_txn(txn, event)
- elif event.type == EventTypes.Message:
- # Insert into the event_search table.
- self._store_room_message_txn(txn, event)
- elif event.type == EventTypes.Redaction:
- # Insert into the redactions table.
- self._store_redaction(txn, event)
- elif event.type == EventTypes.RoomHistoryVisibility:
- # Insert into the history_visibility table.
- self._store_history_visibility_txn(txn, event)
- elif event.type == EventTypes.GuestAccess:
- # Insert into the guest_access table.
- self._store_guest_access_txn(txn, event)
- # Insert into the room_memberships table.
- self._store_room_members_txn(
- txn,
- [
- event
- for event, _ in events_and_contexts
- if event.type == EventTypes.Member
- ],
- backfilled=backfilled,
- )
- # Insert into the event_reference_hashes table.
- self._store_event_reference_hashes_txn(
- txn, [event for event, _ in events_and_contexts]
- )
- state_events_and_contexts = [
- ec for ec in events_and_contexts if ec[0].is_state()
- ]
- state_values = []
- for event, context in state_events_and_contexts:
- vals = {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "type": event.type,
- "state_key": event.state_key,
- }
- # TODO: How does this work with backfilling?
- if hasattr(event, "replaces_state"):
- vals["prev_state"] = event.replaces_state
- state_values.append(vals)
- self._simple_insert_many_txn(
- txn,
- table="state_events",
- values=state_values,
- )
- self._simple_insert_many_txn(
- txn,
- table="event_edges",
- values=[
- {
- "event_id": event.event_id,
- "prev_event_id": prev_id,
- "room_id": event.room_id,
- "is_state": True,
- }
- for event, _ in state_events_and_contexts
- for prev_id, _ in event.prev_state
- ],
- )
- # Prefill the event cache
- self._add_to_cache(txn, events_and_contexts)
- if backfilled:
- # Backfilled events come before the current state so we don't need
- # to update the current state table
- return
- for event, _ in state_events_and_contexts:
- if event.internal_metadata.is_outlier():
- # Outlier events shouldn't clobber the current state.
- continue
- txn.call_after(
- self._get_current_state_for_key.invalidate,
- (event.room_id, event.type, event.state_key,)
- )
- self._simple_upsert_txn(
- txn,
- "current_state_events",
- keyvalues={
- "room_id": event.room_id,
- "type": event.type,
- "state_key": event.state_key,
- },
- values={
- "event_id": event.event_id,
- }
- )
- return
- def _add_to_cache(self, txn, events_and_contexts):
- to_prefill = []
- rows = []
- N = 200
- for i in range(0, len(events_and_contexts), N):
- ev_map = {
- e[0].event_id: e[0]
- for e in events_and_contexts[i:i + N]
- }
- if not ev_map:
- break
- sql = (
- "SELECT "
- " e.event_id as event_id, "
- " r.redacts as redacts,"
- " rej.event_id as rejects "
- " FROM events as e"
- " LEFT JOIN rejections as rej USING (event_id)"
- " LEFT JOIN redactions as r ON e.event_id = r.redacts"
- " WHERE e.event_id IN (%s)"
- ) % (",".join(["?"] * len(ev_map)),)
- txn.execute(sql, ev_map.keys())
- rows = self.cursor_to_dict(txn)
- for row in rows:
- event = ev_map[row["event_id"]]
- if not row["rejects"] and not row["redacts"]:
- to_prefill.append(_EventCacheEntry(
- event=event,
- redacted_event=None,
- ))
- def prefill():
- for cache_entry in to_prefill:
- self._get_event_cache.prefill((cache_entry.event.event_id,), cache_entry)
- txn.call_after(prefill)
- def _store_redaction(self, txn, event):
- # invalidate the cache for the redacted event
- txn.call_after(self._invalidate_get_event_cache, event.redacts)
- txn.execute(
- "INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
- (event.event_id, event.redacts)
- )
- @defer.inlineCallbacks
- def have_events_in_timeline(self, event_ids):
- """Given a list of event ids, check if we have already processed and
- stored them as non outliers.
- """
- rows = yield self._simple_select_many_batch(
- table="events",
- retcols=("event_id",),
- column="event_id",
- iterable=list(event_ids),
- keyvalues={"outlier": False},
- desc="have_events_in_timeline",
- )
- defer.returnValue(set(r["event_id"] for r in rows))
- def have_events(self, event_ids):
- """Given a list of event ids, check if we have already processed them.
- Returns:
- dict: Has an entry for each event id we already have seen. Maps to
- the rejected reason string if we rejected the event, else maps to
- None.
- """
- if not event_ids:
- return defer.succeed({})
- def f(txn):
- sql = (
- "SELECT e.event_id, reason FROM events as e "
- "LEFT JOIN rejections as r ON e.event_id = r.event_id "
- "WHERE e.event_id = ?"
- )
- res = {}
- for event_id in event_ids:
- txn.execute(sql, (event_id,))
- row = txn.fetchone()
- if row:
- _, rejected = row
- res[event_id] = rejected
- return res
- return self.runInteraction(
- "have_events", f,
- )
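- # Illustrative result shape for have_events (a sketch with made-up ids):
- #
- #     res = yield store.have_events(["$a:example.com", "$b:example.com"])
- #     # {"$a:example.com": None}           -> seen and not rejected
- #     # {"$a:example.com": "auth error"}   -> seen but rejected
- #     # ids absent from the dict have not been seen at all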
- @defer.inlineCallbacks
- def _get_events(self, event_ids, check_redacted=True,
- get_prev_content=False, allow_rejected=False):
- if not event_ids:
- defer.returnValue([])
- event_id_list = event_ids
- event_ids = set(event_ids)
- event_entry_map = self._get_events_from_cache(
- event_ids,
- allow_rejected=allow_rejected,
- )
- missing_events_ids = [e for e in event_ids if e not in event_entry_map]
- if missing_events_ids:
- missing_events = yield self._enqueue_events(
- missing_events_ids,
- check_redacted=check_redacted,
- allow_rejected=allow_rejected,
- )
- event_entry_map.update(missing_events)
- events = []
- for event_id in event_id_list:
- entry = event_entry_map.get(event_id, None)
- if not entry:
- continue
- if allow_rejected or not entry.event.rejected_reason:
- if check_redacted and entry.redacted_event:
- event = entry.redacted_event
- else:
- event = entry.event
- events.append(event)
- if get_prev_content:
- if "replaces_state" in event.unsigned:
- prev = yield self.get_event(
- event.unsigned["replaces_state"],
- get_prev_content=False,
- allow_none=True,
- )
- if prev:
- event.unsigned = dict(event.unsigned)
- event.unsigned["prev_content"] = prev.content
- event.unsigned["prev_sender"] = prev.sender
- defer.returnValue(events)
- def _invalidate_get_event_cache(self, event_id):
- self._get_event_cache.invalidate((event_id,))
- def _get_events_from_cache(self, events, allow_rejected):
- event_map = {}
- for event_id in events:
- ret = self._get_event_cache.get((event_id,), None)
- if not ret:
- continue
- if allow_rejected or not ret.event.rejected_reason:
- event_map[event_id] = ret
- else:
- event_map[event_id] = None
- return event_map
- def _do_fetch(self, conn):
- """Takes a database connection and waits for requests for events from
- the _event_fetch_list queue.
- """
- event_list = []
- i = 0
- while True:
- try:
- with self._event_fetch_lock:
- event_list = self._event_fetch_list
- self._event_fetch_list = []
- if not event_list:
- single_threaded = self.database_engine.single_threaded
- if single_threaded or i > EVENT_QUEUE_ITERATIONS:
- self._event_fetch_ongoing -= 1
- return
- else:
- self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
- i += 1
- continue
- i = 0
- event_id_lists = zip(*event_list)[0]
- event_ids = [
- item for sublist in event_id_lists for item in sublist
- ]
- rows = self._new_transaction(
- conn, "do_fetch", [], None, self._fetch_event_rows, event_ids
- )
- row_dict = {
- r["event_id"]: r
- for r in rows
- }
- # We only want to resolve deferreds from the main thread
- def fire(lst, res):
- for ids, d in lst:
- if not d.called:
- try:
- with PreserveLoggingContext():
- d.callback([
- res[i]
- for i in ids
- if i in res
- ])
- except Exception:
- logger.exception("Failed to callback")
- with PreserveLoggingContext():
- reactor.callFromThread(fire, event_list, row_dict)
- except Exception as e:
- logger.exception("do_fetch")
- # We only want to resolve deferreds from the main thread
- def fire(evs):
- for _, d in evs:
- if not d.called:
- with PreserveLoggingContext():
- d.errback(e)
- if event_list:
- with PreserveLoggingContext():
- reactor.callFromThread(fire, event_list)
- @defer.inlineCallbacks
- def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
- """Fetches events from the database using the _event_fetch_list. This
- allows batch and bulk fetching of events - it allows us to fetch events
- without having to create a new transaction for each request for events.
- """
- if not events:
- defer.returnValue({})
- events_d = defer.Deferred()
- with self._event_fetch_lock:
- self._event_fetch_list.append(
- (events, events_d)
- )
- self._event_fetch_lock.notify()
- if self._event_fetch_ongoing < EVENT_QUEUE_THREADS:
- self._event_fetch_ongoing += 1
- should_start = True
- else:
- should_start = False
- if should_start:
- with PreserveLoggingContext():
- self.runWithConnection(
- self._do_fetch
- )
- with PreserveLoggingContext():
- rows = yield events_d
- if not allow_rejected:
- rows[:] = [r for r in rows if not r["rejects"]]
- res = yield preserve_context_over_deferred(defer.gatherResults(
- [
- preserve_fn(self._get_event_from_row)(
- row["internal_metadata"], row["json"], row["redacts"],
- rejected_reason=row["rejects"],
- )
- for row in rows
- ],
- consumeErrors=True
- ))
- defer.returnValue({
- e.event.event_id: e
- for e in res if e
- })
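- # How the pieces above fit together (a descriptive sketch): each call to
- # _enqueue_events appends an (event_ids, deferred) pair to
- # _event_fetch_list; at most EVENT_QUEUE_THREADS _do_fetch workers drain
- # that list, run a single SELECT for the union of the ids via
- # _fetch_event_rows, and resolve each deferred with its own rows from the
- # reactor thread.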
- def _fetch_event_rows(self, txn, events):
- rows = []
- N = 200
- for i in range(1 + len(events) / N):
- evs = events[i * N:(i + 1) * N]
- if not evs:
- break
- sql = (
- "SELECT "
- " e.event_id as event_id, "
- " e.internal_metadata,"
- " e.json,"
- " r.redacts as redacts,"
- " rej.event_id as rejects "
- " FROM event_json as e"
- " LEFT JOIN rejections as rej USING (event_id)"
- " LEFT JOIN redactions as r ON e.event_id = r.redacts"
- " WHERE e.event_id IN (%s)"
- ) % (",".join(["?"] * len(evs)),)
- txn.execute(sql, evs)
- rows.extend(self.cursor_to_dict(txn))
- return rows
- @defer.inlineCallbacks
- def _get_event_from_row(self, internal_metadata, js, redacted,
- rejected_reason=None):
- with Measure(self._clock, "_get_event_from_row"):
- d = json.loads(js)
- internal_metadata = json.loads(internal_metadata)
- if rejected_reason:
- rejected_reason = yield self._simple_select_one_onecol(
- table="rejections",
- keyvalues={"event_id": rejected_reason},
- retcol="reason",
- desc="_get_event_from_row_rejected_reason",
- )
- original_ev = FrozenEvent(
- d,
- internal_metadata_dict=internal_metadata,
- rejected_reason=rejected_reason,
- )
- redacted_event = None
- if redacted:
- redacted_event = prune_event(original_ev)
- redaction_id = yield self._simple_select_one_onecol(
- table="redactions",
- keyvalues={"redacts": redacted_event.event_id},
- retcol="event_id",
- desc="_get_event_from_row_redactions",
- )
- redacted_event.unsigned["redacted_by"] = redaction_id
- # Get the redaction event.
- because = yield self.get_event(
- redaction_id,
- check_redacted=False,
- allow_none=True,
- )
- if because:
- # It's fine to add the event directly, since get_pdu_json
- # will serialise this field correctly
- redacted_event.unsigned["redacted_because"] = because
- cache_entry = _EventCacheEntry(
- event=original_ev,
- redacted_event=redacted_event,
- )
- self._get_event_cache.prefill((original_ev.event_id,), cache_entry)
- defer.returnValue(cache_entry)
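- # Illustrative cache entry produced above (a sketch): for a redacted
- # event we end up with
- #
- #     _EventCacheEntry(event=<original FrozenEvent>,
- #                      redacted_event=<pruned copy>)
- #
- # where redacted_event.unsigned gains "redacted_by" and, if the redaction
- # event itself is known, "redacted_because".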
- @defer.inlineCallbacks
- def count_daily_messages(self):
- """
- Returns an estimate of the number of messages sent in the last day.
- If it has been significantly less or more than one day since the last
- call to this function, it will return None.
- """
- def _count_messages(txn):
- now = self.hs.get_clock().time()
- txn.execute(
- "SELECT reported_stream_token, reported_time FROM stats_reporting"
- )
- last_reported = self.cursor_to_dict(txn)
- txn.execute(
- "SELECT stream_ordering"
- " FROM events"
- " ORDER BY stream_ordering DESC"
- " LIMIT 1"
- )
- now_reporting = self.cursor_to_dict(txn)
- if not now_reporting:
- logger.info("Calculating daily messages skipped; no now_reporting")
- return None
- now_reporting = now_reporting[0]["stream_ordering"]
- txn.execute("DELETE FROM stats_reporting")
- txn.execute(
- "INSERT INTO stats_reporting"
- " (reported_stream_token, reported_time)"
- " VALUES (?, ?)",
- (now_reporting, now,)
- )
- if not last_reported:
- logger.info("Calculating daily messages skipped; no last_reported")
- return None
- # Close enough to correct for our purposes.
- yesterday = (now - 24 * 60 * 60)
- since_yesterday_seconds = yesterday - last_reported[0]["reported_time"]
- any_since_yesterday = math.fabs(since_yesterday_seconds) > 60 * 60
- if any_since_yesterday:
- logger.info(
- "Calculating daily messages skipped; since_yesterday_seconds: %d" %
- (since_yesterday_seconds,)
- )
- return None
- txn.execute(
- "SELECT COUNT(*) as messages"
- " FROM events NATURAL JOIN event_json"
- " WHERE json like '%m.room.message%'"
- " AND stream_ordering > ?"
- " AND stream_ordering <= ?",
- (
- last_reported[0]["reported_stream_token"],
- now_reporting,
- )
- )
- rows = self.cursor_to_dict(txn)
- if not rows:
- logger.info("Calculating daily messages skipped; messages count missing")
- return None
- return rows[0]["messages"]
- ret = yield self.runInteraction("count_messages", _count_messages)
- defer.returnValue(ret)
- @defer.inlineCallbacks
- def _background_reindex_fields_sender(self, progress, batch_size):
- target_min_stream_id = progress["target_min_stream_id_inclusive"]
- max_stream_id = progress["max_stream_id_exclusive"]
- rows_inserted = progress.get("rows_inserted", 0)
- INSERT_CLUMP_SIZE = 1000
- def reindex_txn(txn):
- sql = (
- "SELECT stream_ordering, event_id, json FROM events"
- " INNER JOIN event_json USING (event_id)"
- " WHERE ? <= stream_ordering AND stream_ordering < ?"
- " ORDER BY stream_ordering DESC"
- " LIMIT ?"
- )
- txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
- rows = txn.fetchall()
- if not rows:
- return 0
- min_stream_id = rows[-1][0]
- update_rows = []
- for row in rows:
- try:
- event_id = row[1]
- event_json = json.loads(row[2])
- sender = event_json["sender"]
- content = event_json["content"]
- contains_url = "url" in content
- if contains_url:
- contains_url &= isinstance(content["url"], basestring)
- except (KeyError, AttributeError):
- # If the event is missing a necessary field then
- # skip over it.
- continue
- update_rows.append((sender, contains_url, event_id))
- sql = (
- "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"
- )
- for index in range(0, len(update_rows), INSERT_CLUMP_SIZE):
- clump = update_rows[index:index + INSERT_CLUMP_SIZE]
- txn.executemany(sql, clump)
- progress = {
- "target_min_stream_id_inclusive": target_min_stream_id,
- "max_stream_id_exclusive": min_stream_id,
- "rows_inserted": rows_inserted + len(rows)
- }
- self._background_update_progress_txn(
- txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
- )
- return len(rows)
- result = yield self.runInteraction(
- self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
- )
- if not result:
- yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)
- defer.returnValue(result)
- @defer.inlineCallbacks
- def _background_reindex_origin_server_ts(self, progress, batch_size):
- target_min_stream_id = progress["target_min_stream_id_inclusive"]
- max_stream_id = progress["max_stream_id_exclusive"]
- rows_inserted = progress.get("rows_inserted", 0)
- INSERT_CLUMP_SIZE = 1000
- def reindex_search_txn(txn):
- sql = (
- "SELECT stream_ordering, event_id FROM events"
- " WHERE ? <= stream_ordering AND stream_ordering < ?"
- " ORDER BY stream_ordering DESC"
- " LIMIT ?"
- )
- txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
- rows = txn.fetchall()
- if not rows:
- return 0
- min_stream_id = rows[-1][0]
- event_ids = [row[1] for row in rows]
- rows_to_update = []
- chunks = [
- event_ids[i:i + 100]
- for i in xrange(0, len(event_ids), 100)
- ]
- for chunk in chunks:
- ev_rows = self._simple_select_many_txn(
- txn,
- table="event_json",
- column="event_id",
- iterable=chunk,
- retcols=["event_id", "json"],
- keyvalues={},
- )
- for row in ev_rows:
- event_id = row["event_id"]
- event_json = json.loads(row["json"])
- try:
- origin_server_ts = event_json["origin_server_ts"]
- except (KeyError, AttributeError):
- # If the event is missing a necessary field then
- # skip over it.
- continue
- rows_to_update.append((origin_server_ts, event_id))
- sql = (
- "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"
- )
- for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE):
- clump = rows_to_update[index:index + INSERT_CLUMP_SIZE]
- txn.executemany(sql, clump)
- progress = {
- "target_min_stream_id_inclusive": target_min_stream_id,
- "max_stream_id_exclusive": min_stream_id,
- "rows_inserted": rows_inserted + len(rows_to_update)
- }
- self._background_update_progress_txn(
- txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
- )
- return len(rows_to_update)
- result = yield self.runInteraction(
- self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
- )
- if not result:
- yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)
- defer.returnValue(result)
- def get_current_backfill_token(self):
- """The current minimum token that backfilled events have reached"""
- return -self._backfill_id_gen.get_current_token()
- def get_all_new_events(self, last_backfill_id, last_forward_id,
- current_backfill_id, current_forward_id, limit):
- """Get all the new events that have arrived at the server either as
- new events or as backfilled events"""
- have_backfill_events = last_backfill_id != current_backfill_id
- have_forward_events = last_forward_id != current_forward_id
- if not have_backfill_events and not have_forward_events:
- return defer.succeed(AllNewEventsResult([], [], [], [], []))
- def get_all_new_events_txn(txn):
- sql = (
- "SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group"
- " FROM events as e"
- " JOIN event_json as ej"
- " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
- " LEFT JOIN event_to_state_groups as eg"
- " ON e.event_id = eg.event_id"
- " WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
- " ORDER BY e.stream_ordering ASC"
- " LIMIT ?"
- )
- if have_forward_events:
- txn.execute(sql, (last_forward_id, current_forward_id, limit))
- new_forward_events = txn.fetchall()
- if len(new_forward_events) == limit:
- upper_bound = new_forward_events[-1][0]
- else:
- upper_bound = current_forward_id
- sql = (
- "SELECT event_stream_ordering FROM current_state_resets"
- " WHERE ? < event_stream_ordering"
- " AND event_stream_ordering <= ?"
- " ORDER BY event_stream_ordering ASC"
- )
- txn.execute(sql, (last_forward_id, upper_bound))
- state_resets = txn.fetchall()
- sql = (
- "SELECT event_stream_ordering, event_id, state_group"
- " FROM ex_outlier_stream"
- " WHERE ? > event_stream_ordering"
- " AND event_stream_ordering >= ?"
- " ORDER BY event_stream_ordering DESC"
- )
- txn.execute(sql, (last_forward_id, upper_bound))
- forward_ex_outliers = txn.fetchall()
- else:
- new_forward_events = []
- state_resets = []
- forward_ex_outliers = []
- sql = (
- "SELECT -e.stream_ordering, ej.internal_metadata, ej.json,"
- " eg.state_group"
- " FROM events as e"
- " JOIN event_json as ej"
- " ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
- " LEFT JOIN event_to_state_groups as eg"
- " ON e.event_id = eg.event_id"
- " WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
- " ORDER BY e.stream_ordering DESC"
- " LIMIT ?"
- )
- if have_backfill_events:
- txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
- new_backfill_events = txn.fetchall()
- if len(new_backfill_events) == limit:
- upper_bound = new_backfill_events[-1][0]
- else:
- upper_bound = current_backfill_id
- sql = (
- "SELECT -event_stream_ordering, event_id, state_group"
- " FROM ex_outlier_stream"
- " WHERE ? > event_stream_ordering"
- " AND event_stream_ordering >= ?"
- " ORDER BY event_stream_ordering DESC"
- )
- txn.execute(sql, (-last_backfill_id, -upper_bound))
- backward_ex_outliers = txn.fetchall()
- else:
- new_backfill_events = []
- backward_ex_outliers = []
- return AllNewEventsResult(
- new_forward_events, new_backfill_events,
- forward_ex_outliers, backward_ex_outliers,
- state_resets,
- )
- return self.runInteraction("get_all_new_events", get_all_new_events_txn)
- def delete_old_state(self, room_id, topological_ordering):
- return self.runInteraction(
- "delete_old_state",
- self._delete_old_state_txn, room_id, topological_ordering
- )
- def _delete_old_state_txn(self, txn, room_id, topological_ordering):
- """Deletes old room state
- """
- # Tables that should be pruned:
- # event_auth
- # event_backward_extremities
- # event_content_hashes
- # event_destinations
- # event_edge_hashes
- # event_edges
- # event_forward_extremities
- # event_json
- # event_push_actions
- # event_reference_hashes
- # event_search
- # event_signatures
- # event_to_state_groups
- # events
- # rejections
- # room_depth
- # state_groups
- # state_groups_state
- # First ensure that we're not about to delete all the forward extremities
- txn.execute(
- "SELECT e.event_id, e.depth FROM events as e "
- "INNER JOIN event_forward_extremities as f "
- "ON e.event_id = f.event_id "
- "AND e.room_id = f.room_id "
- "WHERE f.room_id = ?",
- (room_id,)
- )
- rows = txn.fetchall()
- max_depth = max(row[1] for row in rows)  # depth is the second column
- if max_depth <= topological_ordering:
- # We need to ensure we don't delete all the events from the database
- # otherwise we wouldn't be able to send any events (due to not
- # having any forward extremities left to use as prev_events)
- raise SynapseError(
- 400, "topological_ordering is greater than forward extremities"
- )
- txn.execute(
- "SELECT event_id, state_key FROM events"
- " LEFT JOIN state_events USING (room_id, event_id)"
- " WHERE room_id = ? AND topological_ordering < ?",
- (room_id, topological_ordering,)
- )
- event_rows = txn.fetchall()
- for event_id, state_key in event_rows:
- txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
- # We calculate the new entries for the backward extremities by finding
- # all events that point to events that are to be purged
- txn.execute(
- "SELECT DISTINCT e.event_id FROM events as e"
- " INNER JOIN event_edges as ed ON e.event_id = ed.prev_event_id"
- " INNER JOIN events as e2 ON e2.event_id = ed.event_id"
- " WHERE e.room_id = ? AND e.topological_ordering < ?"
- " AND e2.topological_ordering >= ?",
- (room_id, topological_ordering, topological_ordering)
- )
- new_backwards_extrems = txn.fetchall()
- txn.execute(
- "DELETE FROM event_backward_extremities WHERE room_id = ?",
- (room_id,)
- )
- # Update backward extremities
- txn.executemany(
- "INSERT INTO event_backward_extremities (room_id, event_id)"
- " VALUES (?, ?)",
- [
- (room_id, event_id) for event_id, in new_backwards_extrems
- ]
- )
- # Get all state groups that are only referenced by events that are
- # to be deleted.
- txn.execute(
- "SELECT state_group FROM event_to_state_groups"
- " INNER JOIN events USING (event_id)"
- " WHERE state_group IN ("
- " SELECT DISTINCT state_group FROM events"
- " INNER JOIN event_to_state_groups USING (event_id)"
- " WHERE room_id = ? AND topological_ordering < ?"
- " )"
- " GROUP BY state_group HAVING MAX(topological_ordering) < ?",
- (room_id, topological_ordering, topological_ordering)
- )
- state_rows = txn.fetchall()
- state_groups_to_delete = [sg for sg, in state_rows]
- # Now we get all the state groups that rely on these state groups
- new_state_edges = []
- chunks = [
- state_groups_to_delete[i:i + 100]
- for i in xrange(0, len(state_groups_to_delete), 100)
- ]
- for chunk in chunks:
- rows = self._simple_select_many_txn(
- txn,
- table="state_group_edges",
- column="prev_state_group",
- iterable=chunk,
- retcols=["state_group"],
- keyvalues={},
- )
- new_state_edges.extend(row["state_group"] for row in rows)
- # Now we turn the state groups that reference to-be-deleted state
- # groups into non-delta versions.
- for new_state_edge in new_state_edges:
- curr_state = self._get_state_groups_from_groups_txn(
- txn, [new_state_edge], types=None
- )
- curr_state = curr_state[new_state_edge]
- self._simple_delete_txn(
- txn,
- table="state_groups_state",
- keyvalues={
- "state_group": new_state_edge,
- }
- )
- self._simple_delete_txn(
- txn,
- table="state_group_edges",
- keyvalues={
- "state_group": new_state_edge,
- }
- )
- self._simple_insert_many_txn(
- txn,
- table="state_groups_state",
- values=[
- {
- "state_group": new_state_edge,
- "room_id": room_id,
- "type": key[0],
- "state_key": key[1],
- "event_id": state_id,
- }
- for key, state_id in curr_state.items()
- ],
- )
- txn.executemany(
- "DELETE FROM state_groups_state WHERE state_group = ?",
- state_rows
- )
- txn.executemany(
- "DELETE FROM state_groups WHERE id = ?",
- state_rows
- )
- # Remove the purged events from event_to_state_groups
- txn.executemany(
- "DELETE FROM event_to_state_groups WHERE event_id = ?",
- [(event_id,) for event_id, _ in event_rows]
- )
- txn.execute(
- "UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
- (topological_ordering, room_id,)
- )
- # Delete all remote non-state events
- to_delete = [
- (event_id,) for event_id, state_key in event_rows
- if state_key is None and not self.hs.is_mine_id(event_id)
- ]
- for table in (
- "events",
- "event_json",
- "event_auth",
- "event_content_hashes",
- "event_destinations",
- "event_edge_hashes",
- "event_edges",
- "event_forward_extremities",
- "event_push_actions",
- "event_reference_hashes",
- "event_search",
- "event_signatures",
- "rejections",
- ):
- txn.executemany(
- "DELETE FROM %s WHERE event_id = ?" % (table,),
- to_delete
- )
- txn.executemany(
- "DELETE FROM events WHERE event_id = ?",
- to_delete
- )
- # Mark all state and own events as outliers
- txn.executemany(
- "UPDATE events SET outlier = ?"
- " WHERE event_id = ?",
- [
- (True, event_id,) for event_id, state_key in event_rows
- if state_key is not None or self.hs.is_mine_id(event_id)
- ]
- )
- AllNewEventsResult = namedtuple("AllNewEventsResult", [
- "new_forward_events", "new_backfill_events",
- "forward_ex_outliers", "backward_ex_outliers",
- "state_resets"
- ])