events_bg_updates.py

# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from six import text_type

from canonicaljson import json

from twisted.internet import defer

from synapse.storage.background_updates import BackgroundUpdateStore

logger = logging.getLogger(__name__)


class EventsBackgroundUpdatesStore(BackgroundUpdateStore):

    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"

    def __init__(self, db_conn, hs):
        super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs)

        self.register_background_update_handler(
            self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
        )
        self.register_background_update_handler(
            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
            self._background_reindex_fields_sender,
        )

        self.register_background_index_update(
            "event_contains_url_index",
            index_name="event_contains_url_index",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering"],
            where_clause="contains_url = true AND outlier = false",
        )

        # an event_id index on event_search is useful for the purge_history
        # api. Plus it means we get to enforce some integrity with a UNIQUE
        # clause
        self.register_background_index_update(
            "event_search_event_id_idx",
            index_name="event_search_event_id_idx",
            table="event_search",
            columns=["event_id"],
            unique=True,
            psql_only=True,
        )

        self.register_background_update_handler(
            self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
        )

    @defer.inlineCallbacks
    def _background_reindex_fields_sender(self, progress, batch_size):
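        """Background update to populate the `sender` and `contains_url`
        columns of `events` from the stored event JSON, working backwards
        through `stream_ordering` one batch at a time.
        """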
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        INSERT_CLUMP_SIZE = 1000

        def reindex_txn(txn):
            sql = (
                "SELECT stream_ordering, event_id, json FROM events"
                " INNER JOIN event_json USING (event_id)"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            )

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0
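
            # Rows are ordered by descending stream_ordering, so the last row
            # gives the new exclusive upper bound for the next batch.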
            min_stream_id = rows[-1][0]

            update_rows = []
            for row in rows:
                try:
                    event_id = row[1]
                    event_json = json.loads(row[2])
                    sender = event_json["sender"]
                    content = event_json["content"]

                    contains_url = "url" in content
                    if contains_url:
                        contains_url &= isinstance(content["url"], text_type)
                except (KeyError, AttributeError):
                    # If the event is missing a necessary field then
                    # skip over it.
                    continue

                update_rows.append((sender, contains_url, event_id))

            sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?"

            for index in range(0, len(update_rows), INSERT_CLUMP_SIZE):
                clump = update_rows[index : index + INSERT_CLUMP_SIZE]
                txn.executemany(sql, clump)

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows),
            }

            self._background_update_progress_txn(
                txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
            )

            return len(rows)

        result = yield self.runInteraction(
            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
        )

        if not result:
            yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)

        return result

    @defer.inlineCallbacks
    def _background_reindex_origin_server_ts(self, progress, batch_size):
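        """Background update to populate the `origin_server_ts` column of
        `events` from the stored event JSON, working backwards through
        `stream_ordering` one batch at a time.
        """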
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]
        rows_inserted = progress.get("rows_inserted", 0)

        INSERT_CLUMP_SIZE = 1000

        def reindex_search_txn(txn):
            sql = (
                "SELECT stream_ordering, event_id FROM events"
                " WHERE ? <= stream_ordering AND stream_ordering < ?"
                " ORDER BY stream_ordering DESC"
                " LIMIT ?"
            )

            txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))

            rows = txn.fetchall()
            if not rows:
                return 0

            min_stream_id = rows[-1][0]

            event_ids = [row[1] for row in rows]

            rows_to_update = []

            chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)]
            for chunk in chunks:
                ev_rows = self._simple_select_many_txn(
                    txn,
                    table="event_json",
                    column="event_id",
                    iterable=chunk,
                    retcols=["event_id", "json"],
                    keyvalues={},
                )

                for row in ev_rows:
                    event_id = row["event_id"]
                    event_json = json.loads(row["json"])
                    try:
                        origin_server_ts = event_json["origin_server_ts"]
                    except (KeyError, AttributeError):
                        # If the event is missing a necessary field then
                        # skip over it.
                        continue

                    rows_to_update.append((origin_server_ts, event_id))

            sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?"

            for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE):
                clump = rows_to_update[index : index + INSERT_CLUMP_SIZE]
                txn.executemany(sql, clump)

            progress = {
                "target_min_stream_id_inclusive": target_min_stream_id,
                "max_stream_id_exclusive": min_stream_id,
                "rows_inserted": rows_inserted + len(rows_to_update),
            }

            self._background_update_progress_txn(
                txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
            )

            return len(rows_to_update)

        result = yield self.runInteraction(
            self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
        )

        if not result:
            yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)

        return result

    @defer.inlineCallbacks
    def _cleanup_extremities_bg_update(self, progress, batch_size):
        """Background update to clean out extremities that should have been
        deleted previously.

        Mainly used to deal with the aftermath of #5269.
        """

        # This works by first copying all existing forward extremities into the
        # `_extremities_to_check` table at start up, and then checking, for each
        # event in that table, whether it has any descendants that are not
        # soft-failed/rejected. If that is the case then we delete that event
        # from the forward extremities table.
        #
        # For efficiency, we do this in batches by recursively pulling out all
        # descendants of a batch until we find the non soft-failed/rejected
        # events, i.e. the set of descendants whose chain of prev events back
        # to the batch of extremities are all soft-failed or rejected.
        # Typically, we won't find any such events as extremities will rarely
        # have any descendants, but if they do then we should delete those
        # extremities.
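        #
        # For example, if extremity E has a single soft-failed child C1, and
        # C1 has a child C2 that is neither soft-failed nor rejected, then C2
        # is a non-rejected leaf and E is removed from the forward extremities
        # table.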

        def _cleanup_extremities_bg_update_txn(txn):
            # The set of extremity event IDs that we're checking this round
            original_set = set()

            # A dict[str, set[str]] of event IDs to their prev events.
            graph = {}

            # The set of descendants of the original set that are not rejected
            # nor soft-failed. Ancestors of these events should be removed
            # from the forward extremities table.
            non_rejected_leaves = set()

            # Set of event IDs that have been soft failed, and for which we
            # should check if they have descendants which haven't been soft
            # failed.
            soft_failed_events_to_lookup = set()

            # First, we get `batch_size` events from the table, pulling out
            # their successor events, if any, and the successor events'
            # rejection status.
            txn.execute(
                """SELECT prev_event_id, event_id, internal_metadata,
                    rejections.event_id IS NOT NULL, events.outlier
                FROM (
                    SELECT event_id AS prev_event_id
                    FROM _extremities_to_check
                    LIMIT ?
                ) AS f
                LEFT JOIN event_edges USING (prev_event_id)
                LEFT JOIN events USING (event_id)
                LEFT JOIN event_json USING (event_id)
                LEFT JOIN rejections USING (event_id)
                """,
                (batch_size,),
            )

            for prev_event_id, event_id, metadata, rejected, outlier in txn:
                original_set.add(prev_event_id)

                if not event_id or outlier:
                    # Common case where the forward extremity doesn't have any
                    # descendants.
                    continue

                graph.setdefault(event_id, set()).add(prev_event_id)

                soft_failed = False
                if metadata:
                    soft_failed = json.loads(metadata).get("soft_failed")

                if soft_failed or rejected:
                    soft_failed_events_to_lookup.add(event_id)
                else:
                    non_rejected_leaves.add(event_id)

            # Now we recursively check all the soft-failed descendants we
            # found above in the same way, until we have nothing left to
            # check.
            while soft_failed_events_to_lookup:
                # We only want to do 100 at a time, so we split the given list
                # into two.
                batch = list(soft_failed_events_to_lookup)
                to_check, to_defer = batch[:100], batch[100:]
                soft_failed_events_to_lookup = set(to_defer)

                sql = """SELECT prev_event_id, event_id, internal_metadata,
                    rejections.event_id IS NOT NULL
                    FROM event_edges
                    INNER JOIN events USING (event_id)
                    INNER JOIN event_json USING (event_id)
                    LEFT JOIN rejections USING (event_id)
                    WHERE
                        prev_event_id IN (%s)
                        AND NOT events.outlier
                """ % (
                    ",".join("?" for _ in to_check),
                )
                txn.execute(sql, to_check)

                for prev_event_id, event_id, metadata, rejected in txn:
                    if event_id in graph:
                        # Already handled this event previously, but we still
                        # want to record the edge.
                        graph[event_id].add(prev_event_id)
                        continue

                    graph[event_id] = {prev_event_id}

                    soft_failed = json.loads(metadata).get("soft_failed")
                    if soft_failed or rejected:
                        soft_failed_events_to_lookup.add(event_id)
                    else:
                        non_rejected_leaves.add(event_id)

            # We have a set of non-soft-failed descendants, so we recurse up
            # the graph to find all ancestors and add them to the set of event
            # IDs that we can delete from the forward extremities table.
            to_delete = set()
            while non_rejected_leaves:
                event_id = non_rejected_leaves.pop()
                prev_event_ids = graph.get(event_id, set())
                non_rejected_leaves.update(prev_event_ids)
                to_delete.update(prev_event_ids)
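
            # Only events that were part of the original batch of extremities
            # can actually be removed from the forward extremities table.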
            to_delete.intersection_update(original_set)

            deleted = self._simple_delete_many_txn(
                txn=txn,
                table="event_forward_extremities",
                column="event_id",
                iterable=to_delete,
                keyvalues={},
            )

            logger.info(
                "Deleted %d forward extremities of %d checked, to clean up #5269",
                deleted,
                len(original_set),
            )

            if deleted:
                # We now need to invalidate the caches of these rooms
                rows = self._simple_select_many_txn(
                    txn,
                    table="events",
                    column="event_id",
                    iterable=to_delete,
                    keyvalues={},
                    retcols=("room_id",),
                )
                room_ids = set(row["room_id"] for row in rows)
                for room_id in room_ids:
                    txn.call_after(
                        self.get_latest_event_ids_in_room.invalidate, (room_id,)
                    )
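
            # Remove the checked extremities from `_extremities_to_check` so
            # that they are not looked at again on the next batch.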
            self._simple_delete_many_txn(
                txn=txn,
                table="_extremities_to_check",
                column="event_id",
                iterable=original_set,
                keyvalues={},
            )

            return len(original_set)

        num_handled = yield self.runInteraction(
            "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn
        )

        if not num_handled:
            yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES)

            def _drop_table_txn(txn):
                txn.execute("DROP TABLE _extremities_to_check")

            yield self.runInteraction(
                "_cleanup_extremities_bg_update_drop_table", _drop_table_txn
            )

        return num_handled