test_event_federation.py 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212
  1. # Copyright 2018 New Vector Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the 'License');
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an 'AS IS' BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import datetime
  15. from typing import Dict, List, Tuple, Union, cast
  16. import attr
  17. from parameterized import parameterized
  18. from twisted.test.proto_helpers import MemoryReactor
  19. from synapse.api.constants import EventTypes
  20. from synapse.api.room_versions import (
  21. KNOWN_ROOM_VERSIONS,
  22. EventFormatVersions,
  23. RoomVersion,
  24. )
  25. from synapse.events import EventBase, _EventInternalMetadata
  26. from synapse.rest import admin
  27. from synapse.rest.client import login, room
  28. from synapse.server import HomeServer
  29. from synapse.storage.database import LoggingTransaction
  30. from synapse.storage.types import Cursor
  31. from synapse.types import JsonDict
  32. from synapse.util import Clock, json_encoder
  33. import tests.unittest
  34. import tests.utils
@attr.s(auto_attribs=True, frozen=True, slots=True)
class _BackfillSetupInfo:
    # The ID of the room that the backfill fixture events were inserted into.
    room_id: str
    # Map of event_id -> depth for every event in the fixture room.
    depth_map: Dict[str, int]
class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
    """Tests for the event-federation parts of the datastore: forward/backward
    extremities, auth chains and the auth-chain difference algorithm, backfill
    points, and pruning of the inbound federation staging queue.
    """

    # REST servlets registered so the tests can create rooms and users via the
    # client API where needed.
    servlets = [
        admin.register_servlets,
        room.register_servlets,
        login.register_servlets,
    ]
  45. def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
  46. self.store = hs.get_datastores().main
  47. persist_events = hs.get_datastores().persist_events
  48. assert persist_events is not None
  49. self.persist_events = persist_events
  50. def test_get_prev_events_for_room(self) -> None:
  51. room_id = "@ROOM:local"
  52. # add a bunch of events and hashes to act as forward extremities
  53. def insert_event(txn: Cursor, i: int) -> None:
  54. event_id = "$event_%i:local" % i
  55. txn.execute(
  56. (
  57. "INSERT INTO events ("
  58. " room_id, event_id, type, depth, topological_ordering,"
  59. " content, processed, outlier, stream_ordering) "
  60. "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?, ?)"
  61. ),
  62. (room_id, event_id, i, i, True, False, i),
  63. )
  64. txn.execute(
  65. (
  66. "INSERT INTO event_forward_extremities (room_id, event_id) "
  67. "VALUES (?, ?)"
  68. ),
  69. (room_id, event_id),
  70. )
  71. for i in range(0, 20):
  72. self.get_success(
  73. self.store.db_pool.runInteraction("insert", insert_event, i)
  74. )
  75. # this should get the last ten
  76. r = self.get_success(self.store.get_prev_events_for_room(room_id))
  77. self.assertEqual(10, len(r))
  78. for i in range(0, 10):
  79. self.assertEqual("$event_%i:local" % (19 - i), r[i])
  80. def test_get_rooms_with_many_extremities(self) -> None:
  81. room1 = "#room1"
  82. room2 = "#room2"
  83. room3 = "#room3"
  84. def insert_event(txn: Cursor, i: int, room_id: str) -> None:
  85. event_id = "$event_%i:local" % i
  86. txn.execute(
  87. (
  88. "INSERT INTO event_forward_extremities (room_id, event_id) "
  89. "VALUES (?, ?)"
  90. ),
  91. (room_id, event_id),
  92. )
  93. for i in range(0, 20):
  94. self.get_success(
  95. self.store.db_pool.runInteraction("insert", insert_event, i, room1)
  96. )
  97. self.get_success(
  98. self.store.db_pool.runInteraction("insert", insert_event, i, room2)
  99. )
  100. self.get_success(
  101. self.store.db_pool.runInteraction("insert", insert_event, i, room3)
  102. )
  103. # Test simple case
  104. r = self.get_success(self.store.get_rooms_with_many_extremities(5, 5, []))
  105. self.assertEqual(len(r), 3)
  106. # Does filter work?
  107. r = self.get_success(self.store.get_rooms_with_many_extremities(5, 5, [room1]))
  108. self.assertTrue(room2 in r)
  109. self.assertTrue(room3 in r)
  110. self.assertEqual(len(r), 2)
  111. r = self.get_success(
  112. self.store.get_rooms_with_many_extremities(5, 5, [room1, room2])
  113. )
  114. self.assertEqual(r, [room3])
  115. # Does filter and limit work?
  116. r = self.get_success(self.store.get_rooms_with_many_extremities(5, 1, [room1]))
  117. self.assertTrue(r == [room2] or r == [room3])
    def _setup_auth_chain(self, use_chain_cover_index: bool) -> str:
        """Insert a room containing the "silly" auth graph below by writing
        directly to the relevant tables.

        Args:
            use_chain_cover_index: the value stored in
                `rooms.has_auth_chain_index`, which selects which auth-chain
                algorithm the store will use for this room.

        Returns:
            The room ID the graph was persisted into.
        """
        room_id = "@ROOM:local"

        # The silly auth graph we use to test the auth difference algorithm,
        # where the top are the most recent events.
        #
        #   A   B
        #    \ /
        #  D  E
        #  \  |
        #   ` F   C
        #     |  /|
        #     G ´ |
        #     | \ |
        #     H   I
        #     |   |
        #     K   J

        # Map of event_id -> list of auth event ids.
        auth_graph: Dict[str, List[str]] = {
            "a": ["e"],
            "b": ["e"],
            "c": ["g", "i"],
            "d": ["f"],
            "e": ["f"],
            "f": ["g"],
            "g": ["h", "i"],
            "h": ["k"],
            "i": ["j"],
            "k": [],
            "j": [],
        }

        depth_map = {
            "a": 7,
            "b": 7,
            "c": 4,
            "d": 6,
            "e": 6,
            "f": 5,
            "g": 3,
            "h": 2,
            "i": 2,
            "k": 1,
            "j": 1,
        }

        # Mark the room as maybe having a cover index.

        def store_room(txn: LoggingTransaction) -> None:
            self.store.db_pool.simple_insert_txn(
                txn,
                "rooms",
                {
                    "room_id": room_id,
                    "creator": "room_creator_user_id",
                    "is_public": True,
                    "room_version": "6",
                    "has_auth_chain_index": use_chain_cover_index,
                },
            )

        self.get_success(self.store.db_pool.runInteraction("store_room", store_room))

        # We rudely fiddle with the appropriate tables directly, as that's much
        # easier than constructing events properly.

        def insert_event(txn: LoggingTransaction) -> None:
            stream_ordering = 0

            for event_id in auth_graph:
                stream_ordering += 1
                depth = depth_map[event_id]

                self.store.db_pool.simple_insert_txn(
                    txn,
                    table="events",
                    values={
                        "event_id": event_id,
                        "room_id": room_id,
                        "depth": depth,
                        "topological_ordering": depth,
                        "type": "m.test",
                        "processed": True,
                        "outlier": False,
                        "stream_ordering": stream_ordering,
                    },
                )

            # Populate the auth-chain tables (and the chain cover, if enabled
            # for the room) for the whole graph in one transaction.
            # NOTE(review): FakeEvent is defined elsewhere in this file.
            self.persist_events._persist_event_auth_chain_txn(
                txn,
                [
                    cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
                    for event_id in auth_graph
                ],
            )

        self.get_success(
            self.store.db_pool.runInteraction(
                "insert",
                insert_event,
            )
        )

        return room_id
  209. @parameterized.expand([(True,), (False,)])
  210. def test_auth_chain_ids(self, use_chain_cover_index: bool) -> None:
  211. room_id = self._setup_auth_chain(use_chain_cover_index)
  212. # a and b have the same auth chain.
  213. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["a"]))
  214. self.assertCountEqual(auth_chain_ids, ["e", "f", "g", "h", "i", "j", "k"])
  215. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["b"]))
  216. self.assertCountEqual(auth_chain_ids, ["e", "f", "g", "h", "i", "j", "k"])
  217. auth_chain_ids = self.get_success(
  218. self.store.get_auth_chain_ids(room_id, ["a", "b"])
  219. )
  220. self.assertCountEqual(auth_chain_ids, ["e", "f", "g", "h", "i", "j", "k"])
  221. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["c"]))
  222. self.assertCountEqual(auth_chain_ids, ["g", "h", "i", "j", "k"])
  223. # d and e have the same auth chain.
  224. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["d"]))
  225. self.assertCountEqual(auth_chain_ids, ["f", "g", "h", "i", "j", "k"])
  226. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["e"]))
  227. self.assertCountEqual(auth_chain_ids, ["f", "g", "h", "i", "j", "k"])
  228. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["f"]))
  229. self.assertCountEqual(auth_chain_ids, ["g", "h", "i", "j", "k"])
  230. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["g"]))
  231. self.assertCountEqual(auth_chain_ids, ["h", "i", "j", "k"])
  232. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["h"]))
  233. self.assertEqual(auth_chain_ids, {"k"})
  234. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["i"]))
  235. self.assertEqual(auth_chain_ids, {"j"})
  236. # j and k have no parents.
  237. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["j"]))
  238. self.assertEqual(auth_chain_ids, set())
  239. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["k"]))
  240. self.assertEqual(auth_chain_ids, set())
  241. # More complex input sequences.
  242. auth_chain_ids = self.get_success(
  243. self.store.get_auth_chain_ids(room_id, ["b", "c", "d"])
  244. )
  245. self.assertCountEqual(auth_chain_ids, ["e", "f", "g", "h", "i", "j", "k"])
  246. auth_chain_ids = self.get_success(
  247. self.store.get_auth_chain_ids(room_id, ["h", "i"])
  248. )
  249. self.assertCountEqual(auth_chain_ids, ["k", "j"])
  250. # e gets returned even though include_given is false, but it is in the
  251. # auth chain of b.
  252. auth_chain_ids = self.get_success(
  253. self.store.get_auth_chain_ids(room_id, ["b", "e"])
  254. )
  255. self.assertCountEqual(auth_chain_ids, ["e", "f", "g", "h", "i", "j", "k"])
  256. # Test include_given.
  257. auth_chain_ids = self.get_success(
  258. self.store.get_auth_chain_ids(room_id, ["i"], include_given=True)
  259. )
  260. self.assertCountEqual(auth_chain_ids, ["i", "j"])
  261. @parameterized.expand([(True,), (False,)])
  262. def test_auth_difference(self, use_chain_cover_index: bool) -> None:
  263. room_id = self._setup_auth_chain(use_chain_cover_index)
  264. # Now actually test that various combinations give the right result:
  265. difference = self.get_success(
  266. self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}])
  267. )
  268. self.assertSetEqual(difference, {"a", "b"})
  269. difference = self.get_success(
  270. self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"c"}])
  271. )
  272. self.assertSetEqual(difference, {"a", "b", "c", "e", "f"})
  273. difference = self.get_success(
  274. self.store.get_auth_chain_difference(room_id, [{"a", "c"}, {"b"}])
  275. )
  276. self.assertSetEqual(difference, {"a", "b", "c"})
  277. difference = self.get_success(
  278. self.store.get_auth_chain_difference(room_id, [{"a", "c"}, {"b", "c"}])
  279. )
  280. self.assertSetEqual(difference, {"a", "b"})
  281. difference = self.get_success(
  282. self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"d"}])
  283. )
  284. self.assertSetEqual(difference, {"a", "b", "d", "e"})
  285. difference = self.get_success(
  286. self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"c"}, {"d"}])
  287. )
  288. self.assertSetEqual(difference, {"a", "b", "c", "d", "e", "f"})
  289. difference = self.get_success(
  290. self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"e"}])
  291. )
  292. self.assertSetEqual(difference, {"a", "b"})
  293. difference = self.get_success(
  294. self.store.get_auth_chain_difference(room_id, [{"a"}])
  295. )
  296. self.assertSetEqual(difference, set())
    def test_auth_difference_partial_cover(self) -> None:
        """Test that we correctly handle rooms where not all events have a chain
        cover calculated. This can happen in some obscure edge cases, including
        during the background update that calculates the chain cover for old
        rooms.
        """
        room_id = "@ROOM:local"

        # The silly auth graph we use to test the auth difference algorithm,
        # where the top are the most recent events.
        #
        #   A   B
        #    \ /
        #  D  E
        #  \  |
        #   ` F   C
        #     |  /|
        #     G ´ |
        #     | \ |
        #     H   I
        #     |   |
        #     K   J

        # Map of event_id -> list of auth event ids.
        auth_graph: Dict[str, List[str]] = {
            "a": ["e"],
            "b": ["e"],
            "c": ["g", "i"],
            "d": ["f"],
            "e": ["f"],
            "f": ["g"],
            "g": ["h", "i"],
            "h": ["k"],
            "i": ["j"],
            "k": [],
            "j": [],
        }

        depth_map = {
            "a": 7,
            "b": 7,
            "c": 4,
            "d": 6,
            "e": 6,
            "f": 5,
            "g": 3,
            "h": 2,
            "i": 2,
            "k": 1,
            "j": 1,
        }

        # We rudely fiddle with the appropriate tables directly, as that's much
        # easier than constructing events properly.

        def insert_event(txn: LoggingTransaction) -> None:
            # First insert the room and mark it as having a chain cover.
            self.store.db_pool.simple_insert_txn(
                txn,
                "rooms",
                {
                    "room_id": room_id,
                    "creator": "room_creator_user_id",
                    "is_public": True,
                    "room_version": "6",
                    "has_auth_chain_index": True,
                },
            )

            stream_ordering = 0

            for event_id in auth_graph:
                stream_ordering += 1
                depth = depth_map[event_id]

                self.store.db_pool.simple_insert_txn(
                    txn,
                    table="events",
                    values={
                        "event_id": event_id,
                        "room_id": room_id,
                        "depth": depth,
                        "topological_ordering": depth,
                        "type": "m.test",
                        "processed": True,
                        "outlier": False,
                        "stream_ordering": stream_ordering,
                    },
                )

            # Insert all events apart from 'B'
            # NOTE(review): FakeEvent is defined elsewhere in this file.
            self.persist_events._persist_event_auth_chain_txn(
                txn,
                [
                    cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
                    for event_id in auth_graph
                    if event_id != "b"
                ],
            )

            # Now we insert the event 'B' without a chain cover, by temporarily
            # pretending the room doesn't have a chain cover.

            self.store.db_pool.simple_update_txn(
                txn,
                table="rooms",
                keyvalues={"room_id": room_id},
                updatevalues={"has_auth_chain_index": False},
            )

            self.persist_events._persist_event_auth_chain_txn(
                txn,
                [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))],
            )

            # Restore the chain-cover flag so subsequent queries take the
            # chain-cover code path even though 'b' has no cover entry.
            self.store.db_pool.simple_update_txn(
                txn,
                table="rooms",
                keyvalues={"room_id": room_id},
                updatevalues={"has_auth_chain_index": True},
            )

        self.get_success(
            self.store.db_pool.runInteraction(
                "insert",
                insert_event,
            )
        )

        # Now actually test that various combinations give the right result:

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}])
        )
        self.assertSetEqual(difference, {"a", "b"})

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"c"}])
        )
        self.assertSetEqual(difference, {"a", "b", "c", "e", "f"})

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a", "c"}, {"b"}])
        )
        self.assertSetEqual(difference, {"a", "b", "c"})

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a", "c"}, {"b", "c"}])
        )
        self.assertSetEqual(difference, {"a", "b"})

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"d"}])
        )
        self.assertSetEqual(difference, {"a", "b", "d", "e"})

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"c"}, {"d"}])
        )
        self.assertSetEqual(difference, {"a", "b", "c", "d", "e", "f"})

        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}, {"e"}])
        )
        self.assertSetEqual(difference, {"a", "b"})

        # A single state set has no difference.
        difference = self.get_success(
            self.store.get_auth_chain_difference(room_id, [{"a"}])
        )
        self.assertSetEqual(difference, set())
    @parameterized.expand(
        [(room_version,) for room_version in KNOWN_ROOM_VERSIONS.values()]
    )
    def test_prune_inbound_federation_queue(self, room_version: RoomVersion) -> None:
        """Test that pruning of inbound federation queues work"""
        room_id = "some_room_id"

        def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]:
            """Account for differences in prev_events format across room versions"""
            if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
                # v1/v2 events list prev_events as (event_id, hashes) pairs.
                return prev_event_id, {}

            return prev_event_id

        # Insert a bunch of events that all reference the previous one.
        self.get_success(
            self.store.db_pool.simple_insert_many(
                table="federation_inbound_events_staging",
                keys=(
                    "origin",
                    "room_id",
                    "received_ts",
                    "event_id",
                    "event_json",
                    "internal_metadata",
                ),
                values=[
                    (
                        "some_origin",
                        room_id,
                        0,
                        f"$fake_event_id_{i + 1}",
                        json_encoder.encode(
                            {"prev_events": [prev_event_format(f"$fake_event_id_{i}")]}
                        ),
                        "{}",
                    )
                    for i in range(500)
                ],
                desc="test_prune_inbound_federation_queue",
            )
        )

        # Calling prune once should return True, i.e. a prune happened. The
        # second time it shouldn't.
        pruned = self.get_success(
            self.store.prune_staged_events_in_room(room_id, room_version)
        )
        self.assertTrue(pruned)

        pruned = self.get_success(
            self.store.prune_staged_events_in_room(room_id, room_version)
        )
        self.assertFalse(pruned)

        # Assert that we only have a single event left in the queue, and that it
        # is the last one.
        count = self.get_success(
            self.store.db_pool.simple_select_one_onecol(
                table="federation_inbound_events_staging",
                keyvalues={"room_id": room_id},
                retcol="COUNT(*)",
                desc="test_prune_inbound_federation_queue",
            )
        )
        self.assertEqual(count, 1)

        next_staged_event_info = self.get_success(
            self.store.get_next_staged_event_id_for_room(room_id)
        )
        assert next_staged_event_info
        _, event_id = next_staged_event_info
        self.assertEqual(event_id, "$fake_event_id_500")
    def _setup_room_for_backfill_tests(self) -> _BackfillSetupInfo:
        """
        Sets up a room with various events and backward extremities to test
        backfill functions against.

        Returns:
            _BackfillSetupInfo including the `room_id` to test against and
            `depth_map` of events in the room
        """
        room_id = "!backfill-room-test:some-host"

        # The silly graph we use to test grabbing backward extremities,
        # where the top is the oldest events.
        #    1 (oldest)
        #    |
        #    2 ⹁
        #    |  \
        #    |   [b1, b2, b3]
        #    |   |
        #    |   A
        #    |  /
        #    3 {
        #    |  \
        #    |   [b4, b5, b6]
        #    |   |
        #    |   B
        #    |  /
        #    4 ´
        #    |
        #    5 (newest)

        # Map of event_id -> list of prev event ids.
        event_graph: Dict[str, List[str]] = {
            "1": [],
            "2": ["1"],
            "3": ["2", "A"],
            "4": ["3", "B"],
            "5": ["4"],
            "A": ["b1", "b2", "b3"],
            "b1": ["2"],
            "b2": ["2"],
            "b3": ["2"],
            "B": ["b4", "b5", "b6"],
            "b4": ["3"],
            "b5": ["3"],
            "b6": ["3"],
        }

        depth_map: Dict[str, int] = {
            "1": 1,
            "2": 2,
            "b1": 3,
            "b2": 3,
            "b3": 3,
            "A": 4,
            "3": 5,
            "b4": 6,
            "b5": 6,
            "b6": 6,
            "B": 7,
            "4": 8,
            "5": 9,
        }

        # The events we have persisted on our server.
        # The rest are events in the room but not backfilled yet.
        our_server_events = {"5", "4", "B", "3", "A"}

        # Build full event dicts for every event in the graph (persisted or
        # not), so the depth_map consumer tests can reason about all of them.
        complete_event_dict_map: Dict[str, JsonDict] = {}
        stream_ordering = 0
        for event_id, prev_event_ids in event_graph.items():
            depth = depth_map[event_id]

            complete_event_dict_map[event_id] = {
                "event_id": event_id,
                "type": "test_regular_type",
                "room_id": room_id,
                "sender": "@sender",
                "prev_event_ids": prev_event_ids,
                "auth_event_ids": [],
                "origin_server_ts": stream_ordering,
                "depth": depth,
                "stream_ordering": stream_ordering,
                "content": {"body": "event" + event_id},
            }

            stream_ordering += 1

        def populate_db(txn: LoggingTransaction) -> None:
            # Insert the room to satisfy the foreign key constraint of
            # `event_failed_pull_attempts`
            self.store.db_pool.simple_insert_txn(
                txn,
                "rooms",
                {
                    "room_id": room_id,
                    "creator": "room_creator_user_id",
                    "is_public": True,
                    "room_version": "6",
                },
            )

            # Insert our server events
            for event_id in our_server_events:
                event_dict = complete_event_dict_map[event_id]

                self.store.db_pool.simple_insert_txn(
                    txn,
                    table="events",
                    values={
                        "event_id": event_dict.get("event_id"),
                        "type": event_dict.get("type"),
                        "room_id": event_dict.get("room_id"),
                        "depth": event_dict.get("depth"),
                        "topological_ordering": event_dict.get("depth"),
                        "stream_ordering": event_dict.get("stream_ordering"),
                        "processed": True,
                        "outlier": False,
                    },
                )

            # Insert the event edges
            for event_id in our_server_events:
                for prev_event_id in event_graph[event_id]:
                    self.store.db_pool.simple_insert_txn(
                        txn,
                        table="event_edges",
                        values={
                            "event_id": event_id,
                            "prev_event_id": prev_event_id,
                            "room_id": room_id,
                        },
                    )

            # Insert the backward extremities: any prev_event of a persisted
            # event that we have not persisted ourselves.
            prev_events_of_our_events = {
                prev_event_id
                for our_server_event in our_server_events
                for prev_event_id in complete_event_dict_map[our_server_event][
                    "prev_event_ids"
                ]
            }
            backward_extremities = prev_events_of_our_events - our_server_events
            for backward_extremity in backward_extremities:
                self.store.db_pool.simple_insert_txn(
                    txn,
                    table="event_backward_extremities",
                    values={
                        "event_id": backward_extremity,
                        "room_id": room_id,
                    },
                )

        self.get_success(
            self.store.db_pool.runInteraction(
                "_setup_room_for_backfill_tests_populate_db",
                populate_db,
            )
        )

        return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
  654. def test_get_backfill_points_in_room(self) -> None:
  655. """
  656. Test to make sure only backfill points that are older and come before
  657. the `current_depth` are returned.
  658. """
  659. setup_info = self._setup_room_for_backfill_tests()
  660. room_id = setup_info.room_id
  661. depth_map = setup_info.depth_map
  662. # Try at "B"
  663. backfill_points = self.get_success(
  664. self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100)
  665. )
  666. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  667. self.assertEqual(backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"])
  668. # Try at "A"
  669. backfill_points = self.get_success(
  670. self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
  671. )
  672. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  673. # Event "2" has a depth of 2 but is not included here because we only
  674. # know the approximate depth of 5 from our event "3".
  675. self.assertListEqual(backfill_event_ids, ["b3", "b2", "b1"])
  676. def test_get_backfill_points_in_room_excludes_events_we_have_attempted(
  677. self,
  678. ) -> None:
  679. """
  680. Test to make sure that events we have attempted to backfill (and within
  681. backoff timeout duration) do not show up as an event to backfill again.
  682. """
  683. setup_info = self._setup_room_for_backfill_tests()
  684. room_id = setup_info.room_id
  685. depth_map = setup_info.depth_map
  686. # Record some attempts to backfill these events which will make
  687. # `get_backfill_points_in_room` exclude them because we
  688. # haven't passed the backoff interval.
  689. self.get_success(
  690. self.store.record_event_failed_pull_attempt(room_id, "b5", "fake cause")
  691. )
  692. self.get_success(
  693. self.store.record_event_failed_pull_attempt(room_id, "b4", "fake cause")
  694. )
  695. self.get_success(
  696. self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
  697. )
  698. self.get_success(
  699. self.store.record_event_failed_pull_attempt(room_id, "b2", "fake cause")
  700. )
  701. # No time has passed since we attempted to backfill ^
  702. # Try at "B"
  703. backfill_points = self.get_success(
  704. self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100)
  705. )
  706. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  707. # Only the backfill points that we didn't record earlier exist here.
  708. self.assertEqual(backfill_event_ids, ["b6", "2", "b1"])
  709. def test_get_backfill_points_in_room_attempted_event_retry_after_backoff_duration(
  710. self,
  711. ) -> None:
  712. """
  713. Test to make sure after we fake attempt to backfill event "b3" many times,
  714. we can see retry and see the "b3" again after the backoff timeout duration
  715. has exceeded.
  716. """
  717. setup_info = self._setup_room_for_backfill_tests()
  718. room_id = setup_info.room_id
  719. depth_map = setup_info.depth_map
  720. # Record some attempts to backfill these events which will make
  721. # `get_backfill_points_in_room` exclude them because we
  722. # haven't passed the backoff interval.
  723. self.get_success(
  724. self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
  725. )
  726. self.get_success(
  727. self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
  728. )
  729. self.get_success(
  730. self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
  731. )
  732. self.get_success(
  733. self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
  734. )
  735. self.get_success(
  736. self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
  737. )
  738. # Now advance time by 2 hours and we should only be able to see "b3"
  739. # because we have waited long enough for the single attempt (2^1 hours)
  740. # but we still shouldn't see "b1" because we haven't waited long enough
  741. # for this many attempts. We didn't do anything to "b2" so it should be
  742. # visible regardless.
  743. self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
  744. # Try at "A" and make sure that "b1" is not in the list because we've
  745. # already attempted many times
  746. backfill_points = self.get_success(
  747. self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
  748. )
  749. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  750. self.assertEqual(backfill_event_ids, ["b3", "b2"])
  751. # Now advance time by 20 hours (above 2^4 because we made 4 attemps) and
  752. # see if we can now backfill it
  753. self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
  754. # Try at "A" again after we advanced enough time and we should see "b3" again
  755. backfill_points = self.get_success(
  756. self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
  757. )
  758. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  759. self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
  760. def test_get_backfill_points_in_room_works_after_many_failed_pull_attempts_that_could_naively_overflow(
  761. self,
  762. ) -> None:
  763. """
  764. A test that reproduces #13929 (Postgres only).
  765. Test to make sure we can still get backfill points after many failed pull
  766. attempts that cause us to backoff to the limit. Even if the backoff formula
  767. would tell us to wait for more seconds than can be expressed in a 32 bit
  768. signed int.
  769. """
  770. setup_info = self._setup_room_for_backfill_tests()
  771. room_id = setup_info.room_id
  772. depth_map = setup_info.depth_map
  773. # Pretend that we have tried and failed 10 times to backfill event b1.
  774. for _ in range(10):
  775. self.get_success(
  776. self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
  777. )
  778. # If the backoff periods grow without limit:
  779. # After the first failed attempt, we would have backed off for 1 << 1 = 2 hours.
  780. # After the second failed attempt we would have backed off for 1 << 2 = 4 hours,
  781. # so after the 10th failed attempt we should backoff for 1 << 10 == 1024 hours.
  782. # Wait 1100 hours just so we have a nice round number.
  783. self.reactor.advance(datetime.timedelta(hours=1100).total_seconds())
  784. # 1024 hours in milliseconds is 1024 * 3600000, which exceeds the largest 32 bit
  785. # signed integer. The bug we're reproducing is that this overflow causes an
  786. # error in postgres preventing us from fetching a set of backwards extremities
  787. # to retry fetching.
  788. backfill_points = self.get_success(
  789. self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
  790. )
  791. # We should aim to fetch all backoff points: b1's latest backoff period has
  792. # expired, and we haven't tried the rest.
  793. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  794. self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
  795. def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
  796. """
  797. Sets up a room with various insertion event backward extremities to test
  798. backfill functions against.
  799. Returns:
  800. _BackfillSetupInfo including the `room_id` to test against and
  801. `depth_map` of events in the room
  802. """
  803. room_id = "!backfill-room-test:some-host"
  804. depth_map: Dict[str, int] = {
  805. "1": 1,
  806. "2": 2,
  807. "insertion_eventA": 3,
  808. "3": 4,
  809. "insertion_eventB": 5,
  810. "4": 6,
  811. "5": 7,
  812. }
  813. def populate_db(txn: LoggingTransaction) -> None:
  814. # Insert the room to satisfy the foreign key constraint of
  815. # `event_failed_pull_attempts`
  816. self.store.db_pool.simple_insert_txn(
  817. txn,
  818. "rooms",
  819. {
  820. "room_id": room_id,
  821. "creator": "room_creator_user_id",
  822. "is_public": True,
  823. "room_version": "6",
  824. },
  825. )
  826. # Insert our server events
  827. stream_ordering = 0
  828. for event_id, depth in depth_map.items():
  829. self.store.db_pool.simple_insert_txn(
  830. txn,
  831. table="events",
  832. values={
  833. "event_id": event_id,
  834. "type": EventTypes.MSC2716_INSERTION
  835. if event_id.startswith("insertion_event")
  836. else "test_regular_type",
  837. "room_id": room_id,
  838. "depth": depth,
  839. "topological_ordering": depth,
  840. "stream_ordering": stream_ordering,
  841. "processed": True,
  842. "outlier": False,
  843. },
  844. )
  845. if event_id.startswith("insertion_event"):
  846. self.store.db_pool.simple_insert_txn(
  847. txn,
  848. table="insertion_event_extremities",
  849. values={
  850. "event_id": event_id,
  851. "room_id": room_id,
  852. },
  853. )
  854. stream_ordering += 1
  855. self.get_success(
  856. self.store.db_pool.runInteraction(
  857. "_setup_room_for_insertion_backfill_tests_populate_db",
  858. populate_db,
  859. )
  860. )
  861. return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
  862. def test_get_insertion_event_backward_extremities_in_room(self) -> None:
  863. """
  864. Test to make sure only insertion event backward extremities that are
  865. older and come before the `current_depth` are returned.
  866. """
  867. setup_info = self._setup_room_for_insertion_backfill_tests()
  868. room_id = setup_info.room_id
  869. depth_map = setup_info.depth_map
  870. # Try at "insertion_eventB"
  871. backfill_points = self.get_success(
  872. self.store.get_insertion_event_backward_extremities_in_room(
  873. room_id, depth_map["insertion_eventB"], limit=100
  874. )
  875. )
  876. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  877. self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])
  878. # Try at "insertion_eventA"
  879. backfill_points = self.get_success(
  880. self.store.get_insertion_event_backward_extremities_in_room(
  881. room_id, depth_map["insertion_eventA"], limit=100
  882. )
  883. )
  884. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  885. # Event "2" has a depth of 2 but is not included here because we only
  886. # know the approximate depth of 5 from our event "3".
  887. self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
  888. def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
  889. self,
  890. ) -> None:
  891. """
  892. Test to make sure that insertion events we have attempted to backfill
  893. (and within backoff timeout duration) do not show up as an event to
  894. backfill again.
  895. """
  896. setup_info = self._setup_room_for_insertion_backfill_tests()
  897. room_id = setup_info.room_id
  898. depth_map = setup_info.depth_map
  899. # Record some attempts to backfill these events which will make
  900. # `get_insertion_event_backward_extremities_in_room` exclude them
  901. # because we haven't passed the backoff interval.
  902. self.get_success(
  903. self.store.record_event_failed_pull_attempt(
  904. room_id, "insertion_eventA", "fake cause"
  905. )
  906. )
  907. # No time has passed since we attempted to backfill ^
  908. # Try at "insertion_eventB"
  909. backfill_points = self.get_success(
  910. self.store.get_insertion_event_backward_extremities_in_room(
  911. room_id, depth_map["insertion_eventB"], limit=100
  912. )
  913. )
  914. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  915. # Only the backfill points that we didn't record earlier exist here.
  916. self.assertEqual(backfill_event_ids, ["insertion_eventB"])
  917. def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
  918. self,
  919. ) -> None:
  920. """
  921. Test to make sure after we fake attempt to backfill event
  922. "insertion_eventA" many times, we can see retry and see the
  923. "insertion_eventA" again after the backoff timeout duration has
  924. exceeded.
  925. """
  926. setup_info = self._setup_room_for_insertion_backfill_tests()
  927. room_id = setup_info.room_id
  928. depth_map = setup_info.depth_map
  929. # Record some attempts to backfill these events which will make
  930. # `get_backfill_points_in_room` exclude them because we
  931. # haven't passed the backoff interval.
  932. self.get_success(
  933. self.store.record_event_failed_pull_attempt(
  934. room_id, "insertion_eventB", "fake cause"
  935. )
  936. )
  937. self.get_success(
  938. self.store.record_event_failed_pull_attempt(
  939. room_id, "insertion_eventA", "fake cause"
  940. )
  941. )
  942. self.get_success(
  943. self.store.record_event_failed_pull_attempt(
  944. room_id, "insertion_eventA", "fake cause"
  945. )
  946. )
  947. self.get_success(
  948. self.store.record_event_failed_pull_attempt(
  949. room_id, "insertion_eventA", "fake cause"
  950. )
  951. )
  952. self.get_success(
  953. self.store.record_event_failed_pull_attempt(
  954. room_id, "insertion_eventA", "fake cause"
  955. )
  956. )
  957. # Now advance time by 2 hours and we should only be able to see
  958. # "insertion_eventB" because we have waited long enough for the single
  959. # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
  960. # because we haven't waited long enough for this many attempts.
  961. self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
  962. # Try at "insertion_eventA" and make sure that "insertion_eventA" is not
  963. # in the list because we've already attempted many times
  964. backfill_points = self.get_success(
  965. self.store.get_insertion_event_backward_extremities_in_room(
  966. room_id, depth_map["insertion_eventA"], limit=100
  967. )
  968. )
  969. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  970. self.assertEqual(backfill_event_ids, [])
  971. # Now advance time by 20 hours (above 2^4 because we made 4 attemps) and
  972. # see if we can now backfill it
  973. self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
  974. # Try at "insertion_eventA" again after we advanced enough time and we
  975. # should see "insertion_eventA" again
  976. backfill_points = self.get_success(
  977. self.store.get_insertion_event_backward_extremities_in_room(
  978. room_id, depth_map["insertion_eventA"], limit=100
  979. )
  980. )
  981. backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
  982. self.assertEqual(backfill_event_ids, ["insertion_eventA"])
  983. def test_get_event_ids_to_not_pull_from_backoff(self) -> None:
  984. """
  985. Test to make sure only event IDs we should backoff from are returned.
  986. """
  987. # Create the room
  988. user_id = self.register_user("alice", "test")
  989. tok = self.login("alice", "test")
  990. room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
  991. failure_time = self.clock.time_msec()
  992. self.get_success(
  993. self.store.record_event_failed_pull_attempt(
  994. room_id, "$failed_event_id", "fake cause"
  995. )
  996. )
  997. event_ids_with_backoff = self.get_success(
  998. self.store.get_event_ids_to_not_pull_from_backoff(
  999. room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
  1000. )
  1001. )
  1002. self.assertEqual(
  1003. event_ids_with_backoff,
  1004. # We expect a 2^1 hour backoff after a single failed attempt.
  1005. {"$failed_event_id": failure_time + 2 * 60 * 60 * 1000},
  1006. )
  1007. def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration(
  1008. self,
  1009. ) -> None:
  1010. """
  1011. Test to make sure no event IDs are returned after the backoff duration has
  1012. elapsed.
  1013. """
  1014. # Create the room
  1015. user_id = self.register_user("alice", "test")
  1016. tok = self.login("alice", "test")
  1017. room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
  1018. self.get_success(
  1019. self.store.record_event_failed_pull_attempt(
  1020. room_id, "$failed_event_id", "fake cause"
  1021. )
  1022. )
  1023. # Now advance time by 2 hours so we wait long enough for the single failed
  1024. # attempt (2^1 hours).
  1025. self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
  1026. event_ids_with_backoff = self.get_success(
  1027. self.store.get_event_ids_to_not_pull_from_backoff(
  1028. room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
  1029. )
  1030. )
  1031. # Since this function only returns events we should backoff from, time has
  1032. # elapsed past the backoff range so there is no events to backoff from.
  1033. self.assertEqual(event_ids_with_backoff, {})
@attr.s(auto_attribs=True)
class FakeEvent:
    # Lightweight stand-in for an event object in these tests: carries just
    # an ID, a room, and a list of auth events.
    event_id: str
    room_id: str
    auth_events: List[str]

    # Class-level attributes (unannotated, so NOT attrs fields): every
    # FakeEvent reports the same type/state_key, and all instances share
    # this single _EventInternalMetadata instance.
    # NOTE(review): the shared mutable metadata object is presumably fine
    # for these tests, but would be surprising if any test mutated it —
    # confirm before relying on per-instance metadata.
    type = "foo"
    state_key = "foo"
    internal_metadata = _EventInternalMetadata({})

    def auth_event_ids(self) -> List[str]:
        # Returns the auth event IDs this fake event was constructed with.
        return self.auth_events

    def is_state(self) -> bool:
        # Every fake event presents itself as a state event.
        return True