1
0

test_sync.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298
  1. # Copyright 2018 New Vector Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
import itertools
from typing import Optional
from unittest.mock import AsyncMock, Mock, patch

from twisted.test.proto_helpers import MemoryReactor

from synapse.api.constants import EventTypes, JoinRules
from synapse.api.errors import Codes, ResourceLimitError
from synapse.api.filtering import Filtering
from synapse.api.room_versions import RoomVersions
from synapse.handlers.sync import SyncConfig, SyncResult
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.util import Clock

import tests.unittest
import tests.utils
  29. class SyncTestCase(tests.unittest.HomeserverTestCase):
  30. """Tests Sync Handler."""
  31. servlets = [
  32. admin.register_servlets,
  33. knock.register_servlets,
  34. login.register_servlets,
  35. room.register_servlets,
  36. ]
  37. def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
  38. self.sync_handler = self.hs.get_sync_handler()
  39. self.store = self.hs.get_datastores().main
  40. # AuthBlocking reads from the hs' config on initialization. We need to
  41. # modify its config instead of the hs'
  42. self.auth_blocking = self.hs.get_auth_blocking()
  43. def test_wait_for_sync_for_user_auth_blocking(self) -> None:
  44. user_id1 = "@user1:test"
  45. user_id2 = "@user2:test"
  46. sync_config = generate_sync_config(user_id1)
  47. requester = create_requester(user_id1)
  48. self.reactor.advance(100) # So we get not 0 time
  49. self.auth_blocking._limit_usage_by_mau = True
  50. self.auth_blocking._max_mau_value = 1
  51. # Check that the happy case does not throw errors
  52. self.get_success(self.store.upsert_monthly_active_user(user_id1))
  53. self.get_success(
  54. self.sync_handler.wait_for_sync_for_user(requester, sync_config)
  55. )
  56. # Test that global lock works
  57. self.auth_blocking._hs_disabled = True
  58. e = self.get_failure(
  59. self.sync_handler.wait_for_sync_for_user(requester, sync_config),
  60. ResourceLimitError,
  61. )
  62. self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
  63. self.auth_blocking._hs_disabled = False
  64. sync_config = generate_sync_config(user_id2)
  65. requester = create_requester(user_id2)
  66. e = self.get_failure(
  67. self.sync_handler.wait_for_sync_for_user(requester, sync_config),
  68. ResourceLimitError,
  69. )
  70. self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
  71. def test_unknown_room_version(self) -> None:
  72. """
  73. A room with an unknown room version should not break sync (and should be excluded).
  74. """
  75. inviter = self.register_user("creator", "pass", admin=True)
  76. inviter_tok = self.login("@creator:test", "pass")
  77. user = self.register_user("user", "pass")
  78. tok = self.login("user", "pass")
  79. # Do an initial sync on a different device.
  80. requester = create_requester(user)
  81. initial_result = self.get_success(
  82. self.sync_handler.wait_for_sync_for_user(
  83. requester, sync_config=generate_sync_config(user, device_id="dev")
  84. )
  85. )
  86. # Create a room as the user.
  87. joined_room = self.helper.create_room_as(user, tok=tok)
  88. # Invite the user to the room as someone else.
  89. invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)
  90. self.helper.invite(invite_room, targ=user, tok=inviter_tok)
  91. knock_room = self.helper.create_room_as(
  92. inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok
  93. )
  94. self.helper.send_state(
  95. knock_room,
  96. EventTypes.JoinRules,
  97. {"join_rule": JoinRules.KNOCK},
  98. tok=inviter_tok,
  99. )
  100. channel = self.make_request(
  101. "POST",
  102. "/_matrix/client/r0/knock/%s" % (knock_room,),
  103. b"{}",
  104. tok,
  105. )
  106. self.assertEqual(200, channel.code, channel.result)
  107. # The rooms should appear in the sync response.
  108. result = self.get_success(
  109. self.sync_handler.wait_for_sync_for_user(
  110. requester, sync_config=generate_sync_config(user)
  111. )
  112. )
  113. self.assertIn(joined_room, [r.room_id for r in result.joined])
  114. self.assertIn(invite_room, [r.room_id for r in result.invited])
  115. self.assertIn(knock_room, [r.room_id for r in result.knocked])
  116. # Test a incremental sync (by providing a since_token).
  117. result = self.get_success(
  118. self.sync_handler.wait_for_sync_for_user(
  119. requester,
  120. sync_config=generate_sync_config(user, device_id="dev"),
  121. since_token=initial_result.next_batch,
  122. )
  123. )
  124. self.assertIn(joined_room, [r.room_id for r in result.joined])
  125. self.assertIn(invite_room, [r.room_id for r in result.invited])
  126. self.assertIn(knock_room, [r.room_id for r in result.knocked])
  127. # Poke the database and update the room version to an unknown one.
  128. for room_id in (joined_room, invite_room, knock_room):
  129. self.get_success(
  130. self.hs.get_datastores().main.db_pool.simple_update(
  131. "rooms",
  132. keyvalues={"room_id": room_id},
  133. updatevalues={"room_version": "unknown-room-version"},
  134. desc="updated-room-version",
  135. )
  136. )
  137. # Blow away caches (supported room versions can only change due to a restart).
  138. self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
  139. self.store.get_rooms_for_user.invalidate_all()
  140. self.store._get_event_cache.clear()
  141. self.store._event_ref.clear()
  142. # The rooms should be excluded from the sync response.
  143. # Get a new request key.
  144. result = self.get_success(
  145. self.sync_handler.wait_for_sync_for_user(
  146. requester, sync_config=generate_sync_config(user)
  147. )
  148. )
  149. self.assertNotIn(joined_room, [r.room_id for r in result.joined])
  150. self.assertNotIn(invite_room, [r.room_id for r in result.invited])
  151. self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
  152. # The rooms should also not be in an incremental sync.
  153. result = self.get_success(
  154. self.sync_handler.wait_for_sync_for_user(
  155. requester,
  156. sync_config=generate_sync_config(user, device_id="dev"),
  157. since_token=initial_result.next_batch,
  158. )
  159. )
  160. self.assertNotIn(joined_room, [r.room_id for r in result.joined])
  161. self.assertNotIn(invite_room, [r.room_id for r in result.invited])
  162. self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
  163. def test_ban_wins_race_with_join(self) -> None:
  164. """Rooms shouldn't appear under "joined" if a join loses a race to a ban.
  165. A complicated edge case. Imagine the following scenario:
  166. * you attempt to join a room
  167. * racing with that is a ban which comes in over federation, which ends up with
  168. an earlier stream_ordering than the join.
  169. * you get a sync response with a sync token which is _after_ the ban, but before
  170. the join
  171. * now your join lands; it is a valid event because its `prev_event`s predate the
  172. ban, but will not make it into current_state_events (because bans win over
  173. joins in state res, essentially).
  174. * When we do a sync from the incremental sync, the only event in the timeline
  175. is your join ... and yet you aren't joined.
  176. The ban coming in over federation isn't crucial for this behaviour; the key
  177. requirements are:
  178. 1. the homeserver generates a join event with prev_events that precede the ban
  179. (so that it passes the "are you banned" test)
  180. 2. the join event has a stream_ordering after that of the ban.
  181. We use monkeypatching to artificially trigger condition (1).
  182. """
  183. # A local user Alice creates a room.
  184. owner = self.register_user("alice", "password")
  185. owner_tok = self.login(owner, "password")
  186. room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok)
  187. # Do a sync as Alice to get the latest event in the room.
  188. alice_sync_result: SyncResult = self.get_success(
  189. self.sync_handler.wait_for_sync_for_user(
  190. create_requester(owner), generate_sync_config(owner)
  191. )
  192. )
  193. self.assertEqual(len(alice_sync_result.joined), 1)
  194. self.assertEqual(alice_sync_result.joined[0].room_id, room_id)
  195. last_room_creation_event_id = (
  196. alice_sync_result.joined[0].timeline.events[-1].event_id
  197. )
  198. # Eve, a ne'er-do-well, registers.
  199. eve = self.register_user("eve", "password")
  200. eve_token = self.login(eve, "password")
  201. # Alice preemptively bans Eve.
  202. self.helper.ban(room_id, owner, eve, tok=owner_tok)
  203. # Eve syncs.
  204. eve_requester = create_requester(eve)
  205. eve_sync_config = generate_sync_config(eve)
  206. eve_sync_after_ban: SyncResult = self.get_success(
  207. self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)
  208. )
  209. # Sanity check this sync result. We shouldn't be joined to the room.
  210. self.assertEqual(eve_sync_after_ban.joined, [])
  211. # Eve tries to join the room. We monkey patch the internal logic which selects
  212. # the prev_events used when creating the join event, such that the ban does not
  213. # precede the join.
  214. mocked_get_prev_events = patch.object(
  215. self.hs.get_datastores().main,
  216. "get_prev_events_for_room",
  217. new_callable=AsyncMock,
  218. return_value=[last_room_creation_event_id],
  219. )
  220. with mocked_get_prev_events:
  221. self.helper.join(room_id, eve, tok=eve_token)
  222. # Eve makes a second, incremental sync.
  223. eve_incremental_sync_after_join: SyncResult = self.get_success(
  224. self.sync_handler.wait_for_sync_for_user(
  225. eve_requester,
  226. eve_sync_config,
  227. since_token=eve_sync_after_ban.next_batch,
  228. )
  229. )
  230. # Eve should not see herself as joined to the room.
  231. self.assertEqual(eve_incremental_sync_after_join.joined, [])
  232. # If we did a third initial sync, we should _still_ see eve is not joined to the room.
  233. eve_initial_sync_after_join: SyncResult = self.get_success(
  234. self.sync_handler.wait_for_sync_for_user(
  235. eve_requester,
  236. eve_sync_config,
  237. since_token=None,
  238. )
  239. )
  240. self.assertEqual(eve_initial_sync_after_join.joined, [])
  241. _request_key = 0
  242. def generate_sync_config(
  243. user_id: str, device_id: Optional[str] = "device_id"
  244. ) -> SyncConfig:
  245. """Generate a sync config (with a unique request key)."""
  246. global _request_key
  247. _request_key += 1
  248. return SyncConfig(
  249. user=UserID.from_string(user_id),
  250. filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION,
  251. is_guest=False,
  252. request_key=("request_key", _request_key),
  253. device_id=device_id,
  254. )