
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from six import iteritems
from six.moves import range

from ._base import BaseHandler

from synapse.api.constants import (
    EventTypes, JoinRules,
)
from synapse.util.async import concurrently_execute
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.caches.response_cache import ResponseCache
from synapse.types import ThirdPartyInstanceID

from collections import namedtuple
from unpaddedbase64 import encode_base64, decode_base64

import logging
import msgpack

logger = logging.getLogger(__name__)

REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000

# This is used to indicate we should only return rooms published to the main list.
EMTPY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)


class RoomListHandler(BaseHandler):
    def __init__(self, hs):
        super(RoomListHandler, self).__init__(hs)
        self.response_cache = ResponseCache(hs, "room_list")
        self.remote_response_cache = ResponseCache(hs, "remote_room_list",
                                                   timeout_ms=30 * 1000)

    def get_local_public_room_list(self, limit=None, since_token=None,
                                   search_filter=None,
                                   network_tuple=EMTPY_THIRD_PARTY_ID,):
        """Generate a local public room list.

        There are multiple different lists: the main one plus one per third
        party network. A client can ask for a specific list or to return all.

        Args:
            limit (int)
            since_token (str)
            search_filter (dict)
            network_tuple (ThirdPartyInstanceID): Which public list to use.
                This can be (None, None) to indicate the main list, or a
                particular appservice and network id to use an appservice
                specific one. Setting to None returns all public rooms across
                all lists.
        """
        logger.info(
            "Getting public room list: limit=%r, since=%r, search=%r, network=%r",
            limit, since_token, bool(search_filter), network_tuple,
        )
        if search_filter:
            # We explicitly don't bother caching searches or requests for
            # appservice specific lists.
            logger.info("Bypassing cache as search request.")
            return self._get_public_room_list(
                limit, since_token, search_filter, network_tuple=network_tuple,
            )

        key = (limit, since_token, network_tuple)
        return self.response_cache.wrap(
            key,
            self._get_public_room_list,
            limit, since_token, network_tuple=network_tuple,
        )
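
    # NOTE (editorial sketch, not part of the original module): a caller such as
    # a REST servlet would typically drive the method above roughly like this,
    # assuming `handler` is a RoomListHandler and we are inside inlineCallbacks
    # code:
    #
    #     result = yield handler.get_local_public_room_list(limit=10)
    #     room_ids = [entry["room_id"] for entry in result["chunk"]]
    #
    # Identical (limit, since_token, network_tuple) requests share a single
    # cached Deferred via self.response_cache; search requests bypass that
    # cache entirely.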

    @defer.inlineCallbacks
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None,
                              network_tuple=EMTPY_THIRD_PARTY_ID,):
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id, current_public_id,
                network_tuple=network_tuple,
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id, network_tuple=network_tuple,
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            # Most of the rooms won't have changed between the since token and
            # now (especially if the since token is "now"). So, we can ask what
            # the current users are in a room (that will hit a cache) and then
            # check if the room has changed since the since token. (We have to
            # do it in that order to avoid races).
            # If things have changed then fall back to getting the current state
            # at the since token.
            joined_users = yield self.store.get_users_in_room(room_id)
            if self.store.has_room_changed_since(room_id, stream_token):
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token
                )

                if not latest_event_ids:
                    return

                joined_users = yield self.state_handler.get_current_user_in_room(
                    room_id, latest_event_ids,
                )

            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        logger.info("Getting ordering for %i rooms since %s",
                    len(room_ids), stream_token)
        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.

        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        total_room_count = len(rooms_to_scan)

        if since_token:
            # Filter out rooms we've already returned previously.
            # `since_token.current_limit` is the index of the last room we
            # sent down, so we exclude it and everything before/after it.
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        logger.info("After sorting and filtering, %i rooms remain",
                    len(rooms_to_scan))

        # _append_room_entry_to_chunk will append to chunk but will stop if
        # len(chunk) > limit
        #
        # Normally we will generate enough results on the first iteration here,
        # but if there is a search filter, _append_room_entry_to_chunk may
        # filter some results out, in which case we loop again.
        #
        # We don't want to scan over the entire range either as that
        # would potentially waste a lot of work.
        #
        # XXX if there is no limit, we may end up DoSing the server with
        # calls to get_current_state_ids for every single room on the
        # server. Surely we should cap this somehow?
        #
        if limit:
            step = limit + 1
        else:
            # step cannot be zero
            step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1

        chunk = []
        for i in range(0, len(rooms_to_scan), step):
            batch = rooms_to_scan[i:i + step]
            logger.info("Processing %i rooms for result", len(batch))
            yield concurrently_execute(
                lambda r: self._append_room_entry_to_chunk(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                batch, 5,
            )
            logger.info("Now %i rooms in result", len(chunk))
            # Guard on `limit`: it may be None, and an unlimited request only
            # has a single batch anyway, so there is nothing to break out of.
            if limit and len(chunk) >= limit + 1:
                break

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards).
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return.
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):
            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
            "total_room_count_estimate": total_room_count,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)
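
    # NOTE (editorial sketch, not part of the original module): a worked example
    # of the pagination maths above. Suppose sorted_rooms has 100 entries, none
    # of them filtered out, and a client asks for limit=10 with no since_token:
    # we try to pull out limit + 1 = 11 entries, trim the chunk back to the
    # first 10, and new_limit becomes the index of the last room returned,
    # i.e. 9. The next_batch token therefore carries current_limit=9, and the
    # follow-up forward request slices rooms_to_scan[9 + 1:], resuming at
    # index 10, exactly where this page stopped.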

    @defer.inlineCallbacks
    def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk, limit,
                                    search_filter):
        """Generate the entry for a room in the public room list and append it
        to the `chunk` if it matches the search filter.
        """
        if limit and len(chunk) > limit + 1:
            # We've already got enough, so let's just drop it.
            return

        result = yield self.generate_room_entry(room_id, num_joined_users)

        if result and _matches_room_entry(result, search_filter):
            chunk.append(result)

    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def generate_room_entry(self, room_id, num_joined_users, cache_context,
                            with_alias=True, allow_private=False):
        """Returns the entry for a room.
        """
        result = {
            "room_id": room_id,
            "num_joined_members": num_joined_users,
        }

        current_state_ids = yield self.store.get_current_state_ids(
            room_id, on_invalidate=cache_context.invalidate,
        )

        event_map = yield self.store.get_events([
            event_id for key, event_id in iteritems(current_state_ids)
            if key[0] in (
                EventTypes.JoinRules,
                EventTypes.Name,
                EventTypes.Topic,
                EventTypes.CanonicalAlias,
                EventTypes.RoomHistoryVisibility,
                EventTypes.GuestAccess,
                "m.room.avatar",
            )
        ])

        current_state = {
            (ev.type, ev.state_key): ev
            for ev in event_map.values()
        }

        # Double check that this is actually a public room.
        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        if with_alias:
            aliases = yield self.store.get_aliases_for_room(
                room_id, on_invalidate=cache_context.invalidate
            )
            if aliases:
                result["aliases"] = aliases

        # current_state is a plain dict, so no yield is needed here.
        name_event = current_state.get((EventTypes.Name, ""))
        if name_event:
            name = name_event.content.get("name", None)
            if name:
                result["name"] = name

        topic_event = current_state.get((EventTypes.Topic, ""))
        if topic_event:
            topic = topic_event.content.get("topic", None)
            if topic:
                result["topic"] = topic

        canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_event:
            canonical_alias = canonical_event.content.get("alias", None)
            if canonical_alias:
                result["canonical_alias"] = canonical_alias

        visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
        visibility = None
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility", None)
        result["world_readable"] = visibility == "world_readable"

        guest_event = current_state.get((EventTypes.GuestAccess, ""))
        guest = None
        if guest_event:
            guest = guest_event.content.get("guest_access", None)
        result["guest_can_join"] = guest == "can_join"

        avatar_event = current_state.get(("m.room.avatar", ""))
        if avatar_event:
            avatar_url = avatar_event.content.get("url", None)
            if avatar_url:
                result["avatar_url"] = avatar_url

        defer.returnValue(result)
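
    # NOTE (editorial sketch, not part of the original module): a fully populated
    # entry from generate_room_entry looks roughly like this (the values below
    # are made up for illustration):
    #
    #     {
    #         "room_id": "!abcdef:example.com",
    #         "num_joined_members": 42,
    #         "aliases": ["#room:example.com"],
    #         "name": "Example Room",
    #         "topic": "An example topic",
    #         "canonical_alias": "#room:example.com",
    #         "world_readable": False,
    #         "guest_can_join": True,
    #         "avatar_url": "mxc://example.com/someid",
    #     }
    #
    # Only "room_id", "num_joined_members", "world_readable" and
    # "guest_can_join" are always present; the rest depend on the room's
    # current state events.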

    @defer.inlineCallbacks
    def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
                                    search_filter=None, include_all_networks=False,
                                    third_party_instance_id=None,):
        if search_filter:
            # We currently don't support searching across federation, so we have
            # to do it manually without pagination
            limit = None
            since_token = None

        res = yield self._get_remote_list_cached(
            server_name, limit=limit, since_token=since_token,
            include_all_networks=include_all_networks,
            third_party_instance_id=third_party_instance_id,
        )

        if search_filter:
            res = {"chunk": [
                entry
                for entry in list(res.get("chunk", []))
                if _matches_room_entry(entry, search_filter)
            ]}

        defer.returnValue(res)

    def _get_remote_list_cached(self, server_name, limit=None, since_token=None,
                                search_filter=None, include_all_networks=False,
                                third_party_instance_id=None,):
        repl_layer = self.hs.get_federation_client()
        if search_filter:
            # We can't cache when asking for search
            return repl_layer.get_public_rooms(
                server_name, limit=limit, since_token=since_token,
                search_filter=search_filter, include_all_networks=include_all_networks,
                third_party_instance_id=third_party_instance_id,
            )

        key = (
            server_name, limit, since_token, include_all_networks,
            third_party_instance_id,
        )
        return self.remote_response_cache.wrap(
            key,
            repl_layer.get_public_rooms,
            server_name, limit=limit, since_token=since_token,
            search_filter=search_filter,
            include_all_networks=include_all_networks,
            third_party_instance_id=third_party_instance_id,
        )


class RoomListNextBatch(namedtuple("RoomListNextBatch", (
    "stream_ordering",  # stream_ordering of the first public room list
    "public_room_stream_id",  # public room stream id for first public room list
    "current_limit",  # The number of previous rooms returned
    "direction_is_forward",  # Bool if this is a next_batch, false if prev_batch
))):

    KEY_DICT = {
        "stream_ordering": "s",
        "public_room_stream_id": "p",
        "current_limit": "n",
        "direction_is_forward": "d",
    }

    REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()}

    @classmethod
    def from_token(cls, token):
        return RoomListNextBatch(**{
            cls.REVERSE_KEY_DICT[key]: val
            for key, val in msgpack.loads(decode_base64(token)).items()
        })

    def to_token(self):
        return encode_base64(msgpack.dumps({
            self.KEY_DICT[key]: val
            for key, val in self._asdict().items()
        }))

    def copy_and_replace(self, **kwds):
        return self._replace(
            **kwds
        )
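
# NOTE (editorial sketch, not part of the original module): a batch token is just
# the KEY_DICT-abbreviated dict, msgpack-serialised and unpadded-base64 encoded,
# so it round-trips cleanly:
#
#     batch = RoomListNextBatch(
#         stream_ordering=100, public_room_stream_id=5,
#         current_limit=9, direction_is_forward=True,
#     )
#     token = batch.to_token()
#     assert RoomListNextBatch.from_token(token) == batch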


def _matches_room_entry(room_entry, search_filter):
    if search_filter and search_filter.get("generic_search_term", None):
        generic_search_term = search_filter["generic_search_term"].upper()
        if generic_search_term in room_entry.get("name", "").upper():
            return True
        elif generic_search_term in room_entry.get("topic", "").upper():
            return True
        elif generic_search_term in room_entry.get("canonical_alias", "").upper():
            return True
    else:
        # No search term was supplied, so every entry matches.
        return True

    return False
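
# NOTE (editorial sketch, not part of the original module): matching is a
# case-insensitive substring test over name, topic and canonical alias, and an
# absent or empty search term matches everything. For example, given an entry
# dict shaped like the ones generate_room_entry builds:
#
#     entry = {"room_id": "!abc:example.com", "name": "Synapse Admins"}
#     _matches_room_entry(entry, {"generic_search_term": "synapse"})  # True
#     _matches_room_entry(entry, {"generic_search_term": "python"})   # False
#     _matches_room_entry(entry, {})                                  # True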