relations.py
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
    TYPE_CHECKING,
    Collection,
    Dict,
    FrozenSet,
    Iterable,
    List,
    Optional,
    Tuple,
)

import attr

from synapse.api.constants import RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
from synapse.storage.databases.main.relations import _RelatedEvent
from synapse.types import JsonDict, Requester, StreamToken, UserID
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _ThreadAggregation:
    # The latest event in the thread.
    latest_event: EventBase
    # The total number of events in the thread.
    count: int
    # True if the current user has sent an event to the thread.
    current_user_participated: bool


@attr.s(slots=True, auto_attribs=True)
class BundledAggregations:
    """
    The bundled aggregations for an event.

    Some values require additional processing during serialization.
    """

    annotations: Optional[JsonDict] = None
    references: Optional[JsonDict] = None
    replace: Optional[EventBase] = None
    thread: Optional[_ThreadAggregation] = None

    def __bool__(self) -> bool:
        return bool(self.annotations or self.references or self.replace or self.thread)
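
# Illustrative sketch only: once the event serializer processes a populated
# BundledAggregations, the data typically ends up under the parent event's
# `unsigned` section, keyed by relation type, roughly like:
#
#     "unsigned": {
#         "m.relations": {
#             "m.annotation": {"chunk": [{"type": "m.reaction", "key": "👍", "count": 3}]},
#             "m.reference": {"chunk": [{"event_id": "$other_event"}]},
#             "m.replace": {...},  # the most recent edit event
#             "m.thread": {...},   # latest event, count, participation
#         }
#     }
#
# The exact wire format is decided by the serializer, not by this class.
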

class RelationsHandler:
    def __init__(self, hs: "HomeServer"):
        self._main_store = hs.get_datastores().main
        self._storage = hs.get_storage()
        self._auth = hs.get_auth()
        self._clock = hs.get_clock()
        self._event_handler = hs.get_event_handler()
        self._event_serializer = hs.get_event_client_serializer()

    async def get_relations(
        self,
        requester: Requester,
        event_id: str,
        room_id: str,
        relation_type: Optional[str] = None,
        event_type: Optional[str] = None,
        aggregation_key: Optional[str] = None,
        limit: int = 5,
        direction: str = "b",
        from_token: Optional[StreamToken] = None,
        to_token: Optional[StreamToken] = None,
    ) -> JsonDict:
        """Get related events of an event, ordered by topological ordering.

        TODO Accept a PaginationConfig instead of individual pagination parameters.

        Args:
            requester: The user requesting the relations.
            event_id: Fetch events that relate to this event ID.
            room_id: The room the event belongs to.
            relation_type: Only fetch events with this relation type, if given.
            event_type: Only fetch events with this event type, if given.
            aggregation_key: Only fetch events with this aggregation key, if given.
            limit: Only fetch the most recent `limit` events.
            direction: Whether to fetch the most recent first (`"b"`) or the
                oldest first (`"f"`).
            from_token: Fetch rows from the given token, or from the start if None.
            to_token: Fetch rows up to the given token, or up to the end if None.

        Returns:
            The pagination chunk.
        """
        user_id = requester.user.to_string()

        # TODO Properly handle a user leaving a room.
        (_, member_event_id) = await self._auth.check_user_in_room_or_world_readable(
            room_id, user_id, allow_departed_users=True
        )

        # This gets the original event and checks that a) the event exists and
        # b) the user is allowed to view it.
        event = await self._event_handler.get_event(requester.user, room_id, event_id)
        if event is None:
            raise SynapseError(404, "Unknown parent event.")

        # Note that ignored users are not passed into get_relations_for_event
        # below. Ignored users are handled in filter_events_for_client (and by
        # not passing them in here we should get a better cache hit rate).
        related_events, next_token = await self._main_store.get_relations_for_event(
            event_id=event_id,
            event=event,
            room_id=room_id,
            relation_type=relation_type,
            event_type=event_type,
            aggregation_key=aggregation_key,
            limit=limit,
            direction=direction,
            from_token=from_token,
            to_token=to_token,
        )

        events = await self._main_store.get_events_as_list(
            [e.event_id for e in related_events]
        )

        events = await filter_events_for_client(
            self._storage, user_id, events, is_peeking=(member_event_id is None)
        )

        now = self._clock.time_msec()
        # Do not bundle aggregations when retrieving the original event because
        # we want the content before relations are applied to it.
        original_event = self._event_serializer.serialize_event(
            event, now, bundle_aggregations=None
        )
        # The relations returned for the requested event do include their
        # bundled aggregations.
        aggregations = await self.get_bundled_aggregations(
            events, requester.user.to_string()
        )
        serialized_events = self._event_serializer.serialize_events(
            events, now, bundle_aggregations=aggregations
        )

        return_value = {
            "chunk": serialized_events,
            "original_event": original_event,
        }

        if next_token:
            return_value["next_batch"] = await next_token.to_string(self._main_store)

        if from_token:
            return_value["prev_batch"] = await from_token.to_string(self._main_store)

        return return_value
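
    # For illustration, the pagination chunk returned above has roughly this
    # shape (event bodies elided; `next_batch`/`prev_batch` only appear when
    # the corresponding tokens exist):
    #
    #     {
    #         "chunk": [<serialized related events, with bundled aggregations>],
    #         "original_event": <the parent event, without bundled aggregations>,
    #         "next_batch": "<token>",
    #         "prev_batch": "<token>",
    #     }
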
    async def get_relations_for_event(
        self,
        event_id: str,
        event: EventBase,
        room_id: str,
        relation_type: str,
        ignored_users: FrozenSet[str] = frozenset(),
    ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
        """Get a list of events which relate to an event, ordered by topological ordering.

        Args:
            event_id: Fetch events that relate to this event ID.
            event: The matching EventBase to event_id.
            room_id: The room the event belongs to.
            relation_type: The type of relation.
            ignored_users: The users ignored by the requesting user.

        Returns:
            A tuple of the related events (with events sent by ignored users
            removed) and, if there are further results, the token to fetch the
            next page from.
        """
        # Call the underlying storage method, which is cached.
        related_events, next_token = await self._main_store.get_relations_for_event(
            event_id, event, room_id, relation_type, direction="f"
        )

        # Filter out ignored users and convert to the expected format.
        related_events = [
            event for event in related_events if event.sender not in ignored_users
        ]

        return related_events, next_token
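
    # Example call sketch (hypothetical room, event and user IDs): fetching the
    # thread children of a parent event while dropping ignored senders, much as
    # get_threads_for_events does below:
    #
    #     related, _ = await self.get_relations_for_event(
    #         "$parent",
    #         parent_event,
    #         "!room:example.org",
    #         RelationTypes.THREAD,
    #         ignored_users=frozenset({"@ignored:example.org"}),
    #     )
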
    async def get_annotations_for_event(
        self,
        event_id: str,
        room_id: str,
        limit: int = 5,
        ignored_users: FrozenSet[str] = frozenset(),
    ) -> List[JsonDict]:
        """Get a list of annotations on the event, grouped by event type and
        aggregation key, sorted by count.

        This is used, e.g., to get which reactions (and how many of each) have
        happened on an event.

        Args:
            event_id: Fetch events that relate to this event ID.
            room_id: The room the event belongs to.
            limit: Only fetch up to `limit` groups.
            ignored_users: The users ignored by the requesting user.

        Returns:
            List of groups of annotations that match. Each row is a dict with
            `type`, `key` and `count` fields.
        """
        # Get the base results for all users.
        full_results = await self._main_store.get_aggregation_groups_for_event(
            event_id, room_id, limit
        )

        # Then subtract off the results for any ignored users.
        ignored_results = await self._main_store.get_aggregation_groups_for_users(
            event_id, room_id, limit, ignored_users
        )

        filtered_results = []
        for result in full_results:
            key = (result["type"], result["key"])
            if key in ignored_results:
                result = result.copy()
                result["count"] -= ignored_results[key]
                if result["count"] <= 0:
                    continue
            filtered_results.append(result)

        return filtered_results
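
    # For illustration, each annotation group produced above looks roughly like
    #
    #     {"type": "m.reaction", "key": "👍", "count": 3}
    #
    # where `count` has already had reactions from ignored users subtracted and
    # groups whose count drops to zero (or below) are omitted entirely.
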
    async def get_threads_for_events(
        self, event_ids: Collection[str], user_id: str, ignored_users: FrozenSet[str]
    ) -> Dict[str, _ThreadAggregation]:
        """Get the bundled aggregations for threads for the requested events.

        Args:
            event_ids: Events to get aggregations for threads.
            user_id: The user requesting the bundled aggregations.
            ignored_users: The users ignored by the requesting user.

        Returns:
            A dictionary mapping event ID to the thread information.

            May not contain a value for all requested event IDs.
        """
        user = UserID.from_string(user_id)

        # Fetch thread summaries.
        summaries = await self._main_store.get_thread_summaries(event_ids)

        # Only fetch participation information for the events which actually
        # had thread summaries.
        thread_event_ids = [
            event_id for event_id, summary in summaries.items() if summary
        ]
        participated = await self._main_store.get_threads_participated(
            thread_event_ids, user_id
        )

        # Then subtract off the results for any ignored users.
        ignored_results = await self._main_store.get_threaded_messages_per_user(
            thread_event_ids, ignored_users
        )

        # A map of event ID to the thread aggregation.
        results = {}

        for event_id, summary in summaries.items():
            if summary:
                thread_count, latest_thread_event = summary

                # Subtract off the count of any ignored users.
                for ignored_user in ignored_users:
                    thread_count -= ignored_results.get((event_id, ignored_user), 0)

                # This is gnarly, but if the latest event is from an ignored user,
                # attempt to find one that isn't from an ignored user.
                if latest_thread_event.sender in ignored_users:
                    room_id = latest_thread_event.room_id

                    # If the root event is not found, something went wrong; do
                    # not include a summary of the thread.
                    event = await self._event_handler.get_event(user, room_id, event_id)
                    if event is None:
                        continue

                    potential_events, _ = await self.get_relations_for_event(
                        event_id,
                        event,
                        room_id,
                        RelationTypes.THREAD,
                        ignored_users,
                    )

                    # If all found events are from ignored users, do not include
                    # a summary of the thread.
                    if not potential_events:
                        continue

                    # The *last* event returned is the one we care about.
                    event = await self._event_handler.get_event(
                        user, room_id, potential_events[-1].event_id
                    )

                    # It is unexpected that the event will not exist.
                    if event is None:
                        logger.warning(
                            "Unable to fetch latest event in a thread with event ID: %s",
                            potential_events[-1].event_id,
                        )
                        continue
                    latest_thread_event = event

                results[event_id] = _ThreadAggregation(
                    latest_event=latest_thread_event,
                    count=thread_count,
                    # If there's a thread summary it must also exist in the
                    # participated dictionary.
                    current_user_participated=participated[event_id],
                )

        return results
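
    # For illustration, each value in the returned map is a _ThreadAggregation,
    # e.g. (hypothetical values):
    #
    #     _ThreadAggregation(
    #         latest_event=<the newest non-ignored event in the thread>,
    #         count=7,
    #         current_user_participated=True,
    #     )
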
    async def get_bundled_aggregations(
        self, events: Iterable[EventBase], user_id: str
    ) -> Dict[str, BundledAggregations]:
        """Generate bundled aggregations for events.

        Args:
            events: The iterable of events to calculate bundled aggregations for.
            user_id: The user requesting the bundled aggregations.

        Returns:
            A map of event ID to the bundled aggregations for the event.

            Not all requested events may exist in the results (if they don't have
            bundled aggregations).

            The results may include additional events which are related to the
            requested events.
        """
        # De-duplicate events by ID to handle the same event being requested
        # multiple times.
        events_by_id = {}

        # A map of event ID to the relation in that event, if there is one.
        relations_by_id: Dict[str, str] = {}
        for event in events:
            # State events do not get bundled aggregations.
            if event.is_state():
                continue

            relates_to = relation_from_event(event)
            if relates_to:
                # An event which is a replacement (i.e. an edit) or an annotation
                # (i.e. a reaction) may not have any other event related to it.
                if relates_to.rel_type in (
                    RelationTypes.ANNOTATION,
                    RelationTypes.REPLACE,
                ):
                    continue

                # Track the event's relation information for later.
                relations_by_id[event.event_id] = relates_to.rel_type

            # The event should get bundled aggregations.
            events_by_id[event.event_id] = event

        # event ID -> bundled aggregation in non-serialized form.
        results: Dict[str, BundledAggregations] = {}

        # Fetch any ignored users of the requesting user.
        ignored_users = await self._main_store.ignored_users(user_id)

        # Threads are special as the latest event of a thread might cause additional
        # events to be fetched. Thus, we check those first!

        # Fetch thread summaries (but only for the directly requested events).
        threads = await self.get_threads_for_events(
            # It is not valid to start a thread on an event which itself relates
            # to another event.
            [eid for eid in events_by_id.keys() if eid not in relations_by_id],
            user_id,
            ignored_users,
        )
        for event_id, thread in threads.items():
            results.setdefault(event_id, BundledAggregations()).thread = thread

            # If the latest event in a thread is not already being fetched,
            # add it. This ensures that the bundled aggregations for the
            # latest thread event are correct.
            latest_thread_event = thread.latest_event
            if latest_thread_event and latest_thread_event.event_id not in events_by_id:
                events_by_id[latest_thread_event.event_id] = latest_thread_event

                # Keep relations_by_id in sync with events_by_id:
                #
                # We know that the latest event in a thread has a thread relation
                # (as that is what makes it part of the thread).
                relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD

        # Fetch other relations per event.
        for event in events_by_id.values():
            # Fetch any annotations (i.e. reactions) to bundle with this event.
            annotations = await self.get_annotations_for_event(
                event.event_id, event.room_id, ignored_users=ignored_users
            )
            if annotations:
                results.setdefault(
                    event.event_id, BundledAggregations()
                ).annotations = {"chunk": annotations}

            # Fetch any references to bundle with this event.
            references, next_token = await self.get_relations_for_event(
                event.event_id,
                event,
                event.room_id,
                RelationTypes.REFERENCE,
                ignored_users=ignored_users,
            )
            if references:
                aggregations = results.setdefault(event.event_id, BundledAggregations())
                aggregations.references = {
                    "chunk": [{"event_id": ev.event_id} for ev in references]
                }

                if next_token:
                    aggregations.references["next_batch"] = await next_token.to_string(
                        self._main_store
                    )

        # Fetch any edits (but not for redacted events).
        #
        # Note that there is no use in limiting edits by ignored users since the
        # parent event should be ignored in the first place if the user is ignored.
        edits = await self._main_store.get_applicable_edits(
            [
                event_id
                for event_id, event in events_by_id.items()
                if not event.internal_metadata.is_redacted()
            ]
        )
        for event_id, edit in edits.items():
            results.setdefault(event_id, BundledAggregations()).replace = edit

        return results
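

# Usage sketch for the handler above (illustrative only; assumes the homeserver
# exposes the handler via `hs.get_relations_handler()` and that `events`,
# `user_id`, `serializer` and `now` exist in the calling context):
#
#     handler = hs.get_relations_handler()
#     aggregations = await handler.get_bundled_aggregations(events, user_id)
#     serialized = serializer.serialize_events(
#         events, now, bundle_aggregations=aggregations
#     )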