per_destination_queue.py

# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Type

import attr
from prometheus_client import Counter

from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
)
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

if TYPE_CHECKING:
    import synapse.server

# This is defined in the Matrix spec and enforced by the receiver.
MAX_EDUS_PER_TRANSACTION = 100

logger = logging.getLogger(__name__)

sent_edus_counter = Counter(
    "synapse_federation_client_sent_edus", "Total number of EDUs successfully sent"
)

sent_edus_by_type = Counter(
    "synapse_federation_client_sent_edus_by_type",
    "Number of EDUs successfully sent, by event type",
    ["type"],
)


class PerDestinationQueue:
    """
    Manages the per-destination transmission queues.

    Args:
        hs
        transaction_manager
        destination: the server_name of the destination that we are managing
            transmission for.
    """
    def __init__(
        self,
        hs: "synapse.server.HomeServer",
        transaction_manager: "synapse.federation.sender.TransactionManager",
        destination: str,
    ):
        self._server_name = hs.hostname
        self._clock = hs.get_clock()
        self._store = hs.get_datastores().main
        self._transaction_manager = transaction_manager
        self._instance_name = hs.get_instance_name()
        self._federation_shard_config = hs.config.worker.federation_shard_config
        self._state = hs.get_state_handler()

        self._should_send_on_this_instance = True
        if not self._federation_shard_config.should_handle(
            self._instance_name, destination
        ):
            # We don't raise an exception here to avoid taking out any other
            # processing. We have a guard in `attempt_new_transaction` that
            # ensures we don't start sending stuff.
            logger.error(
                "Created a per-destination queue for %s on the wrong worker",
                destination,
            )
            self._should_send_on_this_instance = False

        self._destination = destination
        self.transmission_loop_running = False

        # Flag to signal to any running transmission loop that there is new data
        # queued up to be sent.
        self._new_data_to_send = False

        # True whilst we are sending events that the remote homeserver missed
        # because it was unreachable. We start in this state so we can perform
        # catch-up at startup.
        # New events will only be sent once this is finished, at which point
        # _catching_up is flipped to False.
        self._catching_up: bool = True

        # The stream_ordering of the most recent PDU that was discarded due to
        # being in catch-up mode.
        self._catchup_last_skipped: int = 0

        # Cache of the last successfully-transmitted stream ordering for this
        # destination (we are the only updater so this is safe)
        self._last_successful_stream_ordering: Optional[int] = None

        # A queue of pending PDUs.
        self._pending_pdus: List[EventBase] = []

        # XXX this is never actually used: see
        # https://github.com/matrix-org/synapse/issues/7549
        self._pending_edus: List[Edu] = []

        # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
        # based on their key (e.g. typing events by room_id).
        # Map of (edu_type, key) -> Edu
        self._pending_edus_keyed: Dict[Tuple[str, Hashable], Edu] = {}

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
        self._pending_presence: Dict[str, UserPresenceState] = {}

        # room_id -> receipt_type -> user_id -> receipt_dict
        self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {}
        self._rrs_pending_flush = False

        # stream_id of last successfully sent to-device message.
        # NB: may be a long or an int.
        self._last_device_stream_id = 0

        # stream_id of last successfully sent device list update.
        self._last_device_list_stream_id = 0

    def __str__(self) -> str:
        return "PerDestinationQueue[%s]" % self._destination

    def pending_pdu_count(self) -> int:
        return len(self._pending_pdus)

    def pending_edu_count(self) -> int:
        return (
            len(self._pending_edus)
            + len(self._pending_presence)
            + len(self._pending_edus_keyed)
        )

    def send_pdu(self, pdu: EventBase) -> None:
        """Add a PDU to the queue, and start the transmission loop if necessary.

        Args:
            pdu: pdu to send
        """
        if not self._catching_up or self._last_successful_stream_ordering is None:
            # only enqueue the PDU if we are not catching up (False) or do not
            # yet know if we have anything to catch up (None)
            self._pending_pdus.append(pdu)
        else:
            assert pdu.internal_metadata.stream_ordering
            self._catchup_last_skipped = pdu.internal_metadata.stream_ordering

        self.attempt_new_transaction()

    def send_presence(
        self, states: Iterable[UserPresenceState], start_loop: bool = True
    ) -> None:
        """Add presence updates to the queue.

        Args:
            states: Presence updates to send
            start_loop: Whether to start the transmission loop if not already
                running.
        """
        self._pending_presence.update({state.user_id: state for state in states})
        self._new_data_to_send = True

        if start_loop:
            self.attempt_new_transaction()

    def queue_read_receipt(self, receipt: ReadReceipt) -> None:
        """Add a RR to the list to be sent. Doesn't start the transmission loop yet
        (see flush_read_receipts_for_room)

        Args:
            receipt: receipt to be queued
        """
        self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
            receipt.receipt_type, {}
        )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}

    def flush_read_receipts_for_room(self, room_id: str) -> None:
        # if we don't have any read-receipts for this room, it may be that we've already
        # sent them out, so we don't need to flush.
        if room_id not in self._pending_rrs:
            return
        self._rrs_pending_flush = True
        self.attempt_new_transaction()

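    # Keyed EDUs replace any previously queued EDU with the same (edu_type, key),
    # so e.g. only the latest typing notification per room is ever queued.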
    def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
        self._pending_edus_keyed[(edu.edu_type, key)] = edu
        self.attempt_new_transaction()

    def send_edu(self, edu: Edu) -> None:
        self._pending_edus.append(edu)
        self.attempt_new_transaction()

    def attempt_new_transaction(self) -> None:
        """Try to start a new transaction to this destination.

        If there is already a transaction in progress to this destination,
        returns immediately. Otherwise kicks off the process of sending a
        transaction in the background.
        """
        # Mark that we (may) have new things to send, so that any running
        # transmission loop will recheck whether there is stuff to send.
        self._new_data_to_send = True

        if self.transmission_loop_running:
            # XXX: this can get stuck on by a never-ending
            # request at which point pending_pdus just keeps growing.
            # we need application-layer timeouts of some flavour on these
            # requests
            logger.debug("TX [%s] Transaction already in progress", self._destination)
            return

        if not self._should_send_on_this_instance:
            # We don't raise an exception here to avoid taking out any other
            # processing.
            logger.error(
                "Trying to start a transaction to %s on the wrong worker",
                self._destination,
            )
            return

        logger.debug("TX [%s] Starting transaction loop", self._destination)

        run_as_background_process(
            "federation_transaction_transmission_loop",
            self._transaction_transmission_loop,
        )

    async def _transaction_transmission_loop(self) -> None:
        pending_pdus: List[EventBase] = []
        try:
            self.transmission_loop_running = True

            # This will throw if we wouldn't retry. We do this here so we fail
            # quickly, but we will later check this again in the http client,
            # hence why we throw the result away.
            await get_retry_limiter(self._destination, self._clock, self._store)

            if self._catching_up:
                # we potentially need to catch-up first
                await self._catch_up_transmission_loop()
                if self._catching_up:
                    # not caught up yet
                    return

            pending_pdus = []
            while True:
                self._new_data_to_send = False

                async with _TransactionQueueManager(self) as (
                    pending_pdus,
                    pending_edus,
                ):
                    if not pending_pdus and not pending_edus:
                        logger.debug("TX [%s] Nothing to send", self._destination)

                        # If we've gotten told about new things to send during
                        # checking for things to send, we try looking again.
                        # Otherwise new PDUs or EDUs might arrive in the meantime,
                        # but not get sent because we hold the
                        # `transmission_loop_running` flag.
                        if self._new_data_to_send:
                            continue
                        else:
                            return

                    if pending_pdus:
                        logger.debug(
                            "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                            self._destination,
                            len(pending_pdus),
                        )

                    await self._transaction_manager.send_new_transaction(
                        self._destination, pending_pdus, pending_edus
                    )

                    sent_transactions_counter.inc()
                    sent_edus_counter.inc(len(pending_edus))
                    for edu in pending_edus:
                        sent_edus_by_type.labels(edu.edu_type).inc()

        except NotRetryingDestination as e:
            logger.debug(
                "TX [%s] not ready for retry yet (next retry at %s) - "
                "dropping transaction for now",
                self._destination,
                datetime.datetime.fromtimestamp(
                    (e.retry_last_ts + e.retry_interval) / 1000.0
                ),
            )

            if e.retry_interval > 60 * 60 * 1000:
                # we won't retry for another hour!
                # (this suggests a significant outage)
                # We drop pending EDUs because otherwise they will
                # rack up indefinitely.
                # (Dropping PDUs is already performed by `_start_catching_up`.)
                # Note that:
                # - the EDUs that are being dropped here are those that we can
                #   afford to drop (specifically, only typing notifications,
                #   read receipts and presence updates are being dropped here)
                # - Other EDUs such as to_device messages are queued with a
                #   different mechanism
                # - this is all volatile state that would be lost if the
                #   federation sender restarted anyway

                # dropping read receipts is a bit sad but should be solved
                # through another mechanism, because this is all volatile!
                self._pending_edus = []
                self._pending_edus_keyed = {}
                self._pending_presence = {}
                self._pending_rrs = {}

            self._start_catching_up()
        except FederationDeniedError as e:
            logger.info(e)
        except HttpResponseException as e:
            logger.warning(
                "TX [%s] Received %d response to transaction: %s",
                self._destination,
                e.code,
                e,
            )
        except RequestSendFailed as e:
            logger.warning(
                "TX [%s] Failed to send transaction: %s", self._destination, e
            )

            for p in pending_pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )
        except Exception:
            logger.exception("TX [%s] Failed to send transaction", self._destination)
            for p in pending_pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )
        finally:
            # We want to be *very* sure we clear this after we stop processing
            self.transmission_loop_running = False

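    # Catch-up: walk through the rooms that have PDUs newer than the last stream
    # ordering we successfully sent to this destination, sending the latest
    # relevant event(s) per room, until we are up to date.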
    async def _catch_up_transmission_loop(self) -> None:
        first_catch_up_check = self._last_successful_stream_ordering is None

        if first_catch_up_check:
            # first catchup so get last_successful_stream_ordering from database
            self._last_successful_stream_ordering = (
                await self._store.get_destination_last_successful_stream_ordering(
                    self._destination
                )
            )

        _tmp_last_successful_stream_ordering = self._last_successful_stream_ordering
        if _tmp_last_successful_stream_ordering is None:
            # if it's still None, then this means we don't have the information
            # in our database - we haven't successfully sent a PDU to this server
            # (at least since the introduction of the feature tracking
            # last_successful_stream_ordering).
            # Sadly, this means we can't do anything here as we don't know what
            # needs catching up, so catching up is futile; let's stop.
            self._catching_up = False
            return

        last_successful_stream_ordering: int = _tmp_last_successful_stream_ordering

        # get at most 50 catchup room/PDUs
        while True:
            event_ids = await self._store.get_catch_up_room_event_ids(
                self._destination, last_successful_stream_ordering
            )

            if not event_ids:
                # No more events to catch up on, but we can't ignore the chance
                # of a race condition, so we check that no new events have been
                # skipped due to us being in catch-up mode
                if self._catchup_last_skipped > last_successful_stream_ordering:
                    # another event has been skipped because we were in catch-up mode
                    continue

                # we are done catching up!
                self._catching_up = False
                break

            if first_catch_up_check:
                # as this is our check for needing catch-up, we may have PDUs in
                # the queue from before we *knew* we had to do catch-up, so
                # clear those out now.
                self._start_catching_up()

            # fetch the relevant events from the event store
            # - redacted behaviour of REDACT is fine, since we only send metadata
            #   of redacted events to the destination.
            # - don't need to worry about rejected events as we do not actively
            #   forward received events over federation.
            catchup_pdus = await self._store.get_events_as_list(event_ids)
            if not catchup_pdus:
                raise AssertionError(
                    "No events retrieved when we asked for %r. "
                    "This should not happen." % event_ids
                )

            # We send transactions with events from one room only, as it's likely
            # that the remote will have to do additional processing, which may
            # take some time. It's better to give it small amounts of work
            # rather than risk the request timing out and repeatedly being
            # retried, and not making any progress.
            #
            # Note: `catchup_pdus` will have exactly one PDU per room.
            for pdu in catchup_pdus:
                # The PDU from the DB will be the last PDU in the room from
                # *this server* that wasn't sent to the remote. However, other
                # servers may have sent lots of events since then, and we want
                # to try and tell the remote only about the *latest* events in
                # the room. This is so that it doesn't get inundated by events
                # from various parts of the DAG, which all need to be processed.
                #
                # Note: this does mean that in large rooms a server coming back
                # online will get sent the same events from all the different
                # servers, but the remote will correctly deduplicate them and
                # handle it only once.

                # Step 1, fetch the current extremities
                extrems = await self._store.get_prev_events_for_room(pdu.room_id)

                if pdu.event_id in extrems:
                    # If the event is in the extremities, then great! We can just
                    # use that without having to do further checks.
                    room_catchup_pdus = [pdu]
                else:
                    # If not, fetch the extremities and figure out which we can
                    # send.
                    extrem_events = await self._store.get_events_as_list(extrems)

                    new_pdus = []
                    for p in extrem_events:
                        # We pulled this from the DB, so it'll be non-null
                        assert p.internal_metadata.stream_ordering

                        # Filter out events that happened before the remote went
                        # offline
                        if (
                            p.internal_metadata.stream_ordering
                            < last_successful_stream_ordering
                        ):
                            continue

                        # Filter out events where the server is not in the room,
                        # e.g. it may have left/been kicked. *Ideally* we'd pull
                        # out the kick and send that, but it's a rare edge case
                        # so we don't bother for now (the server that sent the
                        # kick should send it out if it's online).
                        hosts = await self._state.get_hosts_in_room_at_events(
                            p.room_id, [p.event_id]
                        )
                        if self._destination not in hosts:
                            continue

                        new_pdus.append(p)

                    # If we've filtered out all the extremities, fall back to
                    # sending the original event. This should ensure that the
                    # server gets at least some of the missed events (especially
                    # if the other sending servers are up).
                    if new_pdus:
                        room_catchup_pdus = new_pdus
                    else:
                        room_catchup_pdus = [pdu]

                logger.info(
                    "Catching up rooms to %s: %r", self._destination, pdu.room_id
                )

                await self._transaction_manager.send_new_transaction(
                    self._destination, room_catchup_pdus, []
                )

                sent_transactions_counter.inc()

                # We pulled this from the DB, so it'll be non-null
                assert pdu.internal_metadata.stream_ordering

                # Note that we mark the last successful stream ordering as that
                # from the *original* PDU, rather than the PDU(s) we actually
                # send. This is because we use it to mark our position in the
                # queue of missed PDUs to process.
                last_successful_stream_ordering = pdu.internal_metadata.stream_ordering

                self._last_successful_stream_ordering = last_successful_stream_ordering
                await self._store.set_destination_last_successful_stream_ordering(
                    self._destination, last_successful_stream_ordering
                )

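    # Bundle all pending read receipts for this destination into a single
    # m.receipt EDU and clear them, unless we are still waiting for a flush.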
    def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
        if not self._pending_rrs:
            return
        if not force_flush and not self._rrs_pending_flush:
            # not yet time for this lot
            return

        edu = Edu(
            origin=self._server_name,
            destination=self._destination,
            edu_type="m.receipt",
            content=self._pending_rrs,
        )
        self._pending_rrs = {}
        self._rrs_pending_flush = False
        yield edu

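    # Remove and return up to `limit` non-keyed EDUs from the pending list.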
    def _pop_pending_edus(self, limit: int) -> List[Edu]:
        pending_edus = self._pending_edus
        pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
        return pending_edus

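    # Fetch pending device update EDUs for this destination, starting from the
    # last device-list stream id we successfully sent.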
    async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_list = self._last_device_list_stream_id

        # Retrieve list of new device updates to send to the destination
        now_stream_id, results = await self._store.get_device_updates_by_remote(
            self._destination, last_device_list, limit=limit
        )
        edus = [
            Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type=edu_type,
                content=content,
            )
            for (edu_type, content) in results
        ]

        assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"

        return edus, now_stream_id

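    # Wrap pending to-device messages for this destination in m.direct_to_device
    # EDUs, tagging the active tracing span with their message ids.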
    async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_stream_id = self._last_device_stream_id
        to_device_stream_id = self._store.get_to_device_stream_token()
        contents, stream_id = await self._store.get_new_device_msgs_for_remote(
            self._destination, last_device_stream_id, to_device_stream_id, limit
        )
        for content in contents:
            message_id = content.get("message_id")
            if not message_id:
                continue

            set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)

        edus = [
            Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type="m.direct_to_device",
                content=content,
            )
            for content in contents
        ]

        if edus:
            issue9533_logger.debug(
                "Sending %i to-device messages to %s, up to stream id %i",
                len(edus),
                self._destination,
                stream_id,
            )

        return edus, stream_id

    def _start_catching_up(self) -> None:
        """
        Marks this destination as being in catch-up mode.

        This throws away the PDU queue.
        """
        self._catching_up = True
        self._pending_pdus = []


@attr.s(slots=True, auto_attribs=True)
class _TransactionQueueManager:
    """A helper async context manager for pulling stuff off the queues and
    tracking what was last successfully sent, etc.
    """

    queue: PerDestinationQueue

    _device_stream_id: Optional[int] = None
    _device_list_id: Optional[int] = None
    _last_stream_ordering: Optional[int] = None
    _pdus: List[EventBase] = attr.Factory(list)

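    # On entry we assemble at most MAX_EDUS_PER_TRANSACTION EDUs for the
    # transaction: device-related EDUs first (reserving two slots for read
    # receipts and presence), then receipts, presence and any remaining
    # un-keyed/keyed EDUs, plus up to 50 queued PDUs.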
    async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
        # First we calculate the EDUs we want to send, if any.

        # We start by fetching device related EDUs, i.e. device updates and to
        # device messages. We have to keep 2 free slots for presence and rr_edus.
        limit = MAX_EDUS_PER_TRANSACTION - 2

        device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
            limit
        )

        if device_update_edus:
            self._device_list_id = dev_list_id
        else:
            self.queue._last_device_list_stream_id = dev_list_id

        limit -= len(device_update_edus)

        (
            to_device_edus,
            device_stream_id,
        ) = await self.queue._get_to_device_message_edus(limit)

        if to_device_edus:
            self._device_stream_id = device_stream_id
        else:
            self.queue._last_device_stream_id = device_stream_id

        pending_edus = device_update_edus + to_device_edus

        # Now add the read receipt EDU.
        pending_edus.extend(self.queue._get_rr_edus(force_flush=False))

        # And presence EDU.
        if self.queue._pending_presence:
            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type="m.presence",
                    content={
                        "push": [
                            format_user_presence_state(
                                presence, self.queue._clock.time_msec()
                            )
                            for presence in self.queue._pending_presence.values()
                        ]
                    },
                )
            )
            self.queue._pending_presence = {}

        # Finally add any other types of EDUs if there is room.
        pending_edus.extend(
            self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
        )
        while (
            len(pending_edus) < MAX_EDUS_PER_TRANSACTION
            and self.queue._pending_edus_keyed
        ):
            _, val = self.queue._pending_edus_keyed.popitem()
            pending_edus.append(val)

        # Now we look for any PDUs to send, by getting up to 50 PDUs from the
        # queue
        self._pdus = self.queue._pending_pdus[:50]

        if not self._pdus and not pending_edus:
            return [], []

        # if we've decided to send a transaction anyway, and we have room, we
        # may as well send any pending RRs
        if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
            pending_edus.extend(self.queue._get_rr_edus(force_flush=True))

        if self._pdus:
            self._last_stream_ordering = self._pdus[
                -1
            ].internal_metadata.stream_ordering
            assert self._last_stream_ordering

        return self._pdus, pending_edus

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        if exc_type is not None:
            # Failed to send transaction, so we bail out.
            return

        # Successfully sent transaction, so we remove pending PDUs from the queue
        if self._pdus:
            self.queue._pending_pdus = self.queue._pending_pdus[len(self._pdus) :]

        # We successfully sent the transaction, so record where we have sent up
        # to in the various streams
        if self._device_stream_id:
            await self.queue._store.delete_device_msgs_for_remote(
                self.queue._destination, self._device_stream_id
            )
            self.queue._last_device_stream_id = self._device_stream_id

        # also mark the device updates as sent
        if self._device_list_id:
            logger.info(
                "Marking as sent %r %r", self.queue._destination, self._device_list_id
            )
            await self.queue._store.mark_as_sent_devices_by_remote(
                self.queue._destination, self._device_list_id
            )
            self.queue._last_device_list_stream_id = self._device_list_id

        if self._last_stream_ordering:
            # we sent some PDUs and it was successful, so update our
            # last_successful_stream_ordering in the destinations table.
            await self.queue._store.set_destination_last_successful_stream_ordering(
                self.queue._destination, self._last_stream_ordering
            )
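

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): the federation sender
# keeps one PerDestinationQueue per remote homeserver and feeds it events as
# they arrive. `hs`, `transaction_manager` and `event` are assumed to come from
# the surrounding homeserver wiring; the names below are hypothetical.
#
#     queues: Dict[str, PerDestinationQueue] = {}
#
#     def get_queue(destination: str) -> PerDestinationQueue:
#         # Lazily create a queue the first time we see a destination.
#         queue = queues.get(destination)
#         if queue is None:
#             queue = queues[destination] = PerDestinationQueue(
#                 hs, transaction_manager, destination
#             )
#         return queue
#
#     # Queuing a PDU also kicks off the background transmission loop, which
#     # drains PDUs/EDUs for the destination via _TransactionQueueManager.
#     get_queue("remote.example.org").send_pdu(event)
# ---------------------------------------------------------------------------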