# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple

import attr
from prometheus_client import Counter

from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
)
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

if TYPE_CHECKING:
    import synapse.server

# This is defined in the Matrix spec and enforced by the receiver.
MAX_EDUS_PER_TRANSACTION = 100

logger = logging.getLogger(__name__)

sent_edus_counter = Counter(
    "synapse_federation_client_sent_edus", "Total number of EDUs successfully sent"
)

sent_edus_by_type = Counter(
    "synapse_federation_client_sent_edus_by_type",
    "Number of EDUs successfully sent, by event type",
    ["type"],
)


class PerDestinationQueue:
    """
    Manages the per-destination transmission queues.

    Args:
        hs
        transaction_manager
        destination: the server_name of the destination that we are managing
            transmission for.
    """

    def __init__(
        self,
        hs: "synapse.server.HomeServer",
        transaction_manager: "synapse.federation.sender.TransactionManager",
        destination: str,
    ):
        self._server_name = hs.hostname
        self._clock = hs.get_clock()
        self._store = hs.get_datastore()
        self._transaction_manager = transaction_manager
        self._instance_name = hs.get_instance_name()
        self._federation_shard_config = hs.config.worker.federation_shard_config
        self._state = hs.get_state_handler()

        self._should_send_on_this_instance = True
        if not self._federation_shard_config.should_handle(
            self._instance_name, destination
        ):
            # We don't raise an exception here to avoid taking out any other
            # processing. We have a guard in `attempt_new_transaction` that
            # ensures we don't start sending stuff.
            logger.error(
                "Created a per-destination queue for %s on the wrong worker",
                destination,
            )
            self._should_send_on_this_instance = False

        self._destination = destination
        self.transmission_loop_running = False

        # Flag to signal to any running transmission loop that there is new data
        # queued up to be sent.
        self._new_data_to_send = False

        # True whilst we are sending events that the remote homeserver missed
        # because it was unreachable. We start in this state so we can perform
        # catch-up at startup.
        # New events will only be sent once this is finished, at which point
        # _catching_up is flipped to False.
        self._catching_up = True  # type: bool

        # The stream_ordering of the most recent PDU that was discarded due to
        # being in catch-up mode.
        self._catchup_last_skipped = 0  # type: int

        # Cache of the last successfully-transmitted stream ordering for this
        # destination (we are the only updater so this is safe)
        self._last_successful_stream_ordering = None  # type: Optional[int]

        # a queue of pending PDUs
        self._pending_pdus = []  # type: List[EventBase]

        # XXX this is never actually used: see
        # https://github.com/matrix-org/synapse/issues/7549
        self._pending_edus = []  # type: List[Edu]

        # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
        # based on their key (e.g. typing events by room_id)
        # Map of (edu_type, key) -> Edu
        self._pending_edus_keyed = {}  # type: Dict[Tuple[str, Hashable], Edu]

        # Map of user_id -> UserPresenceState of pending presence to be sent to this
        # destination
        self._pending_presence = {}  # type: Dict[str, UserPresenceState]

        # room_id -> receipt_type -> user_id -> receipt_dict
        self._pending_rrs = {}  # type: Dict[str, Dict[str, Dict[str, dict]]]
        self._rrs_pending_flush = False

        # stream_id of last successfully sent to-device message.
        # NB: may be a long or an int.
        self._last_device_stream_id = 0

        # stream_id of last successfully sent device list update.
        self._last_device_list_stream_id = 0

    def __str__(self) -> str:
        return "PerDestinationQueue[%s]" % self._destination

    def pending_pdu_count(self) -> int:
        return len(self._pending_pdus)

    def pending_edu_count(self) -> int:
        return (
            len(self._pending_edus)
            + len(self._pending_presence)
            + len(self._pending_edus_keyed)
        )

    def send_pdus(self, pdus: Iterable[EventBase]) -> None:
        """Add PDUs to the queue, and start the transmission loop if necessary

        Args:
            pdus: pdus to send
        """
        if not self._catching_up or self._last_successful_stream_ordering is None:
            # only enqueue the PDU if we are not catching up (False) or do not
            # yet know if we have anything to catch up on (None)
            self._pending_pdus.extend(pdus)
        else:
            self._catchup_last_skipped = max(
                pdu.internal_metadata.stream_ordering
                for pdu in pdus
                if pdu.internal_metadata.stream_ordering is not None
            )

        self.attempt_new_transaction()

    def send_presence(self, states: Iterable[UserPresenceState]) -> None:
        """Add presence updates to the queue. Start the transmission loop if necessary.

        Args:
            states: presence to send
        """
        self._pending_presence.update({state.user_id: state for state in states})
        self.attempt_new_transaction()

    def queue_read_receipt(self, receipt: ReadReceipt) -> None:
        """Add a RR to the list to be sent. Doesn't start the transmission loop yet
        (see flush_read_receipts_for_room)

        Args:
            receipt: receipt to be queued
        """
        self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
            receipt.receipt_type, {}
        )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}

    def flush_read_receipts_for_room(self, room_id: str) -> None:
        # if we don't have any read-receipts for this room, it may be that we've already
        # sent them out, so we don't need to flush.
        if room_id not in self._pending_rrs:
            return
        self._rrs_pending_flush = True
        self.attempt_new_transaction()

    def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
        self._pending_edus_keyed[(edu.edu_type, key)] = edu
        self.attempt_new_transaction()

    def send_edu(self, edu: Edu) -> None:
        self._pending_edus.append(edu)
        self.attempt_new_transaction()

    def attempt_new_transaction(self) -> None:
        """Try to start a new transaction to this destination

        If there is already a transaction in progress to this destination,
        returns immediately. Otherwise kicks off the process of sending a
        transaction in the background.
        """
        # Mark that we (may) have new things to send, so that any running
        # transmission loop will recheck whether there is stuff to send.
        self._new_data_to_send = True

        if self.transmission_loop_running:
            # XXX: this can get stuck on by a never-ending
            # request at which point pending_pdus just keeps growing.
            # we need application-layer timeouts of some flavour for these
            # requests
            logger.debug("TX [%s] Transaction already in progress", self._destination)
            return

        if not self._should_send_on_this_instance:
            # We don't raise an exception here to avoid taking out any other
            # processing.
            logger.error(
                "Trying to start a transaction to %s on wrong worker", self._destination
            )
            return

        logger.debug("TX [%s] Starting transaction loop", self._destination)

        run_as_background_process(
            "federation_transaction_transmission_loop",
            self._transaction_transmission_loop,
        )

    async def _transaction_transmission_loop(self) -> None:
        pending_pdus = []  # type: List[EventBase]
        try:
            self.transmission_loop_running = True

            # This will throw if we wouldn't retry. We do this here so we fail
            # quickly, but we will later check this again in the http client,
            # hence why we throw the result away.
            await get_retry_limiter(self._destination, self._clock, self._store)

            if self._catching_up:
                # we potentially need to catch-up first
                await self._catch_up_transmission_loop()
                if self._catching_up:
                    # not caught up yet
                    return

            pending_pdus = []
            while True:
                self._new_data_to_send = False

                async with _TransactionQueueManager(self) as (
                    pending_pdus,
                    pending_edus,
                ):
                    if not pending_pdus and not pending_edus:
                        logger.debug("TX [%s] Nothing to send", self._destination)

                        # If we've gotten told about new things to send during
                        # checking for things to send, we try looking again.
                        # Otherwise new PDUs or EDUs might arrive in the meantime,
                        # but not get sent because we hold the
                        # `transmission_loop_running` flag.
                        if self._new_data_to_send:
                            continue
                        else:
                            return

                    if pending_pdus:
                        logger.debug(
                            "TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                            self._destination,
                            len(pending_pdus),
                        )

                    await self._transaction_manager.send_new_transaction(
                        self._destination, pending_pdus, pending_edus
                    )

                    sent_transactions_counter.inc()
                    sent_edus_counter.inc(len(pending_edus))
                    for edu in pending_edus:
                        sent_edus_by_type.labels(edu.edu_type).inc()

        except NotRetryingDestination as e:
            logger.debug(
                "TX [%s] not ready for retry yet (next retry at %s) - "
                "dropping transaction for now",
                self._destination,
                datetime.datetime.fromtimestamp(
                    (e.retry_last_ts + e.retry_interval) / 1000.0
                ),
            )

            if e.retry_interval > 60 * 60 * 1000:
                # we won't retry for another hour!
                # (this suggests a significant outage)
                # We drop pending EDUs because otherwise they will
                # rack up indefinitely.
                # (Dropping PDUs is already performed by `_start_catching_up`.)
                # Note that:
                # - the EDUs that are being dropped here are those that we can
                #   afford to drop (specifically, only typing notifications,
                #   read receipts and presence updates are being dropped here)
                # - Other EDUs such as to_device messages are queued with a
                #   different mechanism
                # - this is all volatile state that would be lost if the
                #   federation sender restarted anyway

                # dropping read receipts is a bit sad but should be solved
                # through another mechanism, because this is all volatile!
                self._pending_edus = []
                self._pending_edus_keyed = {}
                self._pending_presence = {}
                self._pending_rrs = {}

            self._start_catching_up()
        except FederationDeniedError as e:
            logger.info(e)
        except HttpResponseException as e:
            logger.warning(
                "TX [%s] Received %d response to transaction: %s",
                self._destination,
                e.code,
                e,
            )
        except RequestSendFailed as e:
            logger.warning(
                "TX [%s] Failed to send transaction: %s", self._destination, e
            )

            for p in pending_pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )
        except Exception:
            logger.exception("TX [%s] Failed to send transaction", self._destination)
            for p in pending_pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, self._destination
                )
        finally:
            # We want to be *very* sure we clear this after we stop processing
            self.transmission_loop_running = False

    async def _catch_up_transmission_loop(self) -> None:
        first_catch_up_check = self._last_successful_stream_ordering is None

        if first_catch_up_check:
            # first catchup so get last_successful_stream_ordering from database
            self._last_successful_stream_ordering = (
                await self._store.get_destination_last_successful_stream_ordering(
                    self._destination
                )
            )

        if self._last_successful_stream_ordering is None:
            # if it's still None, then this means we don't have the information
            # in our database - we haven't successfully sent a PDU to this server
            # (at least since the introduction of the feature tracking
            # last_successful_stream_ordering).
            # Sadly, this means we can't do anything here as we don't know what
            # needs catching up, so catching up is futile; let's stop.
            self._catching_up = False
            return

        # get at most 50 catchup room/PDUs
        while True:
            event_ids = await self._store.get_catch_up_room_event_ids(
                self._destination,
                self._last_successful_stream_ordering,
            )

            if not event_ids:
                # No more events to catch up on, but we can't ignore the chance
                # of a race condition, so we check that no new events have been
                # skipped due to us being in catch-up mode

                if self._catchup_last_skipped > self._last_successful_stream_ordering:
                    # another event has been skipped because we were in catch-up mode
                    continue

                # we are done catching up!
                self._catching_up = False
                break

            if first_catch_up_check:
                # as this is our check for needing catch-up, we may have PDUs in
                # the queue from before we *knew* we had to do catch-up, so
                # clear those out now.
                self._start_catching_up()

            # fetch the relevant events from the event store
            # - redacted behaviour of REDACT is fine, since we only send metadata
            #   of redacted events to the destination.
            # - don't need to worry about rejected events as we do not actively
            #   forward received events over federation.
            catchup_pdus = await self._store.get_events_as_list(event_ids)
            if not catchup_pdus:
                raise AssertionError(
                    "No events retrieved when we asked for %r. "
                    "This should not happen." % event_ids
                )

            # We send transactions with events from one room only, as it's likely
            # that the remote will have to do additional processing, which may
            # take some time. It's better to give it small amounts of work
            # rather than risk the request timing out and repeatedly being
            # retried, and not making any progress.
            #
            # Note: `catchup_pdus` will have exactly one PDU per room.
            for pdu in catchup_pdus:
                # The PDU from the DB will be the last PDU in the room from
                # *this server* that wasn't sent to the remote. However, other
                # servers may have sent lots of events since then, and we want
                # to try and tell the remote only about the *latest* events in
                # the room. This is so that it doesn't get inundated by events
                # from various parts of the DAG, which all need to be processed.
                #
                # Note: this does mean that in large rooms a server coming back
                # online will get sent the same events from all the different
                # servers, but the remote will correctly deduplicate them and
                # handle it only once.

                # Step 1, fetch the current extremities
                extrems = await self._store.get_prev_events_for_room(pdu.room_id)

                if pdu.event_id in extrems:
                    # If the event is in the extremities, then great! We can just
                    # use that without having to do further checks.
                    room_catchup_pdus = [pdu]
                else:
                    # If not, fetch the extremities and figure out which we can
                    # send.
                    extrem_events = await self._store.get_events_as_list(extrems)

                    new_pdus = []
                    for p in extrem_events:
                        # We pulled this from the DB, so it'll be non-null
                        assert p.internal_metadata.stream_ordering

                        # Filter out events that happened before the remote went
                        # offline
                        if (
                            p.internal_metadata.stream_ordering
                            < self._last_successful_stream_ordering
                        ):
                            continue

                        # Filter out events where the server is not in the room,
                        # e.g. it may have left/been kicked. *Ideally* we'd pull
                        # out the kick and send that, but it's a rare edge case
                        # so we don't bother for now (the server that sent the
                        # kick should send it out if it's online).
                        hosts = await self._state.get_hosts_in_room_at_events(
                            p.room_id, [p.event_id]
                        )
                        if self._destination not in hosts:
                            continue
                        new_pdus.append(p)

                    # If we've filtered out all the extremities, fall back to
                    # sending the original event. This should ensure that the
                    # server gets at least some of the missed events (especially
                    # if the other sending servers are up).
                    if new_pdus:
                        room_catchup_pdus = new_pdus
                    else:
                        room_catchup_pdus = [pdu]

                logger.info(
                    "Catching up rooms to %s: %r", self._destination, pdu.room_id
                )

                await self._transaction_manager.send_new_transaction(
                    self._destination, room_catchup_pdus, []
                )

                sent_transactions_counter.inc()

                # We pulled this from the DB, so it'll be non-null
                assert pdu.internal_metadata.stream_ordering

                # Note that we mark the last successful stream ordering as that
                # from the *original* PDU, rather than the PDU(s) we actually
                # send. This is because we use it to mark our position in the
                # queue of missed PDUs to process.
                self._last_successful_stream_ordering = (
                    pdu.internal_metadata.stream_ordering
                )

                await self._store.set_destination_last_successful_stream_ordering(
                    self._destination, self._last_successful_stream_ordering
                )

    def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
        if not self._pending_rrs:
            return
        if not force_flush and not self._rrs_pending_flush:
            # not yet time for this lot
            return

        edu = Edu(
            origin=self._server_name,
            destination=self._destination,
            edu_type="m.receipt",
            content=self._pending_rrs,
        )
        self._pending_rrs = {}
        self._rrs_pending_flush = False
        yield edu

    def _pop_pending_edus(self, limit: int) -> List[Edu]:
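        # Take up to `limit` EDUs off the front of the unkeyed EDU queue; the
        # rest stay queued for a later transaction. For example (illustrative
        # values), with five queued EDUs and limit=3 the first three are
        # returned and two remain in self._pending_edus.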
        pending_edus = self._pending_edus
        pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
        return pending_edus

    async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_list = self._last_device_list_stream_id

        # Retrieve list of new device updates to send to the destination
        now_stream_id, results = await self._store.get_device_updates_by_remote(
            self._destination, last_device_list, limit=limit
        )
        edus = [
            Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type=edu_type,
                content=content,
            )
            for (edu_type, content) in results
        ]

        assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"

        return (edus, now_stream_id)

    async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_stream_id = self._last_device_stream_id
        to_device_stream_id = self._store.get_to_device_stream_token()
        contents, stream_id = await self._store.get_new_device_msgs_for_remote(
            self._destination, last_device_stream_id, to_device_stream_id, limit
        )
        for content in contents:
            message_id = content.get("message_id")
            if not message_id:
                continue
            set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
        edus = [
            Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type="m.direct_to_device",
                content=content,
            )
            for content in contents
        ]

        return (edus, stream_id)

    def _start_catching_up(self) -> None:
        """
        Marks this destination as being in catch-up mode.

        This throws away the PDU queue.
        """
        self._catching_up = True
        self._pending_pdus = []


@attr.s(slots=True)
class _TransactionQueueManager:
    """A helper async context manager for pulling stuff off the queues and
    tracking what was last successfully sent, etc.
    """

    queue = attr.ib(type=PerDestinationQueue)

    _device_stream_id = attr.ib(type=Optional[int], default=None)
    _device_list_id = attr.ib(type=Optional[int], default=None)
    _last_stream_ordering = attr.ib(type=Optional[int], default=None)
    _pdus = attr.ib(type=List[EventBase], factory=list)

    async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
        # First we calculate the EDUs we want to send, if any.

        # We start by fetching device related EDUs, i.e. device updates and
        # to-device messages. We have to keep 2 free slots for presence and rr_edus.
        limit = MAX_EDUS_PER_TRANSACTION - 2

        device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
            limit
        )

        if device_update_edus:
            self._device_list_id = dev_list_id
        else:
            self.queue._last_device_list_stream_id = dev_list_id

        limit -= len(device_update_edus)

        (
            to_device_edus,
            device_stream_id,
        ) = await self.queue._get_to_device_message_edus(limit)

        if to_device_edus:
            self._device_stream_id = device_stream_id
        else:
            self.queue._last_device_stream_id = device_stream_id

        pending_edus = device_update_edus + to_device_edus

        # Now add the read receipt EDU.
        pending_edus.extend(self.queue._get_rr_edus(force_flush=False))

        # And presence EDU.
        if self.queue._pending_presence:
            pending_edus.append(
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type="m.presence",
                    content={
                        "push": [
                            format_user_presence_state(
                                presence, self.queue._clock.time_msec()
                            )
                            for presence in self.queue._pending_presence.values()
                        ]
                    },
                )
            )
            self.queue._pending_presence = {}

        # Finally add any other types of EDUs if there is room.
        pending_edus.extend(
            self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
        )
        while (
            len(pending_edus) < MAX_EDUS_PER_TRANSACTION
            and self.queue._pending_edus_keyed
        ):
            _, val = self.queue._pending_edus_keyed.popitem()
            pending_edus.append(val)

        # Now we look for any PDUs to send, by getting up to 50 PDUs from the
        # queue
        self._pdus = self.queue._pending_pdus[:50]

        if not self._pdus and not pending_edus:
            return [], []

        # if we've decided to send a transaction anyway, and we have room, we
        # may as well send any pending RRs
        if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
            pending_edus.extend(self.queue._get_rr_edus(force_flush=True))

        if self._pdus:
            self._last_stream_ordering = self._pdus[
                -1
            ].internal_metadata.stream_ordering
            assert self._last_stream_ordering

        return self._pdus, pending_edus

    async def __aexit__(self, exc_type, exc, tb):
        if exc_type is not None:
            # Failed to send transaction, so we bail out.
            return

        # Successfully sent the transaction, so we remove pending PDUs from the queue
        if self._pdus:
            self.queue._pending_pdus = self.queue._pending_pdus[len(self._pdus) :]

        # We succeeded in sending the transaction, so record where we have sent
        # up to in the various streams
        if self._device_stream_id:
            await self.queue._store.delete_device_msgs_for_remote(
                self.queue._destination, self._device_stream_id
            )
            self.queue._last_device_stream_id = self._device_stream_id

        # also mark the device updates as sent
        if self._device_list_id:
            logger.info(
                "Marking as sent %r %r", self.queue._destination, self._device_list_id
            )
            await self.queue._store.mark_as_sent_devices_by_remote(
                self.queue._destination, self._device_list_id
            )
            self.queue._last_device_list_stream_id = self._device_list_id

        if self._last_stream_ordering:
            # we sent some PDUs and it was successful, so update our
            # last_successful_stream_ordering in the destinations table.
            await self.queue._store.set_destination_last_successful_stream_ordering(
                self.queue._destination, self._last_stream_ordering
            )