transaction_queue.py 25 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699
  1. # -*- coding: utf-8 -*-
  2. # Copyright 2014-2016 OpenMarket Ltd
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import datetime
  16. import logging
  17. from six import itervalues
  18. from prometheus_client import Counter
  19. from twisted.internet import defer
  20. import synapse.metrics
  21. from synapse.api.errors import FederationDeniedError, HttpResponseException
  22. from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
  23. from synapse.metrics import (
  24. LaterGauge,
  25. events_processed_counter,
  26. sent_edus_counter,
  27. sent_transactions_counter,
  28. )
  29. from synapse.util import PreserveLoggingContext, logcontext
  30. from synapse.util.metrics import measure_func
  31. from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
  32. from .persistence import TransactionActions
  33. from .units import Edu, Transaction
logger = logging.getLogger(__name__)

# Number of times we queued a PDU for sending (one increment per PDU that had
# at least one eligible destination) -- see _send_pdu.
sent_pdus_destination_dist_count = Counter(
    "synapse_federation_client_sent_pdu_destinations:count", ""
)
# Total number of (PDU, destination) pairs queued, i.e. the per-destination
# fan-out of outgoing PDUs -- see _send_pdu.
sent_pdus_destination_dist_total = Counter(
    "synapse_federation_client_sent_pdu_destinations:total", ""
)
class TransactionQueue(object):
    """This class makes sure we only have one transaction in flight at
    a time for a given destination.

    It batches pending PDUs into single transactions.
    """

    def __init__(self, hs):
        self.server_name = hs.hostname

        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.transaction_actions = TransactionActions(self.store)

        self.transport_layer = hs.get_federation_transport_client()

        self.clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id

        # Is a mapping from destinations -> deferreds. Used to keep track
        # of which destinations have transactions in flight and when they are
        # done
        self.pending_transactions = {}

        LaterGauge(
            "synapse_federation_transaction_queue_pending_destinations",
            "",
            [],
            lambda: len(self.pending_transactions),
        )

        # Is a mapping from destination -> list of
        # tuple(pending pdus, deferred, order)
        # NOTE: also bound to the local alias `pdus` so the gauge lambda below
        # can reference the dict directly.
        self.pending_pdus_by_dest = pdus = {}
        # destination -> list of tuple(edu, deferred)
        self.pending_edus_by_dest = edus = {}

        # Map of user_id -> UserPresenceState for all the pending presence
        # to be sent out by user_id. Entries here get processed and put in
        # pending_presence_by_dest
        self.pending_presence = {}

        # Map of destination -> user_id -> UserPresenceState of pending presence
        # to be sent to each destinations
        self.pending_presence_by_dest = presence = {}

        # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
        # based on their key (e.g. typing events by room_id)
        # Map of destination -> (edu_type, key) -> Edu
        self.pending_edus_keyed_by_dest = edus_keyed = {}

        LaterGauge(
            "synapse_federation_transaction_queue_pending_pdus",
            "",
            [],
            lambda: sum(map(len, pdus.values())),
        )
        LaterGauge(
            "synapse_federation_transaction_queue_pending_edus",
            "",
            [],
            lambda: (
                sum(map(len, edus.values()))
                + sum(map(len, presence.values()))
                + sum(map(len, edus_keyed.values()))
            ),
        )

        # destination -> list of tuple(failure, deferred)
        self.pending_failures_by_dest = {}

        # destination -> stream_id of last successfully sent to-device message.
        # NB: may be a long or an int.
        self.last_device_stream_id_by_dest = {}

        # destination -> stream_id of last successfully sent device list
        # update.
        self.last_device_list_stream_id_by_dest = {}

        # HACK to get unique tx id
        self._next_txn_id = int(self.clock.time_msec())

        # Monotonic counter used to preserve PDU ordering per destination.
        self._order = 1

        self._is_processing = False
        self._last_poked_id = -1

        self._processing_pending_presence = False
  110. def can_send_to(self, destination):
  111. """Can we send messages to the given server?
  112. We can't send messages to ourselves. If we are running on localhost
  113. then we can only federation with other servers running on localhost.
  114. Otherwise we only federate with servers on a public domain.
  115. Args:
  116. destination(str): The server we are possibly trying to send to.
  117. Returns:
  118. bool: True if we can send to the server.
  119. """
  120. if destination == self.server_name:
  121. return False
  122. if self.server_name.startswith("localhost"):
  123. return destination.startswith("localhost")
  124. else:
  125. return not destination.startswith("localhost")
    def notify_new_events(self, current_id):
        """This gets called when we have some new events we might want to
        send out to other servers.

        Args:
            current_id: the latest event-stream position; the background loop
                will process events up to (at least) this point.
        """
        self._last_poked_id = max(current_id, self._last_poked_id)

        if self._is_processing:
            # An existing loop will pick up the new _last_poked_id.
            return

        # fire off a processing loop in the background. It's likely it will
        # outlast the current request, so run it in the sentinel logcontext.
        with PreserveLoggingContext():
            self._process_event_queue_loop()
    @defer.inlineCallbacks
    def _process_event_queue_loop(self):
        """Background loop: reads batches of new events from the event stream
        and queues them for federation, until we catch up to _last_poked_id.

        Guarded by _is_processing so only one loop runs at a time.
        """
        try:
            self._is_processing = True
            while True:
                last_token = yield self.store.get_federation_out_pos("events")
                next_token, events = yield self.store.get_all_new_events_stream(
                    last_token, self._last_poked_id, limit=100,
                )

                logger.debug("Handling %s -> %s", last_token, next_token)

                if not events and next_token >= self._last_poked_id:
                    # Caught up: nothing new since we were last poked.
                    break

                @defer.inlineCallbacks
                def handle_event(event):
                    # Only send events for this server.
                    send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                    is_mine = self.is_mine_id(event.event_id)
                    if not is_mine and send_on_behalf_of is None:
                        return

                    try:
                        # Get the state from before the event.
                        # We need to make sure that this is the state from before
                        # the event and not from after it.
                        # Otherwise if the last member on a server in a room is
                        # banned then it won't receive the event because it won't
                        # be in the room after the ban.
                        destinations = yield self.state.get_current_hosts_in_room(
                            event.room_id, latest_event_ids=[
                                prev_id for prev_id, _ in event.prev_events
                            ],
                        )
                    except Exception:
                        logger.exception(
                            "Failed to calculate hosts in room for event: %s",
                            event.event_id,
                        )
                        return

                    destinations = set(destinations)

                    if send_on_behalf_of is not None:
                        # If we are sending the event on behalf of another server
                        # then it already has the event and there is no reason to
                        # send the event to it.
                        destinations.discard(send_on_behalf_of)

                    logger.debug("Sending %s to %r", event, destinations)

                    self._send_pdu(event, destinations)

                @defer.inlineCallbacks
                def handle_room_events(events):
                    # Events within a room are handled sequentially to keep
                    # their relative order.
                    for event in events:
                        yield handle_event(event)

                events_by_room = {}
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)

                # Different rooms can be processed concurrently.
                yield logcontext.make_deferred_yieldable(defer.gatherResults(
                    [
                        logcontext.run_in_background(handle_room_events, evs)
                        for evs in itervalues(events_by_room)
                    ],
                    consumeErrors=True
                ))

                yield self.store.update_federation_out_pos(
                    "events", next_token
                )

                if events:
                    now = self.clock.time_msec()
                    ts = yield self.store.get_received_ts(events[-1].event_id)

                    synapse.metrics.event_processing_lag.labels(
                        "federation_sender").set(now - ts)
                    synapse.metrics.event_processing_last_ts.labels(
                        "federation_sender").set(ts)

                    events_processed_counter.inc(len(events))

                synapse.metrics.event_processing_positions.labels(
                    "federation_sender").set(next_token)

        finally:
            self._is_processing = False
  211. def _send_pdu(self, pdu, destinations):
  212. # We loop through all destinations to see whether we already have
  213. # a transaction in progress. If we do, stick it in the pending_pdus
  214. # table and we'll get back to it later.
  215. order = self._order
  216. self._order += 1
  217. destinations = set(destinations)
  218. destinations = set(
  219. dest for dest in destinations if self.can_send_to(dest)
  220. )
  221. logger.debug("Sending to: %s", str(destinations))
  222. if not destinations:
  223. return
  224. sent_pdus_destination_dist_total.inc(len(destinations))
  225. sent_pdus_destination_dist_count.inc()
  226. for destination in destinations:
  227. self.pending_pdus_by_dest.setdefault(destination, []).append(
  228. (pdu, order)
  229. )
  230. self._attempt_new_transaction(destination)
    @logcontext.preserve_fn  # the caller should not yield on this
    @defer.inlineCallbacks
    def send_presence(self, states):
        """Send the new presence states to the appropriate destinations.

        This actually queues up the presence states ready for sending and
        triggers a background task to process them and send out the transactions.

        Args:
            states (list(UserPresenceState))
        """

        # First we queue up the new presence by user ID, so multiple presence
        # updates in quick succession are correctly handled
        # We only want to send presence for our own users, so lets always just
        # filter here just in case.
        self.pending_presence.update({
            state.user_id: state for state in states
            if self.is_mine_id(state.user_id)
        })

        # We then handle the new pending presence in batches, first figuring
        # out the destinations we need to send each state to and then poking it
        # to attempt a new transaction. We linearize this so that we don't
        # accidentally mess up the ordering and send multiple presence updates
        # in the wrong order
        if self._processing_pending_presence:
            # The loop below is already draining pending_presence; our update
            # above will be picked up on its next iteration.
            return

        self._processing_pending_presence = True
        try:
            while True:
                # Swap out the pending map so new updates accumulate in a
                # fresh dict while we process this batch.
                states_map = self.pending_presence
                self.pending_presence = {}

                if not states_map:
                    break

                yield self._process_presence_inner(list(states_map.values()))
        except Exception:
            logger.exception("Error sending presence states to servers")
        finally:
            self._processing_pending_presence = False
    @measure_func("txnqueue._process_presence")
    @defer.inlineCallbacks
    def _process_presence_inner(self, states):
        """Given a list of states populate self.pending_presence_by_dest and
        poke to send a new transaction to each destination

        Args:
            states (list(UserPresenceState))
        """
        hosts_and_states = yield get_interested_remotes(self.store, states, self.state)

        # NOTE: the loop variable `states` deliberately rebinds the parameter
        # name with each per-destination batch of states.
        for destinations, states in hosts_and_states:
            for destination in destinations:
                if not self.can_send_to(destination):
                    continue

                self.pending_presence_by_dest.setdefault(
                    destination, {}
                ).update({
                    state.user_id: state for state in states
                })

                self._attempt_new_transaction(destination)
  286. def send_edu(self, destination, edu_type, content, key=None):
  287. edu = Edu(
  288. origin=self.server_name,
  289. destination=destination,
  290. edu_type=edu_type,
  291. content=content,
  292. )
  293. if not self.can_send_to(destination):
  294. return
  295. sent_edus_counter.inc()
  296. if key:
  297. self.pending_edus_keyed_by_dest.setdefault(
  298. destination, {}
  299. )[(edu.edu_type, key)] = edu
  300. else:
  301. self.pending_edus_by_dest.setdefault(destination, []).append(edu)
  302. self._attempt_new_transaction(destination)
  303. def send_failure(self, failure, destination):
  304. if destination == self.server_name or destination == "localhost":
  305. return
  306. if not self.can_send_to(destination):
  307. return
  308. self.pending_failures_by_dest.setdefault(
  309. destination, []
  310. ).append(failure)
  311. self._attempt_new_transaction(destination)
  312. def send_device_messages(self, destination):
  313. if destination == self.server_name or destination == "localhost":
  314. return
  315. if not self.can_send_to(destination):
  316. return
  317. self._attempt_new_transaction(destination)
  318. def get_current_token(self):
  319. return 0
    def _attempt_new_transaction(self, destination):
        """Try to start a new transaction to this destination

        If there is already a transaction in progress to this destination,
        returns immediately. Otherwise kicks off the process of sending a
        transaction in the background.

        Args:
            destination (str):

        Returns:
            None
        """
        # list of (pending_pdu, deferred, order)
        if destination in self.pending_transactions:
            # XXX: pending_transactions can get stuck on by a never-ending
            # request at which point pending_pdus_by_dest just keeps growing.
            # we need application-layer timeouts of some flavour of these
            # requests
            logger.debug(
                "TX [%s] Transaction already in progress",
                destination
            )
            return

        logger.debug("TX [%s] Starting transaction loop", destination)

        # Drop the logcontext before starting the transaction. It doesn't
        # really make sense to log all the outbound transactions against
        # whatever path led us to this point: that's pretty arbitrary really.
        #
        # (this also means we can fire off _perform_transaction without
        # yielding)
        with logcontext.PreserveLoggingContext():
            self._transaction_transmission_loop(destination)
    @defer.inlineCallbacks
    def _transaction_transmission_loop(self, destination):
        """Background loop which repeatedly sends transactions to
        `destination` until there is nothing left to send.

        While running, `destination` is marked in pending_transactions; the
        entry is removed again in the `finally` below.
        """
        pending_pdus = []
        try:
            self.pending_transactions[destination] = 1

            # This will throw if we wouldn't retry. We do this here so we fail
            # quickly, but we will later check this again in the http client,
            # hence why we throw the result away.
            yield get_retry_limiter(destination, self.clock, self.store)

            pending_pdus = []
            while True:
                device_message_edus, device_stream_id, dev_list_id = (
                    yield self._get_new_device_messages(destination)
                )

                # BEGIN CRITICAL SECTION
                #
                # In order to avoid a race condition, we need to make sure that
                # the following code (from popping the queues up to the point
                # where we decide if we actually have any pending messages) is
                # atomic - otherwise new PDUs or EDUs might arrive in the
                # meantime, but not get sent because we hold the
                # pending_transactions flag.

                pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
                pending_edus = self.pending_edus_by_dest.pop(destination, [])
                pending_presence = self.pending_presence_by_dest.pop(destination, {})
                pending_failures = self.pending_failures_by_dest.pop(destination, [])

                pending_edus.extend(
                    self.pending_edus_keyed_by_dest.pop(destination, {}).values()
                )

                pending_edus.extend(device_message_edus)
                if pending_presence:
                    pending_edus.append(
                        Edu(
                            origin=self.server_name,
                            destination=destination,
                            edu_type="m.presence",
                            content={
                                "push": [
                                    format_user_presence_state(
                                        presence, self.clock.time_msec()
                                    )
                                    for presence in pending_presence.values()
                                ]
                            },
                        )
                    )

                if pending_pdus:
                    logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
                                 destination, len(pending_pdus))

                if not pending_pdus and not pending_edus and not pending_failures:
                    logger.debug("TX [%s] Nothing to send", destination)
                    # Record the to-device stream position we reached even
                    # though we sent nothing.
                    self.last_device_stream_id_by_dest[destination] = (
                        device_stream_id
                    )
                    return

                # END CRITICAL SECTION

                success = yield self._send_new_transaction(
                    destination, pending_pdus, pending_edus, pending_failures,
                )
                if success:
                    sent_transactions_counter.inc()
                    # Remove the acknowledged device messages from the database
                    # Only bother if we actually sent some device messages
                    if device_message_edus:
                        yield self.store.delete_device_msgs_for_remote(
                            destination, device_stream_id
                        )
                        logger.info("Marking as sent %r %r", destination, dev_list_id)
                        yield self.store.mark_as_sent_devices_by_remote(
                            destination, dev_list_id
                        )

                    self.last_device_stream_id_by_dest[destination] = device_stream_id
                    self.last_device_list_stream_id_by_dest[destination] = dev_list_id
                else:
                    break
        except NotRetryingDestination as e:
            logger.debug(
                "TX [%s] not ready for retry yet (next retry at %s) - "
                "dropping transaction for now",
                destination,
                datetime.datetime.fromtimestamp(
                    (e.retry_last_ts + e.retry_interval) / 1000.0
                ),
            )
        except FederationDeniedError as e:
            logger.info(e)
        except Exception as e:
            logger.warn(
                "TX [%s] Failed to send transaction: %s",
                destination,
                e,
            )
            for p, _ in pending_pdus:
                logger.info("Failed to send event %s to %s", p.event_id,
                            destination)
        finally:
            # We want to be *very* sure we delete this after we stop processing
            self.pending_transactions.pop(destination, None)
  448. @defer.inlineCallbacks
  449. def _get_new_device_messages(self, destination):
  450. last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
  451. to_device_stream_id = self.store.get_to_device_stream_token()
  452. contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
  453. destination, last_device_stream_id, to_device_stream_id
  454. )
  455. edus = [
  456. Edu(
  457. origin=self.server_name,
  458. destination=destination,
  459. edu_type="m.direct_to_device",
  460. content=content,
  461. )
  462. for content in contents
  463. ]
  464. last_device_list = self.last_device_list_stream_id_by_dest.get(destination, 0)
  465. now_stream_id, results = yield self.store.get_devices_by_remote(
  466. destination, last_device_list
  467. )
  468. edus.extend(
  469. Edu(
  470. origin=self.server_name,
  471. destination=destination,
  472. edu_type="m.device_list_update",
  473. content=content,
  474. )
  475. for content in results
  476. )
  477. defer.returnValue((edus, stream_id, now_stream_id))
    @measure_func("_send_new_transaction")
    @defer.inlineCallbacks
    def _send_new_transaction(self, destination, pending_pdus, pending_edus,
                              pending_failures):
        """Build, persist and send a single transaction to `destination`.

        Returns (via Deferred):
            bool: True if the transaction got a 200 response (individual PDU
            errors in the response body do not count as failure), False
            otherwise.
        """

        # Sort based on the order field
        pending_pdus.sort(key=lambda t: t[1])
        pdus = [x[0] for x in pending_pdus]
        edus = pending_edus
        failures = [x.get_dict() for x in pending_failures]

        success = True

        logger.debug("TX [%s] _attempt_new_transaction", destination)

        txn_id = str(self._next_txn_id)

        logger.debug(
            "TX [%s] {%s} Attempting new transaction"
            " (pdus: %d, edus: %d, failures: %d)",
            destination, txn_id,
            len(pdus),
            len(edus),
            len(failures)
        )

        logger.debug("TX [%s] Persisting transaction...", destination)

        transaction = Transaction.create_new(
            origin_server_ts=int(self.clock.time_msec()),
            transaction_id=txn_id,
            origin=self.server_name,
            destination=destination,
            pdus=pdus,
            edus=edus,
            pdu_failures=failures,
        )

        self._next_txn_id += 1

        yield self.transaction_actions.prepare_to_send(transaction)

        logger.debug("TX [%s] Persisted transaction", destination)
        logger.info(
            "TX [%s] {%s} Sending transaction [%s],"
            " (PDUs: %d, EDUs: %d, failures: %d)",
            destination, txn_id,
            transaction.transaction_id,
            len(pdus),
            len(edus),
            len(failures),
        )

        # Actually send the transaction

        # FIXME (erikj): This is a bit of a hack to make the Pdu age
        # keys work
        def json_data_cb():
            # Recompute each PDU's "age" from its age_ts at serialization
            # time, so the value is fresh when the request actually goes out.
            data = transaction.get_dict()
            now = int(self.clock.time_msec())
            if "pdus" in data:
                for p in data["pdus"]:
                    if "age_ts" in p:
                        unsigned = p.setdefault("unsigned", {})
                        unsigned["age"] = now - int(p["age_ts"])
                        del p["age_ts"]
            return data

        try:
            response = yield self.transport_layer.send_transaction(
                transaction, json_data_cb
            )
            code = 200

            if response:
                for e_id, r in response.get("pdus", {}).items():
                    if "error" in r:
                        logger.warn(
                            "Transaction returned error for %s: %s",
                            e_id, r,
                        )
        except HttpResponseException as e:
            code = e.code
            response = e.response

            # Re-raise auth/unknown/rate-limit/server errors so the caller's
            # error handling (and retry limiter) sees them; other HTTP error
            # codes are recorded against the transaction below instead.
            if e.code in (401, 404, 429) or 500 <= e.code:
                logger.info(
                    "TX [%s] {%s} got %d response",
                    destination, txn_id, code
                )
                raise e

        logger.info(
            "TX [%s] {%s} got %d response",
            destination, txn_id, code
        )

        logger.debug("TX [%s] Sent transaction", destination)
        logger.debug("TX [%s] Marking as delivered...", destination)

        yield self.transaction_actions.delivered(
            transaction, code, response
        )

        logger.debug("TX [%s] Marked as delivered", destination)

        if code != 200:
            for p in pdus:
                logger.info(
                    "Failed to send event %s to %s", p.event_id, destination
                )
            success = False

        defer.returnValue(success)