devices.py

# Copyright 2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
# Copyright 2019,2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Collection,
    Dict,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
)

from synapse.api.errors import Codes, StoreError
from synapse.logging.opentracing import (
    get_active_span_text_map,
    set_tag,
    trace,
    whitelisted_homeserver,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
    make_tuple_comparison_clause,
)
from synapse.types import JsonDict, get_verify_key_from_cross_signing_key
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr
if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

issue_8631_logger = logging.getLogger("synapse.8631_debug")

DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
    "drop_device_list_streams_non_unique_indexes"
)

BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES = "remove_dup_outbound_pokes"
class DeviceWorkerStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        device_list_max = self._device_list_id_gen.get_current_token()
        device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict(
            db_conn,
            "device_lists_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=device_list_max,
            limit=10000,
        )
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache",
            min_device_list_id,
            prefilled_cache=device_list_prefill,
        )

        (
            user_signature_stream_prefill,
            user_signature_stream_list_id,
        ) = self.db_pool.get_cache_dict(
            db_conn,
            "user_signature_stream",
            entity_column="from_user_id",
            stream_column="stream_id",
            max_value=device_list_max,
            limit=1000,
        )
        self._user_signature_stream_cache = StreamChangeCache(
            "UserSignatureStreamChangeCache",
            user_signature_stream_list_id,
            prefilled_cache=user_signature_stream_prefill,
        )

        (
            device_list_federation_prefill,
            device_list_federation_list_id,
        ) = self.db_pool.get_cache_dict(
            db_conn,
            "device_lists_outbound_pokes",
            entity_column="destination",
            stream_column="stream_id",
            max_value=device_list_max,
            limit=10000,
        )
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache",
            device_list_federation_list_id,
            prefilled_cache=device_list_federation_prefill,
        )

        if hs.config.worker.run_background_tasks:
            self._clock.looping_call(
                self._prune_old_outbound_device_pokes, 60 * 60 * 1000
            )
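
    # Note on the caches above: a StreamChangeCache answers "has this entity
    # changed since stream position X?" without a database hit. The
    # get_cache_dict() calls prefill each cache from the newest rows of the
    # corresponding table, so a freshly started process can answer most such
    # queries immediately. Illustrative use (same API as used further down in
    # this file):
    #
    #   if self._device_list_stream_cache.has_entity_changed(user_id, token):
    #       ...  # fall through to the database for the actual changes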
    async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int:
        """Retrieve the number of devices owned by the given users.
        Only counts devices that are not marked as hidden.

        Args:
            user_ids: The IDs of the users who own the devices
        Returns:
            The number of devices owned by these users.
        """

        def count_devices_by_users_txn(txn, user_ids):
            sql = """
                SELECT count(*)
                FROM devices
                WHERE
                    hidden = '0' AND
            """

            clause, args = make_in_list_sql_clause(
                txn.database_engine, "user_id", user_ids
            )

            txn.execute(sql + clause, args)
            return txn.fetchone()[0]

        if not user_ids:
            return 0

        return await self.db_pool.runInteraction(
            "count_devices_by_users", count_devices_by_users_txn, user_ids
        )
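
    # For illustration: with user_ids=["@a:hs", "@b:hs"], the transaction above
    # runs SQL along the lines of
    #
    #   SELECT count(*) FROM devices
    #   WHERE hidden = '0' AND user_id IN (?, ?)
    #
    # (the exact clause comes from make_in_list_sql_clause and may instead be
    # "user_id = ANY(?)" depending on the database engine).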
    async def get_device(
        self, user_id: str, device_id: str
    ) -> Optional[Dict[str, Any]]:
        """Retrieve a device. Only returns devices that are not marked as
        hidden.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to retrieve
        Returns:
            A dict containing the device information, or `None` if the device does not
            exist.
        """
        return await self.db_pool.simple_select_one(
            table="devices",
            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
            retcols=("user_id", "device_id", "display_name"),
            desc="get_device",
            allow_none=True,
        )
    async def get_device_opt(
        self, user_id: str, device_id: str
    ) -> Optional[Dict[str, Any]]:
        """Retrieve a device. Only returns devices that are not marked as
        hidden.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to retrieve
        Returns:
            A dict containing the device information, or None if the device does not exist.
        """
        return await self.db_pool.simple_select_one(
            table="devices",
            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
            retcols=("user_id", "device_id", "display_name"),
            desc="get_device_opt",
            allow_none=True,
        )
    async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]:
        """Retrieve all of a user's registered devices. Only returns devices
        that are not marked as hidden.

        Args:
            user_id: The ID of the user whose devices to retrieve
        Returns:
            A mapping from device_id to a dict containing "device_id", "user_id"
            and "display_name" for each device.
        """
        devices = await self.db_pool.simple_select_list(
            table="devices",
            keyvalues={"user_id": user_id, "hidden": False},
            retcols=("user_id", "device_id", "display_name"),
            desc="get_devices_by_user",
        )

        return {d["device_id"]: d for d in devices}
    async def get_devices_by_auth_provider_session_id(
        self, auth_provider_id: str, auth_provider_session_id: str
    ) -> List[Dict[str, Any]]:
        """Retrieve the list of devices associated with an SSO IdP session ID.

        Args:
            auth_provider_id: The SSO IdP ID as defined in the server config
            auth_provider_session_id: The session ID within the IdP
        Returns:
            A list of dicts containing the device_id and the user_id of each device
        """
        return await self.db_pool.simple_select_list(
            table="device_auth_providers",
            keyvalues={
                "auth_provider_id": auth_provider_id,
                "auth_provider_session_id": auth_provider_session_id,
            },
            retcols=("user_id", "device_id"),
            desc="get_devices_by_auth_provider_session_id",
        )
    @trace
    async def get_device_updates_by_remote(
        self, destination: str, from_stream_id: int, limit: int
    ) -> Tuple[int, List[Tuple[str, JsonDict]]]:
        """Get a stream of device updates to send to the given remote server.

        Args:
            destination: The host the device updates are intended for
            from_stream_id: The minimum stream_id to filter updates by, exclusive
            limit: Maximum number of device updates to return

        Returns:
            - The current stream id (i.e. the stream id of the last update included
              in the response); and
            - The list of updates, where each update is a pair of EDU type and
              EDU contents.
        """
        now_stream_id = self.get_device_stream_token()

        has_changed = self._device_list_federation_stream_cache.has_entity_changed(
            destination, int(from_stream_id)
        )
        if not has_changed:
            return now_stream_id, []

        updates = await self.db_pool.runInteraction(
            "get_device_updates_by_remote",
            self._get_device_updates_by_remote_txn,
            destination,
            from_stream_id,
            now_stream_id,
            limit,
        )

        # We need to ensure `updates` doesn't grow too big.
        # Currently: `len(updates) <= limit`.

        # Return an empty list if there are no updates
        if not updates:
            return now_stream_id, []

        if issue_8631_logger.isEnabledFor(logging.DEBUG):
            data = {(user, device): stream_id for user, device, stream_id, _ in updates}
            issue_8631_logger.debug(
                "device updates need to be sent to %s: %s", destination, data
            )

        # get the cross-signing keys of the users in the list, so that we can
        # determine which of the device changes were cross-signing keys
        users = {r[0] for r in updates}
        master_key_by_user = {}
        self_signing_key_by_user = {}
        for user in users:
            cross_signing_key = await self.get_e2e_cross_signing_key(user, "master")
            if cross_signing_key:
                key_id, verify_key = get_verify_key_from_cross_signing_key(
                    cross_signing_key
                )
                # verify_key is a VerifyKey from signedjson, which uses
                # .version to denote the portion of the key ID after the
                # algorithm and colon, which is the device ID
                master_key_by_user[user] = {
                    "key_info": cross_signing_key,
                    "device_id": verify_key.version,
                }

            cross_signing_key = await self.get_e2e_cross_signing_key(
                user, "self_signing"
            )
            if cross_signing_key:
                key_id, verify_key = get_verify_key_from_cross_signing_key(
                    cross_signing_key
                )
                self_signing_key_by_user[user] = {
                    "key_info": cross_signing_key,
                    "device_id": verify_key.version,
                }

        # Perform the equivalent of a GROUP BY
        #
        # Iterate through the updates list and copy non-duplicate
        # (user_id, device_id) entries into a map, with the value being
        # the max stream_id across each set of duplicate entries
        #
        # maps (user_id, device_id) -> (stream_id, opentracing_context)
        #
        # opentracing_context contains the opentracing metadata for the request
        # that created the poke
        #
        # The most recent request's opentracing_context is used as the
        # context which created the Edu.

        # This is the stream ID that we will return for the consumer to resume
        # following this stream later.
        last_processed_stream_id = from_stream_id

        query_map = {}
        cross_signing_keys_by_user = {}
        for user_id, device_id, update_stream_id, update_context in updates:
            # Calculate the remaining length budget.
            # Note that, for now, each entry in `cross_signing_keys_by_user`
            # gives rise to two device updates in the result, so those cost twice
            # as much (and are the whole reason we need to separately calculate
            # the budget; we know len(updates) <= limit otherwise!)
            # N.B. len() on dicts is cheap since they store their size.
            remaining_length_budget = limit - (
                len(query_map) + 2 * len(cross_signing_keys_by_user)
            )
            assert remaining_length_budget >= 0

            is_master_key_update = (
                user_id in master_key_by_user
                and device_id == master_key_by_user[user_id]["device_id"]
            )
            is_self_signing_key_update = (
                user_id in self_signing_key_by_user
                and device_id == self_signing_key_by_user[user_id]["device_id"]
            )

            is_cross_signing_key_update = (
                is_master_key_update or is_self_signing_key_update
            )

            if (
                is_cross_signing_key_update
                and user_id not in cross_signing_keys_by_user
            ):
                # This will give rise to 2 device updates.
                # If we don't have the budget, stop here!
                if remaining_length_budget < 2:
                    break

                if is_master_key_update:
                    result = cross_signing_keys_by_user.setdefault(user_id, {})
                    result["master_key"] = master_key_by_user[user_id]["key_info"]
                elif is_self_signing_key_update:
                    result = cross_signing_keys_by_user.setdefault(user_id, {})
                    result["self_signing_key"] = self_signing_key_by_user[user_id][
                        "key_info"
                    ]
            else:
                key = (user_id, device_id)

                if key not in query_map and remaining_length_budget < 1:
                    # We don't have space for a new entry
                    break

                previous_update_stream_id, _ = query_map.get(key, (0, None))

                if update_stream_id > previous_update_stream_id:
                    # FIXME If this overwrites an older update, this discards the
                    #  previous OpenTracing context.
                    #  It might make it harder to track down issues using OpenTracing.
                    #  If there's a good reason why it doesn't matter, a comment here
                    #  about that would not hurt.
                    query_map[key] = (update_stream_id, update_context)

            # As this update has been added to the response, advance the stream
            # position.
            last_processed_stream_id = update_stream_id

        # In the worst case scenario, each update is for a distinct user and is
        # added either to the query_map or to cross_signing_keys_by_user,
        # but not both:
        # len(query_map) + len(cross_signing_keys_by_user) <= len(updates) here,
        # so len(query_map) + len(cross_signing_keys_by_user) <= limit.

        results = await self._get_device_update_edus_by_remote(
            destination, from_stream_id, query_map
        )

        # len(results) <= len(query_map) here,
        # so len(results) + len(cross_signing_keys_by_user) <= limit.

        # Add the updated cross-signing keys to the results list
        for user_id, result in cross_signing_keys_by_user.items():
            result["user_id"] = user_id
            results.append(("m.signing_key_update", result))
            # also send the unstable version
            # FIXME: remove this when enough servers have upgraded
            #        and remove the length budgeting above.
            results.append(("org.matrix.signing_key_update", result))

        if issue_8631_logger.isEnabledFor(logging.DEBUG):
            for (user_id, edu) in results:
                issue_8631_logger.debug(
                    "device update to %s for %s from %s to %s: %s",
                    destination,
                    user_id,
                    from_stream_id,
                    last_processed_stream_id,
                    edu,
                )

        return last_processed_stream_id, results
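
    # Worked example of the length budgeting above (numbers illustrative):
    # with limit=10, after collecting 6 plain device updates and 1
    # cross-signing user, remaining_length_budget = 10 - (6 + 2 * 1) = 2,
    # i.e. exactly enough room for one more cross-signing user (cost 2) or
    # two more plain device updates (cost 1 each) before the loop breaks.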
    def _get_device_updates_by_remote_txn(
        self,
        txn: LoggingTransaction,
        destination: str,
        from_stream_id: int,
        now_stream_id: int,
        limit: int,
    ) -> List[Tuple[str, str, int, Optional[str]]]:
        """Return device update information for a given remote destination

        Args:
            txn: The transaction to execute
            destination: The host the device updates are intended for
            from_stream_id: The minimum stream_id to filter updates by, exclusive
            now_stream_id: The maximum stream_id to filter updates by, inclusive
            limit: Maximum number of device updates to return

        Returns:
            A list of device update tuples:
                - user_id
                - device_id
                - stream_id
                - opentracing_context
        """
        # get the list of device updates that need to be sent
        sql = """
            SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes
            WHERE destination = ? AND ? < stream_id AND stream_id <= ?
            ORDER BY stream_id
            LIMIT ?
        """
        txn.execute(sql, (destination, from_stream_id, now_stream_id, limit))

        return list(txn)
    async def _get_device_update_edus_by_remote(
        self,
        destination: str,
        from_stream_id: int,
        query_map: Dict[Tuple[str, str], Tuple[int, Optional[str]]],
    ) -> List[Tuple[str, dict]]:
        """Returns a list of device update EDUs as well as E2EE keys

        Args:
            destination: The host the device updates are intended for
            from_stream_id: The minimum stream_id to filter updates by, exclusive
            query_map: Dictionary mapping (user_id, device_id) to
                (update stream_id, the relevant json-encoded opentracing context)

        Returns:
            List of objects representing a device update EDU.

        Postconditions:
            The returned list has a length not exceeding that of the query_map:
                len(result) <= len(query_map)
        """
        devices = (
            await self.get_e2e_device_keys_and_signatures(
                # Because these are (user_id, device_id) tuples with all
                # device_ids not being None, the returned list's length will not
                # exceed that of query_map.
                query_map.keys(),
                include_all_devices=True,
                include_deleted_devices=True,
            )
            if query_map
            else {}
        )

        results = []
        for user_id, user_devices in devices.items():
            # The prev_id for the first row is always the last row before
            # `from_stream_id`
            prev_id = await self._get_last_device_update_for_remote_user(
                destination, user_id, from_stream_id
            )

            # make sure we go through the devices in stream order
            device_ids = sorted(
                user_devices.keys(),
                key=lambda i: query_map[(user_id, i)][0],
            )

            for device_id in device_ids:
                device = user_devices[device_id]
                stream_id, opentracing_context = query_map[(user_id, device_id)]
                result = {
                    "user_id": user_id,
                    "device_id": device_id,
                    "prev_id": [prev_id] if prev_id else [],
                    "stream_id": stream_id,
                    "org.matrix.opentracing_context": opentracing_context,
                }

                prev_id = stream_id

                if device is not None:
                    keys = device.keys
                    if keys:
                        result["keys"] = keys

                    device_display_name = device.display_name
                    if device_display_name:
                        result["device_display_name"] = device_display_name
                else:
                    result["deleted"] = True

                results.append(("m.device_list_update", result))

        return results
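
    # For reference, each m.device_list_update EDU content built above looks
    # roughly like the following (field values illustrative):
    #
    #   {
    #       "user_id": "@alice:example.com",
    #       "device_id": "ABCDEFGH",
    #       "prev_id": [52],
    #       "stream_id": 53,
    #       "org.matrix.opentracing_context": "{...}",
    #       "keys": {...},                    # present if the device has E2E keys
    #       "device_display_name": "phone",   # present if a display name is set
    #   }
    #
    # or, for a deleted device, the same identifiers plus "deleted": True.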
    async def _get_last_device_update_for_remote_user(
        self, destination: str, user_id: str, from_stream_id: int
    ) -> int:
        def f(txn):
            prev_sent_id_sql = """
                SELECT coalesce(max(stream_id), 0) as stream_id
                FROM device_lists_outbound_last_success
                WHERE destination = ? AND user_id = ? AND stream_id <= ?
            """
            txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
            rows = txn.fetchall()
            return rows[0][0]

        return await self.db_pool.runInteraction(
            "get_last_device_update_for_remote_user", f
        )
    async def mark_as_sent_devices_by_remote(
        self, destination: str, stream_id: int
    ) -> None:
        """Mark that updates have successfully been sent to the destination."""
        await self.db_pool.runInteraction(
            "mark_as_sent_devices_by_remote",
            self._mark_as_sent_devices_by_remote_txn,
            destination,
            stream_id,
        )

    def _mark_as_sent_devices_by_remote_txn(
        self, txn: LoggingTransaction, destination: str, stream_id: int
    ) -> None:
        # We update the device_lists_outbound_last_success with the successfully
        # poked users.
        sql = """
            SELECT user_id, coalesce(max(o.stream_id), 0)
            FROM device_lists_outbound_pokes as o
            WHERE destination = ? AND o.stream_id <= ?
            GROUP BY user_id
        """
        txn.execute(sql, (destination, stream_id))
        rows = txn.fetchall()

        self.db_pool.simple_upsert_many_txn(
            txn=txn,
            table="device_lists_outbound_last_success",
            key_names=("destination", "user_id"),
            key_values=((destination, user_id) for user_id, _ in rows),
            value_names=("stream_id",),
            value_values=((stream_id,) for _, stream_id in rows),
        )

        # Delete all sent outbound pokes
        sql = """
            DELETE FROM device_lists_outbound_pokes
            WHERE destination = ? AND stream_id <= ?
        """
        txn.execute(sql, (destination, stream_id))
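
    # Note: the upsert above records, per (destination, user_id), the highest
    # stream_id that was successfully sent. _get_last_device_update_for_remote_user
    # reads this back later to populate prev_id in subsequent delta updates.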
    async def add_user_signature_change_to_streams(
        self, from_user_id: str, user_ids: List[str]
    ) -> int:
        """Persist that a user has made new signatures

        Args:
            from_user_id: the user who made the signatures
            user_ids: the users who were signed

        Returns:
            The new stream ID.
        """
        async with self._device_list_id_gen.get_next() as stream_id:
            await self.db_pool.runInteraction(
                "add_user_sig_change_to_streams",
                self._add_user_signature_change_txn,
                from_user_id,
                user_ids,
                stream_id,
            )
        return stream_id

    def _add_user_signature_change_txn(
        self,
        txn: LoggingTransaction,
        from_user_id: str,
        user_ids: List[str],
        stream_id: int,
    ) -> None:
        txn.call_after(
            self._user_signature_stream_cache.entity_has_changed,
            from_user_id,
            stream_id,
        )
        self.db_pool.simple_insert_txn(
            txn,
            "user_signature_stream",
            values={
                "stream_id": stream_id,
                "from_user_id": from_user_id,
                "user_ids": json_encoder.encode(user_ids),
            },
        )
    @abc.abstractmethod
    def get_device_stream_token(self) -> int:
        """Get the current stream id from the _device_list_id_gen"""
        ...
    @trace
    async def get_user_devices_from_cache(
        self, query_list: List[Tuple[str, str]]
    ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:
        """Get the devices (and keys if any) for remote users from the cache.

        Args:
            query_list: List of (user_id, device_ids), if device_ids is
                falsey then return all device ids for that user.

        Returns:
            A tuple of (user_ids_not_in_cache, results_map), where
            user_ids_not_in_cache is a set of user_ids and results_map is a
            mapping of user_id -> device_id -> device_info.
        """
        user_ids = {user_id for user_id, _ in query_list}
        user_map = await self.get_device_list_last_stream_id_for_remotes(list(user_ids))

        # We go and check if any of the users need to have their device lists
        # resynced. If they do then we remove them from the cached list.
        users_needing_resync = await self.get_user_ids_requiring_device_list_resync(
            user_ids
        )
        user_ids_in_cache = {
            user_id for user_id, stream_id in user_map.items() if stream_id
        } - users_needing_resync
        user_ids_not_in_cache = user_ids - user_ids_in_cache

        results = {}
        for user_id, device_id in query_list:
            if user_id not in user_ids_in_cache:
                continue

            if device_id:
                device = await self._get_cached_user_device(user_id, device_id)
                results.setdefault(user_id, {})[device_id] = device
            else:
                results[user_id] = await self.get_cached_devices_for_user(user_id)

        set_tag("in_cache", results)
        set_tag("not_in_cache", user_ids_not_in_cache)

        return user_ids_not_in_cache, results
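
    # Illustrative return value: querying [("@bob:remote", "DEV1")] while
    # @bob:remote's device list is cached (and not marked for resync) yields
    #
    #   (set(), {"@bob:remote": {"DEV1": {...device info...}}})
    #
    # whereas an uncached user appears in the first element of the tuple and
    # is omitted from the results map.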
    @cached(num_args=2, tree=True)
    async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict:
        content = await self.db_pool.simple_select_one_onecol(
            table="device_lists_remote_cache",
            keyvalues={"user_id": user_id, "device_id": device_id},
            retcol="content",
            desc="_get_cached_user_device",
        )
        return db_to_json(content)

    @cached()
    async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]:
        devices = await self.db_pool.simple_select_list(
            table="device_lists_remote_cache",
            keyvalues={"user_id": user_id},
            retcols=("device_id", "content"),
            desc="get_cached_devices_for_user",
        )
        return {
            device["device_id"]: db_to_json(device["content"]) for device in devices
        }
    def get_cached_device_list_changes(
        self,
        from_key: int,
    ) -> Optional[Set[str]]:
        """Get set of users whose devices have changed since `from_key`, or None
        if that information is not in our cache.
        """
        return self._device_list_stream_cache.get_all_entities_changed(from_key)
    async def get_users_whose_devices_changed(
        self,
        from_key: int,
        user_ids: Optional[Iterable[str]] = None,
        to_key: Optional[int] = None,
    ) -> Set[str]:
        """Get set of users whose devices have changed since `from_key` that
        are in the given list of user_ids.

        Args:
            from_key: The minimum device lists stream token to query device list changes for,
                exclusive.
            user_ids: If provided, only check if these users have changed their device lists.
                Otherwise changes from all users are returned.
            to_key: The maximum device lists stream token to query device list changes for,
                inclusive.

        Returns:
            The set of user_ids whose devices have changed since `from_key` (exclusive)
            until `to_key` (inclusive).
        """
        # Get set of users who *may* have changed. Users not in the returned
        # list have definitely not changed.
        if user_ids is None:
            # Get set of all users that have had device list changes since 'from_key'
            user_ids_to_check = self._device_list_stream_cache.get_all_entities_changed(
                from_key
            )
        else:
            # The same as above, but filter results to only those users in 'user_ids'
            user_ids_to_check = self._device_list_stream_cache.get_entities_changed(
                user_ids, from_key
            )

        if not user_ids_to_check:
            return set()

        def _get_users_whose_devices_changed_txn(txn):
            changes = set()

            stream_id_where_clause = "stream_id > ?"
            sql_args = [from_key]

            if to_key:
                stream_id_where_clause += " AND stream_id <= ?"
                sql_args.append(to_key)

            sql = f"""
                SELECT DISTINCT user_id FROM device_lists_stream
                WHERE {stream_id_where_clause}
                AND
            """

            # Query device changes with a batch of users at a time
            for chunk in batch_iter(user_ids_to_check, 100):
                clause, args = make_in_list_sql_clause(
                    txn.database_engine, "user_id", chunk
                )
                txn.execute(sql + clause, sql_args + args)
                changes.update(user_id for user_id, in txn)

            return changes

        return await self.db_pool.runInteraction(
            "get_users_whose_devices_changed", _get_users_whose_devices_changed_txn
        )
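
    # Note: batch_iter above splits the candidate users into chunks of 100, so
    # checking 250 users issues three queries of the form
    #
    #   SELECT DISTINCT user_id FROM device_lists_stream
    #   WHERE stream_id > ? [AND stream_id <= ?] AND user_id IN (...)
    #
    # keeping each IN-list (or = ANY(?) array, depending on the engine) bounded.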
    async def get_users_whose_signatures_changed(
        self, user_id: str, from_key: int
    ) -> Set[str]:
        """Get the users who have new cross-signing signatures made by `user_id` since
        `from_key`.

        Args:
            user_id: the user who made the signatures
            from_key: The device lists stream token

        Returns:
            A set of user IDs with updated signatures.
        """
        if self._user_signature_stream_cache.has_entity_changed(user_id, from_key):
            sql = """
                SELECT DISTINCT user_ids FROM user_signature_stream
                WHERE from_user_id = ? AND stream_id > ?
            """
            rows = await self.db_pool.execute(
                "get_users_whose_signatures_changed", None, sql, user_id, from_key
            )
            return {user for row in rows for user in db_to_json(row[0])}
        else:
            return set()
    async def get_all_device_list_changes_for_remotes(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
        """Get updates for device lists replication stream.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """

        if last_id == current_id:
            return [], current_id, False

        def _get_all_device_list_changes_for_remotes(txn):
            # This query Does The Right Thing where it'll correctly apply the
            # bounds to the inner queries.
            sql = """
                SELECT stream_id, entity FROM (
                    SELECT stream_id, user_id AS entity FROM device_lists_stream
                    UNION ALL
                    SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes
                ) AS e
                WHERE ? < stream_id AND stream_id <= ?
                ORDER BY stream_id ASC
                LIMIT ?
            """

            txn.execute(sql, (last_id, current_id, limit))

            updates = [(row[0], row[1:]) for row in txn]
            limited = False
            upto_token = current_id
            if len(updates) >= limit:
                upto_token = updates[-1][0]
                limited = True

            return updates, upto_token, limited

        return await self.db_pool.runInteraction(
            "get_all_device_list_changes_for_remotes",
            _get_all_device_list_changes_for_remotes,
        )
    @cached(max_entries=10000)
    async def get_device_list_last_stream_id_for_remote(
        self, user_id: str
    ) -> Optional[str]:
        """Get the last stream_id we got for a user. May be None if we haven't
        got any information for them.
        """
        return await self.db_pool.simple_select_one_onecol(
            table="device_lists_remote_extremeties",
            keyvalues={"user_id": user_id},
            retcol="stream_id",
            desc="get_device_list_last_stream_id_for_remote",
            allow_none=True,
        )

    @cachedList(
        cached_method_name="get_device_list_last_stream_id_for_remote",
        list_name="user_ids",
    )
    async def get_device_list_last_stream_id_for_remotes(
        self, user_ids: Iterable[str]
    ) -> Dict[str, Optional[str]]:
        rows = await self.db_pool.simple_select_many_batch(
            table="device_lists_remote_extremeties",
            column="user_id",
            iterable=user_ids,
            retcols=("user_id", "stream_id"),
            desc="get_device_list_last_stream_id_for_remotes",
        )

        results = {user_id: None for user_id in user_ids}
        results.update({row["user_id"]: row["stream_id"] for row in rows})

        return results
    async def get_user_ids_requiring_device_list_resync(
        self,
        user_ids: Optional[Collection[str]] = None,
    ) -> Set[str]:
        """Given a list of remote users return the list of users that we
        should resync the device lists for. If None is given instead of a list,
        return every user that we should resync the device lists for.

        Returns:
            The IDs of users whose device lists need resync.
        """
        if user_ids:
            rows = await self.db_pool.simple_select_many_batch(
                table="device_lists_remote_resync",
                column="user_id",
                iterable=user_ids,
                retcols=("user_id",),
                desc="get_user_ids_requiring_device_list_resync_with_iterable",
            )
        else:
            rows = await self.db_pool.simple_select_list(
                table="device_lists_remote_resync",
                keyvalues=None,
                retcols=("user_id",),
                desc="get_user_ids_requiring_device_list_resync",
            )

        return {row["user_id"] for row in rows}
    async def mark_remote_user_device_cache_as_stale(self, user_id: str) -> None:
        """Records that the server has reason to believe the cache of the devices
        for the remote user is out of date.
        """
        await self.db_pool.simple_upsert(
            table="device_lists_remote_resync",
            keyvalues={"user_id": user_id},
            values={},
            insertion_values={"added_ts": self._clock.time_msec()},
            desc="mark_remote_user_device_cache_as_stale",
        )

    async def mark_remote_user_device_cache_as_valid(self, user_id: str) -> None:
        # Remove the database entry that says we need to resync devices, after a resync
        await self.db_pool.simple_delete(
            table="device_lists_remote_resync",
            keyvalues={"user_id": user_id},
            desc="mark_remote_user_device_cache_as_valid",
        )

    async def mark_remote_user_device_list_as_unsubscribed(self, user_id: str) -> None:
        """Mark that we no longer track device lists for a remote user."""

        def _mark_remote_user_device_list_as_unsubscribed_txn(txn):
            self.db_pool.simple_delete_txn(
                txn,
                table="device_lists_remote_extremeties",
                keyvalues={"user_id": user_id},
            )
            self._invalidate_cache_and_stream(
                txn, self.get_device_list_last_stream_id_for_remote, (user_id,)
            )

        await self.db_pool.runInteraction(
            "mark_remote_user_device_list_as_unsubscribed",
            _mark_remote_user_device_list_as_unsubscribed_txn,
        )
    async def get_dehydrated_device(
        self, user_id: str
    ) -> Optional[Tuple[str, JsonDict]]:
        """Retrieve the information for a dehydrated device.

        Args:
            user_id: the user whose dehydrated device we are looking for
        Returns:
            a tuple whose first item is the device ID, and the second item is
            the dehydrated device information
        """
        # FIXME: make sure device ID still exists in devices table
        row = await self.db_pool.simple_select_one(
            table="dehydrated_devices",
            keyvalues={"user_id": user_id},
            retcols=["device_id", "device_data"],
            allow_none=True,
        )
        return (
            (row["device_id"], json_decoder.decode(row["device_data"])) if row else None
        )

    def _store_dehydrated_device_txn(
        self, txn, user_id: str, device_id: str, device_data: str
    ) -> Optional[str]:
        old_device_id = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="dehydrated_devices",
            keyvalues={"user_id": user_id},
            retcol="device_id",
            allow_none=True,
        )
        self.db_pool.simple_upsert_txn(
            txn,
            table="dehydrated_devices",
            keyvalues={"user_id": user_id},
            values={"device_id": device_id, "device_data": device_data},
        )
        return old_device_id

    async def store_dehydrated_device(
        self, user_id: str, device_id: str, device_data: JsonDict
    ) -> Optional[str]:
        """Store a dehydrated device for a user.

        Args:
            user_id: the user that we are storing the device for
            device_id: the ID of the dehydrated device
            device_data: the dehydrated device information
        Returns:
            device id of the user's previous dehydrated device, if any
        """
        return await self.db_pool.runInteraction(
            "store_dehydrated_device_txn",
            self._store_dehydrated_device_txn,
            user_id,
            device_id,
            json_encoder.encode(device_data),
        )

    async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool:
        """Remove a dehydrated device.

        Args:
            user_id: the user that the dehydrated device belongs to
            device_id: the ID of the dehydrated device
        Returns:
            Whether a matching row was deleted.
        """
        count = await self.db_pool.simple_delete(
            "dehydrated_devices",
            {"user_id": user_id, "device_id": device_id},
            desc="remove_dehydrated_device",
        )
        return count >= 1
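
    # Background (informal note): "dehydrated" devices are the offline backup
    # devices proposed in MSC2697. At most one is stored per user here (the
    # dehydrated_devices upsert is keyed on user_id alone), which is why
    # _store_dehydrated_device_txn returns the device ID it displaced.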
    @wrap_as_background_process("prune_old_outbound_device_pokes")
    async def _prune_old_outbound_device_pokes(
        self, prune_age: int = 24 * 60 * 60 * 1000
    ) -> None:
        """Delete old entries out of the device_lists_outbound_pokes to ensure
        that we don't fill up due to dead servers.

        Normally, we try to send device updates as a delta since a previous known point:
        this is done by setting the prev_id in the m.device_list_update EDU. However,
        for that to work, we have to have a complete record of each change to
        each device, which can add up to quite a lot of data.

        An alternative mechanism is that, if the remote server sees that it has missed
        an entry in the stream_id sequence for a given user, it will request a full
        list of that user's devices. Hence, we can reduce the amount of data we have to
        store (and transmit in some future transaction), by clearing almost everything
        for a given destination out of the database, and having the remote server
        resync.

        All we need to do is make sure we keep at least one row for each
        (user, destination) pair, to remind us to send a m.device_list_update EDU for
        that user when the destination comes back. It doesn't matter which device
        we keep.
        """
        yesterday = self._clock.time_msec() - prune_age

        def _prune_txn(txn):
            # look for (user, destination) pairs which have an update older than
            # the cutoff.
            #
            # For each pair, we also need to know the most recent stream_id, and
            # an arbitrary device_id at that stream_id.
            select_sql = """
                SELECT
                    dlop1.destination,
                    dlop1.user_id,
                    MAX(dlop1.stream_id) AS stream_id,
                    (SELECT MIN(dlop2.device_id) AS device_id FROM
                        device_lists_outbound_pokes dlop2
                        WHERE dlop2.destination = dlop1.destination AND
                          dlop2.user_id=dlop1.user_id AND
                          dlop2.stream_id=MAX(dlop1.stream_id)
                    )
                FROM device_lists_outbound_pokes dlop1
                    GROUP BY destination, user_id
                    HAVING min(ts) < ? AND count(*) > 1
            """

            txn.execute(select_sql, (yesterday,))
            rows = txn.fetchall()

            if not rows:
                return

            logger.info(
                "Pruning old outbound device list updates for %i users/destinations: %s",
                len(rows),
                shortstr((row[0], row[1]) for row in rows),
            )

            # we want to keep the update with the highest stream_id for each user.
            #
            # there might be more than one update (with different device_ids) with the
            # same stream_id, so we also delete all but one rows with the max stream id.
            delete_sql = """
                DELETE FROM device_lists_outbound_pokes
                WHERE destination = ? AND user_id = ? AND (
                    stream_id < ? OR
                    (stream_id = ? AND device_id != ?)
                )
            """
            count = 0
            for (destination, user_id, stream_id, device_id) in rows:
                txn.execute(
                    delete_sql, (destination, user_id, stream_id, stream_id, device_id)
                )
                count += txn.rowcount

            # Since we've deleted unsent deltas, we need to remove the entry
            # of last successful sent so that the prev_ids are correctly set.
            sql = """
                DELETE FROM device_lists_outbound_last_success
                WHERE destination = ? AND user_id = ?
            """
            txn.execute_batch(sql, ((row[0], row[1]) for row in rows))

            logger.info("Pruned %d device list outbound pokes", count)

        await self.db_pool.runInteraction(
            "_prune_old_outbound_device_pokes",
            _prune_txn,
        )

class DeviceBackgroundUpdateStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_index_update(
            "device_lists_stream_idx",
            index_name="device_lists_stream_user_id",
            table="device_lists_stream",
            columns=["user_id", "device_id"],
        )

        # create a unique index on device_lists_remote_cache
        self.db_pool.updates.register_background_index_update(
            "device_lists_remote_cache_unique_idx",
            index_name="device_lists_remote_cache_unique_id",
            table="device_lists_remote_cache",
            columns=["user_id", "device_id"],
            unique=True,
        )

        # And one on device_lists_remote_extremeties
        self.db_pool.updates.register_background_index_update(
            "device_lists_remote_extremeties_unique_idx",
            index_name="device_lists_remote_extremeties_unique_idx",
            table="device_lists_remote_extremeties",
            columns=["user_id"],
            unique=True,
        )

        # once they complete, we can remove the old non-unique indexes.
        self.db_pool.updates.register_background_update_handler(
            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES,
            self._drop_device_list_streams_non_unique_indexes,
        )

        # clear out duplicate device list outbound pokes
        self.db_pool.updates.register_background_update_handler(
            BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES,
            self._remove_duplicate_outbound_pokes,
        )

        # a pair of background updates that were added during the 1.14 release cycle,
        # but replaced with 58/06dlols_unique_idx.py
        self.db_pool.updates.register_noop_background_update(
            "device_lists_outbound_last_success_unique_idx",
        )
        self.db_pool.updates.register_noop_background_update(
            "drop_device_lists_outbound_last_success_non_unique_idx",
        )
    async def _drop_device_list_streams_non_unique_indexes(self, progress, batch_size):
        def f(conn):
            txn = conn.cursor()
            txn.execute("DROP INDEX IF EXISTS device_lists_remote_cache_id")
            txn.execute("DROP INDEX IF EXISTS device_lists_remote_extremeties_id")
            txn.close()

        await self.db_pool.runWithConnection(f)
        await self.db_pool.updates._end_background_update(
            DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES
        )
        return 1
    async def _remove_duplicate_outbound_pokes(self, progress, batch_size):
        # for some reason, we have accumulated duplicate entries in
        # device_lists_outbound_pokes, which makes prune_outbound_device_list_pokes less
        # efficient.
        #
        # For each duplicate, we delete all the existing rows and put one back.

        KEY_COLS = ["stream_id", "destination", "user_id", "device_id"]
        last_row = progress.get(
            "last_row",
            {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""},
        )

        def _txn(txn):
            clause, args = make_tuple_comparison_clause(
                [(x, last_row[x]) for x in KEY_COLS]
            )
            sql = """
                SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts
                FROM device_lists_outbound_pokes
                WHERE %s
                GROUP BY %s
                HAVING count(*) > 1
                ORDER BY %s
                LIMIT ?
            """ % (
                clause,  # WHERE
                ",".join(KEY_COLS),  # GROUP BY
                ",".join(KEY_COLS),  # ORDER BY
            )
            txn.execute(sql, args + [batch_size])
            rows = self.db_pool.cursor_to_dict(txn)

            row = None
            for row in rows:
                self.db_pool.simple_delete_txn(
                    txn,
                    "device_lists_outbound_pokes",
                    {x: row[x] for x in KEY_COLS},
                )

                row["sent"] = False
                self.db_pool.simple_insert_txn(
                    txn,
                    "device_lists_outbound_pokes",
                    row,
                )

            if row:
                self.db_pool.updates._background_update_progress_txn(
                    txn,
                    BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES,
                    {"last_row": row},
                )

            return len(rows)

        rows = await self.db_pool.runInteraction(
            BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, _txn
        )

        if not rows:
            await self.db_pool.updates._end_background_update(
                BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES
            )

        return rows
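
    # Note: make_tuple_comparison_clause above generates a lexicographic
    # "keyset pagination" condition so the update can resume where it left
    # off between batches; conceptually it is equivalent to
    #
    #   (stream_id, destination, user_id, device_id) > (?, ?, ?, ?)
    #
    # with the placeholder values taken from progress["last_row"].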

class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # Map of (user_id, device_id) -> bool. If there is an entry that implies
        # the device exists.
        self.device_id_exists_cache = LruCache(
            cache_name="device_id_exists", max_size=10000
        )
    async def store_device(
        self,
        user_id: str,
        device_id: str,
        initial_device_display_name: Optional[str],
        auth_provider_id: Optional[str] = None,
        auth_provider_session_id: Optional[str] = None,
    ) -> bool:
        """Ensure the given device is known; add it to the store if not

        Args:
            user_id: id of user associated with the device
            device_id: id of device
            initial_device_display_name: initial displayname of the device.
                Ignored if device exists.
            auth_provider_id: The SSO IdP the user used, if any.
            auth_provider_session_id: The session ID (sid) from an OIDC login.

        Returns:
            Whether the device was inserted or an existing device existed with that ID.

        Raises:
            StoreError: if the device is already in use
        """
        key = (user_id, device_id)
        if self.device_id_exists_cache.get(key, None):
            return False

        try:
            inserted = await self.db_pool.simple_upsert(
                "devices",
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                },
                values={},
                insertion_values={
                    "display_name": initial_device_display_name,
                    "hidden": False,
                },
                desc="store_device",
            )
            if not inserted:
                # if the device already exists, check if it's a real device, or
                # if the device ID is reserved by something else
                hidden = await self.db_pool.simple_select_one_onecol(
                    "devices",
                    keyvalues={"user_id": user_id, "device_id": device_id},
                    retcol="hidden",
                )
                if hidden:
                    raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN)

            if auth_provider_id and auth_provider_session_id:
                await self.db_pool.simple_insert(
                    "device_auth_providers",
                    values={
                        "user_id": user_id,
                        "device_id": device_id,
                        "auth_provider_id": auth_provider_id,
                        "auth_provider_session_id": auth_provider_session_id,
                    },
                    desc="store_device_auth_provider",
                )

            self.device_id_exists_cache.set(key, True)
            return inserted
        except StoreError:
            raise
        except Exception as e:
            logger.error(
                "store_device with device_id=%s(%r) user_id=%s(%r)"
                " display_name=%s(%r) failed: %s",
                type(device_id).__name__,
                device_id,
                type(user_id).__name__,
                user_id,
                type(initial_device_display_name).__name__,
                initial_device_display_name,
                e,
            )
            raise StoreError(500, "Problem storing device.")
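
    # Illustrative call (values hypothetical): a login or registration flow
    # would do something like
    #
    #   inserted = await store.store_device("@alice:hs", "ABCDEFGH", "alice's phone")
    #
    # and treat inserted=False as "device already known" rather than an error;
    # only a hidden device with the same ID raises StoreError(400) above.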
    async def delete_device(self, user_id: str, device_id: str) -> None:
        """Delete a device and its device_inbox.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to delete
        """
        await self.delete_devices(user_id, [device_id])

    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
        """Deletes several devices.

        Args:
            user_id: The ID of the user which owns the devices
            device_ids: The IDs of the devices to delete
        """

        def _delete_devices_txn(txn: LoggingTransaction) -> None:
            self.db_pool.simple_delete_many_txn(
                txn,
                table="devices",
                column="device_id",
                values=device_ids,
                keyvalues={"user_id": user_id, "hidden": False},
            )

            self.db_pool.simple_delete_many_txn(
                txn,
                table="device_inbox",
                column="device_id",
                values=device_ids,
                keyvalues={"user_id": user_id},
            )

            self.db_pool.simple_delete_many_txn(
                txn,
                table="device_auth_providers",
                column="device_id",
                values=device_ids,
                keyvalues={"user_id": user_id},
            )

        await self.db_pool.runInteraction("delete_devices", _delete_devices_txn)
        for device_id in device_ids:
            self.device_id_exists_cache.invalidate((user_id, device_id))
    async def update_device(
        self, user_id: str, device_id: str, new_display_name: Optional[str] = None
    ) -> None:
        """Update a device. Only updates the device if it is not marked as
        hidden.

        Args:
            user_id: The ID of the user which owns the device
            device_id: The ID of the device to update
            new_display_name: new displayname for device; None to leave unchanged
        Raises:
            StoreError: if the device is not found
        """
        updates = {}
        if new_display_name is not None:
            updates["display_name"] = new_display_name
        if not updates:
            return None
        await self.db_pool.simple_update_one(
            table="devices",
            keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False},
            updatevalues=updates,
            desc="update_device",
        )

    async def update_remote_device_list_cache_entry(
        self, user_id: str, device_id: str, content: JsonDict, stream_id: int
    ) -> None:
        """Updates a single device in the cache of a remote user's device list.

        Note: assumes that we are the only thread that can be updating this user's
        device list.

        Args:
            user_id: User to update device list for
            device_id: ID of the device being updated
            content: new data on this device
            stream_id: the version of the device list
        """
        await self.db_pool.runInteraction(
            "update_remote_device_list_cache_entry",
            self._update_remote_device_list_cache_entry_txn,
            user_id,
            device_id,
            content,
            stream_id,
        )

    def _update_remote_device_list_cache_entry_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_id: str,
        content: JsonDict,
        stream_id: int,
    ) -> None:
        """Delete, update or insert a cache entry for this (user, device) pair."""
        if content.get("deleted"):
            self.db_pool.simple_delete_txn(
                txn,
                table="device_lists_remote_cache",
                keyvalues={"user_id": user_id, "device_id": device_id},
            )

            txn.call_after(
                self.device_id_exists_cache.invalidate, (user_id, device_id)
            )
        else:
            self.db_pool.simple_upsert_txn(
                txn,
                table="device_lists_remote_cache",
                keyvalues={"user_id": user_id, "device_id": device_id},
                values={"content": json_encoder.encode(content)},
                # we don't need to lock, because we assume we are the only thread
                # updating this user's devices.
                lock=False,
            )

        txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id))
        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
        txn.call_after(
            self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
        )

        self.db_pool.simple_upsert_txn(
            txn,
            # NB: "extremeties" is the spelling used by the database schema.
            table="device_lists_remote_extremeties",
            keyvalues={"user_id": user_id},
            values={"stream_id": stream_id},
            # again, we can assume we are the only thread updating this user's
            # extremity.
            lock=False,
        )
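
    # For illustration, `content` is a device object as supplied by the remote
    # server (the exact shape below is hypothetical): a normal update upserts
    # the cache row, while a tombstone clears it.
    #
    #     content = {"device_id": "DEV1", "device_display_name": "Alice's phone"}
    #     content = {"device_id": "DEV1", "deleted": True}  # removes the cache row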

    async def update_remote_device_list_cache(
        self, user_id: str, devices: List[dict], stream_id: int
    ) -> None:
        """Replace the entire cache of the remote user's devices.

        Note: assumes that we are the only thread that can be updating this user's
        device list.

        Args:
            user_id: User to update device list for
            devices: list of device objects supplied over federation
            stream_id: the version of the device list
        """
        await self.db_pool.runInteraction(
            "update_remote_device_list_cache",
            self._update_remote_device_list_cache_txn,
            user_id,
            devices,
            stream_id,
        )

    def _update_remote_device_list_cache_txn(
        self, txn: LoggingTransaction, user_id: str, devices: List[dict], stream_id: int
    ) -> None:
        """Replace the list of cached devices for this user with the given list."""
        self.db_pool.simple_delete_txn(
            txn, table="device_lists_remote_cache", keyvalues={"user_id": user_id}
        )

        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_remote_cache",
            keys=("user_id", "device_id", "content"),
            values=[
                (user_id, content["device_id"], json_encoder.encode(content))
                for content in devices
            ],
        )

        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
        txn.call_after(self._get_cached_user_device.invalidate, (user_id,))
        txn.call_after(
            self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
        )

        self.db_pool.simple_upsert_txn(
            txn,
            table="device_lists_remote_extremeties",
            keyvalues={"user_id": user_id},
            values={"stream_id": stream_id},
            # we don't need to lock, because we can assume we are the only thread
            # updating this user's extremity.
            lock=False,
        )
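
    # Resync sketch (hypothetical payload): `devices` is the complete device
    # list for the remote user, so the old cache rows are dropped wholesale
    # before re-inserting, and the stored extremity records which stream_id
    # the cache now reflects.
    #
    #     await store.update_remote_device_list_cache(
    #         "@bob:remote.example",
    #         [{"device_id": "DEV1"}, {"device_id": "DEV2"}],
    #         stream_id=42,
    #     )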

    async def add_device_change_to_streams(
        self,
        user_id: str,
        device_ids: Collection[str],
        room_ids: Collection[str],
    ) -> Optional[int]:
        """Persist that a user's devices have been updated, and which hosts
        (if any) should be poked.

        Args:
            user_id: The ID of the user whose device changed.
            device_ids: The IDs of any changed devices. If empty, this function will
                return None.
            room_ids: The rooms that the user is in

        Returns:
            The maximum stream ID of device list updates that were added to the
            database, or None if no updates were added.
        """
        if not device_ids:
            return None

        context = get_active_span_text_map()

        def add_device_changes_txn(
            txn: LoggingTransaction, stream_ids: List[int]
        ) -> None:
            self._add_device_change_to_stream_txn(
                txn,
                user_id,
                device_ids,
                stream_ids,
            )

            self._add_device_outbound_room_poke_txn(
                txn,
                user_id,
                device_ids,
                room_ids,
                stream_ids,
                context,
            )

        async with self._device_list_id_gen.get_next_mult(
            len(device_ids)
        ) as stream_ids:
            await self.db_pool.runInteraction(
                "add_device_change_to_stream",
                add_device_changes_txn,
                stream_ids,
            )

        return stream_ids[-1]
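
    # One stream ID is allocated per changed device, and `get_next_mult` hands
    # them out in increasing order, so `stream_ids[-1]` is the furthest
    # position a caller should advance its device-list token to. Sketch
    # (hypothetical IDs):
    #
    #     pos = await store.add_device_change_to_streams(
    #         "@alice:example.com", ["DEV1", "DEV2"], room_ids=room_ids
    #     )
    #     # pos is the stream ID of the last of the two updates (or None if
    #     # device_ids was empty).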

    def _add_device_change_to_stream_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_ids: Collection[str],
        stream_ids: List[int],
    ) -> None:
        txn.call_after(
            self._device_list_stream_cache.entity_has_changed,
            user_id,
            stream_ids[-1],
        )

        min_stream_id = stream_ids[0]

        # Delete older entries in the table, as we really only care about
        # when the latest change happened.
        txn.execute_batch(
            """
            DELETE FROM device_lists_stream
            WHERE user_id = ? AND device_id = ? AND stream_id < ?
            """,
            [(user_id, device_id, min_stream_id) for device_id in device_ids],
        )

        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_stream",
            keys=("stream_id", "user_id", "device_id"),
            values=[
                (stream_id, user_id, device_id)
                for stream_id, device_id in zip(stream_ids, device_ids)
            ],
        )
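
    # Worked example (hypothetical values): with device_ids=["D1", "D2"] and
    # freshly allocated stream_ids=[7, 8], any rows for D1/D2 below stream 7
    # are batch-deleted, then (7, "D1") and (8, "D2") are inserted, leaving a
    # single "latest change" row per device in `device_lists_stream`.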

    def _add_device_outbound_poke_to_stream_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_ids: Iterable[str],
        hosts: Collection[str],
        stream_ids: List[int],
        context: Optional[Dict[str, str]],
    ) -> None:
        for host in hosts:
            txn.call_after(
                self._device_list_federation_stream_cache.entity_has_changed,
                host,
                stream_ids[-1],
            )

        now = self._clock.time_msec()
        stream_id_iterator = iter(stream_ids)
        encoded_context = json_encoder.encode(context)
        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_outbound_pokes",
            keys=(
                "destination",
                "stream_id",
                "user_id",
                "device_id",
                "sent",
                "ts",
                "opentracing_context",
            ),
            values=[
                (
                    destination,
                    next(stream_id_iterator),
                    user_id,
                    device_id,
                    # Mark the poke as already sent unless the user is ours:
                    # we only need to send out updates for *our* users.
                    not self.hs.is_mine_id(user_id),
                    now,
                    encoded_context if whitelisted_homeserver(destination) else "{}",
                )
                for destination in hosts
                for device_id in device_ids
            ],
        )
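
    # Row-shape note: the insert above is the cartesian product of hosts x
    # device_ids, consuming one stream ID per row, so callers must allocate
    # len(hosts) * len(device_ids) stream IDs. The opentracing context is only
    # propagated to whitelisted destinations; everyone else gets "{}".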

    def _add_device_outbound_room_poke_txn(
        self,
        txn: LoggingTransaction,
        user_id: str,
        device_ids: Iterable[str],
        room_ids: Collection[str],
        stream_ids: List[int],
        context: Dict[str, str],
    ) -> None:
        """Record that the user has updated their device in each of the given rooms."""
        encoded_context = json_encoder.encode(context)

        # The `device_lists_changes_in_room.stream_id` column matches the
        # corresponding `stream_id` of the update in the `device_lists_stream`
        # table, i.e. all rows persisted for the same device update will have
        # the same `stream_id` (but different room IDs).
        self.db_pool.simple_insert_many_txn(
            txn,
            table="device_lists_changes_in_room",
            keys=(
                "user_id",
                "device_id",
                "room_id",
                "stream_id",
                "converted_to_destinations",
                "opentracing_context",
            ),
            values=[
                (
                    user_id,
                    device_id,
                    room_id,
                    stream_id,
                    False,
                    encoded_context,
                )
                for room_id in room_ids
                for device_id, stream_id in zip(device_ids, stream_ids)
            ],
        )
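
    # For illustration: a change to devices D1 (stream 7) and D2 (stream 8)
    # for a user in rooms R1 and R2 yields four rows, (D1, R1, 7), (D2, R1, 8),
    # (D1, R2, 7) and (D2, R2, 8), each starting out with
    # converted_to_destinations = False until the conversion pass below picks
    # them up.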

    async def get_uncoverted_outbound_room_pokes(
        self, limit: int = 10
    ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
        """Get device list changes by room that have not yet been handled and
        written to `device_lists_outbound_pokes`.

        Returns:
            A list of user ID, device ID, room ID, stream ID and optional
            opentracing context.
        """

        sql = """
            SELECT user_id, device_id, room_id, stream_id, opentracing_context
            FROM device_lists_changes_in_room
            WHERE NOT converted_to_destinations
            ORDER BY stream_id
            LIMIT ?
        """

        def get_uncoverted_outbound_room_pokes_txn(
            txn: LoggingTransaction,
        ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
            txn.execute(sql, (limit,))
            # `opentracing_context` is stored as a JSON string; decode it so
            # the rows actually match the annotated return type.
            return [
                (
                    user_id,
                    device_id,
                    room_id,
                    stream_id,
                    db_to_json(opentracing_context),
                )
                for user_id, device_id, room_id, stream_id, opentracing_context in txn
            ]

        return await self.db_pool.runInteraction(
            "get_uncoverted_outbound_room_pokes", get_uncoverted_outbound_room_pokes_txn
        )
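
    # ("uncoverted" is kept as spelled above to avoid breaking existing
    # callers.) Rows come back oldest-first thanks to the ORDER BY, so a
    # caller can drain the table in batches of `limit`, converting each row
    # via `add_device_list_outbound_pokes` below.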

    async def add_device_list_outbound_pokes(
        self,
        user_id: str,
        device_id: str,
        room_id: str,
        stream_id: int,
        hosts: Collection[str],
        context: Optional[Dict[str, str]],
    ) -> None:
        """Queue the device update to be sent to the given set of hosts,
        calculated from the room ID.

        Marks the associated row in `device_lists_changes_in_room` as handled.
        """

        def add_device_list_outbound_pokes_txn(
            txn: LoggingTransaction, stream_ids: List[int]
        ) -> None:
            if hosts:
                self._add_device_outbound_poke_to_stream_txn(
                    txn,
                    user_id=user_id,
                    device_ids=[device_id],
                    hosts=hosts,
                    stream_ids=stream_ids,
                    context=context,
                )

            self.db_pool.simple_update_txn(
                txn,
                table="device_lists_changes_in_room",
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                    "stream_id": stream_id,
                    "room_id": room_id,
                },
                updatevalues={"converted_to_destinations": True},
            )

        if not hosts:
            # If there are no hosts then we don't try and generate stream IDs.
            return await self.db_pool.runInteraction(
                "add_device_list_outbound_pokes",
                add_device_list_outbound_pokes_txn,
                [],
            )

        async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids:
            return await self.db_pool.runInteraction(
                "add_device_list_outbound_pokes",
                add_device_list_outbound_pokes_txn,
                stream_ids,
            )
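
    # End-to-end sketch of the conversion loop these two methods support
    # (hypothetical driver code; the real caller lives in the device handler):
    #
    #     rows = await store.get_uncoverted_outbound_room_pokes()
    #     for user_id, device_id, room_id, stream_id, context in rows:
    #         hosts = ...  # the remote servers joined to room_id
    #         await store.add_device_list_outbound_pokes(
    #             user_id, device_id, room_id, stream_id, hosts, context
    #         )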