# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Thread-local-alike tracking of log contexts within synapse

This module provides objects and utilities for tracking contexts through
synapse code, so that log lines can include a request identifier, and so that
CPU and database activity can be accounted for against the request that caused
them.

See doc/log_contexts.rst for details on how this works.
"""

import inspect
import logging
import threading
import types
import warnings
from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union

import attr
from typing_extensions import Literal

from twisted.internet import defer, threads

if TYPE_CHECKING:
    from synapse.logging.scopecontextmanager import _LogContextScope

logger = logging.getLogger(__name__)

try:
    import resource

    # Python doesn't ship with a definition of RUSAGE_THREAD but it's defined
    # to be 1 on linux so we hard code it.
    RUSAGE_THREAD = 1

    # If the system doesn't support RUSAGE_THREAD then this should throw an
    # exception.
    resource.getrusage(RUSAGE_THREAD)

    is_thread_resource_usage_supported = True

    def get_thread_resource_usage() -> "Optional[resource._RUsage]":
        return resource.getrusage(RUSAGE_THREAD)

except Exception:
    # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
    # won't track resource usage.
    is_thread_resource_usage_supported = False

    def get_thread_resource_usage() -> "Optional[resource._RUsage]":
        return None


# a hook which can be set during testing to assert that we aren't abusing logcontexts.
def logcontext_error(msg: str):
    logger.warning(msg)


# get an id for the current thread.
#
# threading.get_ident doesn't actually return an OS-level tid, and annoyingly,
# on Linux it actually returns the same value either side of a fork() call. However
# we only fork in one place, so it's not worth the hoop-jumping to get a real tid.
#
get_thread_id = threading.get_ident

class ContextResourceUsage:
    """Object for tracking the resources used by a log context

    Attributes:
        ru_utime (float): user CPU time (in seconds)
        ru_stime (float): system CPU time (in seconds)
        db_txn_count (int): number of database transactions done
        db_sched_duration_sec (float): amount of time spent waiting for a
            database connection
        db_txn_duration_sec (float): amount of time spent doing database
            transactions (excluding scheduling time)
        evt_db_fetch_count (int): number of events requested from the database
    """

    __slots__ = [
        "ru_stime",
        "ru_utime",
        "db_txn_count",
        "db_txn_duration_sec",
        "db_sched_duration_sec",
        "evt_db_fetch_count",
    ]

    def __init__(self, copy_from: "Optional[ContextResourceUsage]" = None) -> None:
        """Create a new ContextResourceUsage

        Args:
            copy_from (ContextResourceUsage|None): if not None, an object to
                copy stats from
        """
        if copy_from is None:
            self.reset()
        else:
            # FIXME: mypy can't infer the types set via reset() above, so specify explicitly for now
            self.ru_utime = copy_from.ru_utime  # type: float
            self.ru_stime = copy_from.ru_stime  # type: float
            self.db_txn_count = copy_from.db_txn_count  # type: int

            self.db_txn_duration_sec = copy_from.db_txn_duration_sec  # type: float
            self.db_sched_duration_sec = copy_from.db_sched_duration_sec  # type: float
            self.evt_db_fetch_count = copy_from.evt_db_fetch_count  # type: int

    def copy(self) -> "ContextResourceUsage":
        return ContextResourceUsage(copy_from=self)

    def reset(self) -> None:
        self.ru_stime = 0.0
        self.ru_utime = 0.0
        self.db_txn_count = 0

        self.db_txn_duration_sec = 0.0
        self.db_sched_duration_sec = 0.0
        self.evt_db_fetch_count = 0

    def __repr__(self) -> str:
        return (
            "<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
            "db_txn_count='%r', db_txn_duration_sec='%r', "
            "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>"
        ) % (
            self.ru_stime,
            self.ru_utime,
            self.db_txn_count,
            self.db_txn_duration_sec,
            self.db_sched_duration_sec,
            self.evt_db_fetch_count,
        )

    def __iadd__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        """Add another ContextResourceUsage's stats to this one's.

        Args:
            other (ContextResourceUsage): the other resource usage object
        """
        self.ru_utime += other.ru_utime
        self.ru_stime += other.ru_stime
        self.db_txn_count += other.db_txn_count
        self.db_txn_duration_sec += other.db_txn_duration_sec
        self.db_sched_duration_sec += other.db_sched_duration_sec
        self.evt_db_fetch_count += other.evt_db_fetch_count
        return self

    def __isub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        self.ru_utime -= other.ru_utime
        self.ru_stime -= other.ru_stime
        self.db_txn_count -= other.db_txn_count
        self.db_txn_duration_sec -= other.db_txn_duration_sec
        self.db_sched_duration_sec -= other.db_sched_duration_sec
        self.evt_db_fetch_count -= other.evt_db_fetch_count
        return self

    def __add__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        res = ContextResourceUsage(copy_from=self)
        res += other
        return res

    def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
        res = ContextResourceUsage(copy_from=self)
        res -= other
        return res

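# Illustrative sketch (not part of the original module): ContextResourceUsage
# supports +/- and their in-place variants, so a usage delta between two
# snapshots can be computed directly. The _example_usage_delta name is made up.
def _example_usage_delta() -> ContextResourceUsage:
    snapshot = ContextResourceUsage()
    later = ContextResourceUsage(copy_from=snapshot)
    later.db_txn_count += 3
    later.db_txn_duration_sec += 0.25
    # subtraction returns a new object holding just the difference
    return later - snapshot
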
@attr.s(slots=True)
class ContextRequest:
    """
    A bundle of attributes from the SynapseRequest object.

    This exists to:

    * Avoid a cycle between LoggingContext and SynapseRequest.
    * Be a single variable that can be passed from parent LoggingContexts to
      their children.
    """

    request_id = attr.ib(type=str)
    ip_address = attr.ib(type=str)
    site_tag = attr.ib(type=str)
    requester = attr.ib(type=Optional[str])
    authenticated_entity = attr.ib(type=Optional[str])
    method = attr.ib(type=str)
    url = attr.ib(type=str)
    protocol = attr.ib(type=str)
    user_agent = attr.ib(type=str)


LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"]


class _Sentinel:
    """Sentinel to represent the root context"""

    __slots__ = ["previous_context", "finished", "request", "scope", "tag"]

    def __init__(self) -> None:
        # Minimal set for compatibility with LoggingContext
        self.previous_context = None
        self.finished = False
        self.request = None
        self.scope = None
        self.tag = None

    def __str__(self):
        return "sentinel"

    def copy_to(self, record):
        pass

    def start(self, rusage: "Optional[resource._RUsage]"):
        pass

    def stop(self, rusage: "Optional[resource._RUsage]"):
        pass

    def add_database_transaction(self, duration_sec):
        pass

    def add_database_scheduled(self, sched_sec):
        pass

    def record_event_fetch(self, event_count):
        pass

    def __bool__(self):
        return False


SENTINEL_CONTEXT = _Sentinel()

class LoggingContext:
    """Additional context for log formatting. Contexts are scoped within a
    "with" block.

    If a parent is given when creating a new context, then:
        - logging fields are copied from the parent to the new context on entry
        - when the new context exits, the cpu usage stats are copied from the
          child to the parent

    Args:
        name (str): Name for the context for debugging.
        parent_context (LoggingContext|None): The parent of the new context
    """

    __slots__ = [
        "previous_context",
        "name",
        "parent_context",
        "_resource_usage",
        "usage_start",
        "main_thread",
        "finished",
        "request",
        "tag",
        "scope",
    ]

    def __init__(
        self,
        name: str,
        parent_context: "Optional[LoggingContext]" = None,
        request: Optional[ContextRequest] = None,
    ) -> None:
        self.previous_context = current_context()
        self.name = name

        # track the resources used by this context so far
        self._resource_usage = ContextResourceUsage()

        # The thread resource usage when the logcontext became active. None
        # if the context is not currently active.
        self.usage_start = None  # type: Optional[resource._RUsage]

        self.main_thread = get_thread_id()
        self.request = None
        self.tag = ""
        self.scope = None  # type: Optional[_LogContextScope]

        # keep track of whether we have hit the __exit__ block for this context
        # (suggesting that the thing that created the context thinks it should
        # be finished, and that re-activating it would suggest an error).
        self.finished = False

        self.parent_context = parent_context

        if self.parent_context is not None:
            # we track the current request_id
            self.request = self.parent_context.request

            # we also track the current scope:
            self.scope = self.parent_context.scope

        if request is not None:
            # the request param overrides the request from the parent context
            self.request = request

    def __str__(self) -> str:
        return self.name

    @classmethod
    def current_context(cls) -> LoggingContextOrSentinel:
        """Get the current logging context from thread local storage

        This exists for backwards compatibility. ``current_context()`` should be
        called directly.

        Returns:
            LoggingContext: the current logging context
        """
        warnings.warn(
            "synapse.logging.context.LoggingContext.current_context() is deprecated "
            "in favor of synapse.logging.context.current_context().",
            DeprecationWarning,
            stacklevel=2,
        )
        return current_context()

    @classmethod
    def set_current_context(
        cls, context: LoggingContextOrSentinel
    ) -> LoggingContextOrSentinel:
        """Set the current logging context in thread local storage

        This exists for backwards compatibility. ``set_current_context()`` should be
        called directly.

        Args:
            context(LoggingContext): The context to activate.

        Returns:
            The context that was previously active
        """
        warnings.warn(
            "synapse.logging.context.LoggingContext.set_current_context() is deprecated "
            "in favor of synapse.logging.context.set_current_context().",
            DeprecationWarning,
            stacklevel=2,
        )
        return set_current_context(context)

    def __enter__(self) -> "LoggingContext":
        """Enters this logging context into thread local storage"""
        old_context = set_current_context(self)
        if self.previous_context != old_context:
            logcontext_error(
                "Expected previous context %r, found %r"
                % (
                    self.previous_context,
                    old_context,
                )
            )
        return self

    def __exit__(self, type, value, traceback) -> None:
        """Restore the logging context in thread local storage to the state it
        was before this context was entered.

        Returns:
            None to avoid suppressing any exceptions that were thrown.
        """
        current = set_current_context(self.previous_context)
        if current is not self:
            if current is SENTINEL_CONTEXT:
                logcontext_error("Expected logging context %s was lost" % (self,))
            else:
                logcontext_error(
                    "Expected logging context %s but found %s" % (self, current)
                )

        # the fact that we are here suggests that the caller thinks that everything
        # is done and dusted for this logcontext, and further activity will not get
        # recorded against the correct metrics.
        self.finished = True

    def copy_to(self, record) -> None:
        """Copy logging fields from this context to a log record or
        another LoggingContext
        """

        # we track the current request
        record.request = self.request

        # we also track the current scope:
        record.scope = self.scope

    def start(self, rusage: "Optional[resource._RUsage]") -> None:
        """
        Record that this logcontext is currently running.

        This should not be called directly: use set_current_context

        Args:
            rusage: the resources used by the current thread, at the point of
                switching to this logcontext. May be None if this platform doesn't
                support getrusage.
        """

        if get_thread_id() != self.main_thread:
            logcontext_error("Started logcontext %s on different thread" % (self,))
            return

        if self.finished:
            logcontext_error("Re-starting finished log context %s" % (self,))

        # If we haven't already started record the thread resource usage so
        # far
        if self.usage_start:
            logcontext_error("Re-starting already-active log context %s" % (self,))
        else:
            self.usage_start = rusage

    def stop(self, rusage: "Optional[resource._RUsage]") -> None:
        """
        Record that this logcontext is no longer running.

        This should not be called directly: use set_current_context

        Args:
            rusage: the resources used by the current thread, at the point of
                switching away from this logcontext. May be None if this platform
                doesn't support getrusage.
        """

        try:
            if get_thread_id() != self.main_thread:
                logcontext_error("Stopped logcontext %s on different thread" % (self,))
                return

            if not rusage:
                return

            # Record the cpu used since we started
            if not self.usage_start:
                logcontext_error(
                    "Called stop on logcontext %s without recording a start rusage"
                    % (self,)
                )
                return

            utime_delta, stime_delta = self._get_cputime(rusage)
            self.add_cputime(utime_delta, stime_delta)
        finally:
            self.usage_start = None

    def get_resource_usage(self) -> ContextResourceUsage:
        """Get resources used by this logcontext so far.

        Returns:
            ContextResourceUsage: a *copy* of the object tracking resource
                usage so far
        """
        # we always return a copy, for consistency
        res = self._resource_usage.copy()

        # If we are on the correct thread and we're currently running then we
        # can include resource usage so far.
        is_main_thread = get_thread_id() == self.main_thread
        if self.usage_start and is_main_thread:
            rusage = get_thread_resource_usage()
            assert rusage is not None
            utime_delta, stime_delta = self._get_cputime(rusage)
            res.ru_utime += utime_delta
            res.ru_stime += stime_delta

        return res

    def _get_cputime(self, current: "resource._RUsage") -> Tuple[float, float]:
        """Get the cpu usage time between start() and the given rusage

        Args:
            current: the current resource usage

        Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
        """
        assert self.usage_start is not None

        utime_delta = current.ru_utime - self.usage_start.ru_utime
        stime_delta = current.ru_stime - self.usage_start.ru_stime

        # sanity check
        if utime_delta < 0:
            logger.error(
                "utime went backwards! %f < %f",
                current.ru_utime,
                self.usage_start.ru_utime,
            )
            utime_delta = 0

        if stime_delta < 0:
            logger.error(
                "stime went backwards! %f < %f",
                current.ru_stime,
                self.usage_start.ru_stime,
            )
            stime_delta = 0

        return utime_delta, stime_delta

    def add_cputime(self, utime_delta: float, stime_delta: float) -> None:
        """Update the CPU time usage of this context (and any parents, recursively).

        Args:
            utime_delta: additional user time, in seconds, spent in this context.
            stime_delta: additional system time, in seconds, spent in this context.
        """
        self._resource_usage.ru_utime += utime_delta
        self._resource_usage.ru_stime += stime_delta
        if self.parent_context:
            self.parent_context.add_cputime(utime_delta, stime_delta)

    def add_database_transaction(self, duration_sec: float) -> None:
        """Record the use of a database transaction and the length of time it took.

        Args:
            duration_sec: The number of seconds the database transaction took.
        """
        if duration_sec < 0:
            raise ValueError("DB txn time can only be non-negative")
        self._resource_usage.db_txn_count += 1
        self._resource_usage.db_txn_duration_sec += duration_sec
        if self.parent_context:
            self.parent_context.add_database_transaction(duration_sec)

    def add_database_scheduled(self, sched_sec: float) -> None:
        """Record a use of the database pool

        Args:
            sched_sec: number of seconds it took us to get a connection
        """
        if sched_sec < 0:
            raise ValueError("DB scheduling time can only be non-negative")
        self._resource_usage.db_sched_duration_sec += sched_sec
        if self.parent_context:
            self.parent_context.add_database_scheduled(sched_sec)

    def record_event_fetch(self, event_count: int) -> None:
        """Record a number of events being fetched from the db

        Args:
            event_count: number of events being fetched
        """
        self._resource_usage.evt_db_fetch_count += event_count
        if self.parent_context:
            self.parent_context.record_event_fetch(event_count)

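# Illustrative sketch (not part of the original module): a LoggingContext is
# normally entered with a `with` block so that log lines and CPU/db metrics are
# attributed to the named request. The _example_* name and the request values
# below are made up; real callers build the ContextRequest from a SynapseRequest.
def _example_logging_context_usage() -> ContextResourceUsage:
    request = ContextRequest(
        request_id="GET-123",
        ip_address="127.0.0.1",
        site_tag="client",
        requester="@alice:example.com",
        authenticated_entity="@alice:example.com",
        method="GET",
        url="/_matrix/client/r0/sync",
        protocol="1.1",
        user_agent="ExampleClient/1.0",
    )
    with LoggingContext("GET-123", request=request) as ctx:
        # if LoggingContextFilter is installed, this record carries
        # request == "GET-123" plus the HTTP fields above
        logger.info("handling request")
        return ctx.get_resource_usage()
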
class LoggingContextFilter(logging.Filter):
    """Logging filter that adds values from the current logging context to each
    record.
    """

    def __init__(self, request: str = ""):
        self._default_request = request

    def filter(self, record: logging.LogRecord) -> Literal[True]:
        """Add each field from the logging context to the record.

        Returns:
            True to include the record in the log output.
        """
        context = current_context()
        record.request = self._default_request  # type: ignore

        # context should never be None, but if it somehow ends up being, then
        # we end up in a death spiral of infinite loops, so let's check, for
        # robustness' sake.
        if context is not None:
            # Logging is interested in the request ID. Note that for backwards
            # compatibility this is stored as the "request" on the record.
            record.request = str(context)  # type: ignore

            # Add some data from the HTTP request.
            request = context.request
            if request is None:
                return True

            record.ip_address = request.ip_address  # type: ignore
            record.site_tag = request.site_tag  # type: ignore
            record.requester = request.requester  # type: ignore
            record.authenticated_entity = request.authenticated_entity  # type: ignore
            record.method = request.method  # type: ignore
            record.url = request.url  # type: ignore
            record.protocol = request.protocol  # type: ignore
            record.user_agent = request.user_agent  # type: ignore

        return True

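# Illustrative sketch (not part of the original module): attaching the filter to
# a handler so that "%(request)s" can be used in the log format. The handler and
# format string here are made-up examples, not Synapse's actual logging config.
def _example_install_filter() -> logging.Handler:
    handler = logging.StreamHandler()
    handler.addFilter(LoggingContextFilter(request="unknown"))
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(request)s - %(levelname)s - %(message)s")
    )
    logging.getLogger().addHandler(handler)
    return handler
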
class PreserveLoggingContext:
    """Context manager which replaces the logging context

    The previous logging context is restored on exit."""

    __slots__ = ["_old_context", "_new_context"]

    def __init__(
        self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT
    ) -> None:
        self._new_context = new_context

    def __enter__(self) -> None:
        self._old_context = set_current_context(self._new_context)

    def __exit__(self, type, value, traceback) -> None:
        context = set_current_context(self._old_context)

        if context != self._new_context:
            if not context:
                logcontext_error(
                    "Expected logging context %s was lost" % (self._new_context,)
                )
            else:
                logcontext_error(
                    "Expected logging context %s but found %s"
                    % (
                        self._new_context,
                        context,
                    )
                )

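# Illustrative sketch (not part of the original module): PreserveLoggingContext
# with its default argument drops to the sentinel context and restores the
# caller's context on exit. The _example_* name is made up.
def _example_preserve_logging_context() -> None:
    with LoggingContext("outer"):
        with PreserveLoggingContext():
            # the sentinel (root) context is active here
            assert current_context() is SENTINEL_CONTEXT
        # "outer" is active again
        assert str(current_context()) == "outer"
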
_thread_local = threading.local()
_thread_local.current_context = SENTINEL_CONTEXT


def current_context() -> LoggingContextOrSentinel:
    """Get the current logging context from thread local storage"""
    return getattr(_thread_local, "current_context", SENTINEL_CONTEXT)


def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
    """Set the current logging context in thread local storage

    Args:
        context(LoggingContext): The context to activate.

    Returns:
        The context that was previously active
    """
    # everything blows up if we allow current_context to be set to None, so sanity-check
    # that now.
    if context is None:
        raise TypeError("'context' argument may not be None")

    current = current_context()

    if current is not context:
        rusage = get_thread_resource_usage()
        current.stop(rusage)
        _thread_local.current_context = context
        context.start(rusage)

    return current

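# Illustrative sketch (not part of the original module): set_current_context()
# returns the previously active context, which is how callers such as
# PreserveLoggingContext and run_in_background restore it afterwards.
def _example_swap_context(new_context: LoggingContextOrSentinel) -> None:
    old_context = set_current_context(new_context)
    try:
        pass  # ... work done here is attributed to `new_context` ...
    finally:
        set_current_context(old_context)
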
def nested_logging_context(suffix: str) -> LoggingContext:
    """Creates a new logging context as a child of another.

    The nested logging context will have a 'name' made up of the parent context's
    name, plus the given suffix.

    CPU/db usage stats will be added to the parent context's on exit.

    Normal usage looks like:

        with nested_logging_context(suffix):
            # ... do stuff

    Args:
        suffix: suffix to add to the parent context's 'name'.

    Returns:
        LoggingContext: new logging context.
    """
    curr_context = current_context()
    if not curr_context:
        logger.warning(
            "Starting nested logging context from sentinel context: metrics will be lost"
        )
        parent_context = None
    else:
        assert isinstance(curr_context, LoggingContext)
        parent_context = curr_context
    prefix = str(curr_context)
    return LoggingContext(
        prefix + "-" + suffix,
        parent_context=parent_context,
    )

def preserve_fn(f):
    """Function decorator which wraps the function with run_in_background"""

    def g(*args, **kwargs):
        return run_in_background(f, *args, **kwargs)

    return g

def run_in_background(f, *args, **kwargs) -> defer.Deferred:
    """Calls a function, ensuring that the current context is restored after
    return from the function, and that the sentinel context is set once the
    deferred returned by the function completes.

    Useful for wrapping functions that return a deferred or coroutine, which you don't
    yield or await on (for instance because you want to pass it to
    deferred.gatherResults()).

    If f returns a Coroutine object, it will be wrapped into a Deferred (which will have
    the side effect of executing the coroutine).

    Note that if you completely discard the result, you should make sure that
    `f` doesn't raise any deferred exceptions, otherwise a scary-looking
    CRITICAL error about an unhandled error will be logged without much
    indication about where it came from.
    """
    current = current_context()
    try:
        res = f(*args, **kwargs)
    except Exception:
        # the assumption here is that the caller doesn't want to be disturbed
        # by synchronous exceptions, so let's turn them into Failures.
        return defer.fail()

    if isinstance(res, types.CoroutineType):
        res = defer.ensureDeferred(res)

    # At this point we should have a Deferred, if not then f was a synchronous
    # function, wrap it in a Deferred for consistency.
    if not isinstance(res, defer.Deferred):
        return defer.succeed(res)

    if res.called and not res.paused:
        # The function should have maintained the logcontext, so we can
        # optimise out the messing about
        return res

    # The function may have reset the context before returning, so
    # we need to restore it now.
    ctx = set_current_context(current)

    # The original context will be restored when the deferred
    # completes, but there is nothing waiting for it, so it will
    # get leaked into the reactor or some other function which
    # wasn't expecting it. We therefore need to reset the context
    # here.
    #
    # (If this feels asymmetric, consider it this way: we are
    # effectively forking a new thread of execution. We are
    # probably currently within a ``with LoggingContext()`` block,
    # which is supposed to have a single entry and exit point. But
    # by spawning off another deferred, we are effectively
    # adding a new exit point.)
    res.addBoth(_set_context_cb, ctx)
    return res

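# Illustrative sketch (not part of the original module): a typical
# fire-and-forget use of run_in_background. The _example_* names are made up.
async def _example_background_task() -> None:
    logger.info("running in the background")


def _example_fire_and_forget() -> None:
    with LoggingContext("main"):
        # run_in_background starts the coroutine in the current ("main")
        # context and makes sure "main" is active again when it returns, so it
        # is safe to call without awaiting the result.
        run_in_background(_example_background_task)
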
def make_deferred_yieldable(deferred):
    """Given a deferred (or coroutine), make it follow the Synapse logcontext
    rules:

    If the deferred has completed (or is not actually a Deferred), essentially
    does nothing (just returns another completed deferred with the
    result/failure).

    If the deferred has not yet completed, resets the logcontext before
    returning a deferred. Then, when the deferred completes, restores the
    current logcontext before running callbacks/errbacks.

    (This is more-or-less the opposite operation to run_in_background.)
    """
    if inspect.isawaitable(deferred):
        # If we're given a coroutine we convert it to a deferred so that we
        # run it and find out whether it immediately finishes; if it does then we
        # don't need to fiddle with log contexts at all and can return
        # immediately.
        deferred = defer.ensureDeferred(deferred)

    if not isinstance(deferred, defer.Deferred):
        return deferred

    if deferred.called and not deferred.paused:
        # it looks like this deferred is ready to run any callbacks we give it
        # immediately. We may as well optimise out the logcontext faffery.
        return deferred

    # ok, we can't be sure that a yield won't block, so let's reset the
    # logcontext, and add a callback to the deferred to restore it.
    prev_context = set_current_context(SENTINEL_CONTEXT)
    deferred.addBoth(_set_context_cb, prev_context)
    return deferred

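# Illustrative sketch (not part of the original module): awaiting a "raw"
# Deferred that does not follow the Synapse logcontext rules by wrapping it in
# make_deferred_yieldable. `get_raw_deferred` is a stand-in parameter for any
# such Twisted-style call; the _example_* name is made up.
async def _example_await_raw_deferred(get_raw_deferred) -> None:
    with LoggingContext("fetch"):
        result = await make_deferred_yieldable(get_raw_deferred())
        # the "fetch" logcontext is active again here
        logger.info("got %r", result)
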
ResultT = TypeVar("ResultT")


def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
    """A callback function which just sets the logging context"""
    set_current_context(context)
    return result

def defer_to_thread(reactor, f, *args, **kwargs):
    """
    Calls the function `f` using a thread from the reactor's default threadpool and
    returns the result as a Deferred.

    Creates a new logcontext for `f`, which is created as a child of the current
    logcontext (so its CPU usage metrics will get attributed to the current
    logcontext). `f` should preserve the logcontext it is given.

    The result deferred follows the Synapse logcontext rules: you should `yield`
    on it.

    Args:
        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
            the Deferred will be invoked, and whose threadpool we should use for the
            function.

            Normally this will be hs.get_reactor().

        f (callable): The function to call.

        args: positional arguments to pass to f.

        kwargs: keyword arguments to pass to f.

    Returns:
        Deferred: A Deferred which fires a callback with the result of `f`, or an
            errback if `f` throws an exception.
    """
    return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)

def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
    """
    A wrapper for twisted.internet.threads.deferToThreadPool, which handles
    logcontexts correctly.

    Calls the function `f` using a thread from the given threadpool and returns
    the result as a Deferred.

    Creates a new logcontext for `f`, which is created as a child of the current
    logcontext (so its CPU usage metrics will get attributed to the current
    logcontext). `f` should preserve the logcontext it is given.

    The result deferred follows the Synapse logcontext rules: you should `yield`
    on it.

    Args:
        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
            the Deferred will be invoked. Normally this will be hs.get_reactor().

        threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
            running `f`. Normally this will be hs.get_reactor().getThreadPool().

        f (callable): The function to call.

        args: positional arguments to pass to f.

        kwargs: keyword arguments to pass to f.

    Returns:
        Deferred: A Deferred which fires a callback with the result of `f`, or an
            errback if `f` throws an exception.
    """
    curr_context = current_context()
    if not curr_context:
        logger.warning(
            "Calling defer_to_threadpool from sentinel context: metrics will be lost"
        )
        parent_context = None
    else:
        assert isinstance(curr_context, LoggingContext)
        parent_context = curr_context

    def g():
        with LoggingContext(str(curr_context), parent_context=parent_context):
            return f(*args, **kwargs)

    return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))

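# Illustrative sketch (not part of the original module): running a blocking
# function on the reactor's threadpool. `reactor` would normally be
# hs.get_reactor(); the _example_* names and the blocking work are made up.
async def _example_defer_to_thread(reactor) -> int:
    def _blocking_work() -> int:
        # stands in for blocking I/O or heavy computation that must not run on
        # the reactor thread
        return sum(range(1000))

    # the returned Deferred already follows the logcontext rules, so it can be
    # awaited directly
    return await defer_to_thread(reactor, _blocking_work)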