# synapse/replication/http/_base.py
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import abc
  15. import logging
  16. import re
  17. import urllib.parse
  18. from inspect import signature
  19. from typing import TYPE_CHECKING, Any, Awaitable, Callable, ClassVar, Dict, List, Tuple
  20. from prometheus_client import Counter, Gauge
  21. from twisted.internet.error import ConnectError, DNSLookupError
  22. from twisted.web.server import Request
  23. from synapse.api.errors import HttpResponseException, SynapseError
  24. from synapse.http import RequestTimedOutError
  25. from synapse.http.server import HttpServer
  26. from synapse.http.servlet import parse_json_object_from_request
  27. from synapse.http.site import SynapseRequest
  28. from synapse.logging import opentracing
  29. from synapse.logging.opentracing import trace_with_opname
  30. from synapse.types import JsonDict
  31. from synapse.util.caches.response_cache import ResponseCache
  32. from synapse.util.cancellation import is_function_cancellable
  33. from synapse.util.stringutils import random_string
  34. if TYPE_CHECKING:
  35. from synapse.server import HomeServer
  36. logger = logging.getLogger(__name__)
  37. _pending_outgoing_requests = Gauge(
  38. "synapse_pending_outgoing_replication_requests",
  39. "Number of active outgoing replication requests, by replication method name",
  40. ["name"],
  41. )
  42. _outgoing_request_counter = Counter(
  43. "synapse_outgoing_replication_requests",
  44. "Number of outgoing replication requests, by replication method name and result",
  45. ["name", "code"],
  46. )
  47. _STREAM_POSITION_KEY = "_INT_STREAM_POS"
  48. class ReplicationEndpoint(metaclass=abc.ABCMeta):
  49. """Helper base class for defining new replication HTTP endpoints.
  50. This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
  51. (with a `/:txn_id` suffix for cached requests), where NAME is a name,
  52. PATH_ARGS are a tuple of parameters to be encoded in the URL.
  53. For example, if `NAME` is "send_event" and `PATH_ARGS` is `("event_id",)`,
  54. with `CACHE` set to true then this generates an endpoint:
  55. /_synapse/replication/send_event/:event_id/:txn_id
  56. For POST/PUT requests the payload is serialized to json and sent as the
  57. body, while for GET requests the payload is added as query parameters. See
  58. `_serialize_payload` for details.
  59. Incoming requests are handled by overriding `_handle_request`. Servers
  60. must call `register` to register the path with the HTTP server.
  61. Requests can be sent by calling the client returned by `make_client`.
  62. Requests are sent to master process by default, but can be sent to other
  63. named processes by specifying an `instance_name` keyword argument.
  64. Attributes:
  65. NAME (str): A name for the endpoint, added to the path as well as used
  66. in logging and metrics.
  67. PATH_ARGS (tuple[str]): A list of parameters to be added to the path.
  68. Adding parameters to the path (rather than payload) can make it
  69. easier to follow along in the log files.
  70. METHOD (str): The method of the HTTP request, defaults to POST. Can be
  71. one of POST, PUT or GET. If GET then the payload is sent as query
  72. parameters rather than a JSON body.
  73. CACHE (bool): Whether server should cache the result of the request/
  74. If true then transparently adds a txn_id to all requests, and
  75. `_handle_request` must return a Deferred.
  76. RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504
  77. is received.
  78. RETRY_ON_CONNECT_ERROR (bool): Whether or not to retry the request when
  79. a connection error is received.
  80. RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of attempts to retry when
  81. receiving connection errors, each will backoff exponentially longer.
  82. WAIT_FOR_STREAMS (bool): Whether to wait for replication streams to
  83. catch up before processing the request and/or response. Defaults to
  84. True.
  85. """
  86. NAME: str = abc.abstractproperty() # type: ignore
  87. PATH_ARGS: Tuple[str, ...] = abc.abstractproperty() # type: ignore
  88. METHOD = "POST"
  89. CACHE = True
  90. RETRY_ON_TIMEOUT = True
  91. RETRY_ON_CONNECT_ERROR = True
  92. RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5 # =63s (2^6-1)
  93. WAIT_FOR_STREAMS: ClassVar[bool] = True
  94. def __init__(self, hs: "HomeServer"):
  95. if self.CACHE:
  96. self.response_cache: ResponseCache[str] = ResponseCache(
  97. hs.get_clock(), "repl." + self.NAME, timeout_ms=30 * 60 * 1000
  98. )
  99. # We reserve `instance_name` as a parameter to sending requests, so we
  100. # assert here that sub classes don't try and use the name.
  101. assert (
  102. "instance_name" not in self.PATH_ARGS
  103. ), "`instance_name` is a reserved parameter name"
  104. assert (
  105. "instance_name"
  106. not in signature(self.__class__._serialize_payload).parameters
  107. ), "`instance_name` is a reserved parameter name"
  108. assert self.METHOD in ("PUT", "POST", "GET")
  109. self._replication_secret = None
  110. if hs.config.worker.worker_replication_secret:
  111. self._replication_secret = hs.config.worker.worker_replication_secret
  112. self._streams = hs.get_replication_command_handler().get_streams_to_replicate()
  113. self._replication = hs.get_replication_data_handler()
  114. self._instance_name = hs.get_instance_name()
  115. def _check_auth(self, request: Request) -> None:
  116. # Get the authorization header.
  117. auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
  118. if not auth_headers:
  119. raise RuntimeError("Missing Authorization header.")
  120. if len(auth_headers) > 1:
  121. raise RuntimeError("Too many Authorization headers.")
  122. parts = auth_headers[0].split(b" ")
  123. if parts[0] == b"Bearer" and len(parts) == 2:
  124. received_secret = parts[1].decode("ascii")
  125. if self._replication_secret == received_secret:
  126. # Success!
  127. return
  128. raise RuntimeError("Invalid Authorization header.")
  129. @abc.abstractmethod
  130. async def _serialize_payload(**kwargs) -> JsonDict:
  131. """Static method that is called when creating a request.
  132. Concrete implementations should have explicit parameters (rather than
  133. kwargs) so that an appropriate exception is raised if the client is
  134. called with unexpected parameters. All PATH_ARGS must appear in
  135. argument list.
  136. Returns:
  137. If POST/PUT request then dictionary must be JSON serialisable,
  138. otherwise must be appropriate for adding as query args.
  139. """
  140. return {}
  141. @abc.abstractmethod
  142. async def _handle_request(
  143. self, request: Request, content: JsonDict, **kwargs: Any
  144. ) -> Tuple[int, JsonDict]:
  145. """Handle incoming request.
  146. This is called with the request object and PATH_ARGS.
  147. Returns:
  148. HTTP status code and a JSON serialisable dict to be used as response
  149. body of request.
  150. """
  151. @classmethod
  152. def make_client(cls, hs: "HomeServer") -> Callable:
  153. """Create a client that makes requests.
  154. Returns a callable that accepts the same parameters as
  155. `_serialize_payload`, and also accepts an optional `instance_name`
  156. parameter to specify which instance to hit (the instance must be in
  157. the `instance_map` config).
  158. """
  159. clock = hs.get_clock()
  160. client = hs.get_simple_http_client()
  161. local_instance_name = hs.get_instance_name()
  162. # The value of these option should match the replication listener settings
  163. master_host = hs.config.worker.worker_replication_host
  164. master_port = hs.config.worker.worker_replication_http_port
  165. master_tls = hs.config.worker.worker_replication_http_tls
  166. instance_map = hs.config.worker.instance_map
  167. outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME)
  168. replication_secret = None
  169. if hs.config.worker.worker_replication_secret:
  170. replication_secret = hs.config.worker.worker_replication_secret.encode(
  171. "ascii"
  172. )
  173. @trace_with_opname("outgoing_replication_request")
  174. async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
  175. # We have to pull these out here to avoid circular dependencies...
  176. streams = hs.get_replication_command_handler().get_streams_to_replicate()
  177. replication = hs.get_replication_data_handler()
  178. with outgoing_gauge.track_inprogress():
  179. if instance_name == local_instance_name:
  180. raise Exception("Trying to send HTTP request to self")
  181. if instance_name == "master":
  182. host = master_host
  183. port = master_port
  184. tls = master_tls
  185. elif instance_name in instance_map:
  186. host = instance_map[instance_name].host
  187. port = instance_map[instance_name].port
  188. tls = instance_map[instance_name].tls
  189. else:
  190. raise Exception(
  191. "Instance %r not in 'instance_map' config" % (instance_name,)
  192. )
  193. data = await cls._serialize_payload(**kwargs)
  194. if cls.METHOD != "GET" and cls.WAIT_FOR_STREAMS:
  195. # Include the current stream positions that we write to. We
  196. # don't do this for GETs as they don't have a body, and we
  197. # generally assume that a GET won't rely on data we have
  198. # written.
  199. if _STREAM_POSITION_KEY in data:
  200. raise Exception(
  201. "data to send contains %r key", _STREAM_POSITION_KEY
  202. )
  203. data[_STREAM_POSITION_KEY] = {
  204. "streams": {
  205. stream.NAME: stream.current_token(local_instance_name)
  206. for stream in streams
  207. },
  208. "instance_name": local_instance_name,
  209. }
  210. url_args = [
  211. urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS
  212. ]
  213. if cls.CACHE:
  214. txn_id = random_string(10)
  215. url_args.append(txn_id)
  216. if cls.METHOD == "POST":
  217. request_func: Callable[
  218. ..., Awaitable[Any]
  219. ] = client.post_json_get_json
  220. elif cls.METHOD == "PUT":
  221. request_func = client.put_json
  222. elif cls.METHOD == "GET":
  223. request_func = client.get_json
  224. else:
  225. # We have already asserted in the constructor that a
  226. # compatible was picked, but lets be paranoid.
  227. raise Exception(
  228. "Unknown METHOD on %s replication endpoint" % (cls.NAME,)
  229. )
  230. # Here the protocol is hard coded to be http by default or https in case the replication
  231. # port is set to have tls true.
  232. scheme = "https" if tls else "http"
  233. uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
  234. scheme,
  235. host,
  236. port,
  237. cls.NAME,
  238. "/".join(url_args),
  239. )
  240. headers: Dict[bytes, List[bytes]] = {}
  241. # Add an authorization header, if configured.
  242. if replication_secret:
  243. headers[b"Authorization"] = [b"Bearer " + replication_secret]
  244. opentracing.inject_header_dict(headers, check_destination=False)
  245. try:
  246. # Keep track of attempts made so we can bail if we don't manage to
  247. # connect to the target after N tries.
  248. attempts = 0
  249. # We keep retrying the same request for timeouts. This is so that we
  250. # have a good idea that the request has either succeeded or failed
  251. # on the master, and so whether we should clean up or not.
  252. while True:
  253. try:
  254. result = await request_func(uri, data, headers=headers)
  255. break
  256. except RequestTimedOutError:
  257. if not cls.RETRY_ON_TIMEOUT:
  258. raise
  259. logger.warning("%s request timed out; retrying", cls.NAME)
  260. # If we timed out we probably don't need to worry about backing
  261. # off too much, but lets just wait a little anyway.
  262. await clock.sleep(1)
  263. except (ConnectError, DNSLookupError) as e:
  264. if not cls.RETRY_ON_CONNECT_ERROR:
  265. raise
  266. if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS:
  267. raise
  268. delay = 2**attempts
  269. logger.warning(
  270. "%s request connection failed; retrying in %ds: %r",
  271. cls.NAME,
  272. delay,
  273. e,
  274. )
  275. await clock.sleep(delay)
  276. attempts += 1
  277. except HttpResponseException as e:
  278. # We convert to SynapseError as we know that it was a SynapseError
  279. # on the main process that we should send to the client. (And
  280. # importantly, not stack traces everywhere)
  281. _outgoing_request_counter.labels(cls.NAME, e.code).inc()
  282. raise e.to_synapse_error()
  283. except Exception as e:
  284. _outgoing_request_counter.labels(cls.NAME, "ERR").inc()
  285. raise SynapseError(
  286. 502, f"Failed to talk to {instance_name} process"
  287. ) from e
  288. _outgoing_request_counter.labels(cls.NAME, 200).inc()
  289. # Wait on any streams that the remote may have written to.
  290. for stream_name, position in result.get(
  291. _STREAM_POSITION_KEY, {}
  292. ).items():
  293. await replication.wait_for_stream_position(
  294. instance_name=instance_name,
  295. stream_name=stream_name,
  296. position=position,
  297. raise_on_timeout=False,
  298. )
  299. return result
  300. return send_request
  301. def register(self, http_server: HttpServer) -> None:
  302. """Called by the server to register this as a handler to the
  303. appropriate path.
  304. """
  305. url_args = list(self.PATH_ARGS)
  306. method = self.METHOD
  307. if self.CACHE and is_function_cancellable(self._handle_request):
  308. raise Exception(
  309. f"{self.__class__.__name__} has been marked as cancellable, but CACHE "
  310. "is set. The cancellable flag would have no effect."
  311. )
  312. if self.CACHE:
  313. url_args.append("txn_id")
  314. args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
  315. pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))
  316. http_server.register_paths(
  317. method,
  318. [pattern],
  319. self._check_auth_and_handle,
  320. self.__class__.__name__,
  321. )
  322. async def _check_auth_and_handle(
  323. self, request: SynapseRequest, **kwargs: Any
  324. ) -> Tuple[int, JsonDict]:
  325. """Called on new incoming requests when caching is enabled. Checks
  326. if there is a cached response for the request and returns that,
  327. otherwise calls `_handle_request` and caches its response.
  328. """
  329. # We just use the txn_id here, but we probably also want to use the
  330. # other PATH_ARGS as well.
  331. # Check the authorization headers before handling the request.
  332. if self._replication_secret:
  333. self._check_auth(request)
  334. if self.METHOD == "GET":
  335. # GET APIs always have an empty body.
  336. content = {}
  337. else:
  338. content = parse_json_object_from_request(request)
  339. # Wait on any streams that the remote may have written to.
  340. for stream_name, position in content.get(_STREAM_POSITION_KEY, {"streams": {}})[
  341. "streams"
  342. ].items():
  343. await self._replication.wait_for_stream_position(
  344. instance_name=content[_STREAM_POSITION_KEY]["instance_name"],
  345. stream_name=stream_name,
  346. position=position,
  347. raise_on_timeout=False,
  348. )
  349. if self.CACHE:
  350. txn_id = kwargs.pop("txn_id")
  351. # We ignore the `@cancellable` flag, since cancellation wouldn't interupt
  352. # `_handle_request` and `ResponseCache` does not handle cancellation
  353. # correctly yet. In particular, there may be issues to do with logging
  354. # context lifetimes.
  355. code, response = await self.response_cache.wrap(
  356. txn_id, self._handle_request, request, content, **kwargs
  357. )
  358. else:
  359. # The `@cancellable` decorator may be applied to `_handle_request`. But we
  360. # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`,
  361. # so we have to set up the cancellable flag ourselves.
  362. request.is_render_cancellable = is_function_cancellable(
  363. self._handle_request
  364. )
  365. code, response = await self._handle_request(request, content, **kwargs)
  366. # Return streams we may have written to in the course of processing this
  367. # request.
  368. if _STREAM_POSITION_KEY in response:
  369. raise Exception("data to send contains %r key", _STREAM_POSITION_KEY)
  370. if self.WAIT_FOR_STREAMS:
  371. response[_STREAM_POSITION_KEY] = {
  372. stream.NAME: stream.current_token(self._instance_name)
  373. for stream in self._streams
  374. }
  375. return code, response