configure_workers_and_start.py 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083
  1. #!/usr/bin/env python
  2. # Copyright 2021 The Matrix.org Foundation C.I.C.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # This script reads environment variables and generates a shared Synapse worker,
  16. # nginx and supervisord configs depending on the workers requested.
  17. #
  18. # The environment variables it reads are:
  19. # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
  20. # * SYNAPSE_REPORT_STATS: Whether to report stats.
  21. # * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
  22. # below. Leave empty for no workers. Add a ':' and a number at the end to
  23. # multiply that worker. Append multiple worker types with '+' to merge the
  24. # worker types into a single worker. Add a name and a '=' to the front of a
  25. # worker type to give this instance a name in logs and nginx.
  26. # Examples:
  27. # SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
  28. # SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
  29. # SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
  30. # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
  31. # will be treated as Application Service registration files.
  32. # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
  33. # * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
  34. # Nginx will be configured to serve TLS on port 8448.
  35. # * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
  36. # only intended for usage in Complement at the moment.
  37. # No stability guarantees are provided.
  38. # * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
  39. # log level. INFO is the default.
  40. # * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
  41. # regardless of the SYNAPSE_LOG_LEVEL setting.
  42. # * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
  43. # for testing.
  44. #
  45. # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
  46. # in the project's README), this script may be run multiple times, and functionality should
  47. # continue to work if so.
  48. import os
  49. import platform
  50. import re
  51. import subprocess
  52. import sys
  53. from collections import defaultdict
  54. from itertools import chain
  55. from pathlib import Path
  56. from typing import (
  57. Any,
  58. Dict,
  59. List,
  60. Mapping,
  61. MutableMapping,
  62. NoReturn,
  63. Optional,
  64. Set,
  65. SupportsIndex,
  66. )
  67. import yaml
  68. from jinja2 import Environment, FileSystemLoader
# Port the main Synapse process serves HTTP on; exported to start.py via the
# SYNAPSE_HTTP_PORT environment variable (see generate_base_homeserver_config).
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
# Instance name of the main process as referenced in worker configuration.
MAIN_PROCESS_INSTANCE_NAME = "main"
# Address/port the main process uses for its HTTP replication listener.
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
MAIN_PROCESS_REPLICATION_PORT = 9093
# Obviously, these would only be used with the UNIX socket option
MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"

# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
# during processing with the name of the worker.
WORKER_PLACEHOLDER_NAME = "placeholder_name"
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
# Watching /_matrix/federation needs a "federation" listener
# Watching /_matrix/media and related needs a "media" listener
# Stream Writers require "client" and "replication" listeners because they
# have to attach by instance_map to the master process and have client endpoints.
#
# Template schema for each worker type:
#   "app": the Synapse application module to run (all generic_worker nowadays).
#   "listener_resources": listener resource names this worker serves.
#   "endpoint_patterns": endpoint regexes handled by this worker (used when
#       generating the reverse-proxy routing; see NGINX_LOCATION_CONFIG_BLOCK).
#   "shared_extra_conf": options merged into the shared homeserver config.
#       Values equal to WORKER_PLACEHOLDER_NAME are later replaced with the
#       worker instance's actual name.
#   "worker_extra_conf": literal extra configuration for the worker itself.
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
    "pusher": {
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "user_dir": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
        ],
        "shared_extra_conf": {
            "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
        },
        "worker_extra_conf": "",
    },
    "media_repository": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["media"],
        "endpoint_patterns": [
            "^/_matrix/media/",
            "^/_synapse/admin/v1/purge_media_cache$",
            "^/_synapse/admin/v1/room/.*/media.*$",
            "^/_synapse/admin/v1/user/.*/media.*$",
            "^/_synapse/admin/v1/media/.*$",
            "^/_synapse/admin/v1/quarantine_media/.*$",
        ],
        # The first configured media worker will run the media background jobs
        "shared_extra_conf": {
            "enable_media_repo": False,
            "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
        },
        "worker_extra_conf": "enable_media_repo: true",
    },
    "appservice": {
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
        "shared_extra_conf": {
            "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
        },
        "worker_extra_conf": "",
    },
    "federation_sender": {
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "synchrotron": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(v2_alpha|r0|v3)/sync$",
            "^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$",
            "^/_matrix/client/(api/v1|r0|v3)/initialSync$",
            "^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "client_reader": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
            "^/_matrix/client/v1/rooms/.*/hierarchy$",
            "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
            "^/_matrix/client/v1/rooms/.*/threads$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
            "^/_matrix/client/versions$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
            "^/_matrix/client/(r0|v3|unstable)/register$",
            "^/_matrix/client/(r0|v3|unstable)/register/available$",
            "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
            "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
            "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
            "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
            "^/_matrix/client/(r0|v3|unstable)/password_policy$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
            "^/_matrix/client/(r0|v3|unstable)/capabilities$",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "federation_reader": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["federation"],
        "endpoint_patterns": [
            "^/_matrix/federation/(v1|v2)/event/",
            "^/_matrix/federation/(v1|v2)/state/",
            "^/_matrix/federation/(v1|v2)/state_ids/",
            "^/_matrix/federation/(v1|v2)/backfill/",
            "^/_matrix/federation/(v1|v2)/get_missing_events/",
            "^/_matrix/federation/(v1|v2)/publicRooms",
            "^/_matrix/federation/(v1|v2)/query/",
            "^/_matrix/federation/(v1|v2)/make_join/",
            "^/_matrix/federation/(v1|v2)/make_leave/",
            "^/_matrix/federation/(v1|v2)/send_join/",
            "^/_matrix/federation/(v1|v2)/send_leave/",
            "^/_matrix/federation/(v1|v2)/invite/",
            "^/_matrix/federation/(v1|v2)/query_auth/",
            "^/_matrix/federation/(v1|v2)/event_auth/",
            "^/_matrix/federation/v1/timestamp_to_event/",
            "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
            "^/_matrix/federation/(v1|v2)/user/devices/",
            "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
            "^/_matrix/key/v2/query",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "federation_inbound": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["federation"],
        "endpoint_patterns": ["/_matrix/federation/(v1|v2)/send/"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "event_persister": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["replication"],
        "endpoint_patterns": [],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "background_worker": {
        "app": "synapse.app.generic_worker",
        "listener_resources": [],
        "endpoint_patterns": [],
        # This worker cannot be sharded. Therefore, there should only ever be one
        # background worker. This is enforced for the safety of your database.
        "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
        "worker_extra_conf": "",
    },
    "event_creator": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "frontend_proxy": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "account_data": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": [
            "^/_matrix/client/(r0|v3|unstable)/.*/tags",
            "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "presence": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "receipts": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": [
            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "to_device": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "typing": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client", "replication"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
}
# Templates for sections that may be inserted multiple times in config files.
# These are filled in with str.format(); literal nginx braces are escaped as {{ }}.
NGINX_LOCATION_CONFIG_BLOCK = """
    location ~* {endpoint} {{
        proxy_pass {upstream};
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host;
    }}
"""

NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
{body}
}}
"""
  313. # Utility functions
  314. def log(txt: str) -> None:
  315. print(txt)
  316. def error(txt: str) -> NoReturn:
  317. print(txt, file=sys.stderr)
  318. sys.exit(2)
  319. def flush_buffers() -> None:
  320. sys.stdout.flush()
  321. sys.stderr.flush()
  322. def convert(src: str, dst: str, **template_vars: object) -> None:
  323. """Generate a file from a template
  324. Args:
  325. src: Path to the input file.
  326. dst: Path to write to.
  327. template_vars: The arguments to replace placeholder variables in the template with.
  328. """
  329. # Read the template file
  330. # We disable autoescape to prevent template variables from being escaped,
  331. # as we're not using HTML.
  332. env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False)
  333. template = env.get_template(os.path.basename(src))
  334. # Generate a string from the template.
  335. rendered = template.render(**template_vars)
  336. # Write the generated contents to a file
  337. #
  338. # We use append mode in case the files have already been written to by something else
  339. # (for instance, as part of the instructions in a dockerfile).
  340. with open(dst, "a") as outfile:
  341. # In case the existing file doesn't end with a newline
  342. outfile.write("\n")
  343. outfile.write(rendered)
  344. def add_worker_roles_to_shared_config(
  345. shared_config: dict,
  346. worker_types_set: Set[str],
  347. worker_name: str,
  348. worker_port: int,
  349. ) -> None:
  350. """Given a dictionary representing a config file shared across all workers,
  351. append appropriate worker information to it for the current worker_type instance.
  352. Args:
  353. shared_config: The config dict that all worker instances share (after being
  354. converted to YAML)
  355. worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
  356. This list can be a single worker type or multiple.
  357. worker_name: The name of the worker instance.
  358. worker_port: The HTTP replication port that the worker instance is listening on.
  359. """
  360. # The instance_map config field marks the workers that write to various replication
  361. # streams
  362. instance_map = shared_config.setdefault("instance_map", {})
  363. # This is a list of the stream_writers that there can be only one of. Events can be
  364. # sharded, and therefore doesn't belong here.
  365. singular_stream_writers = [
  366. "account_data",
  367. "presence",
  368. "receipts",
  369. "to_device",
  370. "typing",
  371. ]
  372. # Worker-type specific sharding config. Now a single worker can fulfill multiple
  373. # roles, check each.
  374. if "pusher" in worker_types_set:
  375. shared_config.setdefault("pusher_instances", []).append(worker_name)
  376. if "federation_sender" in worker_types_set:
  377. shared_config.setdefault("federation_sender_instances", []).append(worker_name)
  378. if "event_persister" in worker_types_set:
  379. # Event persisters write to the events stream, so we need to update
  380. # the list of event stream writers
  381. shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
  382. worker_name
  383. )
  384. # Map of stream writer instance names to host/ports combos
  385. if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
  386. instance_map[worker_name] = {
  387. "path": f"/run/worker.{worker_port}",
  388. }
  389. else:
  390. instance_map[worker_name] = {
  391. "host": "localhost",
  392. "port": worker_port,
  393. }
  394. # Update the list of stream writers. It's convenient that the name of the worker
  395. # type is the same as the stream to write. Iterate over the whole list in case there
  396. # is more than one.
  397. for worker in worker_types_set:
  398. if worker in singular_stream_writers:
  399. shared_config.setdefault("stream_writers", {}).setdefault(
  400. worker, []
  401. ).append(worker_name)
  402. # Map of stream writer instance names to host/ports combos
  403. # For now, all stream writers need http replication ports
  404. if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
  405. instance_map[worker_name] = {
  406. "path": f"/run/worker.{worker_port}",
  407. }
  408. else:
  409. instance_map[worker_name] = {
  410. "host": "localhost",
  411. "port": worker_port,
  412. }
  413. def merge_worker_template_configs(
  414. existing_dict: Optional[Dict[str, Any]],
  415. to_be_merged_dict: Dict[str, Any],
  416. ) -> Dict[str, Any]:
  417. """When given an existing dict of worker template configuration consisting with both
  418. dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
  419. return new dict.
  420. Args:
  421. existing_dict: Either an existing worker template or a fresh blank one.
  422. to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
  423. existing_dict.
  424. Returns: The newly merged together dict values.
  425. """
  426. new_dict: Dict[str, Any] = {}
  427. if not existing_dict:
  428. # It doesn't exist yet, just use the new dict(but take a copy not a reference)
  429. new_dict = to_be_merged_dict.copy()
  430. else:
  431. for i in to_be_merged_dict.keys():
  432. if (i == "endpoint_patterns") or (i == "listener_resources"):
  433. # merge the two lists, remove duplicates
  434. new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
  435. elif i == "shared_extra_conf":
  436. # merge dictionary's, the worker name will be replaced later
  437. new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
  438. elif i == "worker_extra_conf":
  439. # There is only one worker type that has a 'worker_extra_conf' and it is
  440. # the media_repo. Since duplicate worker types on the same worker don't
  441. # work, this is fine.
  442. new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
  443. else:
  444. # Everything else should be identical, like "app", which only works
  445. # because all apps are now generic_workers.
  446. new_dict[i] = to_be_merged_dict[i]
  447. return new_dict
def insert_worker_name_for_worker_config(
    existing_dict: Dict[str, Any], worker_name: str
) -> Dict[str, Any]:
    """Insert a given worker name into the worker's configuration dict.

    Replaces any "shared_extra_conf" value equal to WORKER_PLACEHOLDER_NAME
    with the actual worker instance name.

    Args:
        existing_dict: The worker_config dict that is imported into shared_config.
        worker_name: The name of the worker to insert.
    Returns: Copy of the dict with newly inserted worker name
    """
    # NOTE(review): .copy() is *shallow* — the nested "shared_extra_conf" dict
    # is still shared with the source template, so the in-place replacement
    # below also mutates that template. Presumably this is relied upon (later
    # instances then see the first instance's name instead of the
    # placeholder) — confirm before "fixing" with a deep copy.
    dict_to_edit = existing_dict.copy()
    for k, v in dict_to_edit["shared_extra_conf"].items():
        # Only proceed if it's the placeholder name string
        if v == WORKER_PLACEHOLDER_NAME:
            dict_to_edit["shared_extra_conf"][k] = worker_name
    return dict_to_edit
  463. def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
  464. """
  465. Apply multiplier(if found) by returning a new expanded list with some basic error
  466. checking.
  467. Args:
  468. worker_types: The unprocessed List of requested workers
  469. Returns:
  470. A new list with all requested workers expanded.
  471. """
  472. # Checking performed:
  473. # 1. if worker:2 or more is declared, it will create additional workers up to number
  474. # 2. if worker:1, it will create a single copy of this worker as if no number was
  475. # given
  476. # 3. if worker:0 is declared, this worker will be ignored. This is to allow for
  477. # scripting and automated expansion and is intended behaviour.
  478. # 4. if worker:NaN or is a negative number, it will error and log it.
  479. new_worker_types = []
  480. for worker_type in worker_types:
  481. if ":" in worker_type:
  482. worker_type_components = split_and_strip_string(worker_type, ":", 1)
  483. worker_count = 0
  484. # Should only be 2 components, a type of worker(s) and an integer as a
  485. # string. Cast the number as an int then it can be used as a counter.
  486. try:
  487. worker_count = int(worker_type_components[1])
  488. except ValueError:
  489. error(
  490. f"Bad number in worker count for '{worker_type}': "
  491. f"'{worker_type_components[1]}' is not an integer"
  492. )
  493. # As long as there are more than 0, we add one to the list to make below.
  494. for _ in range(worker_count):
  495. new_worker_types.append(worker_type_components[0])
  496. else:
  497. # If it's not a real worker_type, it will error out later.
  498. new_worker_types.append(worker_type)
  499. return new_worker_types
  500. def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
  501. """Helper to check to make sure worker types that cannot have multiples do not.
  502. Args:
  503. worker_type: The type of worker to check against.
  504. Returns: True if allowed, False if not
  505. """
  506. return worker_type not in [
  507. "background_worker",
  508. "account_data",
  509. "presence",
  510. "receipts",
  511. "typing",
  512. "to_device",
  513. ]
  514. def split_and_strip_string(
  515. given_string: str, split_char: str, max_split: SupportsIndex = -1
  516. ) -> List[str]:
  517. """
  518. Helper to split a string on split_char and strip whitespace from each end of each
  519. element.
  520. Args:
  521. given_string: The string to split
  522. split_char: The character to split the string on
  523. max_split: kwarg for split() to limit how many times the split() happens
  524. Returns:
  525. A List of strings
  526. """
  527. # Removes whitespace from ends of result strings before adding to list. Allow for
  528. # overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
  529. return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
def generate_base_homeserver_config() -> None:
    """Starts Synapse and generates a basic homeserver config, which will later be
    modified for worker support.

    Raises: CalledProcessError if calling start.py returned a non-zero exit code.
    """
    # start.py already does this for us, so just call that.
    # note that this script is copied into the official, monolith dockerfile
    # Tell start.py which port the main process should listen on for HTTP.
    os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
    subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def parse_worker_types(
    requested_worker_types: List[str],
) -> Dict[str, Set[str]]:
    """Read the desired list of requested workers and prepare the data for use in
    generating worker config files while also checking for potential gotchas.

    Args:
        requested_worker_types: The list formed from the split environment variable
            containing the unprocessed requests for workers.

    Returns: A dict of worker names to set of worker types. Format:
        {'worker_name':
            {'worker_type', 'worker_type2'}
        }
    """
    # A counter of worker_base_name -> int. Used for determining the name for a given
    # worker when generating its config file, as each worker's name is just
    # worker_base_name followed by instance number
    worker_base_name_counter: Dict[str, int] = defaultdict(int)

    # Similar to above, but more finely grained. This is used to determine we don't have
    # more than a single worker for cases where multiples would be bad(e.g. presence).
    worker_type_shard_counter: Dict[str, int] = defaultdict(int)

    # The final result of all this processing
    dict_to_return: Dict[str, Set[str]] = {}

    # Handle any multipliers requested for given workers.
    multiple_processed_worker_types = apply_requested_multiplier_for_worker(
        requested_worker_types
    )

    # Process each worker_type_string
    # Examples of expected formats:
    #   - requested_name=type1+type2+type3
    #   - synchrotron
    #   - event_creator+event_persister
    for worker_type_string in multiple_processed_worker_types:
        # First, if a name is requested, use that — otherwise generate one.
        worker_base_name: str = ""
        if "=" in worker_type_string:
            # Split on "=", remove extra whitespace from ends then make list
            worker_type_split = split_and_strip_string(worker_type_string, "=")
            if len(worker_type_split) > 2:
                error(
                    "There should only be one '=' in the worker type string. "
                    f"Please fix: {worker_type_string}"
                )

            # Assign the name
            worker_base_name = worker_type_split[0]

            if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
                # Apply a fairly narrow regex to the worker names. Some characters
                # aren't safe for use in file paths or nginx configurations.
                # Don't allow to end with a number because we'll add a number
                # ourselves in a moment.
                error(
                    "Invalid worker name; please choose a name consisting of "
                    "alphanumeric letters, _ + -, but not ending with a digit: "
                    f"{worker_base_name!r}"
                )

            # Continue processing the remainder of the worker_type string
            # with the name override removed.
            worker_type_string = worker_type_split[1]

        # Split the worker_type_string on "+", remove whitespace from ends then make
        # the list a set so it's deduplicated.
        worker_types_set: Set[str] = set(
            split_and_strip_string(worker_type_string, "+")
        )

        if not worker_base_name:
            # No base name specified: generate one deterministically from set of
            # types
            worker_base_name = "+".join(sorted(worker_types_set))

        # At this point, we have:
        #   worker_base_name which is the name for the worker, without counter.
        #   worker_types_set which is the set of worker types for this worker.

        # Validate worker_type and make sure we don't allow sharding for a worker type
        # that doesn't support it. Will error and stop if it is a problem,
        # e.g. 'background_worker'.
        for worker_type in worker_types_set:
            # Verify this is a real defined worker type. If it's not, stop everything so
            # it can be fixed.
            if worker_type not in WORKERS_CONFIG:
                error(
                    f"{worker_type} is an unknown worker type! Was found in "
                    f"'{worker_type_string}'. Please fix!"
                )

            if worker_type in worker_type_shard_counter:
                if not is_sharding_allowed_for_worker_type(worker_type):
                    error(
                        f"There can be only a single worker with {worker_type} "
                        "type. Please recount and remove."
                    )
            # Not in shard counter, must not have seen it yet, add it.
            worker_type_shard_counter[worker_type] += 1

        # Generate the number for the worker using incrementing counter
        worker_base_name_counter[worker_base_name] += 1
        worker_number = worker_base_name_counter[worker_base_name]
        # The final instance name is the base name plus its 1-based counter.
        worker_name = f"{worker_base_name}{worker_number}"

        if worker_number > 1:
            # If this isn't the first worker, check that we don't have a confusing
            # mixture of worker types with the same base name.
            first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
            if first_worker_with_base_name != worker_types_set:
                error(
                    f"Can not use worker_name: '{worker_name}' for worker_type(s): "
                    f"{worker_types_set!r}. It is already in use by "
                    f"worker_type(s): {first_worker_with_base_name!r}"
                )

        dict_to_return[worker_name] = worker_types_set

    return dict_to_return
def generate_worker_files(
    environ: Mapping[str, str],
    config_path: str,
    data_dir: str,
    requested_worker_types: Dict[str, Set[str]],
) -> None:
    """Read the desired workers(if any) that is passed in and generate shared
    homeserver, nginx and supervisord configs.

    Args:
        environ: os.environ instance.
        config_path: The location of the generated Synapse main worker config file.
        data_dir: The location of the synapse data directory. Where log and
            user-facing config files live.
        requested_worker_types: A Dict containing requested workers in the format of
            {'worker_name1': {'worker_type', ...}}
    """
    # Note that yaml cares about indentation, so care should be taken to insert lines
    # into files at the correct indentation below.

    # Convenience helper for if using unix sockets instead of host:port
    # NOTE(review): any non-empty string value enables this, not just "1"/"true".
    using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)

    # First read the original config file and extract the listeners block. Then we'll
    # add another listener for replication. Later we'll write out the result to the
    # shared config file.
    listeners: List[Any]
    if using_unix_sockets:
        listeners = [
            {
                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
                "type": "http",
                "resources": [{"names": ["replication"]}],
            }
        ]
    else:
        listeners = [
            {
                "port": MAIN_PROCESS_REPLICATION_PORT,
                "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
                "type": "http",
                "resources": [{"names": ["replication"]}],
            }
        ]
    # Prepend our replication listener to any listeners the admin already
    # configured in the main homeserver config.
    with open(config_path) as file_stream:
        original_config = yaml.safe_load(file_stream)
        original_listeners = original_config.get("listeners")
        if original_listeners:
            listeners += original_listeners

    # The shared homeserver config. The contents of which will be inserted into the
    # base shared worker jinja2 template. This config file will be passed to all
    # workers, included Synapse's main process. It is intended mainly for disabling
    # functionality when certain workers are spun up, and adding a replication listener.
    shared_config: Dict[str, Any] = {"listeners": listeners}

    # List of dicts that describe workers.
    # We pass this to the Supervisor template later to generate the appropriate
    # program blocks.
    worker_descriptors: List[Dict[str, Any]] = []

    # Upstreams for load-balancing purposes. This dict takes the form of the worker
    # type to the ports of each worker. For example:
    # {
    #   worker_type: {1234, 1235, ...}}
    # }
    # and will be used to construct 'upstream' nginx directives.
    nginx_upstreams: Dict[str, Set[int]] = {}

    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
    # will be placed after the proxy_pass directive. The main benefit to representing
    # this data as a dict over a str is that we can easily deduplicate endpoints
    # across multiple instances of the same worker. The final rendering will be combined
    # with nginx_upstreams and placed in /etc/nginx/conf.d.
    nginx_locations: Dict[str, str] = {}

    # Create the worker configuration directory if it doesn't already exist
    os.makedirs("/conf/workers", exist_ok=True)

    # Start worker ports from this arbitrary port
    worker_port = 18009

    # A list of internal endpoints to healthcheck, starting with the main process
    # which exists even if no workers do.
    # This list ends up being part of the command line to curl, (curl added support for
    # Unix sockets in version 7.40).
    if using_unix_sockets:
        healthcheck_urls = [
            f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
            # The scheme and hostname from the following URL are ignored.
            # The only thing that matters is the path `/health`
            "http://localhost/health"
        ]
    else:
        healthcheck_urls = ["http://localhost:8080/health"]

    # Get the set of all worker types that we have configured
    all_worker_types_in_use = set(chain(*requested_worker_types.values()))
    # Map locations to upstreams (corresponding to worker types) in Nginx
    # but only if we use the appropriate worker type
    for worker_type in all_worker_types_in_use:
        for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
            nginx_locations[endpoint_pattern] = f"http://{worker_type}"

    # For each worker type specified by the user, create config values and write it's
    # yaml config file
    for worker_name, worker_types_set in requested_worker_types.items():
        # The collected and processed data will live here.
        worker_config: Dict[str, Any] = {}

        # Merge all worker config templates for this worker into a single config
        for worker_type in worker_types_set:
            # Shallow copy: merge_worker_template_configs is assumed not to
            # mutate nested values of the template — TODO confirm.
            copy_of_template_config = WORKERS_CONFIG[worker_type].copy()

            # Merge worker type template configuration data. It's a combination of lists
            # and dicts, so use this helper.
            worker_config = merge_worker_template_configs(
                worker_config, copy_of_template_config
            )

        # Replace placeholder names in the config template with the actual worker name.
        worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)

        worker_config.update(
            {"name": worker_name, "port": str(worker_port), "config_path": config_path}
        )

        # Update the shared config with any worker_type specific options. The first of a
        # given worker_type needs to stay assigned and not be replaced.
        worker_config["shared_extra_conf"].update(shared_config)
        shared_config = worker_config["shared_extra_conf"]

        if using_unix_sockets:
            healthcheck_urls.append(
                f"--unix-socket /run/worker.{worker_port} http://localhost/health"
            )
        else:
            healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))

        # Update the shared config with sharding-related options if necessary
        add_worker_roles_to_shared_config(
            shared_config, worker_types_set, worker_name, worker_port
        )

        # Enable the worker in supervisord
        worker_descriptors.append(worker_config)

        # Write out the worker's logging config file
        log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)

        # Then a worker config file
        convert(
            "/conf/worker.yaml.j2",
            f"/conf/workers/{worker_name}.yaml",
            **worker_config,
            worker_log_config_filepath=log_config_filepath,
            using_unix_sockets=using_unix_sockets,
        )

        # Save this worker's port number to the correct nginx upstreams
        for worker_type in worker_types_set:
            nginx_upstreams.setdefault(worker_type, set()).add(worker_port)

        # Each worker gets the next sequential port (or unix socket suffix).
        worker_port += 1

    # Build the nginx location config blocks
    nginx_location_config = ""
    for endpoint, upstream in nginx_locations.items():
        nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
            endpoint=endpoint,
            upstream=upstream,
        )

    # Determine the load-balancing upstreams to configure
    nginx_upstream_config = ""
    for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
        body = ""
        if using_unix_sockets:
            for port in upstream_worker_ports:
                body += f" server unix:/run/worker.{port};\n"
        else:
            for port in upstream_worker_ports:
                body += f" server localhost:{port};\n"

        # Add to the list of configured upstreams
        nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
            upstream_worker_base_name=upstream_worker_base_name,
            body=body,
        )

    # Finally, we'll write out the config files.

    # log config for the master process
    master_log_config = generate_worker_log_config(environ, "master", data_dir)
    shared_config["log_config"] = master_log_config

    # Find application service registrations
    appservice_registrations = None
    # NOTE(review): this reads os.environ directly rather than the `environ`
    # parameter used elsewhere in this function — presumably equivalent in
    # practice, but worth confirming/unifying.
    appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
    if appservice_registration_dir:
        # Scan for all YAML files that should be application service registrations.
        appservice_registrations = [
            str(reg_path.resolve())
            for reg_path in Path(appservice_registration_dir).iterdir()
            if reg_path.suffix.lower() in (".yaml", ".yml")
        ]

    workers_in_use = len(requested_worker_types) > 0

    # If there are workers, add the main process to the instance_map too.
    if workers_in_use:
        instance_map = shared_config.setdefault("instance_map", {})
        if using_unix_sockets:
            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
            }
        else:
            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
                "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
                "port": MAIN_PROCESS_REPLICATION_PORT,
            }

    # Shared homeserver config
    convert(
        "/conf/shared.yaml.j2",
        "/conf/workers/shared.yaml",
        shared_worker_config=yaml.dump(shared_config),
        appservice_registrations=appservice_registrations,
        enable_redis=workers_in_use,
        workers_in_use=workers_in_use,
        using_unix_sockets=using_unix_sockets,
    )

    # Nginx config
    convert(
        "/conf/nginx.conf.j2",
        "/etc/nginx/conf.d/matrix-synapse.conf",
        worker_locations=nginx_location_config,
        upstream_directives=nginx_upstream_config,
        tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
        tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
        using_unix_sockets=using_unix_sockets,
    )

    # Supervisord config
    os.makedirs("/etc/supervisor", exist_ok=True)
    convert(
        "/conf/supervisord.conf.j2",
        "/etc/supervisor/supervisord.conf",
        main_config_path=config_path,
        enable_redis=workers_in_use,
        using_unix_sockets=using_unix_sockets,
    )

    convert(
        "/conf/synapse.supervisord.conf.j2",
        "/etc/supervisor/conf.d/synapse.conf",
        workers=worker_descriptors,
        main_config_path=config_path,
        use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
    )

    # healthcheck config
    convert(
        "/conf/healthcheck.sh.j2",
        "/healthcheck.sh",
        healthcheck_urls=healthcheck_urls,
    )

    # Ensure the logging directory exists
    log_dir = data_dir + "/logs"
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
  878. def generate_worker_log_config(
  879. environ: Mapping[str, str], worker_name: str, data_dir: str
  880. ) -> str:
  881. """Generate a log.config file for the given worker.
  882. Returns: the path to the generated file
  883. """
  884. # Check whether we should write worker logs to disk, in addition to the console
  885. extra_log_template_args: Dict[str, Optional[str]] = {}
  886. if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
  887. extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"
  888. extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
  889. extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
  890. "SYNAPSE_LOG_SENSITIVE"
  891. )
  892. extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING")
  893. # Render and write the file
  894. log_config_filepath = f"/conf/workers/{worker_name}.log.config"
  895. convert(
  896. "/conf/log.config",
  897. log_config_filepath,
  898. worker_name=worker_name,
  899. **extra_log_template_args,
  900. include_worker_name_in_log_line=environ.get(
  901. "SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
  902. ),
  903. )
  904. return log_config_filepath
def main(args: List[str], environ: MutableMapping[str, str]) -> None:
    """Entry point: generate all configs (once) and exec supervisord.

    Args:
        args: command line arguments (currently unused).
        environ: os.environ instance; mutated to set SYNAPSE_NO_TLS and
            (if available) LD_PRELOAD before exec'ing supervisord.
    """
    config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
    data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")

    # override SYNAPSE_NO_TLS, we don't support TLS in worker mode,
    # this needs to be handled by a frontend proxy
    environ["SYNAPSE_NO_TLS"] = "yes"

    # Generate the base homeserver config if one does not yet exist
    if not os.path.exists(config_path):
        log("Generating base homeserver config")
        generate_base_homeserver_config()
    else:
        log("Base homeserver config exists—not regenerating")
    # This script may be run multiple times (mostly by Complement, see note at top of
    # file). Don't re-configure workers in this instance.
    mark_filepath = "/conf/workers_have_been_configured"
    if not os.path.exists(mark_filepath):
        # Collect and validate worker_type requests
        # Read the desired worker configuration from the environment
        worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
        # Only process worker_types if they exist
        if not worker_types_env:
            # No workers, just the main process
            worker_types = []
            requested_worker_types: Dict[str, Any] = {}
        else:
            # Split type names by comma, ignoring whitespace.
            worker_types = split_and_strip_string(worker_types_env, ",")

            # Parse, validate and name the requested workers (errors out on
            # invalid worker types).
            requested_worker_types = parse_worker_types(worker_types)

        # Always regenerate all other config files
        log("Generating worker config files")
        generate_worker_files(environ, config_path, data_dir, requested_worker_types)

        # Mark workers as being configured (empty marker file checked above).
        with open(mark_filepath, "w") as f:
            f.write("")
    else:
        log("Worker config exists—not regenerating")

    # Lifted right out of start.py
    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

    if os.path.isfile(jemallocpath):
        environ["LD_PRELOAD"] = jemallocpath
    else:
        log("Could not find %s, will not use" % (jemallocpath,))

    # Start supervisord, which will start Synapse, all of the configured worker
    # processes, redis, nginx etc. according to the config we created above.
    log("Starting supervisord")
    flush_buffers()
    # execle replaces this process entirely; nothing after this line runs.
    os.execle(
        "/usr/local/bin/supervisord",
        "supervisord",
        "-c",
        "/etc/supervisor/supervisord.conf",
        environ,
    )
# Script entry point: pass through the process args and environment.
if __name__ == "__main__":
    main(sys.argv, os.environ)