
Merge remote-tracking branch 'origin/release-v1.69' into develop

Patrick Cloke · 1 year ago
commit bc2bd92b93

+ 1 - 0
.dockerignore

@@ -9,6 +9,7 @@
 !pyproject.toml
 !poetry.lock
 !Cargo.lock
+!Cargo.toml
 !build_rust.py
 
 rust/target

+ 6 - 1
.github/workflows/docker.yml

@@ -27,7 +27,7 @@ jobs:
 
       - name: Inspect builder
         run: docker buildx inspect
-          
+
       - name: Log in to DockerHub
         uses: docker/login-action@v2
         with:
@@ -55,3 +55,8 @@ jobs:
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
+
+          # arm64 builds OOM without the git fetch setting. c.f.
+          # https://github.com/rust-lang/cargo/issues/10583
+          build-args: |
+            CARGO_NET_GIT_FETCH_WITH_CLI=true
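Note: `CARGO_NET_GIT_FETCH_WITH_CLI=true` tells cargo to shell out to the `git` binary for fetches rather than use its built-in git library, which is what avoids the memory blow-up under emulated arm64 (see the linked cargo issue). The same switch should work for a local build, with something like `docker buildx build --build-arg CARGO_NET_GIT_FETCH_WITH_CLI=true -f docker/Dockerfile .`.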

+ 33 - 1
CHANGES.md

@@ -1,4 +1,4 @@
-Synapse 1.69.0rc2 (2022-10-06)
+Synapse 1.69.0rc4 (2022-10-14)
 ==============================
 
 Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
@@ -6,6 +6,38 @@ Server administrators should update their dashboards and alerting rules to avoid
 See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
 
 
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0 ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
+- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
+- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
+
+
+Internal Changes
+----------------
+
+- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
+
+
+Synapse 1.69.0rc2 (2022-10-06)
+==============================
+
 Deprecations and Removals
 -------------------------
 

+ 12 - 0
debian/changelog

@@ -1,3 +1,15 @@
+matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
+
+  * New Synapse release 1.69.0rc4.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Oct 2022 15:04:47 +0100
+
+matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.69.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 12 Oct 2022 13:24:04 +0100
+
 matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.69.0rc2.

+ 7 - 1
docker/Dockerfile

@@ -108,6 +108,12 @@ RUN mkdir /rust /cargo
 
 RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
 
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
 # To speed up rebuilds, install all of the dependencies before we copy over
 # the whole synapse project, so that this layer in the Docker cache can be
 # used while you develop on the source
@@ -121,7 +127,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 COPY synapse /synapse/synapse/
 COPY rust /synapse/rust/
 # ... and what we need to `pip install`.
-COPY pyproject.toml README.rst build_rust.py /synapse/
+COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
 
 # Repeat of earlier build argument declaration, as this is a new build stage.
 ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE

+ 9 - 4
pyproject.toml

@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.69.0rc2"
+version = "1.69.0rc4"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -219,7 +219,7 @@ oidc = ["authlib"]
 # `systemd.journal.JournalHandler`, as is documented in
 # `contrib/systemd/log_config.yaml`.
 systemd = ["systemd-python"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
 sentry = ["sentry-sdk"]
 opentracing = ["jaeger-client", "opentracing"]
 jwt = ["authlib"]
@@ -250,7 +250,7 @@ all = [
     "pysaml2",
     # oidc and jwt
     "authlib",
-    # url_preview
+    # url-preview
     "lxml",
     # sentry
     "sentry-sdk",
@@ -307,7 +307,12 @@ twine = "*"
 towncrier = ">=18.6.0rc1"
 
 [build-system]
-requires = ["poetry-core==1.2.0", "setuptools_rust==1.5.2"]
+# The upper bounds here are defensive, intended to prevent situations like
+# #13849 and #14079 where we see buildtime or runtime errors caused by build
+# system changes.
+# We are happy to raise these upper bounds upon request,
+# provided we check that it's safe to do so (i.e. that CI passes).
+requires = ["poetry-core>=1.0.0,<=1.3.1", "setuptools_rust>=1.3,<=1.5.2"]
 build-backend = "poetry.core.masonry.api"
 
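Note: PEP 517 front ends such as pip build sdists in an isolated environment, fetching the newest build backend that `requires` permits, so a lower bound alone lets a future poetry-core or setuptools_rust release retroactively break installs of already-published versions. That is the failure mode the upper bounds above defend against.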

+ 1 - 1
synapse/config/repository.py

@@ -205,7 +205,7 @@ class ContentRepositoryConfig(Config):
         )
         self.url_preview_enabled = config.get("url_preview_enabled", False)
         if self.url_preview_enabled:
-            check_requirements("url-preview")
+            check_requirements("url-preview")
 
             proxy_env = getproxies_environment()
             if "url_preview_ip_range_blacklist" not in config:
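Note: only the extra's name changes; the `url_preview_enabled` config option keeps its underscore. From-source installations that enable URL previews must now install the renamed extra, e.g. `pip install .[url-preview]` from a checkout.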

+ 17 - 1
synapse/replication/http/register.py

@@ -39,6 +39,16 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
         self.store = hs.get_datastores().main
         self.registration_handler = hs.get_registration_handler()
 
+        # Default value if the worker that sent the replication request did not include
+        # an 'approved' property.
+        if (
+            hs.config.experimental.msc3866.enabled
+            and hs.config.experimental.msc3866.require_approval_for_new_accounts
+        ):
+            self._approval_default = False
+        else:
+            self._approval_default = True
+
     @staticmethod
     async def _serialize_payload(  # type: ignore[override]
         user_id: str,
@@ -92,6 +102,12 @@
 
         await self.registration_handler.check_registration_ratelimit(content["address"])
 
+        # Always default admin users to approved (since it means they were created by
+        # an admin).
+        approved_default = self._approval_default
+        if content["admin"]:
+            approved_default = True
+
         await self.registration_handler.register_with_store(
             user_id=user_id,
             password_hash=content["password_hash"],
@@ -103,7 +119,7 @@
             user_type=content["user_type"],
             address=content["address"],
             shadow_banned=content["shadow_banned"],
-            approved=content["approved"],
+            approved=content.get("approved", approved_default),
         )
 
         return 200, {}
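Together, the two hunks make the endpoint tolerant of workers still running pre-1.69 Synapse, which omit `approved` from the replication payload (the bug fixed in [\#14135]). A minimal sketch of the resulting fallback, pulled out as a standalone function with hypothetical plain-bool stand-ins for the `hs.config.experimental.msc3866` settings:

```python
def default_approved(
    msc3866_enabled: bool, require_approval: bool, is_admin: bool
) -> bool:
    """Default for 'approved' when the sending worker omits the field.

    Mirrors the logic above: a user is only unapproved by default when
    MSC3866 approval is both enabled and required, and admin-created
    users are always approved.
    """
    if is_admin:
        return True
    return not (msc3866_enabled and require_approval)


# The handler then effectively does:
#   approved=content.get("approved", default_approved(..., content["admin"]))
# so payloads from 1.69+ workers, which include the field, are unaffected.
```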

+ 59 - 13
synapse/storage/databases/main/event_push_actions.py

@@ -310,11 +310,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
         event_push_actions_done = progress.get("event_push_actions_done", False)
 
         def add_thread_id_txn(
-            txn: LoggingTransaction, table_name: str, start_stream_ordering: int
+            txn: LoggingTransaction, start_stream_ordering: int
         ) -> int:
-            sql = f"""
+            sql = """
             SELECT stream_ordering
-            FROM {table_name}
+            FROM event_push_actions
             WHERE
                 thread_id IS NULL
                 AND stream_ordering > ?
@@ -326,7 +326,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             # No more rows to process.
             rows = txn.fetchall()
             if not rows:
-                progress[f"{table_name}_done"] = True
+                progress["event_push_actions_done"] = True
                 self.db_pool.updates._background_update_progress_txn(
                     txn, "event_push_backfill_thread_id", progress
                 )
@@ -335,16 +335,65 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             # Update the thread ID for any of those rows.
             max_stream_ordering = rows[-1][0]
 
-            sql = f"""
-            UPDATE {table_name}
+            sql = """
+            UPDATE event_push_actions
             SET thread_id = 'main'
-            WHERE stream_ordering <= ? AND thread_id IS NULL
+            WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
             """
-            txn.execute(sql, (max_stream_ordering,))
+            txn.execute(
+                sql,
+                (
+                    start_stream_ordering,
+                    max_stream_ordering,
+                ),
+            )
 
             # Update progress.
             processed_rows = txn.rowcount
-            progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering
+            progress["max_event_push_actions_stream_ordering"] = max_stream_ordering
+            self.db_pool.updates._background_update_progress_txn(
+                txn, "event_push_backfill_thread_id", progress
+            )
+
+            return processed_rows
+
+        def add_thread_id_summary_txn(txn: LoggingTransaction) -> int:
+            min_user_id = progress.get("max_summary_user_id", "")
+            min_room_id = progress.get("max_summary_room_id", "")
+
+            # Slightly overcomplicated query for getting the Nth user ID / room
+            # ID tuple, or the last if there are fewer than N remaining.
+            sql = """
+            SELECT user_id, room_id FROM (
+                SELECT user_id, room_id FROM event_push_summary
+                WHERE (user_id, room_id) > (?, ?)
+                    AND thread_id IS NULL
+                ORDER BY user_id, room_id
+                LIMIT ?
+            ) AS e
+            ORDER BY user_id DESC, room_id DESC
+            LIMIT 1
+            """
+
+            txn.execute(sql, (min_user_id, min_room_id, batch_size))
+            row = txn.fetchone()
+            if not row:
+                return 0
+
+            max_user_id, max_room_id = row
+
+            sql = """
+            UPDATE event_push_summary
+            SET thread_id = 'main'
+            WHERE
+                (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?)
+                AND thread_id IS NULL
+            """
+            txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id))
+            processed_rows = txn.rowcount
+
+            progress["max_summary_user_id"] = max_user_id
+            progress["max_summary_room_id"] = max_room_id
             self.db_pool.updates._background_update_progress_txn(
                 txn, "event_push_backfill_thread_id", progress
             )
@@ -360,15 +409,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             result = await self.db_pool.runInteraction(
                 "event_push_backfill_thread_id",
                 add_thread_id_txn,
-                "event_push_actions",
                 progress.get("max_event_push_actions_stream_ordering", 0),
             )
         else:
             result = await self.db_pool.runInteraction(
                 "event_push_backfill_thread_id",
-                add_thread_id_txn,
-                "event_push_summary",
-                progress.get("max_event_push_summary_stream_ordering", 0),
+                add_thread_id_summary_txn,
             )
 
             # Only done after the event_push_summary table is done.
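The performance fix ([\#14172], [\#14181]) is twofold: the `event_push_actions` UPDATE now carries a lower bound (`? < stream_ordering`), so each batch no longer rescans every previously-processed `thread_id IS NULL` row; and `event_push_summary`, which has no suitable stream ordering, gets its own transaction that pages over the composite `(user_id, room_id)` key instead. A toy reproduction of that keyset-pagination pattern, using a hypothetical `summary` table in SQLite (row-value comparisons need SQLite 3.15+ or PostgreSQL):

```python
import sqlite3

# Toy stand-in for event_push_summary, keyed by (user_id, room_id).
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE summary (user_id TEXT, room_id TEXT, thread_id TEXT)")
conn.executemany(
    "INSERT INTO summary VALUES (?, ?, NULL)",
    [(f"@u{u}:x", f"!r{r}:x") for u in range(3) for r in range(4)],
)

batch_size = 5
min_user, min_room = "", ""
while True:
    # Find this batch's upper bound: the Nth (user_id, room_id) tuple after
    # the previous bound, or the last tuple if fewer than N remain.
    row = conn.execute(
        """
        SELECT user_id, room_id FROM (
            SELECT user_id, room_id FROM summary
            WHERE (user_id, room_id) > (?, ?) AND thread_id IS NULL
            ORDER BY user_id, room_id
            LIMIT ?
        ) AS e
        ORDER BY user_id DESC, room_id DESC
        LIMIT 1
        """,
        (min_user, min_room, batch_size),
    ).fetchone()
    if row is None:
        break  # nothing left to backfill
    max_user, max_room = row

    # Update exactly the rows in the half-open interval (min, max], then
    # advance the bound so the next batch starts where this one ended.
    cur = conn.execute(
        """
        UPDATE summary SET thread_id = 'main'
        WHERE (?, ?) < (user_id, room_id)
            AND (user_id, room_id) <= (?, ?)
            AND thread_id IS NULL
        """,
        (min_user, min_room, max_user, max_room),
    )
    print(f"backfilled {cur.rowcount} rows up to ({max_user!r}, {max_room!r})")
    min_user, min_room = max_user, max_room
```

Each iteration touches at most `batch_size` rows and never revisits completed ones, which is exactly why the bounded form scales where the old one-sided `WHERE stream_ordering <= ?` did not.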