
things we don't need

Amber Brown committed 5 years ago
commit 238b8d4d5e

+ 1 - 17
synapse/storage/_base.py

@@ -690,23 +690,7 @@ class SQLBaseStore(object):
             ", ".join(k for k in keyvalues),
             ", ".join(k + "=EXCLUDED." + k for k in values),
         )
-        try:
-            txn.execute(sql, list(allvalues.values()))
-        except self.database_engine.module.OperationalError as e:
-            # We only care about serialization errors, so check for it
-            if e.args[0] == "could not serialize access due to concurrent update":
-                # A concurrent update problem is when we try and do a native
-                # UPSERT but the row has changed from under us. We can either
-                # retry, or give up if asked to do so.
-                if best_effort:
-                    # If it's a concurrent-update problem, and this is marked as
-                    # 'best effort' (i.e. if there's a race, then the one we
-                    # raced with will suffice), then pretend that we succeeded.
-                    return False
-            else:
-                # Otherwise, raise, because it's a real OperationalError and we
-                # will need to be rolled back and retried.
-                raise
+        txn.execute(sql, list(allvalues.values()))
 
         # One-tuple, which is a boolean for insertion or not
         res = txn.fetchone()
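
Note for context: the block deleted above special-cased PostgreSQL's serialization failure on a native ON CONFLICT upsert. Below is a minimal standalone sketch of that pattern, assuming plain psycopg2 and the monthly_active_users table from the store below (the conflict target user_id and the connection handling are illustrative assumptions, not Synapse's actual helper):

    import psycopg2
    from psycopg2 import errorcodes

    def upsert_once(conn, user_id, ts):
        # Same ON CONFLICT ... DO UPDATE shape that _simple_upsert_txn builds
        # for the native upsert path; the conflict column is assumed here.
        sql = (
            "INSERT INTO monthly_active_users (user_id, timestamp)"
            " VALUES (%s, %s)"
            " ON CONFLICT (user_id) DO UPDATE SET timestamp = EXCLUDED.timestamp"
        )
        try:
            with conn.cursor() as txn:
                txn.execute(sql, (user_id, ts))
            conn.commit()
            return True
        except psycopg2.OperationalError as e:
            conn.rollback()
            # Under REPEATABLE READ or SERIALIZABLE, a concurrent writer shows
            # up as a serialization failure. SQLSTATE 40001 identifies it more
            # robustly than matching the message text against e.args[0], which
            # is what the removed block did.
            if e.pgcode == errorcodes.SERIALIZATION_FAILURE:
                return False  # best-effort: the racing writer's value stands
            raise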

+ 1 - 4
synapse/storage/monthly_active_users.py

@@ -237,9 +237,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
         # Am consciously deciding to lock the table on the basis that it ought
         # never be a big table and alternative approaches (batching multiple
         # upserts into a single txn) introduced a lot of extra complexity.
-        # See https://github.com/matrix-org/synapse/issues/3854 for more.
-        # If we support native upserts, we'll not lock, but also not retry
-        # on any races, by setting best_effort=True.
+        # See https://github.com/matrix-org/synapse/issues/3854 for more
         is_insert = self._simple_upsert_txn(
             txn,
             table="monthly_active_users",
@@ -249,7 +247,6 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             values={
                 "timestamp": int(self._clock.time_msec()),
             },
-            best_effort=True,
         )
 
         return is_insert
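
With best_effort=True gone, this call falls back to the locking, emulated upsert path: take a table lock, try an UPDATE, and INSERT if no row matched. The sketch below is a simplification of that pattern for illustration only, not the actual _simple_upsert_txn implementation (placeholder syntax varies by driver; LOCK TABLE is PostgreSQL syntax, SQLite serializes writers on its own):

    def emulated_upsert(txn, table, keyvalues, values):
        # Exclusive lock so the UPDATE/INSERT pair cannot race another writer.
        txn.execute("LOCK TABLE %s IN EXCLUSIVE MODE" % (table,))

        where = " AND ".join("%s = ?" % (k,) for k in keyvalues)
        sets = ", ".join("%s = ?" % (k,) for k in values)
        txn.execute(
            "UPDATE %s SET %s WHERE %s" % (table, sets, where),
            list(values.values()) + list(keyvalues.values()),
        )
        if txn.rowcount > 0:
            return False  # row existed and was updated

        allvalues = dict(keyvalues, **values)
        txn.execute(
            "INSERT INTO %s (%s) VALUES (%s)"
            % (table, ", ".join(allvalues), ", ".join("?" for _ in allvalues)),
            list(allvalues.values()),
        )
        return True  # fresh row inserted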

+ 3 - 19
synapse/storage/prepare_database.py

@@ -20,14 +20,12 @@ import logging
 import os
 import re
 
-from synapse.storage.engines.postgres import PostgresEngine
-
 logger = logging.getLogger(__name__)
 
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 54
+SCHEMA_VERSION = 53
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
@@ -117,24 +115,13 @@ def _setup_new_database(cur, database_engine):
 
     valid_dirs = []
     pattern = re.compile(r"^\d+(\.sql)?$")
-
-    if isinstance(database_engine, PostgresEngine):
-        specific = "postgres"
-    else:
-        specific = "sqlite"
-
-    specific_pattern = re.compile(r"^\d+(\.sql." + specific + r")?$")
-
     for filename in directory_entries:
-        match = pattern.match(filename) or specific_pattern.match(filename)
+        match = pattern.match(filename)
         abs_path = os.path.join(current_dir, filename)
         if match and os.path.isdir(abs_path):
             ver = int(match.group(0))
             if ver <= SCHEMA_VERSION:
                 valid_dirs.append((ver, abs_path))
-        elif filename == "README.md":
-            # Ignore the readme
-            pass
         else:
             logger.warn("Unexpected entry in 'full_schemas': %s", filename)
 
@@ -149,10 +136,7 @@ def _setup_new_database(cur, database_engine):
 
     directory_entries = os.listdir(sql_dir)
 
-    for filename in sorted(
-        fnmatch.filter(directory_entries, "*.sql")
-        + fnmatch.filter(directory_entries, "*.sql." + specific)
-    ):
+    for filename in fnmatch.filter(directory_entries, "*.sql"):
         sql_loc = os.path.join(sql_dir, filename)
         logger.debug("Applying schema %s", sql_loc)
         executescript(cur, sql_loc)
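
The prepare_database.py changes undo the engine-specific schema machinery: the bump to SCHEMA_VERSION 54 is reverted, and filenames such as 16.sql.postgres are no longer recognised. For reference, a small illustration of what the retained and removed matchers accepted (filenames are examples only):

    import re

    plain = re.compile(r"^\d+(\.sql)?$")  # the retained pattern
    specific = re.compile(r"^\d+(\.sql." + "postgres" + r")?$")  # the removed one

    assert plain.match("16") and plain.match("16.sql")
    assert not plain.match("16.sql.postgres")

    # The removed pattern additionally accepted engine-specific schema files.
    # Note the unescaped "." after "sql": it is a regex wildcard, so a name
    # like "16.sqlXpostgres" would also have matched.
    assert specific.match("16.sql.postgres")
    assert specific.match("16.sqlXpostgres")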