Commit aebb75a07d by Amber Brown, 5 years ago
79 changed files with 1109 additions and 518 deletions
  1. .github/PULL_REQUEST_TEMPLATE.md (+2 -2)
  2. .travis.yml (+7 -0)
  3. AUTHORS.rst (+4 -1)
  4. CHANGES.md (+83 -0)
  5. README.rst (+4 -5)
  6. changelog.d/4141.feature (+0 -1)
  7. changelog.d/4215.misc (+0 -1)
  8. changelog.d/4262.feature (+0 -1)
  9. changelog.d/4264.bugfix (+0 -1)
  10. changelog.d/4265.feature (+0 -1)
  11. changelog.d/4266.misc (+0 -1)
  12. changelog.d/4267.feature (+0 -1)
  13. changelog.d/4272.feature (+0 -1)
  14. changelog.d/4273.misc (+0 -1)
  15. changelog.d/4274.misc (+0 -1)
  16. changelog.d/4279.bugfix (+0 -1)
  17. changelog.d/4283.misc (+0 -1)
  18. changelog.d/4284.bugfix (+0 -1)
  19. changelog.d/4294.bugfix (+0 -1)
  20. changelog.d/4295.bugfix (+0 -1)
  21. changelog.d/4297.misc (+0 -1)
  22. changelog.d/4298.feature (+0 -1)
  23. changelog.d/4305.bugfix (+0 -1)
  24. changelog.d/4307.feature (+0 -1)
  25. changelog.d/4309.bugfix (+0 -1)
  26. changelog.d/4313.bugfix (+0 -1)
  27. changelog.d/4315.feature (+0 -1)
  28. changelog.d/4316.bugfix (+0 -1)
  29. changelog.d/4317.bugfix (+0 -1)
  30. changelog.d/4319.feature (+0 -1)
  31. changelog.d/4333.misc (+0 -1)
  32. changelog.d/4334.removal (+0 -1)
  33. changelog.d/4342.misc (+1 -0)
  34. changelog.d/4368.misc (+1 -0)
  35. changelog.d/4369.bugfix (+1 -0)
  36. changelog.d/4370.misc (+1 -0)
  37. changelog.d/4377.misc (+1 -0)
  38. changelog.d/4387.misc (+1 -0)
  39. changelog.d/4392.bugfix (+1 -0)
  40. changelog.d/4397.bugfix (+1 -0)
  41. changelog.d/4399.misc (+1 -0)
  42. changelog.d/4407.bugfix (+1 -0)
  43. changelog.d/4408.misc (+1 -0)
  44. contrib/docker/docker-compose.yml (+1 -1)
  45. debian/build_virtualenv (+3 -3)
  46. debian/changelog (+28 -0)
  47. debian/control (+6 -3)
  48. docker/Dockerfile (+1 -3)
  49. docker/Dockerfile-dhvirtualenv (+33 -4)
  50. docker/build_debian.sh (+0 -14)
  51. docker/build_debian_packages.sh (+0 -46)
  52. scripts-dev/build_debian_packages (+154 -0)
  53. synapse/__init__.py (+1 -1)
  54. synapse/api/auth.py (+27 -38)
  55. synapse/api/constants.py (+1 -1)
  56. synapse/api/errors.py (+18 -0)
  57. synapse/api/filtering.py (+8 -3)
  58. synapse/app/__init__.py (+1 -8)
  59. synapse/config/__main__.py (+1 -1)
  60. synapse/config/key.py (+2 -5)
  61. synapse/config/server.py (+37 -1)
  62. synapse/federation/transaction_queue.py (+19 -5)
  63. synapse/handlers/device.py (+19 -0)
  64. synapse/handlers/identity.py (+6 -3)
  65. synapse/handlers/pagination.py (+11 -10)
  66. synapse/http/matrixfederationclient.py (+158 -115)
  67. synapse/python_dependencies.py (+47 -23)
  68. synapse/rest/client/v2_alpha/account_data.py (+33 -1)
  69. synapse/rest/media/v1/media_repository.py (+4 -3)
  70. synapse/storage/_base.py (+9 -1)
  71. synapse/storage/client_ips.py (+127 -6)
  72. synapse/storage/registration.py (+25 -25)
  73. synapse/storage/schema/delta/53/user_ips_index.sql (+26 -0)
  74. synapse/util/async_helpers.py (+3 -1)
  75. synctl (+3 -1)
  76. tests/api/test_auth.py (+1 -154)
  77. tests/http/test_fedclient.py (+10 -3)
  78. tests/storage/test_client_ips.py (+71 -0)
  79. tests/util/test_async_utils.py (+104 -0)

+ 2 - 2
.github/PULL_REQUEST_TEMPLATE.md

@@ -3,5 +3,5 @@
 <!-- Please read CONTRIBUTING.rst before submitting your pull request -->
 
 * [ ] Pull request is based on the develop branch
-* [ ] Pull request includes a [changelog file](CONTRIBUTING.rst#changelog)
-* [ ] Pull request includes a [sign off](CONTRIBUTING.rst#sign-off)
+* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
+* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)

+ 7 - 0
.travis.yml

@@ -71,6 +71,13 @@ matrix:
 
 install:
   - pip install tox
+  
+  # if we don't have python3.6 in this environment, travis unhelpfully gives us
+  # a `python3.6` on our path which does nothing but spit out a warning. Tox
+  # tries to run it (even if we're not running a py36 env), so the build logs
+  # then have warnings which look like errors. To reduce the noise, remove the
+  # non-functional python3.6.
+  - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
 
 script:
   - tox -e $TOX_ENV

+ 4 - 1
AUTHORS.rst

@@ -65,4 +65,7 @@ Pierre Jaury <pierre at jaury.eu>
  * Docker packaging
 
 Serban Constantin <serban.constantin at gmail dot com>
- * Small bug fix
+ * Small bug fix
+
+Jason Robinson <jasonr at matrix.org>
+ * Minor fixes

+ 83 - 0
CHANGES.md

@@ -1,3 +1,86 @@
+Synapse 0.34.1.1 (2019-01-11)
+=============================
+
+This release fixes CVE-2019-5885 and is recommended for all users of Synapse 0.34.1.
+
+This release is compatible with Python 2.7 and 3.5+. Python 3.7 is fully supported.
+
+Bugfixes
+--------
+
+- Fix spontaneous logout on upgrade
+  ([\#4374](https://github.com/matrix-org/synapse/issues/4374))
+
+
+Synapse 0.34.1 (2019-01-09)
+===========================
+
+Internal Changes
+----------------
+
+- Add better logging for unexpected errors while sending transactions ([\#4361](https://github.com/matrix-org/synapse/issues/4361), [\#4362](https://github.com/matrix-org/synapse/issues/4362))
+
+
+Synapse 0.34.1rc1 (2019-01-08)
+==============================
+
+Features
+--------
+
+- Special-case a support user for use in verifying behaviour of a given server. The support user does not appear in user directory or monthly active user counts. ([\#4141](https://github.com/matrix-org/synapse/issues/4141), [\#4344](https://github.com/matrix-org/synapse/issues/4344))
+- Support for serving .well-known files ([\#4262](https://github.com/matrix-org/synapse/issues/4262))
+- Rework SAML2 authentication ([\#4265](https://github.com/matrix-org/synapse/issues/4265), [\#4267](https://github.com/matrix-org/synapse/issues/4267))
+- SAML2 authentication: Initialise user display name from SAML2 data ([\#4272](https://github.com/matrix-org/synapse/issues/4272))
+- Synapse can now have its conditional/extra dependencies installed by pip. This functionality can be used by using `pip install matrix-synapse[feature]`, where feature is a comma separated list with the possible values `email.enable_notifs`, `matrix-synapse-ldap3`, `postgres`, `resources.consent`, `saml2`, `url_preview`, and `test`. If you want to install all optional dependencies, you can use "all" instead. ([\#4298](https://github.com/matrix-org/synapse/issues/4298), [\#4325](https://github.com/matrix-org/synapse/issues/4325), [\#4327](https://github.com/matrix-org/synapse/issues/4327))
+- Add routes for reading account data. ([\#4303](https://github.com/matrix-org/synapse/issues/4303))
+- Add opt-in support for v2 rooms ([\#4307](https://github.com/matrix-org/synapse/issues/4307))
+- Add a script to generate a clean config file ([\#4315](https://github.com/matrix-org/synapse/issues/4315))
+- Return server data in /login response ([\#4319](https://github.com/matrix-org/synapse/issues/4319))
+
+
+Bugfixes
+--------
+
+- Fix contains_url check to be consistent with other instances in code-base and check that value is an instance of string. ([\#3405](https://github.com/matrix-org/synapse/issues/3405))
+- Fix CAS login when username is not valid in an MXID ([\#4264](https://github.com/matrix-org/synapse/issues/4264))
+- Send CORS headers for /media/config ([\#4279](https://github.com/matrix-org/synapse/issues/4279))
+- Add 'sandbox' to CSP for media repository ([\#4284](https://github.com/matrix-org/synapse/issues/4284))
+- Make the new landing page prettier. ([\#4294](https://github.com/matrix-org/synapse/issues/4294))
+- Fix deleting E2E room keys when using old SQLite versions. ([\#4295](https://github.com/matrix-org/synapse/issues/4295))
+- The metric synapse_admin_mau:current previously did not update when config.mau_stats_only was set to True ([\#4305](https://github.com/matrix-org/synapse/issues/4305))
+- Fixed per-room account data filters ([\#4309](https://github.com/matrix-org/synapse/issues/4309))
+- Fix indentation in default config ([\#4313](https://github.com/matrix-org/synapse/issues/4313))
+- Fix synapse:latest docker upload ([\#4316](https://github.com/matrix-org/synapse/issues/4316))
+- Fix test_metric.py compatibility with prometheus_client 0.5. Contributed by Maarten de Vries <maarten@de-vri.es>. ([\#4317](https://github.com/matrix-org/synapse/issues/4317))
+- Avoid packaging _trial_temp directory in -py3 debian packages ([\#4326](https://github.com/matrix-org/synapse/issues/4326))
+- Check jinja version for consent resource ([\#4327](https://github.com/matrix-org/synapse/issues/4327))
+- fix NPE in /messages by checking if all events were filtered out ([\#4330](https://github.com/matrix-org/synapse/issues/4330))
+- Fix `python -m synapse.config` on Python 3. ([\#4356](https://github.com/matrix-org/synapse/issues/4356))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the deprecated v1/register API on Python 2. It was never ported to Python 3. ([\#4334](https://github.com/matrix-org/synapse/issues/4334))
+
+
+Internal Changes
+----------------
+
+- Getting URL previews of IP addresses no longer fails on Python 3. ([\#4215](https://github.com/matrix-org/synapse/issues/4215))
+- drop undocumented dependency on dateutil ([\#4266](https://github.com/matrix-org/synapse/issues/4266))
+- Update the example systemd config to use a virtualenv ([\#4273](https://github.com/matrix-org/synapse/issues/4273))
+- Update link to kernel DCO guide ([\#4274](https://github.com/matrix-org/synapse/issues/4274))
+- Make isort tox check print diff when it fails ([\#4283](https://github.com/matrix-org/synapse/issues/4283))
+- Log room_id in Unknown room errors ([\#4297](https://github.com/matrix-org/synapse/issues/4297))
+- Documentation improvements for coturn setup. Contributed by Krithin Sitaram. ([\#4333](https://github.com/matrix-org/synapse/issues/4333))
+- Update pull request template to use absolute links ([\#4341](https://github.com/matrix-org/synapse/issues/4341))
+- Update README to not lie about required restart when updating TLS certificates ([\#4343](https://github.com/matrix-org/synapse/issues/4343))
+- Update debian packaging for compatibility with transitional package ([\#4349](https://github.com/matrix-org/synapse/issues/4349))
+- Fix command hint to generate a config file when trying to start without a config file ([\#4353](https://github.com/matrix-org/synapse/issues/4353))
+- Add better logging for unexpected errors while sending transactions ([\#4358](https://github.com/matrix-org/synapse/issues/4358))
+
+
 Synapse 0.34.0 (2018-12-20)
 ===========================
 

+ 4 - 5
README.rst

@@ -184,7 +184,7 @@ Configuring Synapse
 Before you can start Synapse, you will need to generate a configuration
 file. To do this, run (in your virtualenv, as before)::
 
-    cd ~/.synapse
+    cd ~/synapse
     python -m synapse.app.homeserver \
         --server-name my.domain.name \
         --config-path homeserver.yaml \
@@ -725,8 +725,8 @@ caveats, you will need to do the following:
   tell other servers how to find you. See `Setting up Federation`_.
 
 When updating the SSL certificate, just update the file pointed to by
-``tls_certificate_path``: there is no need to restart synapse. (You may like to
-use a symbolic link to help make this process atomic.)
+``tls_certificate_path`` and then restart Synapse. (You may like to use a symbolic link
+to help make this process atomic.)
 
 The most common mistake when setting up federation is not to tell Synapse about
 your SSL certificate. To check it, you can visit
@@ -796,8 +796,7 @@ A manual password reset can be done via direct database access as follows.
 
 First calculate the hash of the new password::
 
-    $ source ~/.synapse/bin/activate
-    $ ./scripts/hash_password
+    $ ~/synapse/env/bin/hash_password
     Password:
     Confirm password:
     $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

+ 0 - 1
changelog.d/4141.feature

@@ -1 +0,0 @@
-Special-case a support user for use in verifying behaviour of a given server. The support user does not appear in user directory or monthly active user counts.

+ 0 - 1
changelog.d/4215.misc

@@ -1 +0,0 @@
-Getting URL previews of IP addresses no longer fails on Python 3.

+ 0 - 1
changelog.d/4262.feature

@@ -1 +0,0 @@
-Support for serving .well-known files

+ 0 - 1
changelog.d/4264.bugfix

@@ -1 +0,0 @@
-Fix CAS login when username is not valid in an MXID

+ 0 - 1
changelog.d/4265.feature

@@ -1 +0,0 @@
-Rework SAML2 authentication

+ 0 - 1
changelog.d/4266.misc

@@ -1 +0,0 @@
-drop undocumented dependency on dateutil

+ 0 - 1
changelog.d/4267.feature

@@ -1 +0,0 @@
-Rework SAML2 authentication

+ 0 - 1
changelog.d/4272.feature

@@ -1 +0,0 @@
-SAML2 authentication: Initialise user display name from SAML2 data

+ 0 - 1
changelog.d/4273.misc

@@ -1 +0,0 @@
-Update the example systemd config to use a virtualenv

+ 0 - 1
changelog.d/4274.misc

@@ -1 +0,0 @@
-Update link to kernel DCO guide

+ 0 - 1
changelog.d/4279.bugfix

@@ -1 +0,0 @@
-Send CORS headers for /media/config

+ 0 - 1
changelog.d/4283.misc

@@ -1 +0,0 @@
-Make isort tox check print diff when it fails

+ 0 - 1
changelog.d/4284.bugfix

@@ -1 +0,0 @@
-Add 'sandbox' to CSP for media reprository

+ 0 - 1
changelog.d/4294.bugfix

@@ -1 +0,0 @@
-Make the new landing page prettier.

+ 0 - 1
changelog.d/4295.bugfix

@@ -1 +0,0 @@
-Fix deleting E2E room keys when using old SQLite versions.

+ 0 - 1
changelog.d/4297.misc

@@ -1 +0,0 @@
-Log room_id in Unknown room errors

+ 0 - 1
changelog.d/4298.feature

@@ -1 +0,0 @@
-Synapse can now have its conditional/extra dependencies installed by pip. This functionality can be used by using `pip install matrix-synapse[feature]`, where feature is a comma separated list with the possible values "email.enable_notifs", "ldap3", "postgres", "saml2", "url_preview", and "test". If you want to install all optional dependencies, you can use "all" instead.

+ 0 - 1
changelog.d/4305.bugfix

@@ -1 +0,0 @@
-The metric synapse_admin_mau:current previously did not update when config.mau_stats_only was set to True

+ 0 - 1
changelog.d/4307.feature

@@ -1 +0,0 @@
-Add opt-in support for v2 rooms

+ 0 - 1
changelog.d/4309.bugfix

@@ -1 +0,0 @@
-Fixed per-room account data filters

+ 0 - 1
changelog.d/4313.bugfix

@@ -1 +0,0 @@
-Fix indentation in default config

+ 0 - 1
changelog.d/4315.feature

@@ -1 +0,0 @@
-Add a script to generate a clean config file

+ 0 - 1
changelog.d/4316.bugfix

@@ -1 +0,0 @@
-Fix synapse:latest docker upload

+ 0 - 1
changelog.d/4317.bugfix

@@ -1 +0,0 @@
-Fix test_metric.py compatibility with prometheus_client 0.5. Contributed by Maarten de Vries <maarten@de-vri.es>.

+ 0 - 1
changelog.d/4319.feature

@@ -1 +0,0 @@
-Return server data in /login response

+ 0 - 1
changelog.d/4333.misc

@@ -1 +0,0 @@
-Documentation improvements for coturn setup. Contributed by Krithin Sitaram.

+ 0 - 1
changelog.d/4334.removal

@@ -1 +0,0 @@
-Remove the deprecated v1/register API on Python 2. It was never ported to Python 3.

+ 1 - 0
changelog.d/4342.misc

@@ -0,0 +1 @@
+Update README to use the new virtualenv everywhere

+ 1 - 0
changelog.d/4368.misc

@@ -0,0 +1 @@
+Add better logging for unexpected errors while sending transactions

+ 1 - 0
changelog.d/4369.bugfix

@@ -0,0 +1 @@
+Prevent users with access tokens predating the introduction of device IDs from creating spurious entries in the user_ips table.

+ 1 - 0
changelog.d/4370.misc

@@ -0,0 +1 @@
+Apply a unique index to the user_ips table, preventing duplicates.

+ 1 - 0
changelog.d/4377.misc

@@ -0,0 +1 @@
+Silence travis-ci build warnings by removing non-functional python3.6

+ 1 - 0
changelog.d/4387.misc

@@ -0,0 +1 @@
+Fix a comment in the generated config file

+ 1 - 0
changelog.d/4392.bugfix

@@ -0,0 +1 @@
+Fix typo in ALL_USER_TYPES definition to ensure type is a tuple

+ 1 - 0
changelog.d/4397.bugfix

@@ -0,0 +1 @@
+Fix high CPU usage due to remote devicelist updates

+ 1 - 0
changelog.d/4399.misc

@@ -0,0 +1 @@
+Update dependencies on msgpack and pymacaroons to use the up-to-date packages.

+ 1 - 0
changelog.d/4407.bugfix

@@ -0,0 +1 @@
+Fix incorrect logcontexts after a Deferred was cancelled

+ 1 - 0
changelog.d/4408.misc

@@ -0,0 +1 @@
+Refactor 'sign_request' as 'build_auth_headers'

+ 1 - 1
contrib/docker/docker-compose.yml

@@ -37,7 +37,7 @@ services:
     labels:
       - traefik.enable=true
       - traefik.frontend.rule=Host:my.matrix.Host
-      - traefik.port=8448
+      - traefik.port=8008
 
   db:
     image: docker.io/postgres:10-alpine

+ 3 - 3
debian/build_virtualenv

@@ -33,7 +33,8 @@ dh_virtualenv \
     --preinstall="lxml" \
     --preinstall="mock" \
     --extra-pip-arg="--no-cache-dir" \
-    --extra-pip-arg="--compile"
+    --extra-pip-arg="--compile" \
+    --extras="all"
 
 # we copy the tests to a temporary directory so that we can put them on the
 # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
@@ -41,8 +42,7 @@ tmpdir=`mktemp -d`
 trap "rm -r $tmpdir" EXIT
 
 cp -r tests "$tmpdir"
-cd debian/matrix-synapse-py3
 
 PYTHONPATH="$tmpdir" \
-    ./opt/venvs/matrix-synapse/bin/python \
+    debian/matrix-synapse-py3/opt/venvs/matrix-synapse/bin/python \
         -B -m twisted.trial --reporter=text -j2 tests

+ 28 - 0
debian/changelog

@@ -1,3 +1,31 @@
+matrix-synapse-py3 (0.34.1.1++1) stable; urgency=medium
+
+  * Update conflicts specifications to allow smoother transition from matrix-synapse.
+
+ -- Synapse Packaging team <packages@matrix.org>  Sat, 12 Jan 2019 12:58:35 +0000
+
+matrix-synapse-py3 (0.34.1.1) stable; urgency=high
+
+  * New synapse release 0.34.1.1
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 10 Jan 2019 15:04:52 +0000
+
+matrix-synapse-py3 (0.34.1+1) stable; urgency=medium
+
+  * Remove 'Breaks: matrix-synapse-ldap3'. (matrix-synapse-py3 includes
+    the matrix-synapse-ldap3 python files, which makes the
+    matrix-synapse-ldap3 debian package redundant but not broken.)
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 09 Jan 2019 15:30:00 +0000
+
+matrix-synapse-py3 (0.34.1) stable; urgency=medium
+
+  * New synapse release 0.34.1.
+  * Update Conflicts specifications to allow installation alongside our
+    matrix-synapse transitional package.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 09 Jan 2019 14:52:24 +0000
+
 matrix-synapse-py3 (0.34.0) stable; urgency=medium
 
   * New synapse release 0.34.0.

+ 6 - 3
debian/control

@@ -5,7 +5,7 @@ Maintainer: Synapse Packaging team <packages@matrix.org>
 Build-Depends:
  debhelper (>= 9),
  dh-systemd,
- dh-virtualenv (>= 1.0),
+ dh-virtualenv (>= 1.1),
  lsb-release,
  python3-dev,
  python3,
@@ -13,12 +13,15 @@ Build-Depends:
  python3-pip,
  python3-venv,
  tar,
-Standards-Version: 3.9.5
+Standards-Version: 3.9.8
 Homepage: https://github.com/matrix-org/synapse
 
 Package: matrix-synapse-py3
 Architecture: amd64
-Conflicts: matrix-synapse
+Provides: matrix-synapse
+Conflicts:
+ matrix-synapse (<< 0.34.0.1-0matrix2),
+ matrix-synapse (>= 0.34.0.1-1),
 Pre-Depends: dpkg (>= 1.16.1)
 Depends:
  adduser,

+ 1 - 3
docker/Dockerfile

@@ -33,9 +33,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
 
 COPY . /synapse
 RUN pip install --prefix="/install" --no-warn-script-location \
-        lxml \
-        psycopg2 \
-        /synapse
+        /synapse[all]
 
 ###
 ### Stage 1: runtime

+ 33 - 4
docker/Dockerfile-dhvirtualenv

@@ -11,6 +11,35 @@
 
 # Get the distro we want to pull from as a dynamic build variable
 ARG distro=""
+
+###
+### Stage 0: build a dh-virtualenv
+###
+FROM ${distro} as builder
+
+RUN apt-get update -qq -o Acquire::Languages=none
+RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
+        -yqq --no-install-recommends \
+        build-essential \
+        ca-certificates \
+        devscripts \
+        equivs \
+        wget
+
+# fetch and unpack the package
+RUN wget -q -O /dh-virtuenv-1.1.tar.gz https://github.com/spotify/dh-virtualenv/archive/1.1.tar.gz
+RUN tar xvf /dh-virtuenv-1.1.tar.gz
+
+# install its build deps
+RUN cd dh-virtualenv-1.1/ \
+    && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -yqq --no-install-recommends"
+
+# build it
+RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
+
+###
+### Stage 1
+###
 FROM ${distro}
 
 # Install the build dependencies
@@ -21,15 +50,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
         debhelper \
         devscripts \
         dh-systemd \
-        dh-virtualenv \
-        equivs \
         lsb-release \
         python3-dev \
         python3-pip \
         python3-setuptools \
         python3-venv \
-        sqlite3 \
-        wget
+        sqlite3
+
+COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
+RUN apt-get install -yq /dh-virtualenv_1.1-1_all.deb
 
 WORKDIR /synapse/source
 ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]

+ 0 - 14
docker/build_debian.sh

@@ -6,20 +6,6 @@ set -ex
 
 DIST=`lsb_release -c -s`
 
-# We need to build a newer dh_virtualenv on older OSes like Xenial.
-if [ "$DIST" = 'xenial' ]; then
-    mkdir -p /tmp/dhvenv
-    cd /tmp/dhvenv
-    wget https://github.com/spotify/dh-virtualenv/archive/1.1.tar.gz
-    tar xvf 1.1.tar.gz
-    cd dh-virtualenv-1.1/
-    env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io"
-    dpkg-buildpackage -us -uc -b
-    cd /tmp/dhvenv
-    apt-get install -yqq ./dh-virtualenv_1.1-1_all.deb
-fi
-
-
 # we get a read-only copy of the source: make a writeable copy
 cp -aT /synapse/source /synapse/build
 cd /synapse/build

+ 0 - 46
docker/build_debian_packages.sh

@@ -1,46 +0,0 @@
-#!/bin/bash
-
-# Build the Debian packages using Docker images.
-#
-# This script builds the Docker images and then executes them sequentially, each
-# one building a Debian package for the targeted operating system. It is
-# designed to be a "single command" to produce all the images.
-#
-# By default, builds for all known distributions, but a list of distributions
-# can be passed on the commandline for debugging.
-
-set -ex
-
-cd `dirname $0`
-
-if [ $# -lt 1 ]; then
-    DISTS=(
-        debian:stretch
-        debian:buster
-        debian:sid
-        ubuntu:xenial
-        ubuntu:bionic
-        ubuntu:cosmic
-    )
-else
-    DISTS=("$@")
-fi
-
-# Make the dir where the debs will live.
-#
-# Note that we deliberately put this outside the source tree, otherwise we tend
-# to get source packages which are full of debs. (We could hack around that
-# with more magic in the build_debian.sh script, but that doesn't solve the
-# problem for natively-run dpkg-buildpakage).
-
-mkdir -p ../../debs
-
-# Build each OS image;
-for i in "${DISTS[@]}"; do
-    TAG=$(echo ${i} | cut -d ":" -f 2)
-    docker build --tag dh-venv-builder:${TAG} --build-arg distro=${i} -f Dockerfile-dhvirtualenv .
-    docker run -it --rm --volume=$(pwd)/../\:/synapse/source:ro --volume=$(pwd)/../../debs:/debs \
-           -e TARGET_USERID=$(id -u) \
-           -e TARGET_GROUPID=$(id -g) \
-           dh-venv-builder:${TAG}
-done

+ 154 - 0
scripts-dev/build_debian_packages

@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+
+# Build the Debian packages using Docker images.
+#
+# This script builds the Docker images and then executes them sequentially, each
+# one building a Debian package for the targeted operating system. It is
+# designed to be a "single command" to produce all the images.
+#
+# By default, builds for all known distributions, but a list of distributions
+# can be passed on the commandline for debugging.
+
+import argparse
+from concurrent.futures import ThreadPoolExecutor
+import os
+import signal
+import subprocess
+import sys
+import threading
+
+DISTS = (
+    "debian:stretch",
+    "debian:buster",
+    "debian:sid",
+    "ubuntu:xenial",
+    "ubuntu:bionic",
+    "ubuntu:cosmic",
+)
+
+DESC = '''\
+Builds .debs for synapse, using a Docker image for the build environment.
+
+By default, builds for all known distributions, but a list of distributions
+can be passed on the commandline for debugging.
+'''
+
+
+class Builder(object):
+    def __init__(self, redirect_stdout=False):
+        self.redirect_stdout = redirect_stdout
+        self.active_containers = set()
+        self._lock = threading.Lock()
+        self._failed = False
+
+    def run_build(self, dist):
+        """Build deb for a single distribution"""
+
+        if self._failed:
+            print("not building %s due to earlier failure" % (dist, ))
+            raise Exception("failed")
+
+        try:
+            self._inner_build(dist)
+        except Exception as e:
+            print("build of %s failed: %s" % (dist, e), file=sys.stderr)
+            self._failed = True
+            raise
+
+    def _inner_build(self, dist):
+        projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+        os.chdir(projdir)
+
+        tag = dist.split(":", 1)[1]
+
+        # Make the dir where the debs will live.
+        #
+        # Note that we deliberately put this outside the source tree, otherwise
+        # we tend to get source packages which are full of debs. (We could hack
+        # around that with more magic in the build_debian.sh script, but that
+        # doesn't solve the problem for natively-run dpkg-buildpackage).
+        debsdir = os.path.join(projdir, '../debs')
+        os.makedirs(debsdir, exist_ok=True)
+
+        if self.redirect_stdout:
+            logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
+            print("building %s: directing output to %s" % (dist, logfile))
+            stdout = open(logfile, "w")
+        else:
+            stdout = None
+
+        # first build a docker image for the build environment
+        subprocess.check_call([
+            "docker", "build",
+            "--tag", "dh-venv-builder:" + tag,
+            "--build-arg", "distro=" + dist,
+            "-f", "docker/Dockerfile-dhvirtualenv",
+            "docker",
+        ], stdout=stdout, stderr=subprocess.STDOUT)
+
+        container_name = "synapse_build_" + tag
+        with self._lock:
+            self.active_containers.add(container_name)
+
+        # then run the build itself
+        subprocess.check_call([
+            "docker", "run",
+            "--rm",
+            "--name", container_name,
+            "--volume=" + projdir + ":/synapse/source:ro",
+            "--volume=" + debsdir + ":/debs",
+            "-e", "TARGET_USERID=%i" % (os.getuid(), ),
+            "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
+            "dh-venv-builder:" + tag,
+        ], stdout=stdout, stderr=subprocess.STDOUT)
+
+        with self._lock:
+            self.active_containers.remove(container_name)
+
+        if stdout is not None:
+            stdout.close()
+            print("Completed build of %s" % (dist, ))
+
+    def kill_containers(self):
+        with self._lock:
+            active = list(self.active_containers)
+
+        for c in active:
+            print("killing container %s" % (c,))
+            subprocess.run([
+                "docker", "kill", c,
+            ], stdout=subprocess.DEVNULL)
+            with self._lock:
+                self.active_containers.remove(c)
+
+
+def run_builds(dists, jobs=1):
+    builder = Builder(redirect_stdout=(jobs > 1))
+
+    def sig(signum, _frame):
+        print("Caught SIGINT")
+        builder.kill_containers()
+    signal.signal(signal.SIGINT, sig)
+
+    with ThreadPoolExecutor(max_workers=jobs) as e:
+        res = e.map(builder.run_build, dists)
+
+    # make sure we consume the iterable so that exceptions are raised.
+    for r in res:
+        pass
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=DESC,
+    )
+    parser.add_argument(
+        '-j', '--jobs', type=int, default=1,
+        help='specify the number of builds to run in parallel',
+    )
+    parser.add_argument(
+        'dist', nargs='*', default=DISTS,
+        help='a list of distributions to build for. Default: %(default)s',
+    )
+    args = parser.parse_args()
+    run_builds(dists=args.dist, jobs=args.jobs)

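The final loop in run_builds above is load-bearing: ThreadPoolExecutor.map
only surfaces exceptions from worker threads when its results are consumed.
A minimal, standalone illustration (the work function here is hypothetical):

    from concurrent.futures import ThreadPoolExecutor

    def work(n):
        # stand-in for Builder.run_build; fails for one input
        if n == 2:
            raise RuntimeError("build failed")
        return n

    with ThreadPoolExecutor(max_workers=2) as ex:
        res = ex.map(work, [1, 2, 3])

    # the exception is raised here, during iteration, not at submission time
    for r in res:
        pass
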
+ 1 - 1
synapse/__init__.py

@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.34.0"
+__version__ = "0.34.1.1"

+ 27 - 38
synapse/api/auth.py

@@ -300,20 +300,28 @@ class Auth(object):
         Raises:
             AuthError if no user by that token exists or the token is invalid.
         """
-        try:
-            user_id, guest = self._parse_and_validate_macaroon(token, rights)
-        except _InvalidMacaroonException:
-            # doesn't look like a macaroon: treat it as an opaque token which
-            # must be in the database.
-            # TODO: it would be nice to get rid of this, but apparently some
-            # people use access tokens which aren't macaroons
+
+        if rights == "access":
+            # first look in the database
             r = yield self._look_up_user_by_access_token(token)
-            defer.returnValue(r)
+            if r:
+                defer.returnValue(r)
 
+        # otherwise it needs to be a valid macaroon
         try:
+            user_id, guest = self._parse_and_validate_macaroon(token, rights)
             user = UserID.from_string(user_id)
 
-            if guest:
+            if rights == "access":
+                if not guest:
+                    # non-guest access tokens must be in the database
+                    logger.warning("Unrecognised access token - not in store.")
+                    raise AuthError(
+                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
+                        "Unrecognised access token.",
+                        errcode=Codes.UNKNOWN_TOKEN,
+                    )
+
                 # Guest access tokens are not stored in the database (there can
                 # only be one access token per guest, anyway).
                 #
@@ -354,31 +362,15 @@ class Auth(object):
                     "device_id": None,
                 }
             else:
-                # This codepath exists for several reasons:
-                #   * so that we can actually return a token ID, which is used
-                #     in some parts of the schema (where we probably ought to
-                #     use device IDs instead)
-                #   * the only way we currently have to invalidate an
-                #     access_token is by removing it from the database, so we
-                #     have to check here that it is still in the db
-                #   * some attributes (notably device_id) aren't stored in the
-                #     macaroon. They probably should be.
-                # TODO: build the dictionary from the macaroon once the
-                # above are fixed
-                ret = yield self._look_up_user_by_access_token(token)
-                if ret["user"] != user:
-                    logger.error(
-                        "Macaroon user (%s) != DB user (%s)",
-                        user,
-                        ret["user"]
-                    )
-                    raise AuthError(
-                        self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                        "User mismatch in macaroon",
-                        errcode=Codes.UNKNOWN_TOKEN
-                    )
+                raise RuntimeError("Unknown rights setting %s", rights)
             defer.returnValue(ret)
-        except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
+        except (
+            _InvalidMacaroonException,
+            pymacaroons.exceptions.MacaroonException,
+            TypeError,
+            ValueError,
+        ) as e:
+            logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
             raise AuthError(
                 self.TOKEN_NOT_FOUND_HTTP_STATUS, "Invalid macaroon passed.",
                 errcode=Codes.UNKNOWN_TOKEN
@@ -508,11 +500,8 @@ class Auth(object):
     def _look_up_user_by_access_token(self, token):
         ret = yield self.store.get_user_by_access_token(token)
         if not ret:
-            logger.warn("Unrecognised access token - not in store.")
-            raise AuthError(
-                self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.",
-                errcode=Codes.UNKNOWN_TOKEN
-            )
+            defer.returnValue(None)
+
         # we use ret.get() below because *lots* of unit tests stub out
         # get_user_by_access_token in a way where it only returns a couple of
         # the fields.

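The net effect of the auth.py changes is a reordering of token validation.
A simplified, synchronous sketch of the new flow (stubbed helper names for
illustration; the real method is a Twisted inlineCallbacks generator):

    class AuthError(Exception):
        pass

    def get_user_by_access_token(look_up_in_store, parse_macaroon, token,
                                 rights="access"):
        if rights == "access":
            # ordinary access tokens: consult the database first
            user_info = look_up_in_store(token)
            if user_info is not None:
                return user_info

        # anything else must be a valid macaroon; guest tokens are never
        # stored in the database, so they always take this path
        user_id, guest = parse_macaroon(token, rights)  # raises if invalid
        if rights == "access" and not guest:
            # a non-guest access token should have been found in the store
            raise AuthError("Unrecognised access token.")
        return {"user": user_id, "is_guest": guest, "device_id": None}
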
+ 1 - 1
synapse/api/constants.py

@@ -128,4 +128,4 @@ class UserTypes(object):
     'admin' and 'guest' users should also be UserTypes. Normal users are type None
     """
     SUPPORT = "support"
-    ALL_USER_TYPES = (SUPPORT)
+    ALL_USER_TYPES = (SUPPORT,)

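The one-character constants.py fix closes a classic Python trap: parentheses
alone do not make a tuple, so (SUPPORT) is just a string and membership tests
against it silently become substring tests. For illustration:

    SUPPORT = "support"

    print(type((SUPPORT)))      # <class 'str'>: the parentheses are a no-op
    print(type((SUPPORT,)))     # <class 'tuple'>: the trailing comma matters
    print("s" in (SUPPORT))     # True: substring match, the latent bug
    print("s" in (SUPPORT,))    # False: a proper membership test
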
+ 18 - 0
synapse/api/errors.py

@@ -348,6 +348,24 @@ class IncompatibleRoomVersionError(SynapseError):
         )
 
 
+class RequestSendFailed(RuntimeError):
+    """Sending a HTTP request over federation failed due to not being able to
+    talk to the remote server for some reason.
+
+    This exception is used to differentiate "expected" errors that arise due to
+    networking (e.g. DNS failures, connection timeouts etc), versus unexpected
+    errors (like programming errors).
+    """
+    def __init__(self, inner_exception, can_retry):
+        super(RequestSendFailed, self).__init__(
+            "Failed to send request: %s: %s" % (
+                type(inner_exception).__name__, inner_exception,
+            )
+        )
+        self.inner_exception = inner_exception
+        self.can_retry = can_retry
+
+
 def cs_error(msg, code=Codes.UNKNOWN, **kwargs):
     """ Utility method for constructing an error response for client-server
     interactions.

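A hedged sketch of how the new exception is meant to be consumed, assuming
synapse is importable (the transaction_queue and matrixfederationclient
changes below catch it and branch on can_retry):

    from synapse.api.errors import RequestSendFailed

    try:
        raise ConnectionError("connection refused")
    except ConnectionError as inner:
        err = RequestSendFailed(inner, can_retry=True)

    print(err)            # Failed to send request: ConnectionError: connection refused
    print(err.can_retry)  # True: transient network error, safe to back off and retry
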
+ 8 - 3
synapse/api/filtering.py

@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from six import text_type
+
 import jsonschema
 from canonicaljson import json
 from jsonschema import FormatChecker
@@ -353,7 +355,7 @@ class Filter(object):
             sender = event.user_id
             room_id = None
             ev_type = "m.presence"
-            is_url = False
+            contains_url = False
         else:
             sender = event.get("sender", None)
             if not sender:
@@ -368,13 +370,16 @@
 
             room_id = event.get("room_id", None)
             ev_type = event.get("type", None)
-            is_url = "url" in event.get("content", {})
+
+            content = event.get("content", {})
+            # check if there is a string url field in the content for filtering purposes
+            contains_url = isinstance(content.get("url"), text_type)
 
         return self.check_fields(
             room_id,
             sender,
             ev_type,
-            is_url,
+            contains_url,
         )
 
     def check_fields(self, room_id, sender, event_type, contains_url):

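The isinstance check above is the point of the fix: a non-string "url" value
must not count as contains_url. A standalone illustration with hypothetical
event contents:

    from six import text_type

    def contains_url(content):
        return isinstance(content.get("url"), text_type)

    print(contains_url({"url": u"mxc://example.org/abc"}))  # True
    print(contains_url({"url": 42}))                        # False: not a string
    print(contains_url({}))                                 # False: no url field
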
+ 1 - 8
synapse/app/__init__.py

@@ -19,15 +19,8 @@ from synapse import python_dependencies  # noqa: E402
 
 sys.dont_write_bytecode = True
 
-
 try:
     python_dependencies.check_requirements()
 except python_dependencies.DependencyException as e:
-    message = "\n".join([
-        "Missing Requirements: %s" % (", ".join(e.dependencies),),
-        "To install run:",
-        "    pip install --upgrade --force %s" % (" ".join(e.dependencies),),
-        "",
-    ])
-    sys.stderr.writelines(message)
+    sys.stderr.writelines(e.message)
     sys.exit(1)

+ 1 - 1
synapse/config/__main__.py

@@ -16,7 +16,7 @@ from synapse.config._base import ConfigError
 
 if __name__ == "__main__":
     import sys
-    from homeserver import HomeServerConfig
+    from synapse.config.homeserver import HomeServerConfig
 
     action = sys.argv[1]
 

+ 2 - 5
synapse/config/key.py

@@ -57,8 +57,8 @@ class KeyConfig(Config):
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.
             logger.warn("Config is missing missing macaroon_secret_key")
-            seed = self.signing_key[0].seed
-            self.macaroon_secret_key = hashlib.sha256(seed)
+            seed = bytes(self.signing_key[0])
+            self.macaroon_secret_key = hashlib.sha256(seed).digest()
 
         self.expire_access_token = config.get("expire_access_token", False)
 
@@ -83,9 +83,6 @@ class KeyConfig(Config):
         # a secret which is used to sign access tokens. If none is specified,
         # the registration_shared_secret is used, if one is given; otherwise,
         # a secret key is derived from the signing key.
-        #
-        # Note that changing this will invalidate any active access tokens, so
-        # all clients will have to log back in.
         %(macaroon_secret_key)s
 
         # Used to enable access token expiration.

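The key.py fix matters because hashlib.sha256(...) returns a hash object, not
key material; only .digest() yields the raw secret bytes. A minimal
demonstration with a stand-in seed:

    import hashlib

    seed = b"stand-in signing key bytes"   # hypothetical seed material
    h = hashlib.sha256(seed)

    print(type(h))           # a hash object, not usable as a secret key
    print(len(h.digest()))   # 32: the digest bytes the config now stores
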
+ 37 - 1
synapse/config/server.py

@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@ import logging
 import os.path
 
 from synapse.http.endpoint import parse_and_validate_server_name
+from synapse.python_dependencies import DependencyException, check_requirements
 
 from ._base import Config, ConfigError
 
@@ -204,6 +205,8 @@ class ServerConfig(Config):
                 ]
             })
 
+        _check_resource_config(self.listeners)
+
     def default_config(self, server_name, data_dir_path, **kwargs):
         _, bind_port = parse_and_validate_server_name(server_name)
         if bind_port is not None:
@@ -465,3 +468,36 @@ def _warn_if_webclient_configured(listeners):
                 if name == 'webclient':
                     logger.warning(NO_MORE_WEB_CLIENT_WARNING)
                     return
+
+
+KNOWN_RESOURCES = (
+    'client',
+    'consent',
+    'federation',
+    'keys',
+    'media',
+    'metrics',
+    'replication',
+    'static',
+    'webclient',
+)
+
+
+def _check_resource_config(listeners):
+    resource_names = set(
+        res_name
+        for listener in listeners
+        for res in listener.get("resources", [])
+        for res_name in res.get("names", [])
+    )
+
+    for resource in resource_names:
+        if resource not in KNOWN_RESOURCES:
+            raise ConfigError(
+                "Unknown listener resource '%s'" % (resource, )
+            )
+        if resource == "consent":
+            try:
+                check_requirements('resources.consent')
+            except DependencyException as e:
+                raise ConfigError(e.message)

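What the new _check_resource_config validation amounts to, restated as a
self-contained snippet (the listener config here is hypothetical):

    KNOWN_RESOURCES = {"client", "consent", "federation", "keys", "media",
                       "metrics", "replication", "static", "webclient"}

    listeners = [
        {"port": 8008, "resources": [{"names": ["client", "federation"]}]},
        {"port": 9000, "resources": [{"names": ["metrics", "bogus"]}]},
    ]

    names = set(
        name
        for listener in listeners
        for res in listener.get("resources", [])
        for name in res.get("names", [])
    )
    print(names - KNOWN_RESOURCES)  # {'bogus'}: would raise ConfigError at startup
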
+ 19 - 5
synapse/federation/transaction_queue.py

@@ -22,7 +22,11 @@ from prometheus_client import Counter
 from twisted.internet import defer
 
 import synapse.metrics
-from synapse.api.errors import FederationDeniedError, HttpResponseException
+from synapse.api.errors import (
+    FederationDeniedError,
+    HttpResponseException,
+    RequestSendFailed,
+)
 from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
 from synapse.metrics import (
     LaterGauge,
@@ -518,11 +522,21 @@ class TransactionQueue(object):
             )
         except FederationDeniedError as e:
             logger.info(e)
-        except Exception as e:
-            logger.warn(
-                "TX [%s] Failed to send transaction: %s",
+        except HttpResponseException as e:
+            logger.warning(
+                "TX [%s] Received %d response to transaction: %s",
+                destination, e.code, e,
+            )
+        except RequestSendFailed as e:
+            logger.warning("TX [%s] Failed to send transaction: %s", destination, e)
+
+            for p, _ in pending_pdus:
+                logger.info("Failed to send event %s to %s", p.event_id,
+                            destination)
+        except Exception:
+            logger.exception(
+                "TX [%s] Failed to send transaction",
                 destination,
-                e,
             )
             for p, _ in pending_pdus:
                 logger.info("Failed to send event %s to %s", p.event_id,

+ 19 - 0
synapse/handlers/device.py

@@ -532,6 +532,25 @@ class DeviceListEduUpdater(object):
 
                 stream_id = result["stream_id"]
                 devices = result["devices"]
+
+                # If the remote server has more than ~1000 devices for this user
+                # we assume that something is going horribly wrong (e.g. a bot
+                # that logs in and creates a new device every time it tries to
+                # send a message).  Maintaining lots of devices per user in the
+                # cache can cause serious performance issues as if this request
+                # takes more than 60s to complete, internal replication from the
+                # inbound federation worker to the synapse master may time out
+                # causing the inbound federation to fail and causing the remote
+                # server to retry, causing a DoS.  So in this scenario we give
+                # up on storing the total list of devices and only handle the
+                # delta instead.
+                if len(devices) > 1000:
+                    logger.warn(
+                        "Ignoring device list snapshot for %s as it has >1K devs (%d)",
+                        user_id, len(devices)
+                    )
+                    devices = []
+
                 yield self.store.update_remote_device_list_cache(
                     user_id, devices, stream_id,
                 )

+ 6 - 3
synapse/handlers/identity.py

@@ -167,18 +167,21 @@ class IdentityHandler(BaseHandler):
             "mxid": mxid,
             "threepid": threepid,
         }
-        headers = {}
+
         # we abuse the federation http client to sign the request, but we have to send it
         # using the normal http client since we don't want the SRV lookup and want normal
         # 'browser-like' HTTPS.
-        self.federation_http_client.sign_request(
+        auth_headers = self.federation_http_client.build_auth_headers(
             destination=None,
             method='POST',
             url_bytes='/_matrix/identity/api/v1/3pid/unbind'.encode('ascii'),
-            headers_dict=headers,
             content=content,
             destination_is=id_server,
         )
+        headers = {
+            b"Authorization": auth_headers,
+        }
+
         try:
             yield self.http_client.post_json_get_json(
                 url,

+ 11 - 10
synapse/handlers/pagination.py

@@ -235,6 +235,17 @@ class PaginationHandler(object):
                 "room_key", next_key
             )
 
+        if events:
+            if event_filter:
+                events = event_filter.filter(events)
+
+            events = yield filter_events_for_client(
+                self.store,
+                user_id,
+                events,
+                is_peeking=(member_event_id is None),
+            )
+
         if not events:
             defer.returnValue({
                 "chunk": [],
@@ -242,16 +253,6 @@
                 "end": next_token.to_string(),
             })
 
-        if event_filter:
-            events = event_filter.filter(events)
-
-        events = yield filter_events_for_client(
-            self.store,
-            user_id,
-            events,
-            is_peeking=(member_event_id is None),
-        )
-
         state = None
         if event_filter and event_filter.lazy_load_members():
             # TODO: remove redundant members

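This reorder corresponds to the changelog entry "fix NPE in /messages by
checking if all events were filtered out" (#4330): filtering now happens
before the emptiness check, so a page whose events are all filtered out
returns an empty chunk early. The shape of the fix in miniature
(hypothetical events and filter):

    events = ["drop", "drop"]                    # everything gets filtered out

    events = [e for e in events if e != "drop"]  # event_filter + visibility
    if not events:                               # now checked *after* filtering
        result = {"chunk": [], "start": "t1", "end": "t2"}
    else:
        result = {"chunk": events}

    print(result)  # {'chunk': [], 'start': 't1', 'end': 't2'}: clean early return
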
+ 158 - 115
synapse/http/matrixfederationclient.py

@@ -19,7 +19,7 @@ import random
 import sys
 from io import BytesIO
 
-from six import PY3, string_types
+from six import PY3, raise_from, string_types
 from six.moves import urllib
 
 import attr
@@ -41,6 +41,7 @@ from synapse.api.errors import (
     Codes,
     FederationDeniedError,
     HttpResponseException,
+    RequestSendFailed,
     SynapseError,
 )
 from synapse.http.endpoint import matrix_federation_endpoint
@@ -228,19 +229,18 @@ class MatrixFederationHttpClient(object):
             backoff_on_404 (bool): Back off if we get a 404
 
         Returns:
-            Deferred: resolves with the http response object on success.
-
-            Fails with ``HttpResponseException``: if we get an HTTP response
-                code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-                to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-                is not on our federation whitelist
-
-            (May also fail with plenty of other Exceptions for things like DNS
-                failures, connection failures, SSL failures.)
+            Deferred[twisted.web.client.Response]: resolves with the HTTP
+            response object on success.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
         if timeout:
             _sec_timeout = timeout / 1000
@@ -298,9 +298,9 @@ class MatrixFederationHttpClient(object):
                     json = request.get_json()
                     if json:
                         headers_dict[b"Content-Type"] = [b"application/json"]
-                        self.sign_request(
+                        auth_headers = self.build_auth_headers(
                             destination_bytes, method_bytes, url_to_sign_bytes,
-                            headers_dict, json,
+                            json,
                         )
                         data = encode_canonical_json(json)
                         producer = FileBodyProducer(
@@ -309,11 +309,12 @@ class MatrixFederationHttpClient(object):
                         )
                     else:
                         producer = None
-                        self.sign_request(
+                        auth_headers = self.build_auth_headers(
                             destination_bytes, method_bytes, url_to_sign_bytes,
-                            headers_dict,
                         )
 
+                    headers_dict[b"Authorization"] = auth_headers
+
                     logger.info(
                         "{%s} [%s] Sending request: %s %s",
                         request.txn_id, request.destination, request.method,
@@ -335,23 +336,74 @@ class MatrixFederationHttpClient(object):
                         reactor=self.hs.get_reactor(),
                     )
 
-                    with Measure(self.clock, "outbound_request"):
-                        response = yield make_deferred_yieldable(
-                            request_deferred,
+                    try:
+                        with Measure(self.clock, "outbound_request"):
+                            response = yield make_deferred_yieldable(
+                                request_deferred,
+                            )
+                    except DNSLookupError as e:
+                        raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
+                    except Exception as e:
+                        raise_from(RequestSendFailed(e, can_retry=True), e)
+
+                    logger.info(
+                        "{%s} [%s] Got response headers: %d %s",
+                        request.txn_id,
+                        request.destination,
+                        response.code,
+                        response.phrase.decode('ascii', errors='replace'),
+                    )
+
+                    if 200 <= response.code < 300:
+                        pass
+                    else:
+                        # :'(
+                        # Update transactions table?
+                        d = treq.content(response)
+                        d = timeout_deferred(
+                            d,
+                            timeout=_sec_timeout,
+                            reactor=self.hs.get_reactor(),
+                        )
+
+                        try:
+                            body = yield make_deferred_yieldable(d)
+                        except Exception as e:
+                            # Eh, we're already going to raise an exception so lets
+                            # ignore if this fails.
+                            logger.warn(
+                                "{%s} [%s] Failed to get error response: %s %s: %s",
+                                request.txn_id,
+                                request.destination,
+                                request.method,
+                                url_str,
+                                _flatten_response_never_received(e),
+                            )
+                            body = None
+
+                        e = HttpResponseException(
+                            response.code, response.phrase, body
                         )
                         )
 
 
+                        # Retry if the error is a 429 (Too Many Requests),
+                        # otherwise just raise a standard HttpResponseException
+                        if response.code == 429:
+                            raise_from(RequestSendFailed(e, can_retry=True), e)
+                        else:
+                            raise e
+
                     break
                     break
-                except Exception as e:
+                except RequestSendFailed as e:
                     logger.warn(
                     logger.warn(
                         "{%s} [%s] Request failed: %s %s: %s",
                         "{%s} [%s] Request failed: %s %s: %s",
                         request.txn_id,
                         request.txn_id,
                         request.destination,
                         request.destination,
                         request.method,
                         request.method,
                         url_str,
                         url_str,
-                        _flatten_response_never_received(e),
+                        _flatten_response_never_received(e.inner_exception),
                     )
                     )
 
 
-                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
+                    if not e.can_retry:
                         raise
                         raise
 
 
                     if retries_left and not timeout:
                     if retries_left and not timeout:
@@ -376,50 +428,36 @@ class MatrixFederationHttpClient(object):
                     else:
                         raise
 
-            logger.info(
-                "{%s} [%s] Got response headers: %d %s",
-                request.txn_id,
-                request.destination,
-                response.code,
-                response.phrase.decode('ascii', errors='replace'),
-            )
-
-            if 200 <= response.code < 300:
-                pass
-            else:
-                # :'(
-                # Update transactions table?
-                d = treq.content(response)
-                d = timeout_deferred(
-                    d,
-                    timeout=_sec_timeout,
-                    reactor=self.hs.get_reactor(),
-                )
-                body = yield make_deferred_yieldable(d)
-                raise HttpResponseException(
-                    response.code, response.phrase, body
-                )
+                except Exception as e:
+                    logger.warn(
+                        "{%s} [%s] Request failed: %s %s: %s",
+                        request.txn_id,
+                        request.destination,
+                        request.method,
+                        url_str,
+                        _flatten_response_never_received(e),
+                    )
+                    raise
 
             defer.returnValue(response)
 
-    def sign_request(self, destination, method, url_bytes, headers_dict,
-                     content=None, destination_is=None):
+    def build_auth_headers(
+        self, destination, method, url_bytes, content=None, destination_is=None,
+    ):
         """
         """
-        Signs a request by adding an Authorization header to headers_dict
+        Builds the Authorization headers for a federation request
         Args:
         Args:
             destination (bytes|None): The desination home server of the request.
             destination (bytes|None): The desination home server of the request.
                 May be None if the destination is an identity server, in which case
                 May be None if the destination is an identity server, in which case
                 destination_is must be non-None.
                 destination_is must be non-None.
             method (bytes): The HTTP method of the request
             method (bytes): The HTTP method of the request
             url_bytes (bytes): The URI path of the request
             url_bytes (bytes): The URI path of the request
-            headers_dict (dict[bytes, list[bytes]]): Dictionary of request headers to
-                append to
             content (object): The body of the request
             content (object): The body of the request
             destination_is (bytes): As 'destination', but if the destination is an
             destination_is (bytes): As 'destination', but if the destination is an
                 identity server
                 identity server
 
 
         Returns:
         Returns:
-            None
+            list[bytes]: a list of headers to be added as "Authorization:" headers
         """
         """
         request = {
         request = {
             "method": method,
             "method": method,
@@ -446,8 +484,7 @@ class MatrixFederationHttpClient(object):
                     self.server_name, key, sig,
                 )).encode('ascii')
             )
-
-        headers_dict[b"Authorization"] = auth_headers
+        return auth_headers
 
     @defer.inlineCallbacks
     def put_json(self, destination, path, args={}, data={},
@@ -477,17 +514,18 @@ class MatrixFederationHttpClient(object):
                 requests)
 
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
 
         request = MatrixFederationRequest(
@@ -531,17 +569,18 @@ class MatrixFederationHttpClient(object):
                 try the request anyway.
             args (dict): query params
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
 
         request = MatrixFederationRequest(
@@ -586,17 +625,18 @@ class MatrixFederationHttpClient(object):
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
         logger.debug("get_json args: %s", args)
 
@@ -637,17 +677,18 @@ class MatrixFederationHttpClient(object):
             ignore_backoff (bool): true to ignore the historical backoff data and
                 try the request anyway.
         Returns:
-            Deferred: Succeeds when we get a 2xx HTTP response. The result
-            will be the decoded JSON body.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response
-            code >= 300.
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
-
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+            Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
+            result will be the decoded JSON body.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
         request = MatrixFederationRequest(
             method="DELETE",
@@ -680,18 +721,20 @@ class MatrixFederationHttpClient(object):
             args (dict): Optional dictionary used to create the query string.
             ignore_backoff (bool): true to ignore the historical backoff data
                 and try the request anyway.
-        Returns:
-            Deferred: resolves with an (int,dict) tuple of the file length and
-            a dict of the response headers.
-
-            Fails with ``HttpResponseException`` if we get an HTTP response code
-            >= 300
-
-            Fails with ``NotRetryingDestination`` if we are not yet ready
-            to retry this server.
 
-            Fails with ``FederationDeniedError`` if this destination
-            is not on our federation whitelist
+        Returns:
+            Deferred[tuple[int, dict]]: Resolves with an (int,dict) tuple of
+            the file length and a dict of the response headers.
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist.
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
         """
         request = MatrixFederationRequest(
             method="GET",
@@ -784,21 +827,21 @@ def check_content_type_is_json(headers):
         headers (twisted.web.http_headers.Headers): headers to check
 
     Raises:
-        RuntimeError if the
+        RequestSendFailed: if the Content-Type header is missing or isn't JSON
 
     """
     c_type = headers.getRawHeaders(b"Content-Type")
     if c_type is None:
-        raise RuntimeError(
+        raise RequestSendFailed(RuntimeError(
             "No Content-Type header"
-        )
+        ), can_retry=False)
 
     c_type = c_type[0].decode('ascii')  # only the first header
     val, options = cgi.parse_header(c_type)
     if val != "application/json":
-        raise RuntimeError(
+        raise RequestSendFailed(RuntimeError(
             "Content-Type not application/json: was '%s'" % c_type
-        )
+        ), can_retry=False)
 
 
 def encode_query_args(args):

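The upshot of the matrixfederationclient changes above: connection-level problems (and 429s) now surface as RequestSendFailed, with the underlying error on inner_exception, while other non-2xx answers still raise HttpResponseException. A minimal sketch of how a caller might branch on the two, assuming `client` is a MatrixFederationHttpClient; the /_matrix/example/ping path is made up for illustration:

import logging

from twisted.internet import defer

from synapse.api.errors import HttpResponseException, RequestSendFailed

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def ping_remote(client, destination):
    try:
        # hypothetical endpoint, for illustration only
        result = yield client.put_json(destination, "/_matrix/example/ping", data={})
    except RequestSendFailed as e:
        # We never got a usable response from the remote (DNS failure,
        # timeout, 429, ...). e.can_retry says whether retrying is sensible.
        logger.warn("could not reach %s: %s", destination, e.inner_exception)
        defer.returnValue(None)
    except HttpResponseException:
        # The remote answered, but with a non-2xx (and non-429) code.
        raise
    defer.returnValue(result)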
+ 47 - 23
synapse/python_dependencies.py

@@ -40,7 +40,11 @@ REQUIREMENTS = [
     "signedjson>=1.0.0",
     "signedjson>=1.0.0",
     "pynacl>=1.2.1",
     "pynacl>=1.2.1",
     "service_identity>=16.0.0",
     "service_identity>=16.0.0",
-    "Twisted>=17.1.0",
+
+    # our logcontext handling relies on the ability to cancel inlineCallbacks
+    # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
+    "Twisted>=18.7.0",
+
     "treq>=15.1",
     "treq>=15.1",
     # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
     # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
     "pyopenssl>=16.0.0",
     "pyopenssl>=16.0.0",
@@ -52,22 +56,29 @@ REQUIREMENTS = [
     "pillow>=3.1.2",
     "pillow>=3.1.2",
     "sortedcontainers>=1.4.4",
     "sortedcontainers>=1.4.4",
     "psutil>=2.0.0",
     "psutil>=2.0.0",
-    "pymacaroons-pynacl>=0.9.3",
-    "msgpack-python>=0.4.2",
+    "pymacaroons>=0.13.0",
+    "msgpack>=0.5.0",
     "phonenumbers>=8.2.0",
     "phonenumbers>=8.2.0",
     "six>=1.10",
     "six>=1.10",
     # prometheus_client 0.4.0 changed the format of counter metrics
     # prometheus_client 0.4.0 changed the format of counter metrics
     # (cf https://github.com/matrix-org/synapse/issues/4001)
     # (cf https://github.com/matrix-org/synapse/issues/4001)
     "prometheus_client>=0.0.18,<0.4.0",
     "prometheus_client>=0.0.18,<0.4.0",
+
     # we use attr.s(slots), which arrived in 16.0.0
     # we use attr.s(slots), which arrived in 16.0.0
-    "attrs>=16.0.0",
+    # Twisted 18.7.0 requires attrs>=17.4.0
+    "attrs>=17.4.0",
+
     "netaddr>=0.7.18",
     "netaddr>=0.7.18",
 ]
 ]
 
 
 CONDITIONAL_REQUIREMENTS = {
 CONDITIONAL_REQUIREMENTS = {
-    "email.enable_notifs": ["Jinja2>=2.8", "bleach>=1.4.2"],
+    "email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
     "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
     "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
     "postgres": ["psycopg2>=2.6"],
     "postgres": ["psycopg2>=2.6"],
+
+    # ConsentResource uses select_autoescape, which arrived in jinja 2.9
+    "resources.consent": ["Jinja2>=2.9"],
+
     "saml2": ["pysaml2>=4.5.0"],
     "saml2": ["pysaml2>=4.5.0"],
     "url_preview": ["lxml>=3.5.0"],
     "url_preview": ["lxml>=3.5.0"],
     "test": ["mock>=2.0"],
     "test": ["mock>=2.0"],
@@ -83,19 +94,31 @@ def list_requirements():
 
 
 class DependencyException(Exception):
+    @property
+    def message(self):
+        return "\n".join([
+            "Missing Requirements: %s" % (", ".join(self.dependencies),),
+            "To install run:",
+            "    pip install --upgrade --force %s" % (" ".join(self.dependencies),),
+            "",
+        ])
+
     @property
     def dependencies(self):
         for i in self.args[0]:
             yield '"' + i + '"'
 
 
-def check_requirements(_get_distribution=get_distribution):
-
+def check_requirements(for_feature=None, _get_distribution=get_distribution):
     deps_needed = []
     errors = []
 
-    # Check the base dependencies exist -- they all must be installed.
-    for dependency in REQUIREMENTS:
+    if for_feature:
+        reqs = CONDITIONAL_REQUIREMENTS[for_feature]
+    else:
+        reqs = REQUIREMENTS
+
+    for dependency in reqs:
         try:
             _get_distribution(dependency)
         except VersionConflict as e:
@@ -108,23 +131,24 @@ def check_requirements(_get_distribution=get_distribution):
             deps_needed.append(dependency)
             errors.append("Needed %s but it was not installed" % (dependency,))
 
-    # Check the optional dependencies are up to date. We allow them to not be
-    # installed.
-    OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), [])
-
-    for dependency in OPTS:
-        try:
-            _get_distribution(dependency)
-        except VersionConflict:
-            deps_needed.append(dependency)
-            errors.append("Needed %s but it was not installed" % (dependency,))
-        except DistributionNotFound:
-            # If it's not found, we don't care
-            pass
+    if not for_feature:
+        # Check the optional dependencies are up to date. We allow them to not be
+        # installed.
+        OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), [])
+
+        for dependency in OPTS:
+            try:
+                _get_distribution(dependency)
+            except VersionConflict:
+                deps_needed.append(dependency)
+                errors.append("Needed %s but it was not installed" % (dependency,))
+            except DistributionNotFound:
+                # If it's not found, we don't care
+                pass
 
     if deps_needed:
         for e in errors:
-            logging.exception(e)
+            logging.error(e)
 
         raise DependencyException(deps_needed)
 

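The new for_feature argument means an entry point can verify a single optional-dependency set, and DependencyException.message gives it a ready-made error to print. A hedged sketch of how this might be driven (run from a synapse checkout; "postgres" is one of the CONDITIONAL_REQUIREMENTS keys above):

import sys

from synapse.python_dependencies import DependencyException, check_requirements

try:
    check_requirements(for_feature="postgres")
except DependencyException as e:
    # e.message already includes the "pip install --upgrade --force ..." hint
    sys.stderr.write(e.message)
    sys.exit(1)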
+ 33 - 1
synapse/rest/client/v2_alpha/account_data.py

@@ -17,7 +17,7 @@ import logging
 
 from twisted.internet import defer
 
-from synapse.api.errors import AuthError, SynapseError
+from synapse.api.errors import AuthError, NotFoundError, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
 from ._base import client_v2_patterns
@@ -28,6 +28,7 @@ logger = logging.getLogger(__name__)
 class AccountDataServlet(RestServlet):
     """
     PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1
+    GET /user/{user_id}/account_data/{account_dataType} HTTP/1.1
     """
     PATTERNS = client_v2_patterns(
         "/user/(?P<user_id>[^/]*)/account_data/(?P<account_data_type>[^/]*)"
@@ -57,10 +58,26 @@ class AccountDataServlet(RestServlet):
 
         defer.returnValue((200, {}))
 
+    @defer.inlineCallbacks
+    def on_GET(self, request, user_id, account_data_type):
+        requester = yield self.auth.get_user_by_req(request)
+        if user_id != requester.user.to_string():
+            raise AuthError(403, "Cannot get account data for other users.")
+
+        event = yield self.store.get_global_account_data_by_type_for_user(
+            account_data_type, user_id,
+        )
+
+        if event is None:
+            raise NotFoundError("Account data not found")
+
+        defer.returnValue((200, event))
+
 
 class RoomAccountDataServlet(RestServlet):
     """
     PUT /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1
+    GET /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1
     """
     PATTERNS = client_v2_patterns(
         "/user/(?P<user_id>[^/]*)"
@@ -99,6 +116,21 @@ class RoomAccountDataServlet(RestServlet):
 
         defer.returnValue((200, {}))
 
+    @defer.inlineCallbacks
+    def on_GET(self, request, user_id, room_id, account_data_type):
+        requester = yield self.auth.get_user_by_req(request)
+        if user_id != requester.user.to_string():
+            raise AuthError(403, "Cannot get account data for other users.")
+
+        event = yield self.store.get_account_data_for_room_and_type(
+            user_id, room_id, account_data_type,
+        )
+
+        if event is None:
+            raise NotFoundError("Room account data not found")
+
+        defer.returnValue((200, event))
+
 
 def register_servlets(hs, http_server):
     AccountDataServlet(hs).register(http_server)

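The new on_GET handlers make account data readable as well as writable. A hypothetical client-side request against the global variant (the server URL, user id, data type and token are placeholders, and the /_matrix/client/r0 prefix is assumed from client_v2_patterns):

import requests

resp = requests.get(
    "https://localhost:8448/_matrix/client/r0"
    "/user/%40user%3Aexample.com/account_data/m.ignored_user_list",
    headers={"Authorization": "Bearer <access_token>"},
)
# 200 with the stored JSON object, or 404 ("Account data not found")
print(resp.status_code, resp.text)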
+ 4 - 3
synapse/rest/media/v1/media_repository.py

@@ -30,6 +30,7 @@ from synapse.api.errors import (
     FederationDeniedError,
     HttpResponseException,
     NotFoundError,
+    RequestSendFailed,
     SynapseError,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -372,10 +373,10 @@ class MediaRepository(object):
                         "allow_remote": "false",
                         "allow_remote": "false",
                     }
                     }
                 )
                 )
-            except twisted.internet.error.DNSLookupError as e:
-                logger.warn("HTTP error fetching remote media %s/%s: %r",
+            except RequestSendFailed as e:
+                logger.warn("Request failed fetching remote media %s/%s: %r",
                             server_name, media_id, e)
                             server_name, media_id, e)
-                raise NotFoundError()
+                raise SynapseError(502, "Failed to fetch remote media")
 
 
             except HttpResponseException as e:
             except HttpResponseException as e:
                 logger.warn("HTTP error fetching remote media %s/%s: %s",
                 logger.warn("HTTP error fetching remote media %s/%s: %s",

+ 9 - 1
synapse/storage/_base.py

@@ -632,11 +632,19 @@ class SQLBaseStore(object):
         if lock:
             self.database_engine.lock_table(txn, table)
 
+        def _getwhere(key):
+            # If the value we're passing in is None (aka NULL), we need to use
+            # IS, not =, since NULL = NULL evaluates to NULL (never true).
+            if keyvalues[key] is None:
+                return "%s IS ?" % (key,)
+            else:
+                return "%s = ?" % (key,)
+
         # First try to update.
         sql = "UPDATE %s SET %s WHERE %s" % (
             table,
             ", ".join("%s = ?" % (k,) for k in values),
-            " AND ".join("%s = ?" % (k,) for k in keyvalues),
+            " AND ".join(_getwhere(k) for k in keyvalues)
         )
         sqlargs = list(values.values()) + list(keyvalues.values())
 

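The _getwhere helper matters because SQL's `=` is not NULL-safe: NULL = NULL evaluates to NULL, so a keyvalue of None would never match the existing row and the upsert would insert a duplicate instead of updating. A standalone sqlite3 demonstration of the difference (table and values invented for illustration):

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE t (a TEXT, b TEXT)")
cur.execute("INSERT INTO t VALUES ('x', NULL)")

# '=' never matches a NULL column, even with NULL on both sides...
cur.execute("UPDATE t SET a = 'y' WHERE b = ?", (None,))
print(cur.rowcount)  # 0

# ...whereas 'IS' compares NULL-safely, which is what _getwhere now emits.
cur.execute("UPDATE t SET a = 'y' WHERE b IS ?", (None,))
print(cur.rowcount)  # 1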
+ 127 - 6
synapse/storage/client_ips.py

@@ -65,16 +65,27 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             columns=["last_seen"],
             columns=["last_seen"],
         )
         )
 
 
+        self.register_background_update_handler(
+            "user_ips_remove_dupes",
+            self._remove_user_ip_dupes,
+        )
+
         # Register a unique index
         # Register a unique index
         self.register_background_index_update(
         self.register_background_index_update(
             "user_ips_device_unique_index",
             "user_ips_device_unique_index",
-            index_name="user_ips_device_unique_id",
+            index_name="user_ips_user_token_ip_unique_index",
             table="user_ips",
             table="user_ips",
-            columns=["user_id", "access_token", "ip", "user_agent", "device_id"],
+            columns=["user_id", "access_token", "ip"],
             unique=True,
             unique=True,
         )
         )
 
 
-        # (user_id, access_token, ip) -> (user_agent, device_id, last_seen)
+        # Drop the old non-unique index
+        self.register_background_update_handler(
+            "user_ips_drop_nonunique_index",
+            self._remove_user_ip_nonunique,
+        )
+
+        # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen)
         self._batch_row_update = {}
         self._batch_row_update = {}
 
 
         self._client_ip_looper = self._clock.looping_call(
         self._client_ip_looper = self._clock.looping_call(
@@ -84,6 +95,116 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             "before", "shutdown", self._update_client_ips_batch
             "before", "shutdown", self._update_client_ips_batch
         )
         )
 
 
+    @defer.inlineCallbacks
+    def _remove_user_ip_nonunique(self, progress, batch_size):
+        def f(conn):
+            txn = conn.cursor()
+            txn.execute(
+                "DROP INDEX IF EXISTS user_ips_user_ip"
+            )
+            txn.close()
+
+        yield self.runWithConnection(f)
+        yield self._end_background_update("user_ips_drop_nonunique_index")
+        defer.returnValue(1)
+
+    @defer.inlineCallbacks
+    def _remove_user_ip_dupes(self, progress, batch_size):
+
+        last_seen_progress = progress.get("last_seen", 0)
+
+        def get_last_seen(txn):
+            txn.execute(
+                """
+                SELECT last_seen FROM user_ips
+                WHERE last_seen > ?
+                ORDER BY last_seen
+                LIMIT 1
+                OFFSET ?
+                """,
+                (last_seen_progress, batch_size)
+            )
+            results = txn.fetchone()
+            return results
+
+        # Get a last_seen that's sufficiently far ahead of the previous one
+        last_seen = yield self.runInteraction(
+            "user_ips_dups_get_last_seen", get_last_seen
+        )
+
+        if not last_seen:
+            # If we get None then we've reached the end and just need to
+            # delete the last batch.
+            last = True
+
+            # We fake not having an upper bound by using a timestamp in the
+            # future: just the current time multiplied by two.
+            last_seen = int(self.clock.time_msec()) * 2
+        else:
+            last = False
+            last_seen = last_seen[0]
+
+        def remove(txn, last_seen_progress, last_seen):
+            # This works by looking at all entries in the given time span, and
+            # then for each (user_id, access_token, ip) tuple in that range
+            # checking for any duplicates in the rest of the table (via a join).
+            # It then only returns entries which have duplicates, and the max
+            # last_seen across all duplicates, which can then be used to delete
+            # all other duplicates.
+            # It is efficient due to the existence of (user_id, access_token,
+            # ip) and (last_seen) indices.
+            txn.execute(
+                """
+                SELECT user_id, access_token, ip,
+                       MAX(device_id), MAX(user_agent), MAX(last_seen)
+                FROM (
+                    SELECT user_id, access_token, ip
+                    FROM user_ips
+                    WHERE ? <= last_seen AND last_seen < ?
+                    ORDER BY last_seen
+                ) c
+                INNER JOIN user_ips USING (user_id, access_token, ip)
+                GROUP BY user_id, access_token, ip
+                HAVING count(*) > 1""",
+                (last_seen_progress, last_seen)
+            )
+            res = txn.fetchall()
+
+            # We've got some duplicates
+            for i in res:
+                user_id, access_token, ip, device_id, user_agent, last_seen = i
+
+                # Drop all the duplicates
+                txn.execute(
+                    """
+                    DELETE FROM user_ips
+                    WHERE user_id = ? AND access_token = ? AND ip = ?
+                    """,
+                    (user_id, access_token, ip)
+                )
+
+                # Add in one to be the last_seen
+                txn.execute(
+                    """
+                    INSERT INTO user_ips
+                    (user_id, access_token, ip, device_id, user_agent, last_seen)
+                    VALUES (?, ?, ?, ?, ?, ?)
+                    """,
+                    (user_id, access_token, ip, device_id, user_agent, last_seen)
+                )
+
+            self._background_update_progress_txn(
+                txn, "user_ips_remove_dupes", {"last_seen": last_seen}
+            )
+
+        yield self.runInteraction(
+            "user_ips_dups_remove", remove, last_seen_progress, last_seen
+        )
+        if last:
+            yield self._end_background_update("user_ips_remove_dupes")
+
+        defer.returnValue(batch_size)
+
     @defer.inlineCallbacks
     def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id,
                          now=None):
@@ -139,10 +260,10 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                         "user_id": user_id,
                         "user_id": user_id,
                         "access_token": access_token,
                         "access_token": access_token,
                         "ip": ip,
                         "ip": ip,
-                        "user_agent": user_agent,
-                        "device_id": device_id,
                     },
                     },
                     values={
                     values={
+                        "user_agent": user_agent,
+                        "device_id": device_id,
                         "last_seen": last_seen,
                         "last_seen": last_seen,
                     },
                     },
                     lock=False,
                     lock=False,
@@ -239,7 +360,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         results = {}
 
         for key in self._batch_row_update:
-            uid, access_token, ip = key
+            uid, access_token, ip, = key
             if uid == user_id:
                 user_agent, _, last_seen = self._batch_row_update[key]
                 results[(access_token, ip)] = (user_agent, last_seen)

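Note the ordering is load-bearing: a unique index on (user_id, access_token, ip) cannot be built while duplicate rows remain, which is why _remove_user_ip_dupes must complete first (see the depends_on chain in the schema delta below). A toy sqlite3 illustration, with the schema trimmed to the relevant columns:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute(
    "CREATE TABLE user_ips (user_id TEXT, access_token TEXT, ip TEXT, last_seen BIGINT)"
)
cur.executemany(
    "INSERT INTO user_ips VALUES (?, ?, ?, ?)",
    [("@u:hs", "tok", "1.2.3.4", 1), ("@u:hs", "tok", "1.2.3.4", 2)],
)
try:
    cur.execute(
        "CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index"
        " ON user_ips (user_id, access_token, ip)"
    )
except sqlite3.IntegrityError as e:
    print("index creation fails while duplicates remain:", e)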
+ 25 - 25
synapse/storage/registration.py

@@ -114,6 +114,31 @@ class RegistrationWorkerStore(SQLBaseStore):
 
         return None
 
+    @cachedInlineCallbacks()
+    def is_support_user(self, user_id):
+        """Determines if the user is of type UserTypes.SUPPORT
+
+        Args:
+            user_id (str): user id to test
+
+        Returns:
+            Deferred[bool]: True if user is of type UserTypes.SUPPORT
+        """
+        res = yield self.runInteraction(
+            "is_support_user", self.is_support_user_txn, user_id
+        )
+        defer.returnValue(res)
+
+    def is_support_user_txn(self, txn, user_id):
+        res = self._simple_select_one_onecol_txn(
+            txn=txn,
+            table="users",
+            keyvalues={"name": user_id},
+            retcol="user_type",
+            allow_none=True,
+        )
+        return True if res == UserTypes.SUPPORT else False
+
 
 class RegistrationStore(RegistrationWorkerStore,
                         background_updates.BackgroundUpdateStore):
@@ -465,31 +490,6 @@ class RegistrationStore(RegistrationWorkerStore,
 
         defer.returnValue(res if res else False)
 
-    @cachedInlineCallbacks()
-    def is_support_user(self, user_id):
-        """Determines if the user is of type UserTypes.SUPPORT
-
-        Args:
-            user_id (str): user id to test
-
-        Returns:
-            Deferred[bool]: True if user is of type UserTypes.SUPPORT
-        """
-        res = yield self.runInteraction(
-            "is_support_user", self.is_support_user_txn, user_id
-        )
-        defer.returnValue(res)
-
-    def is_support_user_txn(self, txn, user_id):
-        res = self._simple_select_one_onecol_txn(
-            txn=txn,
-            table="users",
-            keyvalues={"name": user_id},
-            retcol="user_type",
-            allow_none=True,
-        )
-        return True if res == UserTypes.SUPPORT else False
-
     @defer.inlineCallbacks
     def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
         yield self._simple_upsert("user_threepids", {

+ 26 - 0
synapse/storage/schema/delta/53/user_ips_index.sql

@@ -0,0 +1,26 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- delete duplicates
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('user_ips_remove_dupes', '{}');
+
+-- add a new unique index to user_ips table
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+  ('user_ips_device_unique_index', '{}', 'user_ips_remove_dupes');
+
+-- drop the old original index
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+  ('user_ips_drop_nonunique_index', '{}', 'user_ips_device_unique_index');

+ 3 - 1
synapse/util/async_helpers.py

@@ -387,12 +387,14 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None):
     deferred that wraps and times out the given deferred, correctly handling
     the case where the given deferred's canceller throws.
 
+    (See https://twistedmatrix.com/trac/ticket/9534)
+
     NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred
 
     Args:
         deferred (Deferred)
         timeout (float): Timeout in seconds
-        reactor (twisted.internet.reactor): The twisted reactor to use
+        reactor (twisted.interfaces.IReactorTime): The twisted reactor to use
         on_timeout_cancel (callable): A callable which is called immediately
             after the deferred times out, and not if this deferred is
             otherwise cancelled before the timeout.

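For reference, a minimal sketch of driving timeout_deferred with a task.Clock, mirroring the tests added below (the names come straight from the diff; nothing here is new API):

from twisted.internet import defer
from twisted.internet.task import Clock

from synapse.util.async_helpers import timeout_deferred

clock = Clock()  # provides callLater(), i.e. IReactorTime
d = defer.Deferred()
timed = timeout_deferred(d, timeout=1.0, reactor=clock)
timed.addErrback(lambda f: f.trap(defer.TimeoutError))

clock.advance(1.0)  # fires the timeout; 'timed' errbacks with defer.TimeoutError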
+ 3 - 1
synctl

@@ -156,7 +156,9 @@ def main():
         write(
             "No config file found\n"
             "To generate a config file, run '%s -c %s --generate-config"
-            " --server-name=<server name>'\n" % (" ".join(SYNAPSE), options.configfile),
+            " --server-name=<server name> --report-stats=<yes/no>'\n" % (
+                " ".join(SYNAPSE), options.configfile,
+            ),
             stream=sys.stderr,
         )
         sys.exit(1)

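A quick way to eyeball the reflowed synctl hint (SYNAPSE and the config filename here are illustrative stand-ins for the script's real values):

SYNAPSE = ["python", "-m", "synapse.app.homeserver"]
configfile = "homeserver.yaml"
print(
    "No config file found\n"
    "To generate a config file, run '%s -c %s --generate-config"
    " --server-name=<server name> --report-stats=<yes/no>'\n" % (
        " ".join(SYNAPSE), configfile,
    )
)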
+ 1 - 154
tests/api/test_auth.py

@@ -194,8 +194,6 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_get_user_from_macaroon(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
         self.store.get_user_by_access_token = Mock(
             return_value={"name": "@baldrick:matrix.org", "device_id": "device"}
         )
@@ -220,6 +218,7 @@ class AuthTestCase(unittest.TestCase):
     @defer.inlineCallbacks
     def test_get_guest_user_from_macaroon(self):
         self.store.get_user_by_id = Mock(return_value={"is_guest": True})
+        self.store.get_user_by_access_token = Mock(return_value=None)
 
         user_id = "@baldrick:matrix.org"
         macaroon = pymacaroons.Macaroon(
@@ -240,158 +239,6 @@ class AuthTestCase(unittest.TestCase):
         self.assertTrue(is_guest)
         self.store.get_user_by_id.assert_called_with(user_id)
 
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_user_db_mismatch(self):
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@percy:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("User mismatch", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_missing_caveat(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("No user caveat", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_wrong_key(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key + "wrong",
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("Invalid macaroon", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_unknown_caveat(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        macaroon.add_first_party_caveat("cunning > fox")
-
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("Invalid macaroon", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_expired(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user,))
-        macaroon.add_first_party_caveat("time < -2000")  # ms
-
-        self.hs.clock.now = 5000  # seconds
-        self.hs.config.expire_access_token = True
-        # yield self.auth.get_user_by_access_token(macaroon.serialize())
-        # TODO(daniel): Turn on the check that we validate expiration, when we
-        # validate expiration (and remove the above line, which will start
-        # throwing).
-        with self.assertRaises(AuthError) as cm:
-            yield self.auth.get_user_by_access_token(macaroon.serialize())
-        self.assertEqual(401, cm.exception.code)
-        self.assertIn("Invalid macaroon", cm.exception.msg)
-
-    @defer.inlineCallbacks
-    def test_get_user_from_macaroon_with_valid_duration(self):
-        # TODO(danielwh): Remove this mock when we remove the
-        # get_user_by_access_token fallback.
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        self.store.get_user_by_access_token = Mock(
-            return_value={"name": "@baldrick:matrix.org"}
-        )
-
-        user_id = "@baldrick:matrix.org"
-        macaroon = pymacaroons.Macaroon(
-            location=self.hs.config.server_name,
-            identifier="key",
-            key=self.hs.config.macaroon_secret_key,
-        )
-        macaroon.add_first_party_caveat("gen = 1")
-        macaroon.add_first_party_caveat("type = access")
-        macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
-        macaroon.add_first_party_caveat("time < 900000000")  # ms
-
-        self.hs.clock.now = 5000  # seconds
-        self.hs.config.expire_access_token = True
-
-        user_info = yield self.auth.get_user_by_access_token(macaroon.serialize())
-        user = user_info["user"]
-        self.assertEqual(UserID.from_string(user_id), user)
-
     @defer.inlineCallbacks
     def test_cannot_use_regular_token_as_guest(self):
         USER_ID = "@percy:matrix.org"

+ 10 - 3
tests/http/test_fedclient.py

@@ -20,6 +20,7 @@ from twisted.internet.error import ConnectingCancelledError, DNSLookupError
 from twisted.web.client import ResponseNeverReceived
 from twisted.web.http import HTTPChannel
 
+from synapse.api.errors import RequestSendFailed
 from synapse.http.matrixfederationclient import (
     MatrixFederationHttpClient,
     MatrixFederationRequest,
@@ -49,7 +50,8 @@ class FederationClientTests(HomeserverTestCase):
         self.pump()
 
         f = self.failureResultOf(d)
-        self.assertIsInstance(f.value, DNSLookupError)
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIsInstance(f.value.inner_exception, DNSLookupError)
 
     def test_client_never_connect(self):
         """
@@ -76,7 +78,11 @@ class FederationClientTests(HomeserverTestCase):
         self.reactor.advance(10.5)
         f = self.failureResultOf(d)
 
-        self.assertIsInstance(f.value, (ConnectingCancelledError, TimeoutError))
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIsInstance(
+            f.value.inner_exception,
+            (ConnectingCancelledError, TimeoutError),
+        )
 
     def test_client_connect_no_response(self):
         """
@@ -107,7 +113,8 @@ class FederationClientTests(HomeserverTestCase):
         self.reactor.advance(10.5)
         f = self.failureResultOf(d)
 
-        self.assertIsInstance(f.value, ResponseNeverReceived)
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived)
 
     def test_client_gets_headers(self):
         """

+ 71 - 0
tests/storage/test_client_ips.py

@@ -62,6 +62,77 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
             r,
         )
 
+    def test_insert_new_client_ip_none_device_id(self):
+        """
+        An insert with a device ID of NULL will not create a new entry, but
+        update an existing entry in the user_ips table.
+        """
+        self.reactor.advance(12345678)
+
+        user_id = "@user:id"
+
+        # Add & trigger the storage loop
+        self.get_success(
+            self.store.insert_client_ip(
+                user_id, "access_token", "ip", "user_agent", None
+            )
+        )
+        self.reactor.advance(200)
+        self.pump(0)
+
+        result = self.get_success(
+            self.store._simple_select_list(
+                table="user_ips",
+                keyvalues={"user_id": user_id},
+                retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"],
+                desc="get_user_ip_and_agents",
+            )
+        )
+
+        self.assertEqual(
+            result,
+            [
+                {
+                    'access_token': 'access_token',
+                    'ip': 'ip',
+                    'user_agent': 'user_agent',
+                    'device_id': None,
+                    'last_seen': 12345678000,
+                }
+            ],
+        )
+
+        # Add another & trigger the storage loop
+        self.get_success(
+            self.store.insert_client_ip(
+                user_id, "access_token", "ip", "user_agent", None
+            )
+        )
+        self.reactor.advance(10)
+        self.pump(0)
+
+        result = self.get_success(
+            self.store._simple_select_list(
+                table="user_ips",
+                keyvalues={"user_id": user_id},
+                retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"],
+                desc="get_user_ip_and_agents",
+            )
+        )
+        # Only one result, has been upserted.
+        self.assertEqual(
+            result,
+            [
+                {
+                    'access_token': 'access_token',
+                    'ip': 'ip',
+                    'user_agent': 'user_agent',
+                    'device_id': None,
+                    'last_seen': 12345878000,
+                }
+            ],
+        )
+
     def test_disabled_monthly_active_user(self):
         self.hs.config.limit_usage_by_mau = False
         self.hs.config.max_mau_value = 50

+ 104 - 0
tests/util/test_async_utils.py

@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
+from twisted.internet.task import Clock
+
+from synapse.util import logcontext
+from synapse.util.async_helpers import timeout_deferred
+from synapse.util.logcontext import LoggingContext
+
+from tests.unittest import TestCase
+
+
+class TimeoutDeferredTest(TestCase):
+    def setUp(self):
+        self.clock = Clock()
+
+    def test_times_out(self):
+        """Basic test case that checks that the original deferred is cancelled and that
+        the timing-out deferred is errbacked
+        """
+        cancelled = [False]
+
+        def canceller(_d):
+            cancelled[0] = True
+
+        non_completing_d = Deferred(canceller)
+        timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
+
+        self.assertNoResult(timing_out_d)
+        self.assertFalse(cancelled[0], "deferred was cancelled prematurely")
+
+        self.clock.pump((1.0, ))
+
+        self.assertTrue(cancelled[0], "deferred was not cancelled by timeout")
+        self.failureResultOf(timing_out_d, defer.TimeoutError, )
+
+    def test_times_out_when_canceller_throws(self):
+        """Test that we have successfully worked around
+        https://twistedmatrix.com/trac/ticket/9534"""
+
+        def canceller(_d):
+            raise Exception("can't cancel this deferred")
+
+        non_completing_d = Deferred(canceller)
+        timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
+
+        self.assertNoResult(timing_out_d)
+
+        self.clock.pump((1.0, ))
+
+        self.failureResultOf(timing_out_d, defer.TimeoutError, )
+
+    def test_logcontext_is_preserved_on_cancellation(self):
+        blocking_was_cancelled = [False]
+
+        @defer.inlineCallbacks
+        def blocking():
+            non_completing_d = Deferred()
+            with logcontext.PreserveLoggingContext():
+                try:
+                    yield non_completing_d
+                except CancelledError:
+                    blocking_was_cancelled[0] = True
+                    raise
+
+        with logcontext.LoggingContext("one") as context_one:
+            # the errbacks should be run in the test logcontext
+            def errback(res, deferred_name):
+                self.assertIs(
+                    LoggingContext.current_context(), context_one,
+                    "errback %s run in unexpected logcontext %s" % (
+                        deferred_name, LoggingContext.current_context(),
+                    )
+                )
+                return res
+
+            original_deferred = blocking()
+            original_deferred.addErrback(errback, "orig")
+            timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
+            self.assertNoResult(timing_out_d)
+            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
+            timing_out_d.addErrback(errback, "timingout")
+
+            self.clock.pump((1.0, ))
+
+            self.assertTrue(
+                blocking_was_cancelled[0],
+                "non-completing deferred was not cancelled",
+            )
+            self.failureResultOf(timing_out_d, defer.TimeoutError, )
+            self.assertIs(LoggingContext.current_context(), context_one)