diff --git a/.buildkite/scripts/create_postgres_db.py b/.buildkite/scripts/create_postgres_db.py
deleted file mode 100755
index 956339de5cf1..000000000000
--- a/.buildkite/scripts/create_postgres_db.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-from synapse.storage.engines import create_engine
-
-logger = logging.getLogger("create_postgres_db")
-
-if __name__ == "__main__":
-    # Create a PostgresEngine.
-    db_engine = create_engine({"name": "psycopg2", "args": {}})
-
-    # Connect to postgres to create the base database.
-    # We use "postgres" as a database because it's bound to exist and the "synapse" one
-    # doesn't exist yet.
-    db_conn = db_engine.module.connect(
-        user="postgres", host="postgres", password="postgres", dbname="postgres"
-    )
-    db_conn.autocommit = True
-    cur = db_conn.cursor()
-    cur.execute("CREATE DATABASE synapse;")
-    cur.close()
-    db_conn.close()
diff --git a/.buildkite/scripts/postgres_exec.py b/.buildkite/scripts/postgres_exec.py
new file mode 100755
index 000000000000..086b391724ec
--- /dev/null
+++ b/.buildkite/scripts/postgres_exec.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+import psycopg2
+
+# a very simple replacement for `psql`, to make up for the lack of the postgres client
+# libraries in the synapse docker image.
+
+# We use "postgres" as a database because it's bound to exist and the "synapse" one
+# doesn't exist yet.
+db_conn = psycopg2.connect(
+    user="postgres", host="postgres", password="postgres", dbname="postgres"
+)
+db_conn.autocommit = True
+cur = db_conn.cursor()
+for c in sys.argv[1:]:
+    cur.execute(c)
diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh
index 9fe5b696b0f7..9270d55f0461 100755
--- a/.buildkite/scripts/test_old_deps.sh
+++ b/.buildkite/scripts/test_old_deps.sh
@@ -1,16 +1,16 @@
 #!/usr/bin/env bash
 
-# this script is run by buildkite in a plain `xenial` container; it installs the
-# minimal requirements for tox and hands over to the py35-old tox environment.
+# this script is run by buildkite in a plain `bionic` container; it installs the
+# minimal requirements for tox and hands over to the py3-old tox environment.
 
 set -ex
 
 apt-get update
-apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
+apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox
 
 export LANG="C.UTF-8"
 
 # Prevent virtualenv from auto-updating pip to an incompatible version
 export VIRTUALENV_NO_DOWNLOAD=1
 
-exec tox -e py35-old,combine
+exec tox -e py3-old,combine
diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh
index 8914319e3825..a7e245476937 100755
--- a/.buildkite/scripts/test_synapse_port_db.sh
+++ b/.buildkite/scripts/test_synapse_port_db.sh
@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
 #
-# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
-# with additional dependencies needed for the test (such as coverage or the PostgreSQL
-# driver), update the schema of the test SQLite database and run background updates on it,
-# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to
-# test porting the SQLite database to the PostgreSQL database (with coverage).
+# Test script for 'synapse_port_db'.
+#   - sets up synapse and deps
+#   - runs the port script on a prepopulated test sqlite db
+#   - also runs it against a new sqlite db
+
 set -xe
 cd `dirname $0`/../..
 
@@ -22,15 +22,32 @@ echo "--- Generate the signing key"
 
 # Generate the server's signing key.
 python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
 
-echo "--- Prepare the databases"
+echo "--- Prepare test database"
 
 # Make sure the SQLite3 database is using the latest schema and has no pending background update.
 scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 
 # Create the PostgreSQL database.
-./.buildkite/scripts/create_postgres_db.py
+./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse"
+
+echo "+++ Run synapse_port_db against test database"
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+
+#####
+
+# Now do the same again, on an empty database.
+
+echo "--- Prepare empty SQLite database"
+
+# we do this by deleting the sqlite db, and then doing the same again.
+rm .buildkite/test_db.db
+
+scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 
-echo "+++ Run synapse_port_db"
+# re-create the PostgreSQL database.
+./.buildkite/scripts/postgres_exec.py \
+    "DROP DATABASE synapse" \
+    "CREATE DATABASE synapse"
 
-# Run the script
+echo "+++ Run synapse_port_db against empty database"
 coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 000000000000..e7f3be1b4ea9
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,322 @@
+name: Tests
+
+on:
+  push:
+    branches: ["develop", "release-*"]
+  pull_request:
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        toxenv:
+          - "check-sampleconfig"
+          - "check_codestyle"
+          - "check_isort"
+          - "mypy"
+          - "packaging"
+
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - run: pip install tox
+      - run: tox -e ${{ matrix.toxenv }}
+
+  lint-crlf:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Check line endings
+        run: scripts-dev/check_line_terminators.sh
+
+  lint-newsfile:
+    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - run: pip install tox
+      - name: Patch Buildkite-specific test script
+        run: |
+          sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \
+              scripts-dev/check-newsfragment
+      - run: scripts-dev/check-newsfragment
+
+  lint-sdist:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+        with:
+          python-version: "3.x"
+      - run: pip install wheel
+      - run: python setup.py sdist bdist_wheel
+      - uses: actions/upload-artifact@v2
+        with:
+          name: Python Distributions
+          path: dist/*
+
+  # Dummy step to gate other tests on without repeating the whole list
+  linting-done:
+    if: ${{ always() }} # Run this even if prior jobs were skipped
+    needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
+    runs-on: ubuntu-latest
+    steps:
+      - run: "true"
+
+  trial:
+    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
+    needs: linting-done
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.6", "3.7", "3.8", "3.9"]
+        database: ["sqlite"]
+        include:
+          # Newest Python without optional deps
+          - python-version: "3.9"
+            toxenv: "py-noextras,combine"
+
+          # Oldest Python with PostgreSQL
+          - python-version: "3.6"
+            database: "postgres"
+            postgres-version: "9.6"
+
+          # Newest Python with PostgreSQL
+          - python-version: "3.9"
+            database: "postgres"
+            postgres-version: "13"
+
+    steps:
+      - uses: actions/checkout@v2
+      - run: sudo apt-get -qq install xmlsec1
+      - name: Set up PostgreSQL ${{ matrix.postgres-version }}
+        if: ${{ matrix.postgres-version }}
+        run: |
+          docker run -d -p 5432:5432 \
+              -e POSTGRES_PASSWORD=postgres \
+              -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
+              postgres:${{ matrix.postgres-version }}
+      - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - run: pip install tox
+      - name: Await PostgreSQL
+        if: ${{ matrix.postgres-version }}
+        timeout-minutes: 2
+        run: until pg_isready -h localhost; do sleep 1; done
+      - run: tox -e py,combine
+        env:
+          TRIAL_FLAGS: "--jobs=2"
+          SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
+          SYNAPSE_POSTGRES_HOST: localhost
+          SYNAPSE_POSTGRES_USER: postgres
+          SYNAPSE_POSTGRES_PASSWORD: postgres
+      - name: Dump logs
+        # Note: Dumps to workflow logs instead of using actions/upload-artifact
+        #       This keeps logs colocated with failing jobs
+        #       It also ignores find's exit code; this is a best effort affair
+        run: >-
+          find _trial_temp -name '*.log'
+          -exec echo "::group::{}" \;
+          -exec cat {} \;
+          -exec echo "::endgroup::" \;
+          || true
+
+  trial-olddeps:
+    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
+    needs: linting-done
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Test with old deps
+        uses: docker://ubuntu:bionic # For old python and sqlite
+        with:
+          workdir: /github/workspace
+          entrypoint: .buildkite/scripts/test_old_deps.sh
+        env:
+          TRIAL_FLAGS: "--jobs=2"
+      - name: Dump logs
+        # Note: Dumps to workflow logs instead of using actions/upload-artifact
+        #       This keeps logs colocated with failing jobs
+        #       It also ignores find's exit code; this is a best effort affair
+        run: >-
+          find _trial_temp -name '*.log'
+          -exec echo "::group::{}" \;
+          -exec cat {} \;
+          -exec echo "::endgroup::" \;
+          || true
+
+  trial-pypy:
+    # Very slow; only run if the branch name includes 'pypy'
+    if: ${{ contains(github.ref, 'pypy') && !failure() }}
+    needs: linting-done
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["pypy-3.6"]
+
+    steps:
+      - uses: actions/checkout@v2
+      - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
+      - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - run: pip install tox
+      - run: tox -e py,combine
+        env:
+          TRIAL_FLAGS: "--jobs=2"
+      - name: Dump logs
+        # Note: Dumps to workflow logs instead of using actions/upload-artifact
+        #       This keeps logs colocated with failing jobs
+        #       It also ignores find's exit code; this is a best effort affair
+        run: >-
+          find _trial_temp -name '*.log'
+          -exec echo "::group::{}" \;
+          -exec cat {} \;
+          -exec echo "::endgroup::" \;
+          || true
+
+  sytest:
+    if: ${{ !failure() }}
+    needs: linting-done
+    runs-on: ubuntu-latest
+    container:
+      image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
+      volumes:
+        - ${{ github.workspace }}:/src
+      env:
+        BUILDKITE_BRANCH: ${{ github.head_ref }}
+        POSTGRES: ${{ matrix.postgres && 1 }}
+        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1 }}
+        WORKERS: ${{ matrix.workers && 1 }}
+        REDIS: ${{ matrix.redis && 1 }}
+        BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - sytest-tag: bionic
+
+          - sytest-tag: bionic
+            postgres: postgres
+
+          - sytest-tag: testing
+            postgres: postgres
+
+          - sytest-tag: bionic
+            postgres: multi-postgres
+            workers: workers
+
+          - sytest-tag: buster
+            postgres: multi-postgres
+            workers: workers
+
+          - sytest-tag: buster
+            postgres: postgres
+            workers: workers
+            redis: redis
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Prepare test blacklist
+        run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers
+      - name: Run SyTest
+        run: /bootstrap.sh synapse
+        working-directory: /src
+      - name: Dump results.tap
+        if: ${{ always() }}
+        run: cat /logs/results.tap
+      - name: Upload SyTest logs
+        uses: actions/upload-artifact@v2
+        if: ${{ always() }}
+        with:
+          name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+          path: |
+            /logs/results.tap
+            /logs/**/*.log*
+
+  portdb:
+    if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
+    needs: linting-done
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - python-version: "3.6"
+            postgres-version: "9.6"
+
+          - python-version: "3.9"
+            postgres-version: "13"
+
+    services:
+      postgres:
+        image: postgres:${{ matrix.postgres-version }}
+        ports:
+          - 5432:5432
+        env:
+          POSTGRES_PASSWORD: "postgres"
+          POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    steps:
+      - uses: actions/checkout@v2
+      - run: sudo apt-get -qq install xmlsec1
+      - uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Patch Buildkite-specific test scripts
+        run: |
+          sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py
+          sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml
+          sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml
+          sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc
+      - run: .buildkite/scripts/test_synapse_port_db.sh
+
+  complement:
+    if: ${{ !failure() }}
+    needs: linting-done
+    runs-on: ubuntu-latest
+    container:
+      # https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile
+      image: matrixdotorg/complement:latest
+      env:
+        CI: true
+      ports:
+        - 8448:8448
+      volumes:
+        - /var/run/docker.sock:/var/run/docker.sock
+
+    steps:
+      - name: Run actions/checkout@v2 for synapse
+        uses: actions/checkout@v2
+        with:
+          path: synapse
+
+      - name: Run actions/checkout@v2 for complement
+        uses: actions/checkout@v2
+        with:
+          repository: "matrix-org/complement"
+          path: complement
+
+      # Build initial Synapse image
+      - run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
+        working-directory: synapse
+
+      # Build a ready-to-run Synapse image based on the initial image above.
+      # This new image includes a config file, keys for signing and TLS, and
+      # other settings to make it suitable for testing under Complement.
+      - run: docker build -t complement-synapse -f Synapse.Dockerfile .
+        working-directory: complement/dockerfiles
+
+      # Run Complement
+      - run: go test -v -tags synapse_blacklist ./tests
+        env:
+          COMPLEMENT_BASE_IMAGE: complement-synapse:latest
+        working-directory: complement
diff --git a/CHANGES.md b/CHANGES.md
index 27483532d038..709436da978c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,298 @@
+Synapse 1.34.0 (2021-05-17)
+===========================
+
+This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting.
+
+This release also deprecates the `POST /_synapse/admin/v1/rooms/<room_id>/delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/<room_id>` route instead.
+
+
+No significant changes since v1.34.0rc1.
+
+
+Synapse 1.34.0rc1 (2021-05-12)
+==============================
+
+Features
+--------
+
+- Add experimental option to track memory usage of the caches. ([\#9881](https://github.com/matrix-org/synapse/issues/9881))
+- Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
+- Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902))
+- Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951))
+- Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916))
+- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
+- Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935))
+- Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945))
+- Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. ([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. ([\#9895](https://github.com/matrix-org/synapse/issues/9895))
+- Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
+- Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. ([\#9910](https://github.com/matrix-org/synapse/issues/9910))
+- Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. ([\#9928](https://github.com/matrix-org/synapse/issues/9928))
+- Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. ([\#9930](https://github.com/matrix-org/synapse/issues/9930))
+- Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. ([\#9961](https://github.com/matrix-org/synapse/issues/9961), [\#9965](https://github.com/matrix-org/synapse/issues/9965))
+- Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. ([\#9968](https://github.com/matrix-org/synapse/issues/9968))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add `startup_delay` to docker healthcheck to reduce waiting time for coming online and update the documentation with extra options. Contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913))
+
+
+Improved Documentation
+----------------------
+
+- Add `port` argument to the Postgres database sample config section. ([\#9911](https://github.com/matrix-org/synapse/issues/9911))
+
+
+Deprecations and Removals
+-------------------------
+
+- Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
+
+
+Internal Changes
+----------------
+
+- Reduce the length of Synapse's access tokens. ([\#5588](https://github.com/matrix-org/synapse/issues/5588))
+- Export jemalloc stats to Prometheus if it is being used. ([\#9882](https://github.com/matrix-org/synapse/issues/9882))
+- Add type hints to presence handler. ([\#9885](https://github.com/matrix-org/synapse/issues/9885))
+- Reduce memory usage of the LRU caches. ([\#9886](https://github.com/matrix-org/synapse/issues/9886))
+- Add type hints to the `synapse.handlers` module. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
+- Time response time for external cache requests. ([\#9904](https://github.com/matrix-org/synapse/issues/9904))
+- Minor fixes to the `make_full_schema.sh` script. ([\#9931](https://github.com/matrix-org/synapse/issues/9931))
+- Move database schema files into a common directory. ([\#9932](https://github.com/matrix-org/synapse/issues/9932))
+- Add debug logging for lost/delayed to-device messages. ([\#9959](https://github.com/matrix-org/synapse/issues/9959))
+
+
+Synapse 1.33.2 (2021-05-11)
+===========================
+
+Due to the security issue highlighted below, server administrators are encouraged to update Synapse. We are not aware of these vulnerabilities being exploited in the wild.
+
+Security advisory
+-----------------
+
+This release fixes a denial of service attack ([CVE-2021-29471](https://github.com/matrix-org/synapse/security/advisories/GHSA-x345-32rc-8h85)) against Synapse's push rules implementation. Server admins are encouraged to upgrade.
+
+Internal Changes
+----------------
+
+- Unpin attrs dependency. ([\#9946](https://github.com/matrix-org/synapse/issues/9946))
+
+
+Synapse 1.33.1 (2021-05-06)
+===========================
+
+Bugfixes
+--------
+
+- Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. ([\#9937](https://github.com/matrix-org/synapse/issues/9937))
+
+
+Synapse 1.33.0 (2021-05-05)
+===========================
+
+Features
+--------
+
+- Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). ([\#9909](https://github.com/matrix-org/synapse/issues/9909))
+
+
+Synapse 1.33.0rc2 (2021-04-29)
+==============================
+
+Bugfixes
+--------
+
+- Fix tight loop when handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900))
+
+
+Synapse 1.33.0rc1 (2021-04-28)
+==============================
+
+Features
+--------
+
+- Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9800](https://github.com/matrix-org/synapse/issues/9800), [\#9814](https://github.com/matrix-org/synapse/issues/9814))
+- Add experimental support for handling presence on a worker. ([\#9819](https://github.com/matrix-org/synapse/issues/9819), [\#9820](https://github.com/matrix-org/synapse/issues/9820), [\#9828](https://github.com/matrix-org/synapse/issues/9828), [\#9850](https://github.com/matrix-org/synapse/issues/9850))
+- Return a new template when a user attempts to renew their account multiple times with the same token, stating that their account is set to expire. This replaces the invalid token template that would previously be shown in this case. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832))
+
+
+Bugfixes
+--------
+
+- Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. ([\#9726](https://github.com/matrix-org/synapse/issues/9726))
+- Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. ([\#9788](https://github.com/matrix-org/synapse/issues/9788))
+- Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. ([\#9802](https://github.com/matrix-org/synapse/issues/9802))
+- Limit the size of HTTP responses read over federation. ([\#9833](https://github.com/matrix-org/synapse/issues/9833))
+- Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. ([\#9867](https://github.com/matrix-org/synapse/issues/9867))
+- Fix a long-standing bug where errors from federation did not propagate to the client. ([\#9868](https://github.com/matrix-org/synapse/issues/9868))
+
+
+Improved Documentation
+----------------------
+
+- Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. ([\#9801](https://github.com/matrix-org/synapse/issues/9801))
+
+
+Internal Changes
+----------------
+
+- Add a dockerfile for running Synapse in worker-mode under Complement. ([\#9162](https://github.com/matrix-org/synapse/issues/9162))
+- Apply `pyupgrade` across the codebase. ([\#9786](https://github.com/matrix-org/synapse/issues/9786))
+- Move some replication processing out of `generic_worker`. ([\#9796](https://github.com/matrix-org/synapse/issues/9796))
+- Replace `HomeServer.get_config()` with inline references. ([\#9815](https://github.com/matrix-org/synapse/issues/9815))
+- Rename some handlers and config modules to not duplicate the top-level module. ([\#9816](https://github.com/matrix-org/synapse/issues/9816))
+- Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. ([\#9817](https://github.com/matrix-org/synapse/issues/9817))
+- Reduce CPU usage of the user directory by reusing existing calculated room membership. ([\#9821](https://github.com/matrix-org/synapse/issues/9821))
+- Small speed up for joining large remote rooms. ([\#9825](https://github.com/matrix-org/synapse/issues/9825))
+- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9838](https://github.com/matrix-org/synapse/issues/9838))
+- Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. ([\#9845](https://github.com/matrix-org/synapse/issues/9845))
+- Limit length of accepted email addresses. ([\#9855](https://github.com/matrix-org/synapse/issues/9855))
+- Remove redundant `synapse.types.Collection` type definition. ([\#9856](https://github.com/matrix-org/synapse/issues/9856))
+- Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. ([\#9858](https://github.com/matrix-org/synapse/issues/9858))
+- Disable invite rate-limiting by default when running the unit tests. ([\#9871](https://github.com/matrix-org/synapse/issues/9871))
+- Pass a reactor into `SynapseSite` to make testing easier. ([\#9874](https://github.com/matrix-org/synapse/issues/9874))
+- Make `DomainSpecificString` an `attrs` class. ([\#9875](https://github.com/matrix-org/synapse/issues/9875))
+- Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. ([\#9876](https://github.com/matrix-org/synapse/issues/9876))
+- Remove redundant `_PushHTTPChannel` test class. ([\#9878](https://github.com/matrix-org/synapse/issues/9878))
+- Remove backwards-compatibility code for Python versions < 3.6. ([\#9879](https://github.com/matrix-org/synapse/issues/9879))
+- Small performance improvement around handling new local presence updates. ([\#9887](https://github.com/matrix-org/synapse/issues/9887))
+
+
+Synapse 1.32.2 (2021-04-22)
+===========================
+
+This release includes a fix for a regression introduced in 1.32.0.
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.32.0 and 1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857))
+
+
+Synapse 1.32.1 (2021-04-21)
+===========================
+
+This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853)
+in Synapse 1.32.0 that caused connected Prometheus instances to become unstable.
+
+However, as this release is still subject to the `LoggingContext` change in 1.32.0,
+it is recommended to remain on or downgrade to 1.31.0.
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.32.0 which caused Synapse to report large numbers of Prometheus time series, potentially overwhelming Prometheus instances. ([\#9854](https://github.com/matrix-org/synapse/issues/9854))
+
+
+Synapse 1.32.0 (2021-04-20)
+===========================
+
+**Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853)
+that can overwhelm connected Prometheus instances. This issue was not present in
+1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and
+follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183)
+to clean up any excess writeahead logs.
+
+**Note:** This release also mistakenly included a change that may affect Synapse
+modules that import `synapse.logging.context.LoggingContext`, such as
+[synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider).
+This will be fixed in a later Synapse version.
+
+**Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+.
+
+This release removes the deprecated `GET /_synapse/admin/v1/users/<user_id>` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities.
+
+This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
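+
+As an illustrative sketch (the homeserver URL, token, and username here are
+placeholders, not values mandated by this release), a compliant registration
+request looks something like:
+
+```sh
+# Hypothetical example: $AS_TOKEN holds the application service's as_token
+curl -X POST -H "Authorization: Bearer $AS_TOKEN" \
+    -d '{"type": "m.login.application_service", "username": "_example_bot"}' \
+    'https://matrix.example.com/_matrix/client/r0/register'
+```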
+
+If you are using the `packages.matrix.org` Debian repository for Synapse packages,
+note that we have recently updated the expiry date on the gpg signing key. If you see an
+error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you
+will need to get a fresh copy of the keys. You can do so with:
+
+```sh
+sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
+```
+
+Bugfixes
+--------
+
+- Fix the log lines of nested logging contexts. Broke in 1.32.0rc1. ([\#9829](https://github.com/matrix-org/synapse/issues/9829))
+
+
+Synapse 1.32.0rc1 (2021-04-13)
+==============================
+
+Features
+--------
+
+- Add a Synapse module for routing presence updates between users. ([\#9491](https://github.com/matrix-org/synapse/issues/9491))
+- Add an admin API to manage ratelimit for a specific user. ([\#9648](https://github.com/matrix-org/synapse/issues/9648))
+- Include request information in structured logging output. ([\#9654](https://github.com/matrix-org/synapse/issues/9654))
+- Add `order_by` to the admin API `GET /_synapse/admin/v2/users`. Contributed by @dklimpel. ([\#9691](https://github.com/matrix-org/synapse/issues/9691))
+- Replace the `room_invite_state_types` configuration setting with `room_prejoin_state`. ([\#9700](https://github.com/matrix-org/synapse/issues/9700))
+- Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9717](https://github.com/matrix-org/synapse/issues/9717), [\#9735](https://github.com/matrix-org/synapse/issues/9735))
+- Update experimental support for Spaces: include `m.room.create` in the room state sent with room-invites. ([\#9710](https://github.com/matrix-org/synapse/issues/9710))
+- Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. ([\#9766](https://github.com/matrix-org/synapse/issues/9766))
+
+
+Bugfixes
+--------
+
+- Prevent `synapse_forward_extremities` and `synapse_excess_extremity_events` Prometheus metrics from initially reporting zero-values after startup. ([\#8926](https://github.com/matrix-org/synapse/issues/8926))
+- Fix recently added ratelimits to correctly honour the application service `rate_limited` flag. ([\#9711](https://github.com/matrix-org/synapse/issues/9711))
+- Fix longstanding bug which caused `duplicate key value violates unique constraint "remote_media_cache_thumbnails_media_origin_media_id_thumbna_key"` errors. ([\#9725](https://github.com/matrix-org/synapse/issues/9725))
+- Fix bug where sharded federation senders could get stuck repeatedly querying the DB in a loop, using lots of CPU. ([\#9770](https://github.com/matrix-org/synapse/issues/9770))
+- Fix duplicate logging of exceptions thrown during federation transaction processing. ([\#9780](https://github.com/matrix-org/synapse/issues/9780))
+
+
+Updates to the Docker image
+---------------------------
+
+- Move opencontainers labels to the final Docker image such that users can inspect them. ([\#9765](https://github.com/matrix-org/synapse/issues/9765))
+
+
+Improved Documentation
+----------------------
+
+- Make the `allowed_local_3pids` regex example in the sample config stricter. ([\#9719](https://github.com/matrix-org/synapse/issues/9719))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove old admin API `GET /_synapse/admin/v1/users/<user_id>`. ([\#9401](https://github.com/matrix-org/synapse/issues/9401))
+- Make `/_matrix/client/r0/register` expect a type of `m.login.application_service` when an Application Service registers a user, to align with [the relevant spec](https://spec.matrix.org/unstable/application-service-api/#server-admin-style-permissions). ([\#9548](https://github.com/matrix-org/synapse/issues/9548))
+
+
+Internal Changes
+----------------
+
+- Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz. ([\#9718](https://github.com/matrix-org/synapse/issues/9718))
+- Experiment with GitHub Actions for CI. ([\#9661](https://github.com/matrix-org/synapse/issues/9661))
+- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9682](https://github.com/matrix-org/synapse/issues/9682))
+- Update `scripts-dev/complement.sh` to use a local checkout of Complement, allow running a subset of tests and have it use Synapse's Complement test blacklist. ([\#9685](https://github.com/matrix-org/synapse/issues/9685))
+- Improve Jaeger tracing for `to_device` messages. ([\#9686](https://github.com/matrix-org/synapse/issues/9686))
+- Add release helper script for automating part of the Synapse release process. ([\#9713](https://github.com/matrix-org/synapse/issues/9713))
+- Add type hints to expiring cache. ([\#9730](https://github.com/matrix-org/synapse/issues/9730))
+- Convert various testcases to `HomeserverTestCase`. ([\#9736](https://github.com/matrix-org/synapse/issues/9736))
+- Start linting mypy with `no_implicit_optional`. ([\#9742](https://github.com/matrix-org/synapse/issues/9742))
+- Add missing type hints to federation handler and server. ([\#9743](https://github.com/matrix-org/synapse/issues/9743))
+- Check that a `ConfigError` is raised, rather than simply `Exception`, when appropriate in homeserver config file generation tests. ([\#9753](https://github.com/matrix-org/synapse/issues/9753))
+- Fix incompatibility with `tox` 2.5. ([\#9769](https://github.com/matrix-org/synapse/issues/9769))
+- Enable Complement tests for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary API. ([\#9771](https://github.com/matrix-org/synapse/issues/9771))
+- Use mock from the standard library instead of a separate package. ([\#9772](https://github.com/matrix-org/synapse/issues/9772))
+- Update Black configuration to target Python 3.6. ([\#9781](https://github.com/matrix-org/synapse/issues/9781))
+- Add option to skip unit tests when building Debian packages. ([\#9793](https://github.com/matrix-org/synapse/issues/9793))
+
+
 Synapse 1.31.0 (2021-04-06)
 ===========================
diff --git a/README.rst b/README.rst
index 655a2bf3bed0..1a5503572ee4 100644
--- a/README.rst
+++ b/README.rst
@@ -393,7 +393,12 @@ massive excess of outgoing federation requests (see `discussion
 indicate that your server is also issuing far more outgoing federation
 requests than can be accounted for by your users' activity, this is a
 likely cause. The misbehavior can be worked around by setting
-``use_presence: false`` in the Synapse config file.
+the following in the Synapse config file:
+
+.. code-block:: yaml
+
+   presence:
+     enabled: false
 
 People can't accept room invitations from me
 --------------------------------------------
diff --git a/UPGRADE.rst b/UPGRADE.rst
index ba488e1041e6..9f61aad4120d 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -85,6 +85,98 @@ for example:
       wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
       dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.34.0
+====================
+
+``room_invite_state_types`` configuration setting
+-------------------------------------------------
+
+The ``room_invite_state_types`` configuration setting has been deprecated and
+replaced with ``room_prejoin_state``. See the `sample configuration file `_.
+
+If you have set ``room_invite_state_types`` to the default value you should simply
+remove it from your configuration file. The default value used to be:
+
+.. code:: yaml
+
+   room_invite_state_types:
+      - "m.room.join_rules"
+      - "m.room.canonical_alias"
+      - "m.room.avatar"
+      - "m.room.encryption"
+      - "m.room.name"
+
+If you have customised this value, you should remove ``room_invite_state_types`` and
+configure ``room_prejoin_state`` instead.
+
+
+
+Upgrading to v1.33.0
+====================
+
+Account Validity HTML templates can now display a user's expiration date
+------------------------------------------------------------------------
+
+This may affect you if you have enabled the account validity feature, and have made use of a
+custom HTML template specified by the ``account_validity.template_dir`` or ``account_validity.account_renewed_html_path``
+Synapse config options.
+
+The template can now accept an ``expiration_ts`` variable, which represents the unix timestamp in milliseconds for the
+future date until which their account has been renewed. See the
+`default template `_
+for an example of usage.
+
+Also note that a new HTML template, ``account_previously_renewed.html``, has been added. This is shown to users
+when they attempt to renew their account with a valid renewal token that has already been used before. The default
+template contents can be found
+`here `_,
+and can also accept an ``expiration_ts`` variable. This template replaces the error message users would previously see
+upon attempting to use a valid renewal token more than once.
+
+
+Upgrading to v1.32.0
+====================
+
+Regression causing connected Prometheus instances to become overwhelmed
+-----------------------------------------------------------------------
+
+This release introduces `a regression <https://github.com/matrix-org/synapse/issues/9853>`_
+that can overwhelm connected Prometheus instances. This issue is not present in
+Synapse v1.32.0rc1.
+
+If you have been affected, please downgrade to 1.31.0. You then may need to
+remove excess writeahead logs in order for Prometheus to recover. Instructions
+for doing so are provided
+`here <https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183>`_.
+
+Dropping support for old Python, Postgres and SQLite versions
+-------------------------------------------------------------
+
+In line with our `deprecation policy `_,
+we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream.
+
+This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+.
+
+Removal of old List Accounts Admin API
+--------------------------------------
+
+The deprecated v1 "list accounts" admin API (``GET /_synapse/admin/v1/users/<user_id>``) has been removed in this version.
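+
+For example, listing accounts via the replacement v2 API described below might look
+like the following (assuming an admin access token in ``ADMIN_TOKEN``; the host and
+port are placeholders for your own deployment):
+
+.. code:: bash
+
+   curl --header "Authorization: Bearer $ADMIN_TOKEN" \
+       'http://localhost:8008/_synapse/admin/v2/users?from=0&limit=10'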
+
+The `v2 list accounts API `_
+has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``GET /_synapse/admin/v2/users``.
+
+The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
+
+Application Services must use type ``m.login.application_service`` when registering users
+------------------------------------------------------------------------------------------
+
+In compliance with the
+`Application Service spec `_,
+Application Services are now required to use the ``m.login.application_service`` type when registering users via the
+``/_matrix/client/r0/register`` endpoint. This behaviour was deprecated in Synapse v1.30.0.
+
+Please ensure your Application Services are up to date.
+
 Upgrading to v1.29.0
 ====================
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index 67e032244ecc..856dd437db91 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -24,6 +24,7 @@ import time
 import urllib
 
 from http import TwistedHttpClient
+from typing import Optional
 
 import nacl.encoding
 import nacl.signing
@@ -718,7 +719,7 @@ def _run_and_pprint(
         method,
         path,
         data=None,
-        query_params={"access_token": None},
+        query_params: Optional[dict] = None,
         alt_text=None,
     ):
         """Runs an HTTP request and pretty prints the output.
@@ -729,6 +730,8 @@ def _run_and_pprint(
             data: Raw JSON data if any
             query_params: dict of query parameters to add to the url
         """
+        query_params = query_params or {"access_token": None}
+
         url = self._url() + path
         if "access_token" in query_params:
             query_params["access_token"] = self._tok()
diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
index 851e80c25bb4..1310f078e3ac 100644
--- a/contrib/cmdclient/http.py
+++ b/contrib/cmdclient/http.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,6 +15,7 @@ import json
 import urllib
 from pprint import pformat
+from typing import Optional
 
 from twisted.internet import defer, reactor
 from twisted.web.client import Agent, readBody
@@ -85,8 +85,9 @@ def get_json(self, url, args=None):
         body = yield readBody(response)
         defer.returnValue(json.loads(body))
 
-    def _create_put_request(self, url, json_data, headers_dict={}):
+    def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None):
         """Wrapper of _create_request to issue a PUT request"""
+        headers_dict = headers_dict or {}
 
         if "Content-Type" not in headers_dict:
             raise defer.error(RuntimeError("Must include Content-Type header for PUTs"))
@@ -95,14 +96,22 @@ def _create_put_request(self, url, json_data, headers_dict={}):
             "PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict
         )
 
-    def _create_get_request(self, url, headers_dict={}):
+    def _create_get_request(self, url, headers_dict: Optional[dict] = None):
         """Wrapper of _create_request to issue a GET request"""
-        return self._create_request("GET", url, headers_dict=headers_dict)
+        return self._create_request("GET", url, headers_dict=headers_dict or {})
 
     @defer.inlineCallbacks
     def do_request(
-        self, method, url, data=None, qparams=None, jsonreq=True, headers={}
+        self,
+        method,
+        url,
+        data=None,
+        qparams=None,
+        jsonreq=True,
+        headers: Optional[dict] = None,
     ):
+        headers = headers or {}
+
         if qparams:
             url = "%s?%s" % (url, urllib.urlencode(qparams, True))
 
@@ -123,8 +132,12 @@ def do_request(
         defer.returnValue(json.loads(body))
 
     @defer.inlineCallbacks
-    def _create_request(self, method, url, producer=None, headers_dict={}):
+    def _create_request(
+        self, method, url, producer=None, headers_dict: Optional[dict] = None
+    ):
         """Creates and sends a request to the given url"""
+        headers_dict = headers_dict or {}
+
         headers_dict["User-Agent"] = ["Synapse Cmd Client"]
 
         retries_left = 5
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py
index 7fbc7d8fc6fd..31b8a6822504 100644
--- a/contrib/experiments/test_messaging.py
+++ b/contrib/experiments/test_messaging.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index cad7d1688398..21caad90cc58 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -50,15 +50,24 @@ PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
 VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
 TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
 
-# we copy the tests to a temporary directory so that we can put them on the
-# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
-tmpdir=`mktemp -d`
-trap "rm -r $tmpdir" EXIT
+case "$DEB_BUILD_OPTIONS" in
+    *nocheck*)
+        # Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS
+        ;;
+
+    *)
+        # Copy tests to a temporary directory so that we can put them on the
+        # PYTHONPATH without putting the uninstalled synapse on the pythonpath.
+        tmpdir=`mktemp -d`
+        trap "rm -r $tmpdir" EXIT
+
+        cp -r tests "$tmpdir"
 
-cp -r tests "$tmpdir"
+        PYTHONPATH="$tmpdir" \
+            "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
 
-PYTHONPATH="$tmpdir" \
-    "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
+        ;;
+esac
 
 # build the config file
 "${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \
diff --git a/debian/changelog b/debian/changelog
index 09602ff54bdd..bf99ae772c9e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,49 @@
+matrix-synapse-py3 (1.34.0) stable; urgency=medium
+
+  * New synapse release 1.34.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 May 2021 11:34:18 +0100
+
+matrix-synapse-py3 (1.33.2) stable; urgency=medium
+
+  * New synapse release 1.33.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 May 2021 11:17:59 +0100
+
+matrix-synapse-py3 (1.33.1) stable; urgency=medium
+
+  * New synapse release 1.33.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 06 May 2021 14:06:33 +0100
+
+matrix-synapse-py3 (1.33.0) stable; urgency=medium
+
+  * New synapse release 1.33.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 05 May 2021 14:15:27 +0100
+
+matrix-synapse-py3 (1.32.2) stable; urgency=medium
+
+  * New synapse release 1.32.2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 22 Apr 2021 12:43:52 +0100
+
+matrix-synapse-py3 (1.32.1) stable; urgency=medium
+
+  * New synapse release 1.32.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 21 Apr 2021 14:00:55 +0100
+
+matrix-synapse-py3 (1.32.0) stable; urgency=medium
+
+  [ Dan Callahan ]
+  * Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
+
+  [ Synapse Packaging team ]
+  * New synapse release 1.32.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Apr 2021 14:28:39 +0100
+
 matrix-synapse-py3 (1.31.0) stable; urgency=medium
 
   * New synapse release 1.31.0.
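
The "Skip tests when DEB_BUILD_OPTIONS contains nocheck" change above follows the
standard Debian convention, so a package build that skips the test suite would be
invoked roughly as follows (a sketch, assuming a plain dpkg-buildpackage run from
the source tree; the exact flags are illustrative):

```sh
# Skip the twisted.trial run performed by debian/build_virtualenv
DEB_BUILD_OPTIONS="nocheck" dpkg-buildpackage -us -uc
```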
diff --git a/demo/start.sh b/demo/start.sh
index 621a5698b86e..bc4854091b56 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -96,18 +96,48 @@ for port in 8080 8081 8082; do
         # Check script parameters
         if [ $# -eq 1 ]; then
             if [ $1 = "--no-rate-limit" ]; then
-                # messages rate limit
-                echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config
-                echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config
-
-                # registration rate limit
-                printf 'rc_registration:\n  per_second: 1000\n  burst_count: 1000\n' >> $DIR/etc/$port.config
-
-                # login rate limit
-                echo 'rc_login:' >> $DIR/etc/$port.config
-                printf '  address:\n    per_second: 1000\n    burst_count: 1000\n' >> $DIR/etc/$port.config
-                printf '  account:\n    per_second: 1000\n    burst_count: 1000\n' >> $DIR/etc/$port.config
-                printf '  failed_attempts:\n    per_second: 1000\n    burst_count: 1000\n' >> $DIR/etc/$port.config
+
+                # Disable any rate limiting
+                ratelimiting=$(cat <<-RC
+                rc_message:
+                  per_second: 1000
+                  burst_count: 1000
+                rc_registration:
+                  per_second: 1000
+                  burst_count: 1000
+                rc_login:
+                  address:
+                    per_second: 1000
+                    burst_count: 1000
+                  account:
+                    per_second: 1000
+                    burst_count: 1000
+                  failed_attempts:
+                    per_second: 1000
+                    burst_count: 1000
+                rc_admin_redaction:
+                  per_second: 1000
+                  burst_count: 1000
+                rc_joins:
+                  local:
+                    per_second: 1000
+                    burst_count: 1000
+                  remote:
+                    per_second: 1000
+                    burst_count: 1000
+                rc_3pid_validation:
+                  per_second: 1000
+                  burst_count: 1000
+                rc_invites:
+                  per_room:
+                    per_second: 1000
+                    burst_count: 1000
+                  per_user:
+                    per_second: 1000
+                    burst_count: 1000
+                RC
+                )
+                echo "${ratelimiting}" >> $DIR/etc/$port.config
             fi
         fi
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 5b7bf02776d6..2bdc607e66e6 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -18,11 +18,6 @@ ARG PYTHON_VERSION=3.8
 ###
 FROM docker.io/python:${PYTHON_VERSION}-slim as builder
 
-LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
-LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
-LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
-LABEL org.opencontainers.image.licenses='Apache-2.0'
-
 # install the OS build deps
 RUN apt-get update && apt-get install -y \
     build-essential \
@@ -66,6 +61,11 @@ RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
 
 FROM docker.io/python:${PYTHON_VERSION}-slim
 
+LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
+LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
+LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git'
+LABEL org.opencontainers.image.licenses='Apache-2.0'
+
 RUN apt-get update && apt-get install -y \
     curl \
     gosu \
@@ -88,5 +88,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp
 
 ENTRYPOINT ["/start.py"]
 
-HEALTHCHECK --interval=1m --timeout=5s \
+HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
     CMD curl -fSs http://localhost:8008/health || exit 1
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
new file mode 100644
index 000000000000..969cf9728658
--- /dev/null
+++ b/docker/Dockerfile-workers
@@ -0,0 +1,23 @@
+# Inherit from the official Synapse docker image
+FROM matrixdotorg/synapse
+
+# Install deps
+RUN apt-get update
+RUN apt-get install -y supervisor redis nginx
+
+# Remove the default nginx sites
+RUN rm /etc/nginx/sites-enabled/default
+
+# Copy Synapse worker, nginx and supervisord configuration template files
+COPY ./docker/conf-workers/* /conf/
+
+# Expose nginx listener port
+EXPOSE 8080/tcp
+
+# Volume for user-editable config files, logs etc.
+VOLUME ["/data"]
+
+# A script to read environment variables and create the necessary
+# files to run the desired worker configuration. Will start supervisord.
+COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
+ENTRYPOINT ["/configure_workers_and_start.py"]
diff --git a/docker/README-testing.md b/docker/README-testing.md
new file mode 100644
index 000000000000..6a5baf9e2835
--- /dev/null
+++ b/docker/README-testing.md
@@ -0,0 +1,140 @@
+# Running tests against a dockerised Synapse
+
+It's possible to run integration tests against Synapse
+using [Complement](https://github.com/matrix-org/complement). Complement is a Matrix Spec
+compliance test suite for homeservers, and supports any homeserver docker image configured
+to listen on ports 8008/8448. This document contains instructions for building Synapse
+docker images that can be run inside Complement for testing purposes.
+
+Note that running Synapse's unit tests from within the docker image is not supported.
+
+## Testing with SQLite and single-process Synapse
+
+> Note that `scripts-dev/complement.sh` is a script that will automatically build
+> and run an SQLite-based, single-process Synapse against Complement.
+
+The instructions below will set up Complement testing for a single-process,
+SQLite-based Synapse deployment.
+
+Start by building the base Synapse docker image. If you wish to run tests with the latest
+release of Synapse, instead of your current checkout, you can skip this step. From the
+root of the repository:
+
+```sh
+docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+```
+
+This will build an image with the tag `matrixdotorg/synapse`.
+
+Next, build the Synapse image for Complement. You will need a local checkout
+of Complement. Change to the root of your Complement checkout and run:
+
+```sh
+docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles
+```
+
+This will build an image with the tag `complement-synapse`, which can be handed to
+Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
+[Complement's documentation](https://github.com/matrix-org/complement/#running) for
+how to run the tests, as well as the various available command line flags.
+
+## Testing with PostgreSQL and single or multi-process Synapse
+
+The above docker image only supports running Synapse with SQLite and in a
+single-process topology. The following instructions are used to build a Synapse image for
+Complement that supports either single or multi-process topology with a PostgreSQL
+database backend.
+
+As with the single-process image, build the base Synapse docker image. If you wish to run
+tests with the latest release of Synapse, instead of your current checkout, you can skip
+this step. From the root of the repository:
+
+```sh
+docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+```
+
+This will build an image with the tag `matrixdotorg/synapse`.
+
+Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`.
+Again, from the root of the repository:
+
+```sh
+docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers .
+```
+
+This will build an image with the tag `matrixdotorg/synapse-workers`.
+
+It's worth noting at this point that this image is fully functional, and
+can be used for local testing. See instructions for using the container
+under
+[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone)
+below.
+
+Finally, build the Synapse image for Complement, which is based on
+`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to
+the root of your Complement checkout and run:
+
+```sh
+docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles
+```
+
+This will build an image with the tag `matrixdotorg/complement-synapse-workers`, which can be handed to
+Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to
+[Complement's documentation](https://github.com/matrix-org/complement/#running) for
+how to run the tests, as well as the various available command line flags.
+
+## Running the Dockerfile-worker image standalone
+
+For manual testing of a multi-process Synapse instance in Docker,
+[Dockerfile-workers](Dockerfile-workers) is a Dockerfile that will produce an image
+bundling all necessary components together for a workerised homeserver instance.
+
+This includes any desired Synapse worker processes, an nginx to route traffic accordingly,
+a redis for worker communication and a supervisord instance to start up and monitor all
+processes. You will need to provide your own postgres container to connect to, and TLS
+is not handled by the container.
+
+Once you've built the image using the above instructions, you can run it. Be sure
+you've set up a volume according to the [usual Synapse docker instructions](README.md).
+Then run something along the lines of:
+
+```
+docker run -d --name synapse \
+    --mount type=volume,src=synapse-data,dst=/data \
+    -p 8008:8008 \
+    -e SYNAPSE_SERVER_NAME=my.matrix.host \
+    -e SYNAPSE_REPORT_STATS=no \
+    -e POSTGRES_HOST=postgres \
+    -e POSTGRES_USER=postgres \
+    -e POSTGRES_PASSWORD=somesecret \
+    -e SYNAPSE_WORKER_TYPES=synchrotron,media_repository,user_dir \
+    -e SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1 \
+    matrixdotorg/synapse-workers
+```
+
+...substituting `POSTGRES*` variables for those that match a postgres host you have
+available (usually a running postgres docker container).
+
+The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to
+use when running the container. All possible worker names are defined by the keys of the
+`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the
+Dockerfile makes use of to generate appropriate worker, nginx and supervisord config
+files.
+
+Sharding is supported for a subset of workers, in line with the
+[worker documentation](../docs/workers.md). To run multiple instances of a given worker
+type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
+(e.g. `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
+
+Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
+(leaving only the main process). The container is configured to use redis-based worker
+mode.
+
+Logs for workers and the main process are logged to stdout and can be viewed with
+standard `docker logs` tooling. Worker logs contain their worker name
+after the timestamp.
+
+Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to
+`/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00:00,
+according to the container's clock. Logging for the main process must still be
+configured by modifying the homeserver's log config in your Synapse data volume.
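+
+For example, to follow the combined output of the main process and all workers from
+the container started above (assuming it was named `synapse`, as in the earlier
+`docker run` sketch):
+
+```sh
+# Stream logs from the running workerised container
+docker logs -f synapse
+```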
diff --git a/docker/README.md b/docker/README.md
index 3a7dc585e7b5..c8d3c4b3daf3 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -2,13 +2,16 @@
 
 This Docker image will run Synapse as a single process. By default it uses a
 sqlite database; for production use you should connect it to a separate
-postgres database.
+postgres database. The image also does *not* provide a TURN server.
 
-The image also does *not* provide a TURN server.
+This image should work on all platforms that are supported by Docker upstream.
+Note that Docker's WSL1-backend Linux Containers on Windows
+platform is [experimental](https://github.com/docker/for-win/issues/6470) and
+is not supported by this image.
 
 ## Volumes
 
-By default, the image expects a single volume, located at ``/data``, that will hold:
+By default, the image expects a single volume, located at `/data`, that will hold:
 
 * configuration files;
 * uploaded media and thumbnails;
@@ -16,11 +19,11 @@ By default, the image expects a single volume, located at ``/data``, that will h
 * the appservices configuration.
 
 You are free to use separate volumes depending on storage endpoints at your
-disposal. For instance, ``/data/media`` could be stored on a large but low
+disposal. For instance, `/data/media` could be stored on a large but low
 performance hdd storage while other files could be stored on high performance
 endpoints.
 
-In order to setup an application service, simply create an ``appservices``
+In order to set up an application service, simply create an `appservices`
 directory in the data volume and write the application service Yaml
 configuration file there. Multiple application services are supported.
 
@@ -53,6 +56,8 @@ The following environment variables are supported in `generate` mode:
 * `SYNAPSE_SERVER_NAME` (mandatory): the server public hostname.
 * `SYNAPSE_REPORT_STATS` (mandatory, `yes` or `no`): whether to enable
   anonymous statistics reporting.
+* `SYNAPSE_HTTP_PORT`: the port Synapse should listen on for http traffic.
+  Defaults to `8008`.
 * `SYNAPSE_CONFIG_DIR`: where additional config files (such as the log config
   and event signing key) will be stored. Defaults to `/data`.
 * `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
@@ -73,6 +78,8 @@ docker run -d --name synapse \
     matrixdotorg/synapse:latest
 ```
 
+(assuming 8008 is the port Synapse is configured to listen on for http traffic).
+
 You can then check that it has started correctly with:
 
 ```
@@ -184,6 +191,16 @@ whilst running the above `docker run` commands.
 ```
    --no-healthcheck
 ```
+
+## Disabling the healthcheck in docker-compose file
+
+If you wish to disable the healthcheck via docker-compose, append the following to your service configuration.
+
+```
+  healthcheck:
+    disable: true
+```
+
 ## Setting custom healthcheck on docker run
 
 If you wish to point the healthcheck at a different port with docker command, add the following
@@ -195,17 +212,18 @@ If you wish to point the healthcheck at a different port with docker command, ad
 
 ## Setting the healthcheck in docker-compose file
 
 You can add the following to set a custom healthcheck in a docker compose file.
-You will need version >2.1 for this to work.
+You will need docker-compose version >2.1 for this to work.
 
 ```
 healthcheck:
   test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"]
-  interval: 1m
-  timeout: 10s
+  interval: 15s
+  timeout: 5s
   retries: 3
+  start_period: 5s
 ```
 
 ## Using jemalloc
 
 Jemalloc is embedded in the image and will be used instead of the default allocator.
-You can read about jemalloc by reading the Synapse [README](../README.md)
\ No newline at end of file
+You can read about jemalloc by reading the Synapse [README](../README.md).
diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2
new file mode 100644
index 000000000000..1081979e06a0
--- /dev/null
+++ b/docker/conf-workers/nginx.conf.j2
@@ -0,0 +1,27 @@
+# This file contains the base config for the reverse proxy, as part of ../Dockerfile-workers.
+# configure_workers_and_start.py uses and appends to this file depending on the workers
+# that have been selected.
+
+{{ upstream_directives }}
+
+server {
+    # Listen on an unoccupied port number
+    listen 8008;
+    listen [::]:8008;
+
+    server_name localhost;
+
+    # Nginx by default only allows file uploads up to 1M in size
+    # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
+    client_max_body_size 100M;
+
+{{ worker_locations }}
+
+    # Send all other traffic to the main process
+    location ~* ^(\\/_matrix|\\/_synapse) {
+        proxy_pass http://localhost:8080;
+        proxy_set_header X-Forwarded-For $remote_addr;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_set_header Host $host;
+    }
+}
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
new file mode 100644
index 000000000000..f94b8c6aca0f
--- /dev/null
+++ b/docker/conf-workers/shared.yaml.j2
@@ -0,0 +1,9 @@
+# This file contains the base for the shared homeserver config file between Synapse workers,
+# as part of ./Dockerfile-workers.
+# configure_workers_and_start.py uses and appends to this file depending on the workers
+# that have been selected.
+
+redis:
+    enabled: true
+
+{{ shared_worker_config }}
\ No newline at end of file
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
new file mode 100644
index 000000000000..0de2c6143b5a
--- /dev/null
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -0,0 +1,41 @@
+# This file contains the base config for supervisord, as part of ../Dockerfile-workers.
+# configure_workers_and_start.py uses and appends to this file depending on the workers
+# that have been selected.
+[supervisord]
+nodaemon=true
+user=root
+
+[program:nginx]
+command=/usr/sbin/nginx -g "daemon off;"
+priority=500
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+username=www-data
+autorestart=true
+
+[program:redis]
+command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
+priority=1
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+username=redis
+autorestart=true
+
+[program:synapse_main]
+command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
+priority=10
+# Log startup failures to supervisord's stdout/err
+# Regular synapse logs will still go in the configured data directory
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+autorestart=unexpected
+exitcodes=0
+
+# Additional process blocks
+{{ worker_config }}
\ No newline at end of file
diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2
new file mode 100644
index 000000000000..42131afc9583
--- /dev/null
+++ b/docker/conf-workers/worker.yaml.j2
@@ -0,0 +1,26 @@
+# This is a configuration template for a single worker instance, and is
+# used by Dockerfile-workers.
+# Values will change depending on whichever workers are selected when
+# running that image.
+
+worker_app: "{{ app }}"
+worker_name: "{{ name }}"
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_listeners:
+  - type: http
+    port: {{ port }}
+{% if listener_resources %}
+    resources:
+      - names:
+{%- for resource in listener_resources %}
+          - {{ resource }}
+{%- endfor %}
+{% endif %}
+
+worker_log_config: {{ worker_log_config_filepath }}
+
+{{ worker_extra_conf }}
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index 0dea62a87d5f..2b23d7f428d7 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -40,7 +40,9 @@ listeners:
       compress: false
 {% endif %}
 
-  - port: 8008
+  # Allow configuring in case we want to reverse proxy 8008
+  # using another process in the same container
+  - port: {{ SYNAPSE_HTTP_PORT or 8008 }}
     tls: false
     bind_addresses: ['::']
     type: http
@@ -173,18 +175,10 @@ report_stats: False
 
 ## API Configuration ##
 
-room_invite_state_types:
-    - "m.room.join_rules"
-    - "m.room.canonical_alias"
-    - "m.room.avatar"
-    - "m.room.name"
-
 {% if SYNAPSE_APPSERVICES %}
 app_service_config_files:
 {% for appservice in SYNAPSE_APPSERVICES %}
 - "{{ appservice }}"
 {% endfor %}
-{% else %}
-app_service_config_files: []
 {% endif %}
 
 macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
diff --git a/docker/conf/log.config b/docker/conf/log.config
index 491bbcc87ad7..34572bc0f36a 100644
--- a/docker/conf/log.config
+++ b/docker/conf/log.config
@@ -2,9 +2,34 @@ version: 1
 
 formatters:
   precise:
-   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+{% if worker_name %}
+   format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+{% else %}
+   format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
+{% endif %}
 
 handlers:
+  file:
+    class: logging.handlers.TimedRotatingFileHandler
+    formatter: precise
+    filename: {{ LOG_FILE_PATH or "homeserver.log" }}
+    when: "midnight"
+    backupCount: 6  # Does not include the current log file.
+    encoding: utf8
+
+  # Default to buffering writes to log file for efficiency. This means that
+  # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
+  # logs will still be flushed immediately.
+  buffer:
+    class: logging.handlers.MemoryHandler
+    target: file
+    # The capacity is the number of log lines that are buffered before
+    # being written to disk. Increasing this will lead to better
+    # performance, at the expense of it taking longer for log lines to
+    # be written to disk.
+    capacity: 10
+    flushLevel: 30  # Flush for WARNING logs as well
+
   console:
     class: logging.StreamHandler
     formatter: precise
@@ -17,6 +42,11 @@ loggers:
 
 root:
     level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
+
+{% if LOG_FILE_PATH %}
+    handlers: [console, buffer]
+{% else %}
     handlers: [console]
+{% endif %}
 
 disable_existing_loggers: false
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
new file mode 100755
index 000000000000..4be6afc65d9a
--- /dev/null
+++ b/docker/configure_workers_and_start.py
@@ -0,0 +1,558 @@
+#!/usr/bin/env python
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script reads environment variables and generates a shared Synapse worker,
+# nginx and supervisord configs depending on the workers requested.
+#
+# The environment variables it reads are:
+#   * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
+#   * SYNAPSE_REPORT_STATS: Whether to report stats.
+#   * SYNAPSE_WORKER_TYPES: A comma-separated list of worker names as specified in WORKERS_CONFIG
+#         below. Leave empty for no workers, or set to '*' for all possible workers.
+#
+# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
+# in the project's README), this script may be run multiple times, and functionality should
+# continue to work if so.
+
+import os
+import subprocess
+import sys
+
+import jinja2
+import yaml
+
+MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
+
+
+WORKERS_CONFIG = {
+    "pusher": {
+        "app": "synapse.app.pusher",
+        "listener_resources": [],
+        "endpoint_patterns": [],
+        "shared_extra_conf": {"start_pushers": False},
+        "worker_extra_conf": "",
+    },
+    "user_dir": {
+        "app": "synapse.app.user_dir",
+        "listener_resources": ["client"],
+        "endpoint_patterns": [
+            "^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$"
+        ],
+        "shared_extra_conf": {"update_user_directory": False},
+        "worker_extra_conf": "",
+    },
+    "media_repository": {
+        "app": "synapse.app.media_repository",
+        "listener_resources": ["media"],
+        "endpoint_patterns": [
+            "^/_matrix/media/",
+            "^/_synapse/admin/v1/purge_media_cache$",
+            "^/_synapse/admin/v1/room/.*/media.*$",
+            "^/_synapse/admin/v1/user/.*/media.*$",
+            "^/_synapse/admin/v1/media/.*$",
+            "^/_synapse/admin/v1/quarantine_media/.*$",
+        ],
+        "shared_extra_conf": {"enable_media_repo": False},
+        "worker_extra_conf": "enable_media_repo: true",
+    },
+    "appservice": {
+        "app": "synapse.app.appservice",
+        "listener_resources": [],
+        "endpoint_patterns": [],
+        "shared_extra_conf": {"notify_appservices": False},
+        "worker_extra_conf": "",
+    },
+    "federation_sender": {
+        "app": "synapse.app.federation_sender",
+        "listener_resources": [],
+        "endpoint_patterns": [],
+        "shared_extra_conf": {"send_federation": False},
+        "worker_extra_conf": "",
+    },
+    "synchrotron": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["client"],
+        "endpoint_patterns": [
+            "^/_matrix/client/(v2_alpha|r0)/sync$",
+            "^/_matrix/client/(api/v1|v2_alpha|r0)/events$",
+            "^/_matrix/client/(api/v1|r0)/initialSync$",
+            "^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$",
+        ],
+        "shared_extra_conf": {},
+        "worker_extra_conf": "",
+    },
+    "federation_reader": {
+        "app": "synapse.app.generic_worker",
+        "listener_resources": ["federation"],
+        "endpoint_patterns": [
+            "^/_matrix/federation/(v1|v2)/event/",
+            "^/_matrix/federation/(v1|v2)/state/",
+            "^/_matrix/federation/(v1|v2)/state_ids/",
+            "^/_matrix/federation/(v1|v2)/backfill/",
+            "^/_matrix/federation/(v1|v2)/get_missing_events/",
+            "^/_matrix/federation/(v1|v2)/publicRooms",
+            "^/_matrix/federation/(v1|v2)/query/",
+            "^/_matrix/federation/(v1|v2)/make_join/",
+            "^/_matrix/federation/(v1|v2)/make_leave/",
+            "^/_matrix/federation/(v1|v2)/send_join/",
"^/_matrix/federation/(v1|v2)/send_leave/", + "^/_matrix/federation/(v1|v2)/invite/", + "^/_matrix/federation/(v1|v2)/query_auth/", + "^/_matrix/federation/(v1|v2)/event_auth/", + "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/", + "^/_matrix/federation/(v1|v2)/user/devices/", + "^/_matrix/federation/(v1|v2)/get_groups_publicised$", + "^/_matrix/key/v2/query", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "federation_inbound": { + "app": "synapse.app.generic_worker", + "listener_resources": ["federation"], + "endpoint_patterns": ["/_matrix/federation/(v1|v2)/send/"], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "event_persister": { + "app": "synapse.app.generic_worker", + "listener_resources": ["replication"], + "endpoint_patterns": [], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "background_worker": { + "app": "synapse.app.generic_worker", + "listener_resources": [], + "endpoint_patterns": [], + # This worker cannot be sharded. Therefore there should only ever be one background + # worker, and it should be named background_worker1 + "shared_extra_conf": {"run_background_tasks_on": "background_worker1"}, + "worker_extra_conf": "", + }, + "event_creator": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client"], + "endpoint_patterns": [ + "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact", + "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send", + "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$", + "^/_matrix/client/(api/v1|r0|unstable)/join/", + "^/_matrix/client/(api/v1|r0|unstable)/profile/", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "frontend_proxy": { + "app": "synapse.app.frontend_proxy", + "listener_resources": ["client", "replication"], + "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"], + "shared_extra_conf": {}, + "worker_extra_conf": ( + "worker_main_http_uri: http://127.0.0.1:%d" + % (MAIN_PROCESS_HTTP_LISTENER_PORT,), + ), + }, +} + +# Templates for sections that may be inserted multiple times in config files +SUPERVISORD_PROCESS_CONFIG_BLOCK = """ +[program:synapse_{name}] +command=/usr/local/bin/python -m {app} \ + --config-path="{config_path}" \ + --config-path=/conf/workers/shared.yaml \ + --config-path=/conf/workers/{name}.yaml +autorestart=unexpected +priority=500 +exitcodes=0 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +""" + +NGINX_LOCATION_CONFIG_BLOCK = """ + location ~* {endpoint} { + proxy_pass {upstream}; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + } +""" + +NGINX_UPSTREAM_CONFIG_BLOCK = """ +upstream {upstream_worker_type} { +{body} +} +""" + + +# Utility functions +def log(txt: str): + """Log something to the stdout. + + Args: + txt: The text to log. + """ + print(txt) + + +def error(txt: str): + """Log something and exit with an error code. + + Args: + txt: The text to log in error. + """ + log(txt) + sys.exit(2) + + +def convert(src: str, dst: str, **template_vars): + """Generate a file from a template + + Args: + src: Path to the input file. + dst: Path to write to. + template_vars: The arguments to replace placeholder variables in the template with. + """ + # Read the template file + with open(src) as infile: + template = infile.read() + + # Generate a string from the template. 
We disable autoescape to prevent template
+    # variables from being escaped.
+    rendered = jinja2.Template(template, autoescape=False).render(**template_vars)
+
+    # Write the generated contents to a file
+    #
+    # We use append mode in case the files have already been written to by something else
+    # (for instance, as part of the instructions in a dockerfile).
+    with open(dst, "a") as outfile:
+        # In case the existing file doesn't end with a newline
+        outfile.write("\n")
+
+        outfile.write(rendered)
+
+
+def add_sharding_to_shared_config(
+    shared_config: dict,
+    worker_type: str,
+    worker_name: str,
+    worker_port: int,
+) -> None:
+    """Given a dictionary representing a config file shared across all workers,
+    append sharded worker information to it for the current worker_type instance.
+
+    Args:
+        shared_config: The config dict that all worker instances share (after being converted to YAML)
+        worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
+        worker_name: The name of the worker instance.
+        worker_port: The HTTP replication port that the worker instance is listening on.
+    """
+    # The instance_map config field marks the workers that write to various replication streams
+    instance_map = shared_config.setdefault("instance_map", {})
+
+    # Worker-type specific sharding config
+    if worker_type == "pusher":
+        shared_config.setdefault("pusher_instances", []).append(worker_name)
+
+    elif worker_type == "federation_sender":
+        shared_config.setdefault("federation_sender_instances", []).append(worker_name)
+
+    elif worker_type == "event_persister":
+        # Event persisters write to the events stream, so we need to update
+        # the list of event stream writers
+        shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
+            worker_name
+        )
+
+        # Map of stream writer instance names to host/ports combos
+        instance_map[worker_name] = {
+            "host": "localhost",
+            "port": worker_port,
+        }
+
+    elif worker_type == "media_repository":
+        # The first configured media worker will run the media background jobs
+        shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+
+
+def generate_base_homeserver_config():
+    """Starts Synapse and generates a basic homeserver config, which will later be
+    modified for worker support.
+
+    Raises: CalledProcessError if calling start.py returned a non-zero exit code.
+    """
+    # start.py already does this for us, so just call that.
+    # note that this script is copied in by the official monolith dockerfile
+    os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
+    subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
+
+
+def generate_worker_files(environ, config_path: str, data_dir: str):
+    """Read the desired list of workers from environment variables and generate
+    shared homeserver, nginx and supervisord configs.
+
+    Args:
+        environ: _Environ[str]
+        config_path: Where to output the generated Synapse main worker config file.
+        data_dir: The location of the synapse data directory. Where log and
+            user-facing config files live.
+    """
+    # Note that yaml cares about indentation, so care should be taken to insert lines
+    # into files at the correct indentation below.
+
+    # shared_config is the contents of a Synapse config file that will be shared amongst
+    # the main Synapse process as well as all workers.
+    # It is intended mainly for disabling functionality when certain workers are spun up,
+    # and adding a replication listener.
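+    #
+    # For illustration only (hypothetical values): two event_persister
+    # instances would, via add_sharding_to_shared_config() above, end up
+    # adding roughly the following to the shared config:
+    #
+    #   stream_writers:
+    #     events: ["event_persister1", "event_persister2"]
+    #   instance_map:
+    #     event_persister1: {host: "localhost", port: 18009}
+    #     event_persister2: {host: "localhost", port: 18010}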
+
+    # First read the original config file and extract the listeners block. Then we'll add
+    # another listener for replication. Later we'll write out the result.
+    listeners = [
+        {
+            "port": 9093,
+            "bind_address": "127.0.0.1",
+            "type": "http",
+            "resources": [{"names": ["replication"]}],
+        }
+    ]
+    with open(config_path) as file_stream:
+        original_config = yaml.safe_load(file_stream)
+        original_listeners = original_config.get("listeners")
+        if original_listeners:
+            listeners += original_listeners
+
+    # The shared homeserver config. The contents of which will be inserted into the
+    # base shared worker jinja2 template.
+    #
+    # This config file will be passed to all workers, including Synapse's main process.
+    shared_config = {"listeners": listeners}
+
+    # The supervisord config. The contents of which will be inserted into the
+    # base supervisord jinja2 template.
+    #
+    # Supervisord will be in charge of running everything, from redis to nginx to Synapse
+    # and all of its worker processes. Load the config template, which defines a few
+    # services that are necessary to run.
+    supervisord_config = ""
+
+    # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
+    # ports of each worker. For example:
+    # {
+    #   worker_type: {1234, 1235, ...}
+    # }
+    # and will be used to construct 'upstream' nginx directives.
+    nginx_upstreams = {}
+
+    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
+    # placed after the proxy_pass directive. The main benefit to representing this data as a
+    # dict over a str is that we can easily deduplicate endpoints across multiple instances
+    # of the same worker.
+    #
+    # An nginx site config that will be amended depending on the workers that are
+    # spun up. To be placed in /etc/nginx/conf.d.
+    nginx_locations = {}
+
+    # Read the desired worker configuration from the environment
+    worker_types = environ.get("SYNAPSE_WORKER_TYPES")
+    if worker_types is None:
+        # No workers, just the main process
+        worker_types = []
+    else:
+        # Split type names by comma
+        worker_types = worker_types.split(",")
+
+    # Create the worker configuration directory if it doesn't already exist
+    os.makedirs("/conf/workers", exist_ok=True)
+
+    # Start worker ports from this arbitrary port
+    worker_port = 18009
+
+    # A counter of worker_type -> int. Used for determining the name for a given
+    # worker type when generating its config file, as each worker's name is just
+    # worker_type + instance #
+    worker_type_counter = {}
+
+    # For each worker type specified by the user, create config values
+    for worker_type in worker_types:
+        worker_type = worker_type.strip()
+
+        worker_config = WORKERS_CONFIG.get(worker_type)
+        if worker_config:
+            worker_config = worker_config.copy()
+        else:
+            log(worker_type + " is an unknown worker type! It will be ignored")
+            continue
+
+        new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
+        worker_type_counter[worker_type] = new_worker_count
+
+        # Name workers by their type concatenated with an incrementing number
+        # e.g.
federation_reader1 + worker_name = worker_type + str(new_worker_count) + worker_config.update( + {"name": worker_name, "port": worker_port, "config_path": config_path} + ) + + # Update the shared config with any worker-type specific options + shared_config.update(worker_config["shared_extra_conf"]) + + # Check if more than one instance of this worker type has been specified + worker_type_total_count = worker_types.count(worker_type) + if worker_type_total_count > 1: + # Update the shared config with sharding-related options if necessary + add_sharding_to_shared_config( + shared_config, worker_type, worker_name, worker_port + ) + + # Enable the worker in supervisord + supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config) + + # Add nginx location blocks for this worker's endpoints (if any are defined) + for pattern in worker_config["endpoint_patterns"]: + # Determine whether we need to load-balance this worker + if worker_type_total_count > 1: + # Create or add to a load-balanced upstream for this worker + nginx_upstreams.setdefault(worker_type, set()).add(worker_port) + + # Upstreams are named after the worker_type + upstream = "http://" + worker_type + else: + upstream = "http://localhost:%d" % (worker_port,) + + # Note that this endpoint should proxy to this upstream + nginx_locations[pattern] = upstream + + # Write out the worker's logging config file + + # Check whether we should write worker logs to disk, in addition to the console + extra_log_template_args = {} + if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"): + extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format( + dir=data_dir, name=worker_name + ) + + # Render and write the file + log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name) + convert( + "/conf/log.config", + log_config_filepath, + worker_name=worker_name, + **extra_log_template_args, + ) + + # Then a worker config file + convert( + "/conf/worker.yaml.j2", + "/conf/workers/{name}.yaml".format(name=worker_name), + **worker_config, + worker_log_config_filepath=log_config_filepath, + ) + + worker_port += 1 + + # Build the nginx location config blocks + nginx_location_config = "" + for endpoint, upstream in nginx_locations.items(): + nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format( + endpoint=endpoint, + upstream=upstream, + ) + + # Determine the load-balancing upstreams to configure + nginx_upstream_config = "" + for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items(): + body = "" + for port in upstream_worker_ports: + body += " server localhost:%d;\n" % (port,) + + # Add to the list of configured upstreams + nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format( + upstream_worker_type=upstream_worker_type, + body=body, + ) + + # Finally, we'll write out the config files. 
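+    #
+    # For illustration only (hypothetical worker selection): a single user_dir
+    # worker on port 18009 would yield an nginx location block along the
+    # lines of:
+    #
+    #   location ~* ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$ {
+    #       proxy_pass http://localhost:18009;
+    #       ...
+    #   }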
+
+    # Shared homeserver config
+    convert(
+        "/conf/shared.yaml.j2",
+        "/conf/workers/shared.yaml",
+        shared_worker_config=yaml.dump(shared_config),
+    )
+
+    # Nginx config
+    convert(
+        "/conf/nginx.conf.j2",
+        "/etc/nginx/conf.d/matrix-synapse.conf",
+        worker_locations=nginx_location_config,
+        upstream_directives=nginx_upstream_config,
+    )
+
+    # Supervisord config
+    convert(
+        "/conf/supervisord.conf.j2",
+        "/etc/supervisor/conf.d/supervisord.conf",
+        main_config_path=config_path,
+        worker_config=supervisord_config,
+    )
+
+    # Ensure the logging directory exists
+    log_dir = data_dir + "/logs"
+    if not os.path.exists(log_dir):
+        os.mkdir(log_dir)
+
+
+def start_supervisord():
+    """Starts up supervisord which then starts and monitors all other necessary processes
+
+    Raises: CalledProcessError if calling supervisord returns a non-zero exit code.
+    """
+    subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
+
+
+def main(args, environ):
+    config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
+    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
+    data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")
+
+    # override SYNAPSE_NO_TLS, we don't support TLS in worker mode,
+    # this needs to be handled by a frontend proxy
+    environ["SYNAPSE_NO_TLS"] = "yes"
+
+    # Generate the base homeserver config if one does not yet exist
+    if not os.path.exists(config_path):
+        log("Generating base homeserver config")
+        generate_base_homeserver_config()
+
+    # This script may be run multiple times (mostly by Complement, see note at top of file).
+    # Don't re-configure workers in this instance.
+    mark_filepath = "/conf/workers_have_been_configured"
+    if not os.path.exists(mark_filepath):
+        # Always regenerate all other config files
+        generate_worker_files(environ, config_path, data_dir)
+
+        # Mark workers as being configured
+        with open(mark_filepath, "w") as f:
+            f.write("")
+
+    # Start supervisord, which will start Synapse, all of the configured worker
+    # processes, redis, nginx etc. according to the config we created above.
+    start_supervisord()
+
+
+if __name__ == "__main__":
+    main(sys.argv, os.environ)
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index bc737b30f59e..01d3882426b3 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -427,7 +427,7 @@ the new room. Users on other servers will be unaffected.
 The API is:
 
 ```
-POST /_synapse/admin/v1/rooms/<room_id>/delete
+DELETE /_synapse/admin/v1/rooms/<room_id>
 ```
 
 with a body of:
@@ -528,6 +528,15 @@ You will have to manually handle, if you so choose, the following:
 * Users that would have been booted from the room (and will have been force-joined to
   the Content Violation room).
 * Removal of the Content Violation room if desired.
+
+## Deprecated endpoint
+
+The previous deprecated API will be removed in a future release; it was:
+
+```
+POST /_synapse/admin/v1/rooms/<room_id>/delete
+```
+
+It behaves in the same way as the current endpoint, except for the path and the method.
 
 # Make Room Admin API
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 8d4ec5a6f913..dbce9c90b6c7 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -111,35 +111,16 @@ List Accounts
 =============
 
 This API returns all local user accounts.
+By default, the response is ordered by ascending user ID.
-The api is::
+The API is::
 
     GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
 
 To use it, you will need to authenticate by providing an ``access_token`` for a
 server admin: see `README.rst <README.rst>`_.
 
-The parameter ``from`` is optional but used for pagination, denoting the
-offset in the returned results. This should be treated as an opaque value and
-not explicitly set to anything other than the return value of ``next_token``
-from a previous call.
-
-The parameter ``limit`` is optional but is used for pagination, denoting the
-maximum number of items to return in this call. Defaults to ``100``.
-
-The parameter ``user_id`` is optional and filters to only return users with user IDs
-that contain this value. This parameter is ignored when using the ``name`` parameter.
-
-The parameter ``name`` is optional and filters to only return users with user ID localparts
-**or** displaynames that contain this value.
-
-The parameter ``guests`` is optional and if ``false`` will **exclude** guest users.
-Defaults to ``true`` to include guest users.
-
-The parameter ``deactivated`` is optional and if ``true`` will **include** deactivated users.
-Defaults to ``false`` to exclude deactivated users.
-
-A JSON body is returned with the following shape:
+A response body like the following is returned:
 
 .. code:: json
 
@@ -175,6 +156,66 @@ with ``from`` set to the value of ``next_token``. This will return a new page.
 If the endpoint does not return a ``next_token`` then there are no more users
 to paginate through.
 
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - Is optional and filters to only return users with user IDs
+  that contain this value. This parameter is ignored when using the ``name`` parameter.
+- ``name`` - Is optional and filters to only return users with user ID localparts
+  **or** displaynames that contain this value.
+- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
+  Defaults to ``true`` to include guest users.
+- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
+  Defaults to ``false`` to exclude deactivated users.
+- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
+  denoting the maximum number of items to return in this call. Defaults to ``100``.
+- ``from`` - string representing a positive integer - Is optional but used for pagination,
+  denoting the offset in the returned results. This should be treated as an opaque value and
+  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
+  Defaults to ``0``.
+- ``order_by`` - The method by which to sort the returned list of users.
+  If the ordered field has duplicates, the second order is always by ascending ``name``,
+  which guarantees a stable ordering. Valid values are:
+
+  - ``name`` - Users are ordered alphabetically by ``name``. This is the default.
+  - ``is_guest`` - Users are ordered by ``is_guest`` status.
+  - ``admin`` - Users are ordered by ``admin`` status.
+  - ``user_type`` - Users are ordered alphabetically by ``user_type``.
+  - ``deactivated`` - Users are ordered by ``deactivated`` status.
+  - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
+  - ``displayname`` - Users are ordered alphabetically by ``displayname``.
+  - ``avatar_url`` - Users are ordered alphabetically by avatar URL.
+
+- ``dir`` - Direction of user order. Either ``f`` for forwards or ``b`` for backwards.
+  Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
+
+Caution. The database only has indexes on the columns ``name`` and ``created_ts``.
+This means that if a different sort order is used (``is_guest``, ``admin``,
+``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
+this can cause a large load on the database, especially for large environments.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``users`` - An array of objects, each containing information about a user.
+  User objects contain the following fields:
+
+  - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
+  - ``is_guest`` - bool - Status if that user is a guest account.
+  - ``admin`` - bool - Status if that user is a server administrator.
+  - ``user_type`` - string - Type of the user. Normal users are type ``None``.
+    This allows user type specific behaviour. There are also types ``support`` and ``bot``.
+  - ``deactivated`` - bool - Status if that user has been marked as deactivated.
+  - ``shadow_banned`` - bool - Status if that user has been marked as shadow banned.
+  - ``displayname`` - string - The user's display name if they have set one.
+  - ``avatar_url`` - string - The user's avatar URL if they have set one.
+
+- ``next_token``: string representing a positive integer - Indication for pagination. See above.
+- ``total`` - integer - Total number of users.
+
 
 Query current sessions for a user
 =================================
@@ -823,3 +864,118 @@ The following parameters should be set in the URL:
 
 - ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
   be local.
+
+Override ratelimiting for users
+===============================
+
+This API allows you to override or disable ratelimiting for a specific user.
+There are specific APIs to set, get and delete a ratelimit.
+
+Get status of ratelimit
+-----------------------
+
+The API is::
+
+  GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+        "messages_per_second": 0,
+        "burst_count": 0
+    }
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
+  be local.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``messages_per_second`` - integer - The number of actions that can
+  be performed in a second. ``0`` means that ratelimiting is disabled for this user.
+- ``burst_count`` - integer - How many actions can be performed before
+  being limited.
+
+If **no** custom ratelimit is set, an empty JSON dict is returned.
+
+.. code:: json
+
+    {}
+
+Set ratelimit
+-------------
+
+The API is::
+
+  POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+        "messages_per_second": 0,
+        "burst_count": 0
+    }
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
+  be local.
+
+Body parameters:
+
+- ``messages_per_second`` - positive integer, optional. The number of actions that can
+  be performed in a second.
  Defaults to ``0``.
+- ``burst_count`` - positive integer, optional. How many actions can be performed
+  before being limited. Defaults to ``0``.
+
+To disable ratelimiting for a user, set both values to ``0``.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``messages_per_second`` - integer - The number of actions that can
+  be performed in a second.
+- ``burst_count`` - integer - How many actions can be performed before
+  being limited.
+
+Delete ratelimit
+----------------
+
+The API is::
+
+  DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+An empty JSON dict is returned.
+
+.. code:: json
+
+    {}
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
+  be local.
+
diff --git a/docs/code_style.md b/docs/code_style.md
index 190f8ab2de88..28fb7277c41b 100644
--- a/docs/code_style.md
+++ b/docs/code_style.md
@@ -128,6 +128,9 @@ Some guidelines follow:
     will be if no sub-options are enabled).
 - Lines should be wrapped at 80 characters.
 - Use two-space indents.
+- `true` and `false` are spelt thus (as opposed to `True`, etc.)
+- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
+  (`` ` ``) to refer to configuration options.
 
 Example:
diff --git a/docs/presence_router_module.md b/docs/presence_router_module.md
new file mode 100644
index 000000000000..d6566d978d06
--- /dev/null
+++ b/docs/presence_router_module.md
@@ -0,0 +1,235 @@
+# Presence Router Module
+
+Synapse supports configuring a module that can specify additional users
+(local or remote) that should receive certain presence updates from local
+users.
+
+Note that routing presence via Application Service transactions is not
+currently supported.
+
+The presence routing module is implemented as a Python class, which will
+be imported by the running Synapse.
+
+## Python Presence Router Class
+
+The Python class is instantiated with two objects:
+
+* A configuration object of some type (see below).
+* An instance of `synapse.module_api.ModuleApi`.
+
+It then implements methods related to presence routing.
+
+Note that one method of `ModuleApi` that may be useful is:
+
+```python
+async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
+```
+
+which can be given a list of local or remote MXIDs to broadcast known, online user
+presence to (for those users that the receiving user is considered interested in).
+It does not include state for users who are currently offline, and it can only be
+called on workers that support sending federation.
+
+### Module structure
+
+Below is a list of possible methods that can be implemented, and whether they are
+required.
+
+#### `parse_config`
+
+```python
+def parse_config(config_dict: dict) -> Any
+```
+
+**Required.** A static method that is passed a dictionary of config options, and
+  should return a validated config object. This method is described further in
+  [Configuration](#configuration).
+
+#### `get_users_for_states`
+
+```python
+async def get_users_for_states(
+    self,
+    state_updates: Iterable[UserPresenceState],
+) -> Dict[str, Set[UserPresenceState]]:
+```
+
+**Required.** An asynchronous method that is passed an iterable of user presence
+state. This method can determine whether a given presence update should be sent to certain
+users.
It does this by returning a dictionary with keys representing local or remote
+Matrix User IDs, and values being a python set
+of `synapse.handlers.presence.UserPresenceState` instances.
+
+Synapse will then attempt to send the specified presence updates to each user when
+possible.
+
+#### `get_interested_users`
+
+```python
+async def get_interested_users(self, user_id: str) -> Union[Set[str], str]
+```
+
+**Required.** An asynchronous method that is passed a single Matrix User ID. This
+method is expected to return the users that the passed-in user may be interested in the
+presence of. Returned users may be local or remote. The presence routed as a result of
+what this method returns is sent in addition to the updates already sent between users
+that share a room. Presence updates are deduplicated.
+
+This method should return a python set of Matrix User IDs, or the object
+`synapse.events.presence_router.PresenceRouter.ALL_USERS` to indicate that the passed
+user should receive presence information for *all* known users.
+
+For clarity, if the user `@alice:example.org` is passed to this method, and the Set
+`{"@bob:example.com", "@charlie:somewhere.org"}` is returned, this signifies that Alice
+should receive presence updates sent by Bob and Charlie, regardless of whether these
+users share a room.
+
+### Example
+
+Below is an example implementation of a presence router class.
+
+```python
+from typing import Dict, Iterable, List, Set, Union
+from synapse.events.presence_router import PresenceRouter
+from synapse.handlers.presence import UserPresenceState
+from synapse.module_api import ModuleApi
+
+class PresenceRouterConfig:
+    def __init__(self):
+        # Config options with their defaults
+        # A list of users to always send all user presence updates to
+        self.always_send_to_users = []  # type: List[str]
+
+        # A list of users to ignore presence updates for. Does not affect
+        # shared-room presence relationships
+        self.blacklisted_users = []  # type: List[str]
+
+class ExamplePresenceRouter:
+    """An example implementation of synapse.presence_router.PresenceRouter.
+    Supports routing all presence to a configured set of users, or a subset
+    of presence from certain users to members of certain rooms.
+
+    Args:
+        config: A configuration object.
+        module_api: An instance of Synapse's ModuleApi.
+    """
+    def __init__(self, config: PresenceRouterConfig, module_api: ModuleApi):
+        self._config = config
+        self._module_api = module_api
+
+    @staticmethod
+    def parse_config(config_dict: dict) -> PresenceRouterConfig:
+        """Parse a configuration dictionary from the homeserver config, do
+        some validation and return a typed PresenceRouterConfig.
+
+        Args:
+            config_dict: The configuration dictionary.
+
+        Returns:
+            A validated config object.
+        """
+        # Initialise a typed config object
+        config = PresenceRouterConfig()
+        always_send_to_users = config_dict.get("always_send_to_users")
+        blacklisted_users = config_dict.get("blacklisted_users")
+
+        # Do some validation of config options... otherwise raise a
+        # synapse.config.ConfigError.
+        config.always_send_to_users = always_send_to_users
+        config.blacklisted_users = blacklisted_users
+
+        return config
+
+    async def get_users_for_states(
+        self,
+        state_updates: Iterable[UserPresenceState],
+    ) -> Dict[str, Set[UserPresenceState]]:
+        """Given an iterable of user presence updates, determine where each one
+        needs to go. Returned results will not affect presence updates that are
+        sent between users who share a room.
+
+        Args:
+            state_updates: An iterable of user presence state updates.
+
+        Returns:
+            A dictionary of user_id -> set of UserPresenceState that the user should
+            receive.
+        """
+        destination_users = {}  # type: Dict[str, Set[UserPresenceState]]
+
+        # Ignore any updates for blacklisted users
+        desired_updates = set()
+        for update in state_updates:
+            if update.state_key not in self._config.blacklisted_users:
+                desired_updates.add(update)
+
+        # Send all presence updates to specific users
+        for user_id in self._config.always_send_to_users:
+            destination_users[user_id] = desired_updates
+
+        return destination_users
+
+    async def get_interested_users(
+        self,
+        user_id: str,
+    ) -> Union[Set[str], str]:
+        """
+        Retrieve a list of users that `user_id` is interested in receiving the
+        presence of. This will be in addition to those they share a room with.
+        Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate
+        that this user should receive all incoming local and remote presence updates.
+
+        Note that this method will only be called for local users.
+
+        Args:
+          user_id: A user requesting presence updates.
+
+        Returns:
+          A set of user IDs to return additional presence updates for, or
+          PresenceRouter.ALL_USERS to return presence updates for all other users.
+        """
+        if user_id in self._config.always_send_to_users:
+            return PresenceRouter.ALL_USERS
+
+        return set()
+```
+
+#### A note on `get_users_for_states` and `get_interested_users`
+
+Both of these methods are effectively two different sides of the same coin. The logic
+regarding which users should receive updates for other users should be the same
+between them.
+
+`get_users_for_states` is called when presence updates come in from either federation
+or local users, and is used to either direct local presence to remote users, or to
+wake up the sync streams of local users to collect remote presence.
+
+In contrast, `get_interested_users` is used to determine the users that presence should
+be fetched for when a local user is syncing. This presence is then retrieved, before
+being fed through `get_users_for_states` once again, with only the syncing user's
+routing information pulled from the resulting dictionary.
+
+Their routing logic should thus line up, else you may run into unintended behaviour.
+
+## Configuration
+
+Once you've crafted your module and installed it into the same Python environment as
+Synapse, amend your homeserver config file with the following.
+
+```yaml
+presence:
+  routing_module:
+    module: my_module.ExamplePresenceRouter
+    config:
+      # Any configuration options for your module. The below is an example
+      # of setting options for ExamplePresenceRouter.
+      always_send_to_users: ["@presence_gobbler:example.org"]
+      blacklisted_users:
+        - "@alice:example.com"
+        - "@bob:example.com"
+      ...
+```
+
+The contents of `config` will be passed as a Python dictionary to the static
+`parse_config` method of your class. The object returned by this method will
+then be passed to the `__init__` method of your module as `config`.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 17cda71adc82..67ad57b1aa9a 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -82,9 +82,28 @@ pid_file: DATADIR/homeserver.pid
 #
 #soft_file_limit: 0
 
-# Set to false to disable presence tracking on this homeserver.
+# Presence tracking allows users to see the state (e.g. online/offline)
+# of other local and remote users.
# -#use_presence: false +presence: + # Uncomment to disable presence tracking on this homeserver. This option + # replaces the previous top-level 'use_presence' option. + # + #enabled: false + + # Presence routers are third-party modules that can specify additional logic + # to where presence updates from users are routed. + # + presence_router: + # The custom module's class. Uncomment to use a custom presence router module. + # + #module: "my_custom_router.PresenceRouter" + + # Configuration options of the custom module. Refer to your module's + # documentation for available options. + # + #config: + # example_option: 'something' # Whether to require authentication to retrieve profile data (avatars, # display names) of other users through the client API. Defaults to @@ -133,6 +152,16 @@ pid_file: DATADIR/homeserver.pid # #gc_thresholds: [700, 10, 10] +# The minimum time in seconds between each GC for a generation, regardless of +# the GC thresholds. This ensures that we don't do GC too frequently. +# +# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive +# generation 0 GCs, etc. +# +# Defaults to `[1s, 10s, 30s]`. +# +#gc_min_interval: [0.5s, 30s, 1m] + # Set the limit on the returned events in the timeline in the get # and sync operations. The default value is 100. -1 means no upper limit. # @@ -712,6 +741,12 @@ acme: # #allow_profile_lookup_over_federation: false +# Uncomment to disable device display name lookup over federation. By default, the +# Federation API allows other homeservers to obtain device display names of any user +# on this homeserver. Defaults to 'true'. +# +#allow_device_name_lookup_over_federation: false + ## Caching ## @@ -791,6 +826,7 @@ caches: # password: secretpassword # database: synapse # host: localhost +# port: 5432 # cp_min: 5 # cp_max: 10 # @@ -1156,69 +1192,6 @@ url_preview_accept_language: # #enable_registration: false -# Optional account validity configuration. This allows for accounts to be denied -# any request after a given period. -# -# Once this feature is enabled, Synapse will look for registered users without an -# expiration date at startup and will add one to every account it found using the -# current settings at that time. -# This means that, if a validity period is set, and Synapse is restarted (it will -# then derive an expiration date from the current validity period), and some time -# after that the validity period changes and Synapse is restarted, the users' -# expiration dates won't be updated unless their account is manually renewed. This -# date will be randomly selected within a range [now + period - d ; now + period], -# where d is equal to 10% of the validity period. -# -account_validity: - # The account validity feature is disabled by default. Uncomment the - # following line to enable it. - # - #enabled: true - - # The period after which an account is valid after its registration. When - # renewing the account, its validity period will be extended by this amount - # of time. This parameter is required when using the account validity - # feature. - # - #period: 6w - - # The amount of time before an account's expiry date at which Synapse will - # send an email to the account's email address with a renewal link. By - # default, no such emails are sent. - # - # If you enable this setting, you will also need to fill out the 'email' and - # 'public_baseurl' configuration sections. - # - #renew_at: 1w - - # The subject of the email sent out with the renewal link. 
'%(app)s' can be - # used as a placeholder for the 'app_name' parameter from the 'email' - # section. - # - # Note that the placeholder must be written '%(app)s', including the - # trailing 's'. - # - # If this is not set, a default value is used. - # - #renew_email_subject: "Renew your %(app)s account" - - # Directory in which Synapse will try to find templates for the HTML files to - # serve to the user when trying to renew an account. If not set, default - # templates from within the Synapse package will be used. - # - #template_dir: "res/templates" - - # File within 'template_dir' giving the HTML to be displayed to the user after - # they successfully renewed their account. If not set, default text is used. - # - #account_renewed_html_path: "account_renewed.html" - - # File within 'template_dir' giving the HTML to be displayed when the user - # tries to renew an account with an invalid renewal token. If not set, - # default text is used. - # - #invalid_token_html_path: "invalid_token.html" - # Time that a user's session remains valid for, after they log in. # # Note that this is not currently compatible with guest logins. @@ -1246,9 +1219,9 @@ account_validity: # #allowed_local_3pids: # - medium: email -# pattern: '.*@matrix\.org' +# pattern: '^[^@]+@matrix\.org$' # - medium: email -# pattern: '.*@vector\.im' +# pattern: '^[^@]+@vector\.im$' # - medium: msisdn # pattern: '\+44' @@ -1413,6 +1386,91 @@ account_threepid_delegates: #auto_join_rooms_for_guests: false +## Account Validity ## + +# Optional account validity configuration. This allows for accounts to be denied +# any request after a given period. +# +# Once this feature is enabled, Synapse will look for registered users without an +# expiration date at startup and will add one to every account it found using the +# current settings at that time. +# This means that, if a validity period is set, and Synapse is restarted (it will +# then derive an expiration date from the current validity period), and some time +# after that the validity period changes and Synapse is restarted, the users' +# expiration dates won't be updated unless their account is manually renewed. This +# date will be randomly selected within a range [now + period - d ; now + period], +# where d is equal to 10% of the validity period. +# +account_validity: + # The account validity feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # The period after which an account is valid after its registration. When + # renewing the account, its validity period will be extended by this amount + # of time. This parameter is required when using the account validity + # feature. + # + #period: 6w + + # The amount of time before an account's expiry date at which Synapse will + # send an email to the account's email address with a renewal link. By + # default, no such emails are sent. + # + # If you enable this setting, you will also need to fill out the 'email' and + # 'public_baseurl' configuration sections. + # + #renew_at: 1w + + # The subject of the email sent out with the renewal link. '%(app)s' can be + # used as a placeholder for the 'app_name' parameter from the 'email' + # section. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + # If this is not set, a default value is used. + # + #renew_email_subject: "Renew your %(app)s account" + + # Directory in which Synapse will try to find templates for the HTML files to + # serve to the user when trying to renew an account. 
If not set, default + # templates from within the Synapse package will be used. + # + # The currently available templates are: + # + # * account_renewed.html: Displayed to the user after they have successfully + # renewed their account. + # + # * account_previously_renewed.html: Displayed to the user if they attempt to + # renew their account with a token that is valid, but that has already + # been used. In this case the account is not renewed again. + # + # * invalid_token.html: Displayed to the user when they try to renew an account + # with an unknown or invalid renewal token. + # + # See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for + # default template contents. + # + # The file name of some of these templates can be configured below for legacy + # reasons. + # + #template_dir: "res/templates" + + # A custom file name for the 'account_renewed.html' template. + # + # If not set, the file is assumed to be named "account_renewed.html". + # + #account_renewed_html_path: "account_renewed.html" + + # A custom file name for the 'invalid_token.html' template. + # + # If not set, the file is assumed to be named "invalid_token.html". + # + #invalid_token_html_path: "invalid_token.html" + + ## Metrics ### # Enable collection and rendering of performance metrics @@ -1451,14 +1509,32 @@ metrics_flags: ## API Configuration ## -# A list of event types that will be included in the room_invite_state +# Controls for the state that is shared with users who receive an invite +# to a room # -#room_invite_state_types: -# - "m.room.join_rules" -# - "m.room.canonical_alias" -# - "m.room.avatar" -# - "m.room.encryption" -# - "m.room.name" +room_prejoin_state: + # By default, the following state event types are shared with users who + # receive invites to the room: + # + # - m.room.join_rules + # - m.room.canonical_alias + # - m.room.avatar + # - m.room.encryption + # - m.room.name + # - m.room.create + # + # Uncomment the following to disable these defaults (so that only the event + # types listed in 'additional_event_types' are shared). Defaults to 'false'. + # + #disable_default_event_types: true + + # Additional state event types to share with users when they are invited + # to a room. + # + # By default, this list is empty (so only the default event types are shared). + # + #additional_event_types: + # - org.example.custom.event.type # A list of application service config files to use @@ -1842,7 +1918,7 @@ saml2_config: # sub-properties: # # module: The class name of a custom mapping module. Default is -# 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'. +# 'synapse.handlers.oidc.JinjaOidcMappingProvider'. # See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers # for information on implementing a custom mapping provider. # diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index e1d6ede7bac3..50020d1a4ad1 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -106,7 +106,7 @@ A custom mapping provider must specify the following methods: Synapse has a built-in OpenID mapping provider if a custom provider isn't specified in the config. It is located at -[`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`](../synapse/handlers/oidc_handler.py). +[`synapse.handlers.oidc.JinjaOidcMappingProvider`](../synapse/handlers/oidc.py). 
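For reference, a custom mapping provider is hooked up via a `user_mapping_provider`
section in the relevant homeserver config block. The following is only a rough sketch
(the module name is hypothetical, and the exact surrounding structure is described in
the sample config):

```yaml
user_mapping_provider:
  # The class name of a custom mapping module (hypothetical example)
  module: my_module.MyOidcMappingProvider
  config:
    # Options here are passed to the provider's parse_config method
    example_option: "something"
```
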
## SAML Mapping Providers @@ -190,4 +190,4 @@ A custom mapping provider must specify the following methods: Synapse has a built-in SAML mapping provider if a custom provider isn't specified in the config. It is located at -[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py). +[`synapse.handlers.saml.DefaultSamlMappingProvider`](../synapse/handlers/saml.py). diff --git a/mypy.ini b/mypy.ini index 3ae5d4578776..ea655a0d4d92 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,6 +8,7 @@ show_traceback = True mypy_path = stubs warn_unreachable = True local_partial_types = True +no_implicit_optional = True # To find all folders that pass mypy you run: # @@ -40,7 +41,6 @@ files = synapse/push, synapse/replication, synapse/rest, - synapse/secrets.py, synapse/server.py, synapse/server_notices, synapse/spam_checker_api, @@ -171,3 +171,6 @@ ignore_missing_imports = True [mypy-txacme.*] ignore_missing_imports = True + +[mypy-pympler.*] +ignore_missing_imports = True diff --git a/pyproject.toml b/pyproject.toml index cd880d4e39bf..8bca1fa4efd9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ showcontent = true [tool.black] -target-version = ['py35'] +target-version = ['py36'] exclude = ''' ( diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index d0685c8b35fd..07d018db9985 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -18,14 +18,13 @@ import threading from concurrent.futures import ThreadPoolExecutor DISTS = ( - "debian:stretch", "debian:buster", "debian:bullseye", "debian:sid", - "ubuntu:xenial", - "ubuntu:bionic", - "ubuntu:focal", - "ubuntu:groovy", + "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23) + "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) + "ubuntu:groovy", # 20.10 (EOL 2021-07-07) + "ubuntu:hirsute", # 21.04 (EOL 2022-01-05) ) DESC = '''\ @@ -43,7 +42,7 @@ class Builder(object): self._lock = threading.Lock() self._failed = False - def run_build(self, dist): + def run_build(self, dist, skip_tests=False): """Build deb for a single distribution""" if self._failed: @@ -51,13 +50,13 @@ class Builder(object): raise Exception("failed") try: - self._inner_build(dist) + self._inner_build(dist, skip_tests) except Exception as e: print("build of %s failed: %s" % (dist, e), file=sys.stderr) self._failed = True raise - def _inner_build(self, dist): + def _inner_build(self, dist, skip_tests=False): projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) os.chdir(projdir) @@ -101,6 +100,7 @@ class Builder(object): "--volume=" + debsdir + ":/debs", "-e", "TARGET_USERID=%i" % (os.getuid(), ), "-e", "TARGET_GROUPID=%i" % (os.getgid(), ), + "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""), "dh-venv-builder:" + tag, ], stdout=stdout, stderr=subprocess.STDOUT) @@ -124,7 +124,7 @@ class Builder(object): self.active_containers.remove(c) -def run_builds(dists, jobs=1): +def run_builds(dists, jobs=1, skip_tests=False): builder = Builder(redirect_stdout=(jobs > 1)) def sig(signum, _frame): @@ -133,7 +133,7 @@ def run_builds(dists, jobs=1): signal.signal(signal.SIGINT, sig) with ThreadPoolExecutor(max_workers=jobs) as e: - res = e.map(builder.run_build, dists) + res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists) # make sure we consume the iterable so that exceptions are raised. 
for r in res: @@ -148,9 +148,13 @@ if __name__ == '__main__': '-j', '--jobs', type=int, default=1, help='specify the number of builds to run in parallel', ) + parser.add_argument( + '--no-check', action='store_true', + help='skip running tests after building', + ) parser.add_argument( 'dist', nargs='*', default=DISTS, help='a list of distributions to build for. Default: %(default)s', ) args = parser.parse_args() - run_builds(dists=args.dist, jobs=args.jobs) + run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check) diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 3cde53f5c051..1612ab522c33 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -1,22 +1,49 @@ -#! /bin/bash -eu +#!/usr/bin/env bash # This script is designed for developers who want to test their code # against Complement. # # It makes a Synapse image which represents the current checkout, -then downloads Complement and runs it with that image. +builds a synapse-complement image on top, then runs tests with it. +# +# By default the script will fetch the latest Complement master branch and +# run tests with that. This can be overridden to use a custom Complement +# checkout by setting the COMPLEMENT_DIR environment variable to the +# filepath of a local Complement checkout. +# +# A regular expression of test method names can be supplied as the first +# argument to the script. Complement will then only run those tests. If +# no regex is supplied, all tests are run. For example: +# +# ./complement.sh "TestOutboundFederation(Profile|Send)" +# + +# Exit if a line returns a non-zero exit code +set -e + +# Change to the repository root cd "$(dirname $0)/.." +# Check for a user-specified Complement checkout +if [[ -z "$COMPLEMENT_DIR" ]]; then + echo "COMPLEMENT_DIR not set. Fetching the latest Complement checkout..." + wget -Nq https://github.com/matrix-org/complement/archive/master.tar.gz + tar -xzf master.tar.gz + COMPLEMENT_DIR=complement-master + echo "Checkout available at 'complement-master'" +fi + # Build the base Synapse image from the local checkout -docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile . +docker build -t matrixdotorg/synapse -f docker/Dockerfile . +# Build the Synapse monolith image from Complement, based on the above image we just built +docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles" -# Download Complement -wget -N https://github.com/matrix-org/complement/archive/master.tar.gz -tar -xzf master.tar.gz -cd complement-master +cd "$COMPLEMENT_DIR" -# Build the Synapse image from Complement, based on the above image we just built -docker build -t complement-synapse -f dockerfiles/Synapse.Dockerfile ./dockerfiles +EXTRA_COMPLEMENT_ARGS="" +if [[ -n "$1" ]]; then + # A test name regex has been set, supply it to Complement + EXTRA_COMPLEMENT_ARGS+="-run $1 " fi -# Run the tests on the resulting image! -COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -count=1 ./tests +# Run the tests!
+COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 313860df139a..c82ddd96776c 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -140,7 +140,7 @@ def used_names(prefix, item, defs, names): definitions = {} for directory in args.directories: - for root, dirs, files in os.walk(directory): + for root, _, files in os.walk(directory): for filename in files: if filename.endswith(".py"): filepath = os.path.join(root, filename) diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index 980b5e709f96..0ca75d3fe14f 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python import sys diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index 26ad7c67f483..e85420dea876 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -48,7 +48,7 @@ def find_patterns_in_file(filepath): for directory in args.directories: - for root, dirs, files in os.walk(directory): + for root, _, files in os.walk(directory): for filename in files: if filename.endswith(".py"): filepath = os.path.join(root, filename) diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index bc8f9786608d..39bf30d258bd 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -6,7 +6,7 @@ # It does so by having Synapse generate an up-to-date SQLite DB, then running # synapse_port_db to convert it to Postgres. It then dumps the contents of both. -POSTGRES_HOST="localhost" +export PGHOST="localhost" POSTGRES_DB_NAME="synapse_full_schema.$$" SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite" @@ -32,7 +32,7 @@ usage() { while getopts "p:co:h" opt; do case $opt in p) - POSTGRES_USERNAME=$OPTARG + export PGUSER=$OPTARG ;; c) # Print all commands that are being executed @@ -69,7 +69,7 @@ if [ ${#unsatisfied_requirements} -ne 0 ]; then exit 1 fi -if [ -z "$POSTGRES_USERNAME" ]; then +if [ -z "$PGUSER" ]; then echo "No postgres username supplied" usage exit 1 @@ -84,8 +84,9 @@ fi # Create the output directory if it doesn't exist mkdir -p "$OUTPUT_DIR" -read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD +read -rsp "Postgres password for '$PGUSER': " PGPASSWORD echo "" +export PGPASSWORD # Exit immediately if a command fails set -e @@ -131,9 +132,9 @@ report_stats: false database: name: "psycopg2" args: - user: "$POSTGRES_USERNAME" - host: "$POSTGRES_HOST" - password: "$POSTGRES_PASSWORD" + user: "$PGUSER" + host: "$PGHOST" + password: "$PGPASSWORD" database: "$POSTGRES_DB_NAME" # Suppress the key server warning. @@ -150,7 +151,7 @@ scripts-dev/update_database --database-config "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." -createdb $POSTGRES_DB_NAME +createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." if [ -z "$COVERAGE" ]; then @@ -181,7 +182,7 @@ DROP TABLE user_directory_search_docsize; DROP TABLE user_directory_search_stat; " sqlite3 "$SQLITE_DB" <<< "$SQL" -psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL" +psql "$POSTGRES_DB_NAME" -w <<< "$SQL" echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..." 
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE" diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 18df68305b88..1217e148747d 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts-dev/release.py b/scripts-dev/release.py new file mode 100755 index 000000000000..1042fa48bc8c --- /dev/null +++ b/scripts-dev/release.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""An interactive script for doing a release. See `run()` below. +""" + +import subprocess +import sys +from typing import Optional + +import click +import git +from packaging import version +from redbaron import RedBaron + + +@click.command() +def run(): + """An interactive script to walk through the initial stages of creating a + release, including creating release branch, updating changelog and pushing to + GitHub. + + Requires the dev dependencies be installed, which can be done via: + + pip install -e .[dev] + + """ + + # Make sure we're in a git repo. + try: + repo = git.Repo() + except git.InvalidGitRepositoryError: + raise click.ClickException("Not in Synapse repo.") + + if repo.is_dirty(): + raise click.ClickException("Uncommitted changes exist.") + + click.secho("Updating git repo...") + repo.remote().fetch() + + # Parse the AST and load the `__version__` node so that we can edit it + # later. + with open("synapse/__init__.py") as f: + red = RedBaron(f.read()) + + version_node = None + for node in red: + if node.type != "assignment": + continue + + if node.target.type != "name": + continue + + if node.target.value != "__version__": + continue + + version_node = node + break + + if not version_node: + print("Failed to find '__version__' definition in synapse/__init__.py") + sys.exit(1) + + # Parse the current version. + current_version = version.parse(version_node.value.value.strip('"')) + assert isinstance(current_version, version.Version) + + # Figure out what sort of release we're doing and calcuate the new version. + rc = click.confirm("RC", default=True) + if current_version.pre: + # If the current version is an RC we don't need to bump any of the + # version numbers (other than the RC number). + base_version = "{}.{}.{}".format( + current_version.major, + current_version.minor, + current_version.micro, + ) + + if rc: + new_version = "{}.{}.{}rc{}".format( + current_version.major, + current_version.minor, + current_version.micro, + current_version.pre[1] + 1, + ) + else: + new_version = base_version + else: + # If this is a new release cycle then we need to know if its a major + # version bump or a hotfix. 
+ release_type = click.prompt( + "Release type", + type=click.Choice(("major", "hotfix")), + show_choices=True, + default="major", + ) + + if release_type == "major": + base_version = new_version = "{}.{}.{}".format( + current_version.major, + current_version.minor + 1, + 0, + ) + if rc: + new_version = "{}.{}.{}rc1".format( + current_version.major, + current_version.minor + 1, + 0, + ) + + else: + base_version = new_version = "{}.{}.{}".format( + current_version.major, + current_version.minor, + current_version.micro + 1, + ) + if rc: + new_version = "{}.{}.{}rc1".format( + current_version.major, + current_version.minor, + current_version.micro + 1, + ) + + # Confirm the calculated version is OK. + if not click.confirm(f"Create new version: {new_version}?", default=True): + click.get_current_context().abort() + + # Switch to the release branch. + release_branch_name = f"release-v{base_version}" + release_branch = find_ref(repo, release_branch_name) + if release_branch: + if release_branch.is_remote(): + # If the release branch only exists on the remote we check it out + # locally. + repo.git.checkout(release_branch_name) + release_branch = repo.active_branch + else: + # If a branch doesn't exist we create one. We ask which branch it + # should be based off, defaulting to sensible values depending on the + # release type. + if current_version.is_prerelease: + default = release_branch_name + elif release_type == "major": + default = "develop" + else: + default = "master" + + branch_name = click.prompt( + "Which branch should the release be based on?", default=default + ) + + base_branch = find_ref(repo, branch_name) + if not base_branch: + print(f"Could not find base branch {branch_name}!") + click.get_current_context().abort() + + # Check out the base branch and ensure it's up to date + repo.head.reference = base_branch + repo.head.reset(index=True, working_tree=True) + if not base_branch.is_remote(): + update_branch(repo) + + # Create the new release branch + release_branch = repo.create_head(release_branch_name, commit=base_branch) + + # Switch to the release branch and ensure it's up to date. + repo.git.checkout(release_branch_name) + update_branch(repo) + + # Update the `__version__` variable and write it back to the file. + version_node.value = '"' + new_version + '"' + with open("synapse/__init__.py", "w") as f: + f.write(red.dumps()) + + # Generate changelogs + subprocess.run("python3 -m towncrier", shell=True) + + # Generate debian changelogs if it's not an RC. + if not rc: + subprocess.run( + f'dch -M -v {new_version} "New synapse release {new_version}."', shell=True + ) + subprocess.run('dch -M -r -D stable ""', shell=True) + + # Show the user the changes and ask if they want to edit the change log. + repo.git.add("-u") + subprocess.run("git diff --cached", shell=True) + + if click.confirm("Edit changelog?", default=False): + click.edit(filename="CHANGES.md") + + # Commit the changes. + repo.git.add("-u") + repo.git.commit(f"-m {new_version}") + + # We give the option to bail here in case the user wants to make sure things + # are OK before pushing. + if not click.confirm("Push branch to github?", default=True): + print("") + print("Run when ready to push:") + print("") + print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}") + print("") + sys.exit(0) + + # Otherwise, push and open the changelog in the browser.
+ repo.git.push("-u", repo.remote().name, repo.active_branch.name) + + click.launch( + f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md" + ) + + +def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: + """Find the branch/ref, looking first locally then in the remote.""" + if ref_name in repo.refs: + return repo.refs[ref_name] + elif ref_name in repo.remote().refs: + return repo.remote().refs[ref_name] + else: + return None + + +def update_branch(repo: git.Repo): + """Ensure branch is up to date if it has a remote""" + if repo.active_branch.tracking_branch(): + repo.git.merge(repo.active_branch.tracking_branch().name) + + +if __name__ == "__main__": + run() diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json index 44553fb79aa8..4a43d3f2b058 100755 --- a/scripts-dev/sign_json +++ b/scripts-dev/sign_json @@ -1,6 +1,5 @@ #!/usr/bin/env python # -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts-dev/update_database b/scripts-dev/update_database index 56365e2b58bf..87f709b6ed43 100755 --- a/scripts-dev/update_database +++ b/scripts-dev/update_database @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/export_signing_key b/scripts/export_signing_key index 8aec9d802bf4..0ed167ea855c 100755 --- a/scripts/export_signing_key +++ b/scripts/export_signing_key @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/generate_log_config b/scripts/generate_log_config index a13a5634a30e..e72a0dafb769 100755 --- a/scripts/generate_log_config +++ b/scripts/generate_log_config @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py index 16d7c4f38284..07df25a8099d 100755 --- a/scripts/generate_signing_key.py +++ b/scripts/generate_signing_key.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py index 8477955a906d..875aa4781f49 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/scripts/move_remote_media_to_new_store.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 8b9d30877de9..00104b9d62cb 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 58edf6af6c8c..5fb5bb35f77d 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -635,8 +634,11 @@ class Porter(object): "device_inbox_sequence", ("device_inbox", "device_federation_outbox") ) await self._setup_sequence( - "account_data_sequence", ("room_account_data", "room_tags_revisions", "account_data")) - await self._setup_sequence("receipts_sequence", ("receipts_linearized", )) + "account_data_sequence", + ("room_account_data", "room_tags_revisions", "account_data"), + ) + await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) + await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) await self._setup_auth_chain_sequence() # Step 3. Get tables. @@ -911,10 +913,11 @@ class Porter(object): (curr_forward_id + 1,), ) - txn.execute( - "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", - (curr_backward_id + 1,), - ) + if curr_backward_id: + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (curr_backward_id + 1,), + ) await self.postgres_store.db_pool.runInteraction( "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos, @@ -952,10 +955,11 @@ class Porter(object): (curr_chain_id,), ) - await self.postgres_store.db_pool.runInteraction( - "_setup_event_auth_chain_id", r, - ) - + if curr_chain_id is not None: + await self.postgres_store.db_pool.runInteraction( + "_setup_event_auth_chain_id", + r, + ) ############################################## diff --git a/setup.cfg b/setup.cfg index 7329eed213d7..e5ceb7ed1933 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,16 +18,14 @@ ignore = # E203: whitespace before ':' (which is contrary to pep8?) 
# E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) -# B00*: Subsection of the bugbear suite (TODO: add in remaining fixes) -ignore=W503,W504,E203,E731,E501,B006,B007,B008 +ignore=W503,W504,E203,E731,E501 [isort] line_length = 88 -sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER +sections=FUTURE,STDLIB,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER default_section=THIRDPARTY known_first_party = synapse known_tests=tests -known_compat = mock known_twisted=twisted,OpenSSL multi_line_output=3 include_trailing_comma=true diff --git a/setup.py b/setup.py index 29e9971dc1d8..e2e488761dbf 100755 --- a/setup.py +++ b/setup.py @@ -103,6 +103,13 @@ def exec_file(path_segments): "flake8", ] +CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [ + # The following are used by the release script + "click==7.1.2", + "redbaron==0.9.2", + "GitPython==3.1.14", +] + CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"] # Dependencies which are exclusively required by unit test code. This is @@ -110,7 +117,7 @@ def exec_file(path_segments): # Tests assume that all optional dependencies are installed. # # parameterized_class decorator was introduced in parameterized 0.7.0 -CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"] +CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"] setup( name="matrix-synapse", @@ -123,13 +130,12 @@ def exec_file(path_segments): zip_safe=False, long_description=long_description, long_description_content_type="text/x-rst", - python_requires="~=3.5", + python_requires="~=3.6", classifiers=[ "Development Status :: 5 - Production/Stable", "Topic :: Communications :: Chat", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", diff --git a/stubs/frozendict.pyi b/stubs/frozendict.pyi index 0368ba47038b..24c6f3af77b1 100644 --- a/stubs/frozendict.pyi +++ b/stubs/frozendict.pyi @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index 080ca40287d2..c1a06ae022f6 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/__init__.py b/synapse/__init__.py index 1d2883acf67d..7498a6016f0b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-9 New Vector Ltd # @@ -22,8 +21,8 @@ import sys # Check that we're not running on an unsupported Python version. 
-if sys.version_info < (3, 5): - print("Synapse requires Python 3.5 or above.") +if sys.version_info < (3, 6): + print("Synapse requires Python 3.6 or above.") sys.exit(1) # Twisted and canonicaljson will fail to import when this file is executed to @@ -48,7 +47,7 @@ except ImportError: pass -__version__ = "1.31.0" +__version__ = "1.34.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index dfe26dea6dfc..dae986c78869 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector # diff --git a/synapse/api/__init__.py b/synapse/api/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/api/__init__.py +++ b/synapse/api/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 7d9930ae7b7c..efc926d0941d 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,14 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import pymacaroons from netaddr import IPAddress from twisted.web.server import Request -import synapse.types from synapse import event_auth from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import EventTypes, HistoryVisibility, Membership @@ -37,11 +35,14 @@ from synapse.http.site import SynapseRequest from synapse.logging import opentracing as opentracing from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import StateMap, UserID +from synapse.types import Requester, StateMap, UserID, create_requester from synapse.util.caches.lrucache import LruCache from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry from synapse.util.metrics import Measure +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -66,9 +67,10 @@ class Auth: """ FIXME: This class contains a mix of functions for authenticating users of our client-server API and authenticating events added to room graphs. + The latter should be moved to synapse.handlers.event_auth.EventAuthHandler. 
""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.hs = hs self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -80,19 +82,21 @@ def __init__(self, hs): self._auth_blocking = AuthBlocking(self.hs) - self._account_validity = hs.config.account_validity + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) self._track_appservice_user_ips = hs.config.track_appservice_user_ips self._macaroon_secret_key = hs.config.macaroon_secret_key async def check_from_context( self, room_version: str, event, context, do_sig_check=True - ): + ) -> None: prev_state_ids = await context.get_prev_state_ids() auth_events_ids = self.compute_auth_events( event, prev_state_ids, for_verification=True ) - auth_events = await self.store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events.values()} + auth_events_by_id = await self.store.get_events(auth_events_ids) + auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()} room_version_obj = KNOWN_ROOM_VERSIONS[room_version] event_auth.check( @@ -149,17 +153,11 @@ async def check_user_in_room( raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) - async def check_host_in_room(self, room_id, host): + async def check_host_in_room(self, room_id: str, host: str) -> bool: with Measure(self.clock, "check_host_in_room"): - latest_event_ids = await self.store.is_host_joined(room_id, host) - return latest_event_ids - - def can_federate(self, event, auth_events): - creation_event = auth_events.get((EventTypes.Create, "")) + return await self.store.is_host_joined(room_id, host) - return creation_event.content.get("m.federate", True) is True - - def get_public_keys(self, invite_event): + def get_public_keys(self, invite_event: EventBase) -> List[Dict[str, Any]]: return event_auth.get_public_keys(invite_event) async def get_user_by_req( @@ -168,7 +166,7 @@ async def get_user_by_req( allow_guest: bool = False, rights: str = "access", allow_expired: bool = False, - ) -> synapse.types.Requester: + ) -> Requester: """Get a registered user's ID. Args: @@ -194,7 +192,7 @@ async def get_user_by_req( access_token = self.get_access_token_from_request(request) user_id, app_service = await self._get_appservice_user_id(request) - if user_id: + if user_id and app_service: if ip_addr and self._track_appservice_user_ips: await self.store.insert_client_ip( user_id=user_id, @@ -204,9 +202,7 @@ async def get_user_by_req( device_id="dummy-device", # stubbed ) - requester = synapse.types.create_requester( - user_id, app_service=app_service - ) + requester = create_requester(user_id, app_service=app_service) request.requester = user_id opentracing.set_tag("authenticated_entity", user_id) @@ -223,7 +219,7 @@ async def get_user_by_req( shadow_banned = user_info.shadow_banned # Deny the request if the user account has expired. 
- if self._account_validity.enabled and not allow_expired: + if self._account_validity_enabled and not allow_expired: if await self.store.is_account_expired( user_info.user_id, self.clock.time_msec() ): @@ -249,7 +245,7 @@ async def get_user_by_req( errcode=Codes.GUEST_ACCESS_FORBIDDEN, ) - requester = synapse.types.create_requester( + requester = create_requester( user_info.user_id, token_id, is_guest, @@ -269,7 +265,9 @@ async def get_user_by_req( except KeyError: raise MissingClientTokenError() - async def _get_appservice_user_id(self, request): + async def _get_appservice_user_id( + self, request: Request + ) -> Tuple[Optional[str], Optional[ApplicationService]]: app_service = self.store.get_app_service_by_token( self.get_access_token_from_request(request) ) @@ -281,6 +279,9 @@ async def _get_appservice_user_id(self, request): if ip_address not in app_service.ip_range_whitelist: return None, None + # This will always be set by the time Twisted calls us. + assert request.args is not None + if b"user_id" not in request.args: return app_service.sender, app_service @@ -385,7 +386,9 @@ async def get_user_by_access_token( logger.warning("Invalid macaroon in auth: %s %s", type(e), e) raise InvalidClientTokenError("Invalid macaroon passed.") - def _parse_and_validate_macaroon(self, token, rights="access"): + def _parse_and_validate_macaroon( + self, token: str, rights: str = "access" + ) -> Tuple[str, bool]: """Takes a macaroon and tries to parse and validate it. This is cached if and only if rights == access and there isn't an expiry. @@ -430,15 +433,16 @@ def _parse_and_validate_macaroon(self, token, rights="access"): return user_id, guest - def validate_macaroon(self, macaroon, type_string, user_id): + def validate_macaroon( + self, macaroon: pymacaroons.Macaroon, type_string: str, user_id: str + ) -> None: """ validate that a Macaroon is understood by and was signed by this server. Args: - macaroon(pymacaroons.Macaroon): The macaroon to validate - type_string(str): The kind of token required (e.g. "access", - "delete_pusher") - user_id (str): The user_id required + macaroon: The macaroon to validate + type_string: The kind of token required (e.g. "access", "delete_pusher") + user_id: The user_id required """ v = pymacaroons.Verifier() @@ -463,9 +467,7 @@ def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService: if not service: logger.warning("Unrecognised appservice access token.") raise InvalidClientTokenError() - request.requester = synapse.types.create_requester( - service.sender, app_service=service - ) + request.requester = create_requester(service.sender, app_service=service) return service async def is_server_admin(self, user: UserID) -> bool: @@ -517,7 +519,7 @@ def compute_auth_events( return auth_ids - async def check_can_change_room_list(self, room_id: str, user: UserID): + async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool: """Determine whether the user is allowed to edit the room's entry in the published room list. @@ -552,11 +554,11 @@ async def check_can_change_room_list(self, room_id: str, user: UserID): return user_level >= send_level @staticmethod - def has_access_token(request: Request): + def has_access_token(request: Request) -> bool: """Checks if the request has an access_token. Returns: - bool: False if no access_token was given, True otherwise. + False if no access_token was given, True otherwise. """ # This will always be set by the time Twisted calls us. 
assert request.args is not None @@ -566,13 +568,13 @@ def has_access_token(request: Request): return bool(query_params) or bool(auth_headers) @staticmethod - def get_access_token_from_request(request: Request): + def get_access_token_from_request(request: Request) -> str: """Extracts the access_token from the request. Args: request: The http request. Returns: - unicode: The access_token + The access_token Raises: MissingClientTokenError: If there isn't a single access_token in the request @@ -647,5 +649,5 @@ async def check_user_in_room_or_world_readable( % (user_id, room_id), ) - def check_auth_blocking(self, *args, **kwargs): - return self._auth_blocking.check_auth_blocking(*args, **kwargs) + async def check_auth_blocking(self, *args, **kwargs) -> None: + await self._auth_blocking.check_auth_blocking(*args, **kwargs) diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index d8088f524ac7..e6bced93d5fa 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,18 +13,21 @@ # limitations under the License. import logging -from typing import Optional +from typing import TYPE_CHECKING, Optional from synapse.api.constants import LimitBlockingTypes, UserTypes from synapse.api.errors import Codes, ResourceLimitError from synapse.config.server import is_threepid_reserved from synapse.types import Requester +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class AuthBlocking: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self._server_notices_mxid = hs.config.server_notices_mxid @@ -44,7 +46,7 @@ async def check_auth_blocking( threepid: Optional[dict] = None, user_type: Optional[str] = None, requester: Optional[Requester] = None, - ): + ) -> None: """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 8f37d2cf3bdc..3940da5c8880 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd @@ -18,6 +17,9 @@ """Contains constants from the specification.""" +# the max size of a (canonical-json-encoded) event +MAX_PDU_SIZE = 65536 + # the "depth" field on events is limited to 2**63 - 1 MAX_DEPTH = 2 ** 63 - 1 @@ -59,6 +61,8 @@ class JoinRules: KNOCK = "knock" INVITE = "invite" PRIVATE = "private" + # As defined for MSC3083. + MSC3083_RESTRICTED = "restricted" class LoginType: @@ -71,6 +75,11 @@ class LoginType: DUMMY = "m.login.dummy" +# This is used in the `type` parameter for /register when called by +# an appservice to register a new user. 
+APP_SERVICE_REGISTRATION_TYPE = "m.login.application_service" + + class EventTypes: Member = "m.room.member" Create = "m.room.create" @@ -101,13 +110,18 @@ class EventTypes: Dummy = "org.matrix.dummy_event" + SpaceChild = "m.space.child" + SpaceParent = "m.space.parent" MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child" MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent" +class ToDeviceEventTypes: + RoomKeyRequest = "m.room_key_request" + + class EduTypes: Presence = "m.presence" - RoomKeyRequest = "m.room_key_request" class RejectedReason: @@ -165,6 +179,7 @@ class EventContentFields: SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after" # cf https://github.com/matrix-org/matrix-doc/pull/1772 + ROOM_TYPE = "type" MSC1772_ROOM_TYPE = "org.matrix.msc1772.type" diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 2a789ea3e823..0231c79079e1 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 5caf336fd0cb..ce49a0ad58d1 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd diff --git a/synapse/api/presence.py b/synapse/api/presence.py index b9a8e294609e..a3bf0348d115 100644 --- a/synapse/api/presence.py +++ b/synapse/api/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index c3f07bc1a3e8..b9a10283f44a 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -17,6 +17,7 @@ from typing import Hashable, Optional, Tuple from synapse.api.errors import LimitExceededError +from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util import Clock @@ -31,10 +32,13 @@ class Ratelimiter: burst_count: How many actions that can be performed before being limited. """ - def __init__(self, clock: Clock, rate_hz: float, burst_count: int): + def __init__( + self, store: DataStore, clock: Clock, rate_hz: float, burst_count: int + ): self.clock = clock self.rate_hz = rate_hz self.burst_count = burst_count + self.store = store # A ordered dictionary keeping track of actions, when they were last # performed and how often. Each entry is a mapping from a key of arbitrary type @@ -46,60 +50,36 @@ def __init__(self, clock: Clock, rate_hz: float, burst_count: int): OrderedDict() ) # type: OrderedDict[Hashable, Tuple[float, int, float]] - def can_requester_do_action( - self, - requester: Requester, - rate_hz: Optional[float] = None, - burst_count: Optional[int] = None, - update: bool = True, - _time_now_s: Optional[int] = None, - ) -> Tuple[bool, float]: - """Can the requester perform the action? - - Args: - requester: The requester to key off when rate limiting. The user property - will be used. - rate_hz: The long term number of actions that can be performed in a second. - Overrides the value set during instantiation if set. - burst_count: How many actions that can be performed before being limited. - Overrides the value set during instantiation if set. - update: Whether to count this check as performing the action - _time_now_s: The current time. 
Optional, defaults to the current time according - to self.clock. Only used by tests. - - Returns: - A tuple containing: - * A bool indicating if they can perform the action now - * The reactor timestamp for when the action can be performed next. - -1 if rate_hz is less than or equal to zero - """ - # Disable rate limiting of users belonging to any AS that is configured - # not to be rate limited in its registration file (rate_limited: true|false). - if requester.app_service and not requester.app_service.is_rate_limited(): - return True, -1.0 - - return self.can_do_action( - requester.user.to_string(), rate_hz, burst_count, update, _time_now_s - ) - - def can_do_action( + async def can_do_action( self, - key: Hashable, + requester: Optional[Requester], + key: Optional[Hashable] = None, rate_hz: Optional[float] = None, burst_count: Optional[int] = None, update: bool = True, + n_actions: int = 1, _time_now_s: Optional[int] = None, ) -> Tuple[bool, float]: """Can the entity (e.g. user or IP address) perform the action? + Checks if the user has ratelimiting disabled in the database by looking + for null/zero values in the `ratelimit_override` table. (Non-zero + values aren't honoured, as they're specific to the event sending + ratelimiter, rather than all ratelimiters) + Args: - key: The key we should use when rate limiting. Can be a user ID - (when sending events), an IP address, etc. + requester: The requester that is doing the action, if any. Used to check + if the user has ratelimits disabled in the database. + key: An arbitrary key used to classify an action. Defaults to the + requester's user ID. rate_hz: The long term number of actions that can be performed in a second. Overrides the value set during instantiation if set. burst_count: How many actions that can be performed before being limited. Overrides the value set during instantiation if set. update: Whether to count this check as performing the action + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests. @@ -109,6 +89,30 @@ def can_do_action( * The reactor timestamp for when the action can be performed next. -1 if rate_hz is less than or equal to zero """ + if key is None: + if not requester: + raise ValueError("Must supply at least one of `requester` or `key`") + + key = requester.user.to_string() + + if requester: + # Disable rate limiting of users belonging to any AS that is configured + # not to be rate limited in its registration file (rate_limited: true|false). + if requester.app_service and not requester.app_service.is_rate_limited(): + return True, -1.0 + + # Check if ratelimiting has been disabled for the user. + # + # Note that we don't use the returned rate/burst count, as the table + # is specifically for the event sending ratelimiter. Instead, we + # only use it to (somewhat cheekily) infer whether the user should + # be subject to any rate limiting or not. 
+ override = await self.store.get_ratelimit_for_user( + requester.authenticated_entity + ) + if override and not override.messages_per_second: + return True, -1.0 + # Override default values if set time_now_s = _time_now_s if _time_now_s is not None else self.clock.time() rate_hz = rate_hz if rate_hz is not None else self.rate_hz @@ -124,17 +128,20 @@ def can_do_action( time_delta = time_now_s - time_start performed_count = action_count - time_delta * rate_hz if performed_count < 0: - # Allow, reset back to count 1 - allowed = True + performed_count = 0 time_start = time_now_s - action_count = 1.0 - elif performed_count > burst_count - 1.0: + + # This check would be easier to read as performed_count + n_actions > burst_count, + # but performed_count might be a very precise float (with many digits after + # the decimal point) in which case Python might round it up when adding it to + # n_actions. Writing it this way ensures that cannot happen. + if performed_count > burst_count - n_actions: # Deny, we have exceeded our burst count allowed = False else: # We haven't reached our limit yet allowed = True - action_count += 1.0 + action_count = performed_count + n_actions if update: self.actions[key] = (action_count, time_start, rate_hz) @@ -175,23 +182,36 @@ def _prune_message_counts(self, time_now_s: int): else: del self.actions[key] - def ratelimit( + async def ratelimit( self, - key: Hashable, + requester: Optional[Requester], + key: Optional[Hashable] = None, rate_hz: Optional[float] = None, burst_count: Optional[int] = None, update: bool = True, + n_actions: int = 1, _time_now_s: Optional[int] = None, ): """Checks if an action can be performed. If not, raises a LimitExceededError + Checks if the user has ratelimiting disabled in the database by looking + for null/zero values in the `ratelimit_override` table. (Non-zero + values aren't honoured, as they're specific to the event sending + ratelimiter, rather than all ratelimiters) + Args: - key: An arbitrary key used to classify an action + requester: The requester that is doing the action, if any. Used to check + if the user has ratelimits disabled. + key: An arbitrary key used to classify an action. Defaults to the + requester's user ID. rate_hz: The long term number of actions that can be performed in a second. Overrides the value set during instantiation if set. burst_count: How many actions that can be performed before being limited. Overrides the value set during instantiation if set. update: Whether to count this check as performing the action + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests.
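Taken together, `Ratelimiter` now takes the data store at construction time and its checks are awaited. The sketch below shows the resulting call pattern; the handler class, method, and rate values are hypothetical, but the constructor and `ratelimit` signatures are the ones introduced in this diff, and `ratelimit` raises `LimitExceededError` when the budget is exhausted.

```python
from synapse.api.ratelimiting import Ratelimiter


class ExampleInviteHandler:
    def __init__(self, hs):
        # The store is now a required constructor argument, so the
        # ratelimiter can consult per-user overrides in the
        # `ratelimit_override` table.
        self._limiter = Ratelimiter(
            store=hs.get_datastore(),
            clock=hs.get_clock(),
            rate_hz=0.17,    # long-term average actions per second
            burst_count=10,  # actions allowed in a burst before limiting
        )

    async def send_invites(self, requester, invites) -> None:
        # Ask for the whole batch at once: if the burst budget cannot
        # cover all `n_actions`, the action count is not incremented at
        # all, and a LimitExceededError is raised.
        await self._limiter.ratelimit(requester, n_actions=len(invites))
        # ... proceed to send the invites ...
```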
@@ -201,11 +221,13 @@ def ratelimit( """ time_now_s = _time_now_s if _time_now_s is not None else self.clock.time() - allowed, time_allowed = self.can_do_action( + allowed, time_allowed = await self.can_do_action( + requester, key, rate_hz=rate_hz, burst_count=burst_count, update=update, + n_actions=n_actions, _time_now_s=time_now_s, ) diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index de2cc15d33df..c9f9596ada92 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -57,7 +56,7 @@ class RoomVersion: state_res = attr.ib(type=int) # one of the StateResolutionVersions enforce_key_validity = attr.ib(type=bool) - # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules + # Before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules special_case_aliases_auth = attr.ib(type=bool) # Strictly enforce canonicaljson, do not allow: # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] @@ -69,6 +68,8 @@ class RoomVersion: limit_notifications_power_levels = attr.ib(type=bool) # MSC2174/MSC2176: Apply updated redaction rules algorithm. msc2176_redaction_rules = attr.ib(type=bool) + # MSC3083: Support the 'restricted' join_rule. + msc3083_join_rules = attr.ib(type=bool) class RoomVersions: @@ -82,6 +83,7 @@ class RoomVersions: strict_canonicaljson=False, limit_notifications_power_levels=False, msc2176_redaction_rules=False, + msc3083_join_rules=False, ) V2 = RoomVersion( "2", @@ -93,6 +95,7 @@ class RoomVersions: strict_canonicaljson=False, limit_notifications_power_levels=False, msc2176_redaction_rules=False, + msc3083_join_rules=False, ) V3 = RoomVersion( "3", @@ -104,6 +107,7 @@ class RoomVersions: strict_canonicaljson=False, limit_notifications_power_levels=False, msc2176_redaction_rules=False, + msc3083_join_rules=False, ) V4 = RoomVersion( "4", @@ -115,6 +119,7 @@ class RoomVersions: strict_canonicaljson=False, limit_notifications_power_levels=False, msc2176_redaction_rules=False, + msc3083_join_rules=False, ) V5 = RoomVersion( "5", @@ -126,6 +131,7 @@ class RoomVersions: strict_canonicaljson=False, limit_notifications_power_levels=False, msc2176_redaction_rules=False, + msc3083_join_rules=False, ) V6 = RoomVersion( "6", @@ -137,6 +143,7 @@ class RoomVersions: strict_canonicaljson=True, limit_notifications_power_levels=True, msc2176_redaction_rules=False, + msc3083_join_rules=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -148,6 +155,19 @@ class RoomVersions: strict_canonicaljson=True, limit_notifications_power_levels=True, msc2176_redaction_rules=True, + msc3083_join_rules=False, + ) + MSC3083 = RoomVersion( + "org.matrix.msc3083", + RoomDisposition.UNSTABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2176_redaction_rules=False, + msc3083_join_rules=True, ) @@ -162,4 +182,5 @@ class RoomVersions: RoomVersions.V6, RoomVersions.MSC2176, ) + # Note that we do not include MSC3083 here unless it is enabled in the config. 
} # type: Dict[str, RoomVersion] diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 6379c86ddea0..4b1f213c75f8 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index d1a2cd5e192b..f9940491e8e4 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 3912c8994cf0..59918d789ec5 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # Copyright 2019-2021 The Matrix.org Foundation C.I.C # @@ -31,12 +30,14 @@ from twisted.protocols.tls import TLSMemoryBIOFactory import synapse +from synapse.api.constants import MAX_PDU_SIZE from synapse.app import check_bind_error from synapse.app.phone_stats_home import start_phone_stats_home -from synapse.config.server import ListenerConfig +from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.util.async_helpers import Linearizer from synapse.util.daemonize import daemonize_process from synapse.util.rlimit import change_resource_limit @@ -115,6 +116,7 @@ def start_reactor( def run(): logger.info("Running") + setup_jemalloc_stats() change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) @@ -289,7 +291,7 @@ def refresh_certificate(hs): logger.info("Context factories updated.") -async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]): +async def start(hs: "synapse.server.HomeServer"): """ Start a Synapse server or worker. @@ -301,7 +303,6 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon Args: hs: homeserver instance - listeners: Listener configuration ('listeners' in homeserver.yaml) """ # Set up the SIGHUP machinery. if hasattr(signal, "SIGHUP"): @@ -337,7 +338,7 @@ def run_sighup(*args, **kwargs): synapse.logging.opentracing.init_tracer(hs) # type: ignore[attr-defined] # noqa # It is now safe to start your Synapse. - hs.start_listening(listeners) + hs.start_listening() hs.get_datastore().db_pool.start_profiling() hs.get_pusherpool().start() @@ -531,3 +532,25 @@ def sdnotify(state): # this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET # unless systemd is expecting us to notify it. logger.warning("Unable to send notification to systemd: %s", e) + + +def max_request_body_size(config: HomeServerConfig) -> int: + """Get a suitable maximum size for incoming HTTP requests""" + + # Other than media uploads, the biggest request we expect to see is a fully-loaded + # /federation/v1/send request. + # + # The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are + # limited to 65536 bytes (possibly slightly more if the sender didn't use canonical + # json encoding); there is no specced limit to EDUs (see + # https://github.com/matrix-org/matrix-doc/issues/3121). 
+ # + # in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M) + # + max_request_size = 200 * MAX_PDU_SIZE + + # if we have a media repo enabled, we may need to allow larger uploads than that + if config.media.can_load_media_repo: + max_request_size = max(max_request_size, config.media.max_upload_size) + + return max_request_size diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 9f99651aa219..68ae19c977ce 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -71,12 +70,6 @@ class AdminCmdSlavedStore( class AdminCmdServer(HomeServer): DATASTORE_CLASS = AdminCmdSlavedStore - def _listen_http(self, listener_config): - pass - - def start_listening(self, listeners): - pass - async def export_data_command(hs, args): """Export data for a user. @@ -233,7 +226,7 @@ def start(config_options): async def run(): with LoggingContext("command"): - _base.start(ss, []) + _base.start(ss) await args.func(ss, args) _base.start_worker_reactor( diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index e9c098c4e7de..57af28f10a57 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 3df2aa5c2bb0..f730cdbd78f4 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket 
Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -14,12 +13,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import logging import sys -from typing import Dict, Iterable, Optional, Set - -from typing_extensions import ContextManager +from typing import Dict, Optional from twisted.internet import address from twisted.web.resource import IResource @@ -36,29 +32,18 @@ SERVER_KEY_V2_PREFIX, ) from synapse.app import _base -from synapse.app._base import register_start +from synapse.app._base import max_request_body_size, register_start from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.config.server import ListenerConfig -from synapse.federation import send_queue from synapse.federation.transport.server import TransportLayerServer -from synapse.handlers.presence import ( - BasePresenceHandler, - PresenceState, - get_interested_parties, -) from synapse.http.server import JsonResource, OptionsResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.http.presence import ( - ReplicationBumpPresenceActiveTime, - ReplicationPresenceSetState, -) from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -70,7 +55,6 @@ from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.pushers import SlavedPusherStore @@ -78,21 +62,8 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationDataHandler -from synapse.replication.tcp.commands import ClearUserSyncsCommand -from synapse.replication.tcp.streams import ( - AccountDataStream, - DeviceListsStream, - GroupServerStream, - PresenceStream, - PushersStream, - PushRulesStream, - ReceiptsStream, - TagAccountDataStream, - ToDeviceStream, -) from synapse.rest.admin import register_servlets_for_media_repo -from synapse.rest.client.v1 import events, login, room +from synapse.rest.client.v1 import events, login, presence, room from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet from synapse.rest.client.v1.profile import ( ProfileAvatarURLRestServlet, @@ -129,7 +100,7 @@ from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyApiV2Resource from 
synapse.rest.synapse.client import build_synapse_client_resource_tree -from synapse.server import HomeServer, cache_in_self +from synapse.server import HomeServer from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore @@ -138,40 +109,18 @@ from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) -from synapse.storage.databases.main.presence import UserPresenceState +from synapse.storage.databases.main.presence import PresenceStore from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore -from synapse.types import ReadReceipt -from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.generic_worker") -class PresenceStatusStubServlet(RestServlet): - """If presence is disabled this servlet can be used to stub out setting - presence status. - """ - - PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status") - - def __init__(self, hs): - super().__init__() - self.auth = hs.get_auth() - - async def on_GET(self, request, user_id): - await self.auth.get_user_by_req(request) - return 200, {"presence": "offline"} - - async def on_PUT(self, request, user_id): - await self.auth.get_user_by_req(request) - return 200, {} - - class KeyUploadServlet(RestServlet): """An implementation of the `KeyUploadServlet` that responds to read only requests, but otherwise proxies through to the master instance. @@ -265,213 +214,6 @@ async def on_POST(self, request: Request, device_id: Optional[str]): return 200, {"one_time_key_counts": result} -class _NullContextManager(ContextManager[None]): - """A context manager which does nothing.""" - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -UPDATE_SYNCING_USERS_MS = 10 * 1000 - - -class GenericWorkerPresence(BasePresenceHandler): - def __init__(self, hs): - super().__init__(hs) - self.hs = hs - self.is_mine_id = hs.is_mine_id - - self._presence_enabled = hs.config.use_presence - - # The number of ongoing syncs on this process, by user id. - # Empty if _presence_enabled is false. - self._user_to_num_current_syncs = {} # type: Dict[str, int] - - self.notifier = hs.get_notifier() - self.instance_id = hs.get_instance_id() - - # user_id -> last_sync_ms.
Lists the users that have stopped syncing - # but we haven't notified the master of that yet - self.users_going_offline = {} - - self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) - self._set_state_client = ReplicationPresenceSetState.make_client(hs) - - self._send_stop_syncing_loop = self.clock.looping_call( - self.send_stop_syncing, UPDATE_SYNCING_USERS_MS - ) - - self._busy_presence_enabled = hs.config.experimental.msc3026_enabled - - hs.get_reactor().addSystemEventTrigger( - "before", - "shutdown", - run_as_background_process, - "generic_presence.on_shutdown", - self._on_shutdown, - ) - - def _on_shutdown(self): - if self._presence_enabled: - self.hs.get_tcp_replication().send_command( - ClearUserSyncsCommand(self.instance_id) - ) - - def send_user_sync(self, user_id, is_syncing, last_sync_ms): - if self._presence_enabled: - self.hs.get_tcp_replication().send_user_sync( - self.instance_id, user_id, is_syncing, last_sync_ms - ) - - def mark_as_coming_online(self, user_id): - """A user has started syncing. Send a UserSync to the master, unless they - had recently stopped syncing. - - Args: - user_id (str) - """ - going_offline = self.users_going_offline.pop(user_id, None) - if not going_offline: - # Safe to skip because we haven't yet told the master they were offline - self.send_user_sync(user_id, True, self.clock.time_msec()) - - def mark_as_going_offline(self, user_id): - """A user has stopped syncing. We wait before notifying the master as - its likely they'll come back soon. This allows us to avoid sending - a stopped syncing immediately followed by a started syncing notification - to the master - - Args: - user_id (str) - """ - self.users_going_offline[user_id] = self.clock.time_msec() - - def send_stop_syncing(self): - """Check if there are any users who have stopped syncing a while ago - and haven't come back yet. If there are poke the master about them. - """ - now = self.clock.time_msec() - for user_id, last_sync_ms in list(self.users_going_offline.items()): - if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: - self.users_going_offline.pop(user_id, None) - self.send_user_sync(user_id, False, last_sync_ms) - - async def user_syncing( - self, user_id: str, affect_presence: bool - ) -> ContextManager[None]: - """Record that a user is syncing. - - Called by the sync and events servlets to record that a user has connected to - this worker and is waiting for some events. - """ - if not affect_presence or not self._presence_enabled: - return _NullContextManager() - - curr_sync = self._user_to_num_current_syncs.get(user_id, 0) - self._user_to_num_current_syncs[user_id] = curr_sync + 1 - - # If we went from no in flight sync to some, notify replication - if self._user_to_num_current_syncs[user_id] == 1: - self.mark_as_coming_online(user_id) - - def _end(): - # We check that the user_id is in user_to_num_current_syncs because - # user_to_num_current_syncs may have been cleared if we are - # shutting down. 
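For readers following the removal: the handler being deleted here implemented a small debounce, so a client that dropped its sync connection and immediately reconnected did not flap the user between offline and online on the master. A condensed, self-contained sketch of that pattern (illustrative only; SyncDebouncer and the send_user_sync callback are stand-in names, not Synapse APIs):

    import time

    UPDATE_SYNCING_USERS_MS = 10 * 1000  # flush "gone offline" after ~10s

    class SyncDebouncer:
        def __init__(self, send_user_sync):
            # send_user_sync(user_id, is_syncing) notifies the master process
            self._send_user_sync = send_user_sync
            self._num_syncs = {}      # user_id -> number of in-flight syncs
            self._going_offline = {}  # user_id -> ms timestamp of last sync

        @staticmethod
        def _now_ms():
            return int(time.monotonic() * 1000)

        def sync_started(self, user_id):
            self._num_syncs[user_id] = self._num_syncs.get(user_id, 0) + 1
            if self._num_syncs[user_id] == 1:
                # Only notify if the master isn't still assuming the user
                # is online (i.e. there was no pending "going offline").
                if self._going_offline.pop(user_id, None) is None:
                    self._send_user_sync(user_id, True)

        def sync_ended(self, user_id):
            self._num_syncs[user_id] -= 1
            if self._num_syncs[user_id] == 0:
                # Don't notify yet: the user often reconnects within seconds.
                self._going_offline[user_id] = self._now_ms()

        def flush(self):
            # Run periodically (the removed code used a looping call).
            now = self._now_ms()
            for user_id, last in list(self._going_offline.items()):
                if now - last > UPDATE_SYNCING_USERS_MS:
                    del self._going_offline[user_id]
                    self._send_user_sync(user_id, False)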
- if user_id in self._user_to_num_current_syncs: - self._user_to_num_current_syncs[user_id] -= 1 - - # If we went from one in flight sync to non, notify replication - if self._user_to_num_current_syncs[user_id] == 0: - self.mark_as_going_offline(user_id) - - @contextlib.contextmanager - def _user_syncing(): - try: - yield - finally: - _end() - - return _user_syncing() - - async def notify_from_replication(self, states, stream_id): - parties = await get_interested_parties(self.store, states) - room_ids_to_states, users_to_states = parties - - self.notifier.on_new_event( - "presence_key", - stream_id, - rooms=room_ids_to_states.keys(), - users=users_to_states.keys(), - ) - - async def process_replication_rows(self, token, rows): - states = [ - UserPresenceState( - row.user_id, - row.state, - row.last_active_ts, - row.last_federation_update_ts, - row.last_user_sync_ts, - row.status_msg, - row.currently_active, - ) - for row in rows - ] - - for state in states: - self.user_to_current_state[state.user_id] = state - - stream_id = token - await self.notify_from_replication(states, stream_id) - - def get_currently_syncing_users_for_replication(self) -> Iterable[str]: - return [ - user_id - for user_id, count in self._user_to_num_current_syncs.items() - if count > 0 - ] - - async def set_state(self, target_user, state, ignore_status_msg=False): - """Set the presence state of the user.""" - presence = state["presence"] - - valid_presence = ( - PresenceState.ONLINE, - PresenceState.UNAVAILABLE, - PresenceState.OFFLINE, - PresenceState.BUSY, - ) - - if presence not in valid_presence or ( - presence == PresenceState.BUSY and not self._busy_presence_enabled - ): - raise SynapseError(400, "Invalid presence state") - - user_id = target_user.to_string() - - # If presence is disabled, no-op - if not self.hs.config.use_presence: - return - - # Proxy request to master - await self._set_state_client( - user_id=user_id, state=state, ignore_status_msg=ignore_status_msg - ) - - async def bump_presence_active_time(self, user): - """We've seen the user do something that indicates they're interacting - with the app. - """ - # If presence is disabled, no-op - if not self.hs.config.use_presence: - return - - # Proxy request to master - user_id = user.to_string() - await self._bump_active_client(user_id=user_id) - - class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. 
@@ -479,6 +221,7 @@ class GenericWorkerSlavedStore( StatsStore, UIAuthWorkerStore, EndToEndRoomKeyStore, + PresenceStore, SlavedDeviceInboxStore, SlavedDeviceStore, SlavedReceiptsStore, @@ -497,7 +240,6 @@ class GenericWorkerSlavedStore( SlavedTransactionStore, SlavedProfileStore, SlavedClientIpStore, - SlavedPresenceStore, SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, @@ -565,10 +307,7 @@ def _listen_http(self, listener_config: ListenerConfig): user_directory.register_servlets(self, resource) - # If presence is disabled, use the stub servlet that does - # not allow sending presence - if not self.config.use_presence: - PresenceStatusStubServlet(self).register(resource) + presence.register_servlets(self, resource) groups.register_servlets(self, resource) @@ -628,14 +367,16 @@ def _listen_http(self, listener_config: ListenerConfig): listener_config, root_resource, self.version_string, + max_request_body_size=max_request_body_size(self.config), + reactor=self.get_reactor(), ), reactor=self.get_reactor(), ) logger.info("Synapse worker now listening on port %d", port) - def start_listening(self, listeners: Iterable[ListenerConfig]): - for listener in listeners: + def start_listening(self): + for listener in self.config.worker_listeners: if listener.type == "http": self._listen_http(listener) elif listener.type == "manhole": @@ -643,7 +384,7 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): listener.bind_addresses, listener.port, manhole_globals={"hs": self} ) elif listener.type == "metrics": - if not self.get_config().enable_metrics: + if not self.config.enable_metrics: logger.warning( ( "Metrics listener configured, but " @@ -657,234 +398,6 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): self.get_tcp_replication().start_replication(self) - @cache_in_self - def get_replication_data_handler(self): - return GenericWorkerReplicationHandler(self) - - @cache_in_self - def get_presence_handler(self): - return GenericWorkerPresence(self) - - -class GenericWorkerReplicationHandler(ReplicationDataHandler): - def __init__(self, hs): - super().__init__(hs) - - self.store = hs.get_datastore() - self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence - self.notifier = hs.get_notifier() - - self.notify_pushers = hs.config.start_pushers - self.pusher_pool = hs.get_pusherpool() - - self.send_handler = None # type: Optional[FederationSenderHandler] - if hs.config.send_federation: - self.send_handler = FederationSenderHandler(hs) - - async def on_rdata(self, stream_name, instance_name, token, rows): - await super().on_rdata(stream_name, instance_name, token, rows) - await self._process_and_notify(stream_name, instance_name, token, rows) - - async def _process_and_notify(self, stream_name, instance_name, token, rows): - try: - if self.send_handler: - await self.send_handler.process_replication_rows( - stream_name, token, rows - ) - - if stream_name == PushRulesStream.NAME: - self.notifier.on_new_event( - "push_rules_key", token, users=[row.user_id for row in rows] - ) - elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): - self.notifier.on_new_event( - "account_data_key", token, users=[row.user_id for row in rows] - ) - elif stream_name == ReceiptsStream.NAME: - self.notifier.on_new_event( - "receipt_key", token, rooms=[row.room_id for row in rows] - ) - await self.pusher_pool.on_new_receipts( - token, token, {row.room_id for row in rows} - ) - elif stream_name == ToDeviceStream.NAME: - entities = 
[row.entity for row in rows if row.entity.startswith("@")] - if entities: - self.notifier.on_new_event("to_device_key", token, users=entities) - elif stream_name == DeviceListsStream.NAME: - all_room_ids = set() # type: Set[str] - for row in rows: - if row.entity.startswith("@"): - room_ids = await self.store.get_rooms_for_user(row.entity) - all_room_ids.update(room_ids) - self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) - elif stream_name == PresenceStream.NAME: - await self.presence_handler.process_replication_rows(token, rows) - elif stream_name == GroupServerStream.NAME: - self.notifier.on_new_event( - "groups_key", token, users=[row.user_id for row in rows] - ) - elif stream_name == PushersStream.NAME: - for row in rows: - if row.deleted: - self.stop_pusher(row.user_id, row.app_id, row.pushkey) - else: - await self.start_pusher(row.user_id, row.app_id, row.pushkey) - except Exception: - logger.exception("Error processing replication") - - async def on_position(self, stream_name: str, instance_name: str, token: int): - await super().on_position(stream_name, instance_name, token) - # Also call on_rdata to ensure that stream positions are properly reset. - await self.on_rdata(stream_name, instance_name, token, []) - - def stop_pusher(self, user_id, app_id, pushkey): - if not self.notify_pushers: - return - - key = "%s:%s" % (app_id, pushkey) - pushers_for_user = self.pusher_pool.pushers.get(user_id, {}) - pusher = pushers_for_user.pop(key, None) - if pusher is None: - return - logger.info("Stopping pusher %r / %r", user_id, key) - pusher.on_stop() - - async def start_pusher(self, user_id, app_id, pushkey): - if not self.notify_pushers: - return - - key = "%s:%s" % (app_id, pushkey) - logger.info("Starting pusher %r / %r", user_id, key) - return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) - - def on_remote_server_up(self, server: str): - """Called when get a new REMOTE_SERVER_UP command.""" - - # Let's wake up the transaction queue for the server in case we have - # pending stuff to send to it. - if self.send_handler: - self.send_handler.wake_destination(server) - - -class FederationSenderHandler: - """Processes the fedration replication stream - - This class is only instantiate on the worker responsible for sending outbound - federation transactions. It receives rows from the replication stream and forwards - the appropriate entries to the FederationSender class. - """ - - def __init__(self, hs: GenericWorkerServer): - self.store = hs.get_datastore() - self._is_mine_id = hs.is_mine_id - self.federation_sender = hs.get_federation_sender() - self._hs = hs - - # Stores the latest position in the federation stream we've gotten up - # to. This is always set before we use it. - self.federation_position = None - - self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") - - def wake_destination(self, server: str): - self.federation_sender.wake_destination(server) - - async def process_replication_rows(self, stream_name, token, rows): - # The federation stream contains things that we want to send out, e.g. - # presence, typing, etc. - if stream_name == "federation": - send_queue.process_rows_for_federation(self.federation_sender, rows) - await self.update_token(token) - - # ... and when new receipts happen - elif stream_name == ReceiptsStream.NAME: - await self._on_new_receipts(rows) - - # ... 
as well as device updates and messages - elif stream_name == DeviceListsStream.NAME: - # The entities are either user IDs (starting with '@') whose devices - # have changed, or remote servers that we need to tell about - # changes. - hosts = {row.entity for row in rows if not row.entity.startswith("@")} - for host in hosts: - self.federation_sender.send_device_messages(host) - - elif stream_name == ToDeviceStream.NAME: - # The to_device stream includes stuff to be pushed to both local - # clients and remote servers, so we ignore entities that start with - # '@' (since they'll be local users rather than destinations). - hosts = {row.entity for row in rows if not row.entity.startswith("@")} - for host in hosts: - self.federation_sender.send_device_messages(host) - - async def _on_new_receipts(self, rows): - """ - Args: - rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): - new receipts to be processed - """ - for receipt in rows: - # we only want to send on receipts for our own users - if not self._is_mine_id(receipt.user_id): - continue - receipt_info = ReadReceipt( - receipt.room_id, - receipt.receipt_type, - receipt.user_id, - [receipt.event_id], - receipt.data, - ) - await self.federation_sender.send_read_receipt(receipt_info) - - async def update_token(self, token): - """Update the record of where we have processed to in the federation stream. - - Called after we have processed a an update received over replication. Sends - a FEDERATION_ACK back to the master, and stores the token that we have processed - in `federation_stream_position` so that we can restart where we left off. - """ - self.federation_position = token - - # We save and send the ACK to master asynchronously, so we don't block - # processing on persistence. We don't need to do this operation for - # every single RDATA we receive, we just need to do it periodically. - - if self._fed_position_linearizer.is_queued(None): - # There is already a task queued up to save and send the token, so - # no need to queue up another task. - return - - run_as_background_process("_save_and_send_ack", self._save_and_send_ack) - - async def _save_and_send_ack(self): - """Save the current federation position in the database and send an ACK - to master with where we're up to. - """ - try: - # We linearize here to ensure we don't have races updating the token - # - # XXX this appears to be redundant, since the ReplicationCommandHandler - # has a linearizer which ensures that we only process one line of - # replication data at a time. Should we remove it, or is it doing useful - # service for robustness? Or could we replace it with an assertion that - # we're not being re-entered? - - with (await self._fed_position_linearizer.queue(None)): - # We persist and ack the same position, so we take a copy of it - # here as otherwise it can get modified from underneath us. 
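Taken together, update_token above and the _save_and_send_ack body that continues below implement coalescing: every RDATA advances an in-memory position, and at most one persist-and-ack task runs at a time, flushing whatever position is current when it executes. A minimal asyncio sketch of the same idea (illustrative; the real code uses Synapse's clock, run_as_background_process and a Linearizer, not this helper):

    import asyncio

    class PositionAcker:
        """Many position updates, few persist-and-ack flushes."""

        def __init__(self, persist, ack):
            self._persist = persist  # async def persist(token) -> None
            self._ack = ack          # def ack(token) -> None
            self._position = None
            self._task = None

        def update(self, token):
            self._position = token
            # A flush that is already running will notice the newer position;
            # otherwise schedule one.
            if self._task is None or self._task.done():
                self._task = asyncio.ensure_future(self._flush())

        async def _flush(self):
            while True:
                current = self._position  # copy: may advance while we persist
                await self._persist(current)
                self._ack(current)
                if current == self._position:
                    return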
- current_position = self.federation_position - - await self.store.update_federation_out_pos( - "federation", current_position - ) - - # We ACK this token over replication so that the master can drop - # its in memory queues - self._hs.get_tcp_replication().send_federation_ack(current_position) - except Exception: - logger.exception("Error updating federation stream position") - def start(config_options): try: @@ -941,6 +454,10 @@ def start(config_options): config.server.update_user_directory = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage + + if config.server.gc_seconds: + synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds hs = GenericWorkerServer( config.server_name, @@ -956,7 +473,7 @@ def start(config_options): # streams. Will no-op if no streams can be written to by this worker. hs.get_replication_streamer() - register_start(_base.start, hs, config.worker_listeners) + register_start(_base.start, hs) _base.start_worker_reactor("synapse-generic-worker", config) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3bfe9d507ff9..b2501ee4d7f0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # @@ -18,7 +17,7 @@ import logging import os import sys -from typing import Iterable, Iterator +from typing import Iterator from twisted.internet import reactor from twisted.web.resource import EncodingResourceWrapper, IResource @@ -37,7 +36,13 @@ WEB_CLIENT_PREFIX, ) from synapse.app import _base -from synapse.app._base import listen_ssl, listen_tcp, quit_with_error, register_start +from synapse.app._base import ( + listen_ssl, + listen_tcp, + max_request_body_size, + quit_with_error, + register_start, +) from synapse.config._base import ConfigError from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig @@ -127,19 +132,21 @@ def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConf else: root_resource = OptionsResource() - root_resource = create_resource_tree(resources, root_resource) + site = SynapseSite( + "synapse.access.%s.%s" % ("https" if tls else "http", site_tag), + site_tag, + listener_config, + create_resource_tree(resources, root_resource), + self.version_string, + max_request_body_size=max_request_body_size(self.config), + reactor=self.get_reactor(), + ) if tls: ports = listen_ssl( bind_addresses, port, - SynapseSite( - "synapse.access.https.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), + site, self.tls_server_context_factory, reactor=self.get_reactor(), ) @@ -149,13 +156,7 @@ def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConf ports = listen_tcp( bind_addresses, port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), + site, reactor=self.get_reactor(), ) logger.info("Synapse now listening on TCP port %d", port) @@ -192,7 +193,7 @@ def _configure_named_resource(self, name, compress=False): } ) - if self.get_config().threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: from synapse.rest.synapse.client.password_reset import ( PasswordResetSubmitTokenResource, ) @@ -231,7 +232,7 @@ def 
_configure_named_resource(self, name, compress=False): ) if name in ["media", "federation", "client"]: - if self.get_config().enable_media_repo: + if self.config.enable_media_repo: media_repo = self.get_media_repository_resource() resources.update( {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo} @@ -245,7 +246,7 @@ def _configure_named_resource(self, name, compress=False): resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": - webclient_loc = self.get_config().web_client_location + webclient_loc = self.config.web_client_location if webclient_loc is None: logger.warning( @@ -266,7 +267,7 @@ def _configure_named_resource(self, name, compress=False): # https://twistedmatrix.com/trac/ticket/7678 resources[WEB_CLIENT_PREFIX] = File(webclient_loc) - if name == "metrics" and self.get_config().enable_metrics: + if name == "metrics" and self.config.enable_metrics: resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) if name == "replication": @@ -274,18 +275,18 @@ def _configure_named_resource(self, name, compress=False): return resources - def start_listening(self, listeners: Iterable[ListenerConfig]): - config = self.get_config() - - if config.redis_enabled: + def start_listening(self): + if self.config.redis_enabled: # If redis is enabled we connect via the replication command handler # in the same way as the workers (since we're effectively a client # rather than a server). self.get_tcp_replication().start_replication(self) - for listener in listeners: + for listener in self.config.server.listeners: if listener.type == "http": - self._listening_services.extend(self._listener_http(config, listener)) + self._listening_services.extend( + self._listener_http(self.config, listener) + ) elif listener.type == "manhole": _base.listen_manhole( listener.bind_addresses, listener.port, manhole_globals={"hs": self} @@ -299,7 +300,7 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): for s in services: reactor.addSystemEventTrigger("before", "shutdown", s.stopListening) elif listener.type == "metrics": - if not self.get_config().enable_metrics: + if not self.config.enable_metrics: logger.warning( ( "Metrics listener configured, but " @@ -340,6 +341,10 @@ def setup(config_options): sys.exit(0) events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage + + if config.server.gc_seconds: + synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds hs = SynapseHomeServer( config.server_name, @@ -413,7 +418,7 @@ async def start(): # Loading the provider metadata also ensures the provider config is valid. 
await oidc.load_metadata() - await _base.start(hs, config.listeners) + await _base.start(hs) hs.get_datastore().db_pool.updates.start_doing_background_updates() diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index add43147b31f..2d50060ffb0b 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 503d44f687d7..a368efb354ed 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 0bfc5e445f5f..6504c6bd3f59 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 9d3bbe3b8b05..fe04d7a67293 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 366c476f807a..6a2ce99b55dc 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +48,7 @@ components. 
""" import logging -from typing import List +from typing import List, Optional from synapse.appservice import ApplicationService, ApplicationServiceState from synapse.events import EventBase @@ -191,11 +190,11 @@ async def send( self, service: ApplicationService, events: List[EventBase], - ephemeral: List[JsonDict] = [], + ephemeral: Optional[List[JsonDict]] = None, ): try: txn = await self.store.create_appservice_txn( - service=service, events=events, ephemeral=ephemeral + service=service, events=events, ephemeral=ephemeral or [] ) service_is_up = await self._is_service_up(service) if service_is_up: diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py index 1e76e9559df6..d2f889159e75 100644 --- a/synapse/config/__init__.py +++ b/synapse/config/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 65043d5b5b5f..b5b6735a8faa 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ba9cd63cf2d5..08e2c2c543a0 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index e896fd34e23c..ff9abbc23212 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -1,21 +1,22 @@ from typing import Any, Iterable, List, Optional from synapse.config import ( + account_validity, api, appservice, auth, captcha, cas, - consent_config, + consent, database, emailconfig, experimental, groups, - jwt_config, + jwt, key, logger, metrics, - oidc_config, + oidc, password_auth_providers, push, ratelimiting, @@ -23,9 +24,9 @@ from synapse.config import ( registration, repository, room_directory, - saml2_config, + saml2, server, - server_notices_config, + server_notices, spam_checker, sso, stats, @@ -59,15 +60,16 @@ class RootConfig: captcha: captcha.CaptchaConfig voip: voip.VoipConfig registration: registration.RegistrationConfig + account_validity: account_validity.AccountValidityConfig metrics: metrics.MetricsConfig api: api.ApiConfig appservice: appservice.AppServiceConfig key: key.KeyConfig - saml2: saml2_config.SAML2Config + saml2: saml2.SAML2Config cas: cas.CasConfig sso: sso.SSOConfig - oidc: oidc_config.OIDCConfig - jwt: jwt_config.JWTConfig + oidc: oidc.OIDCConfig + jwt: jwt.JWTConfig auth: auth.AuthConfig email: emailconfig.EmailConfig worker: workers.WorkerConfig @@ -76,9 +78,9 @@ class RootConfig: spamchecker: spam_checker.SpamCheckerConfig groups: groups.GroupsConfig userdirectory: user_directory.UserDirectoryConfig - consent: consent_config.ConsentConfig + consent: consent.ConsentConfig stats: stats.StatsConfig - servernotices: server_notices_config.ServerNoticesConfig + servernotices: server_notices.ServerNoticesConfig roomdirectory: room_directory.RoomDirectoryConfig thirdpartyrules: third_party_event_rules.ThirdPartyRulesConfig tracer: tracer.TracerConfig diff --git a/synapse/config/_util.py b/synapse/config/_util.py index 8fce7f6bb133..3edb4b71068f 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -1,4 +1,3 @@ -# 
-*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py new file mode 100644 index 000000000000..c58a7d95a785 --- /dev/null +++ b/synapse/config/account_validity.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.config._base import Config, ConfigError + + +class AccountValidityConfig(Config): + section = "account_validity" + + def read_config(self, config, **kwargs): + account_validity_config = config.get("account_validity") or {} + self.account_validity_enabled = account_validity_config.get("enabled", False) + self.account_validity_renew_by_email_enabled = ( + "renew_at" in account_validity_config + ) + + if self.account_validity_enabled: + if "period" in account_validity_config: + self.account_validity_period = self.parse_duration( + account_validity_config["period"] + ) + else: + raise ConfigError("'period' is required when using account validity") + + if "renew_at" in account_validity_config: + self.account_validity_renew_at = self.parse_duration( + account_validity_config["renew_at"] + ) + + if "renew_email_subject" in account_validity_config: + self.account_validity_renew_email_subject = account_validity_config[ + "renew_email_subject" + ] + else: + self.account_validity_renew_email_subject = "Renew your %(app)s account" + + self.account_validity_startup_job_max_delta = ( + self.account_validity_period * 10.0 / 100.0 + ) + + if self.account_validity_renew_by_email_enabled: + if not self.public_baseurl: + raise ConfigError("Can't send renewal emails without 'public_baseurl'") + + # Load account validity templates. + account_validity_template_dir = account_validity_config.get("template_dir") + + account_renewed_template_filename = account_validity_config.get( + "account_renewed_html_path", "account_renewed.html" + ) + invalid_token_template_filename = account_validity_config.get( + "invalid_token_html_path", "invalid_token.html" + ) + + # Read and store template content + ( + self.account_validity_account_renewed_template, + self.account_validity_account_previously_renewed_template, + self.account_validity_invalid_token_template, + ) = self.read_templates( + [ + account_renewed_template_filename, + "account_previously_renewed.html", + invalid_token_template_filename, + ], + account_validity_template_dir, + ) + + def generate_config_section(self, **kwargs): + return """\ + ## Account Validity ## + + # Optional account validity configuration. This allows for accounts to be denied + # any request after a given period. + # + # Once this feature is enabled, Synapse will look for registered users without an + # expiration date at startup and will add one to every account it found using the + # current settings at that time. 
+ # This means that, if a validity period is set, and Synapse is restarted (it will + # then derive an expiration date from the current validity period), and some time + # after that the validity period changes and Synapse is restarted, the users' + # expiration dates won't be updated unless their account is manually renewed. This + # date will be randomly selected within a range [now + period - d ; now + period], + # where d is equal to 10% of the validity period. + # + account_validity: + # The account validity feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # The period after which an account is valid after its registration. When + # renewing the account, its validity period will be extended by this amount + # of time. This parameter is required when using the account validity + # feature. + # + #period: 6w + + # The amount of time before an account's expiry date at which Synapse will + # send an email to the account's email address with a renewal link. By + # default, no such emails are sent. + # + # If you enable this setting, you will also need to fill out the 'email' and + # 'public_baseurl' configuration sections. + # + #renew_at: 1w + + # The subject of the email sent out with the renewal link. '%(app)s' can be + # used as a placeholder for the 'app_name' parameter from the 'email' + # section. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + # If this is not set, a default value is used. + # + #renew_email_subject: "Renew your %(app)s account" + + # Directory in which Synapse will try to find templates for the HTML files to + # serve to the user when trying to renew an account. If not set, default + # templates from within the Synapse package will be used. + # + # The currently available templates are: + # + # * account_renewed.html: Displayed to the user after they have successfully + # renewed their account. + # + # * account_previously_renewed.html: Displayed to the user if they attempt to + # renew their account with a token that is valid, but that has already + # been used. In this case the account is not renewed again. + # + # * invalid_token.html: Displayed to the user when they try to renew an account + # with an unknown or invalid renewal token. + # + # See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for + # default template contents. + # + # The file name of some of these templates can be configured below for legacy + # reasons. + # + #template_dir: "res/templates" + + # A custom file name for the 'account_renewed.html' template. + # + # If not set, the file is assumed to be named "account_renewed.html". + # + #account_renewed_html_path: "account_renewed.html" + + # A custom file name for the 'invalid_token.html' template. + # + # If not set, the file is assumed to be named "invalid_token.html". + # + #invalid_token_html_path: "invalid_token.html" + """ diff --git a/synapse/config/api.py b/synapse/config/api.py index 74cd53a8ed34..b18044f9822a 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -1,4 +1,4 @@ -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,38 +12,129 @@ # See the License for the specific language governing permissions and # limitations under the License. 
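The randomised expiration window described in the account_validity comments above is easier to follow with numbers. A worked example, assuming the sample value period: 6w (arithmetic only, not code from this PR):

    WEEK_MS = 7 * 24 * 3600 * 1000  # one week in milliseconds
    period = 6 * WEEK_MS            # "period: 6w" after parse_duration
    d = period * 10.0 / 100.0       # startup_job_max_delta: 10% of the period
    assert d == 362_880_000         # 0.6 weeks, roughly 4.2 days
    # Expiration dates assigned at startup are picked in the range
    # [now + period - d, now + period], so renewals (and renewal emails)
    # spread over ~4.2 days instead of all landing at the same instant.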
+import logging +from typing import Iterable + from synapse.api.constants import EventTypes +from synapse.config._base import Config, ConfigError +from synapse.config._util import validate_config +from synapse.types import JsonDict -from ._base import Config +logger = logging.getLogger(__name__) class ApiConfig(Config): section = "api" - def read_config(self, config, **kwargs): - self.room_invite_state_types = config.get( - "room_invite_state_types", - [ - EventTypes.JoinRules, - EventTypes.CanonicalAlias, - EventTypes.RoomAvatar, - EventTypes.RoomEncryption, - EventTypes.Name, - ], + def read_config(self, config: JsonDict, **kwargs): + validate_config(_MAIN_SCHEMA, config, ()) + self.room_prejoin_state = list(self._get_prejoin_state_types(config)) + + def generate_config_section(cls, **kwargs) -> str: + formatted_default_state_types = "\n".join( + " # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES ) - def generate_config_section(cls, **kwargs): return """\ ## API Configuration ## - # A list of event types that will be included in the room_invite_state + # Controls for the state that is shared with users who receive an invite + # to a room # - #room_invite_state_types: - # - "{JoinRules}" - # - "{CanonicalAlias}" - # - "{RoomAvatar}" - # - "{RoomEncryption}" - # - "{Name}" - """.format( - **vars(EventTypes) - ) + room_prejoin_state: + # By default, the following state event types are shared with users who + # receive invites to the room: + # +%(formatted_default_state_types)s + # + # Uncomment the following to disable these defaults (so that only the event + # types listed in 'additional_event_types' are shared). Defaults to 'false'. + # + #disable_default_event_types: true + + # Additional state event types to share with users when they are invited + # to a room. + # + # By default, this list is empty (so only the default event types are shared). + # + #additional_event_types: + # - org.example.custom.event.type + """ % { + "formatted_default_state_types": formatted_default_state_types + } + + def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: + """Get the event types to include in the prejoin state + + Parses the config and returns an iterable of the event types to be included. + """ + room_prejoin_state_config = config.get("room_prejoin_state") or {} + + # backwards-compatibility support for room_invite_state_types + if "room_invite_state_types" in config: + # if both "room_invite_state_types" and "room_prejoin_state" are set, then + # we don't really know what to do. + if room_prejoin_state_config: + raise ConfigError( + "Can't specify both 'room_invite_state_types' and 'room_prejoin_state' " + "in config" + ) + + logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING) + + yield from config["room_invite_state_types"] + return + + if not room_prejoin_state_config.get("disable_default_event_types"): + yield from _DEFAULT_PREJOIN_STATE_TYPES + + yield from room_prejoin_state_config.get("additional_event_types", []) + + +_ROOM_INVITE_STATE_TYPES_WARNING = """\ +WARNING: The 'room_invite_state_types' configuration setting is now deprecated, +and replaced with 'room_prejoin_state'. New features may not work correctly +unless 'room_invite_state_types' is removed. See the sample configuration file for +details of 'room_prejoin_state'. 
+-------------------------------------------------------------------------------- +""" + +_DEFAULT_PREJOIN_STATE_TYPES = [ + EventTypes.JoinRules, + EventTypes.CanonicalAlias, + EventTypes.RoomAvatar, + EventTypes.RoomEncryption, + EventTypes.Name, + # Per MSC1772. + EventTypes.Create, +] + + +# room_prejoin_state can either be None (as it is in the default config), or +# an object containing other config settings +_ROOM_PREJOIN_STATE_CONFIG_SCHEMA = { + "oneOf": [ + { + "type": "object", + "properties": { + "disable_default_event_types": {"type": "boolean"}, + "additional_event_types": { + "type": "array", + "items": {"type": "string"}, + }, + }, + }, + {"type": "null"}, + ] +} + +# the legacy room_invite_state_types setting +_ROOM_INVITE_STATE_TYPES_SCHEMA = {"type": "array", "items": {"type": "string"}} + +_MAIN_SCHEMA = { + "type": "object", + "properties": { + "room_prejoin_state": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA, + "room_invite_state_types": _ROOM_INVITE_STATE_TYPES_SCHEMA, + }, +} diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 9aabaadf9e54..e10d641a9655 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/config/cache.py b/synapse/config/cache.py index 4e8abbf88aeb..91165ee1cee0 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +17,8 @@ import threading from typing import Callable, Dict +from synapse.python_dependencies import DependencyException, check_requirements + from ._base import Config, ConfigError # The prefix for all cache factor-related environment variables @@ -190,6 +191,15 @@ def read_config(self, config, **kwargs): ) self.cache_factors[cache] = factor + self.track_memory_usage = cache_config.get("track_memory_usage", False) + if self.track_memory_usage: + try: + check_requirements("cache_memory") + except DependencyException as e: + raise ConfigError( + e.message # noqa: B306, DependencyException.message is a property + ) + # Resize all caches (if necessary) with the new factors we've loaded self.resize_all_caches() diff --git a/synapse/config/cas.py b/synapse/config/cas.py index dbf50859659f..901f4123e187 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/consent_config.py b/synapse/config/consent.py similarity index 99% rename from synapse/config/consent_config.py rename to synapse/config/consent.py index c47f364b146d..30d07cc219bc 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/database.py b/synapse/config/database.py index e7889b9c20a3..c76ef1e1de8d 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
# @@ -59,6 +58,7 @@ # password: secretpassword # database: synapse # host: localhost +# port: 5432 # cp_min: 5 # cp_max: 10 # diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 52505ac5d2b5..5564d7d097d4 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -300,7 +299,7 @@ def read_config(self, config, **kwargs): "client_base_url", email_config.get("riot_base_url", None) ) - if self.account_validity.renew_by_email_enabled: + if self.account_validity_renew_by_email_enabled: expiry_template_html = email_config.get( "expiry_template_html", "notice_expiry.html" ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 86f4d9af9dda..a693fba87779 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config._base import Config from synapse.types import JsonDict @@ -27,7 +27,11 @@ def read_config(self, config: JsonDict, **kwargs): # MSC2858 (multiple SSO identity providers) self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool - # Spaces (MSC1772, MSC2946, etc) + + # Spaces (MSC1772, MSC2946, MSC3083, etc) self.spaces_enabled = experimental.get("spaces_enabled", False) # type: bool + if self.spaces_enabled: + KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083 + # MSC3026 (busy presence state) self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 55e4db54425d..cdd7a1ef054e 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,6 +44,10 @@ def read_config(self, config, **kwargs): "allow_profile_lookup_over_federation", True ) + self.allow_device_name_lookup_over_federation = config.get( + "allow_device_name_lookup_over_federation", True + ) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Federation ## @@ -76,6 +79,12 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # on this homeserver. Defaults to 'true'. # #allow_profile_lookup_over_federation: false + + # Uncomment to disable device display name lookup over federation. By default, the + # Federation API allows other homeservers to obtain device display names of any user + # on this homeserver. Defaults to 'true'. 
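Returning to the api.py rewrite a few hunks up: the net effect of _get_prejoin_state_types on a given config is easiest to see re-implemented in isolation. A sketch with the event-type constants written out literally (resolve_prejoin_state is a hypothetical name, not the Synapse function):

    DEFAULT_PREJOIN_STATE = [
        "m.room.join_rules",
        "m.room.canonical_alias",
        "m.room.avatar",
        "m.room.encryption",
        "m.room.name",
        "m.room.create",  # newly added per MSC1772
    ]

    def resolve_prejoin_state(room_prejoin_state):
        cfg = room_prejoin_state or {}
        types = []
        if not cfg.get("disable_default_event_types"):
            types.extend(DEFAULT_PREJOIN_STATE)
        types.extend(cfg.get("additional_event_types", []))
        return types

    # {"additional_event_types": ["org.example.custom.event.type"]}
    #   -> the six defaults plus the custom type
    # {"disable_default_event_types": True,
    #  "additional_event_types": ["m.room.name"]}
    #   -> ["m.room.name"] only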
+ # + #allow_device_name_lookup_over_federation: false """ diff --git a/synapse/config/groups.py b/synapse/config/groups.py index 7b7860ea713f..15c2e64bda2f 100644 --- a/synapse/config/groups.py +++ b/synapse/config/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 64a2429f7787..c23b66c88c76 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -13,25 +12,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from ._base import RootConfig +from .account_validity import AccountValidityConfig from .api import ApiConfig from .appservice import AppServiceConfig from .auth import AuthConfig from .cache import CacheConfig from .captcha import CaptchaConfig from .cas import CasConfig -from .consent_config import ConsentConfig +from .consent import ConsentConfig from .database import DatabaseConfig from .emailconfig import EmailConfig from .experimental import ExperimentalConfig from .federation import FederationConfig from .groups import GroupsConfig -from .jwt_config import JWTConfig +from .jwt import JWTConfig from .key import KeyConfig from .logger import LoggingConfig from .metrics import MetricsConfig -from .oidc_config import OIDCConfig +from .oidc import OIDCConfig from .password_auth_providers import PasswordAuthProviderConfig from .push import PushConfig from .ratelimiting import RatelimitConfig @@ -40,9 +39,9 @@ from .repository import ContentRepositoryConfig from .room import RoomConfig from .room_directory import RoomDirectoryConfig -from .saml2_config import SAML2Config +from .saml2 import SAML2Config from .server import ServerConfig -from .server_notices_config import ServerNoticesConfig +from .server_notices import ServerNoticesConfig from .spam_checker import SpamCheckerConfig from .sso import SSOConfig from .stats import StatsConfig @@ -69,6 +68,7 @@ class HomeServerConfig(RootConfig): CaptchaConfig, VoipConfig, RegistrationConfig, + AccountValidityConfig, MetricsConfig, ApiConfig, AppServiceConfig, diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt.py similarity index 99% rename from synapse/config/jwt_config.py rename to synapse/config/jwt.py index f30330abb6d5..9e07e7300814 100644 --- a/synapse/config/jwt_config.py +++ b/synapse/config/jwt.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 Niklas Riekenbrauck # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/key.py b/synapse/config/key.py index 350ff1d6654c..94a9063043a2 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 999aecce5c78..813076dfe2c9 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,7 +31,6 @@ ) import synapse -from synapse.app import _base as appbase from synapse.logging._structured import setup_structured_logging from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter @@ -319,6 +317,8 @@ def setup_logging( # Perform one-time logging configuration. _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner) # Add a SIGHUP handler to reload the logging configuration, if one is available. + from synapse.app import _base as appbase + appbase.register_sighup(_reload_logging_config, log_config_path) # Log immediately so we can grep backwards. diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 2b289f4208d0..7ac82edb0ed1 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc.py similarity index 97% rename from synapse/config/oidc_config.py rename to synapse/config/oidc.py index 05733ec41dcb..ea0abf5aa20d 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # @@ -15,20 +14,23 @@ # limitations under the License. from collections import Counter -from typing import Iterable, List, Mapping, Optional, Tuple, Type +from typing import Collection, Iterable, List, Mapping, Optional, Tuple, Type import attr from synapse.config._util import validate_config from synapse.config.sso import SsoAttributeRequirement from synapse.python_dependencies import DependencyException, check_requirements -from synapse.types import Collection, JsonDict +from synapse.types import JsonDict from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_mxc_uri from ._base import Config, ConfigError, read_file -DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider" +DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider" +# The module that JinjaOidcMappingProvider is in was renamed; we accept the +# old path and treat it the same as the new one.
+LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider" class OIDCConfig(Config): @@ -404,6 +406,8 @@ def _parse_oidc_config_dict( """ ump_config = oidc_config.get("user_mapping_provider", {}) ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) + if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER: + ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER ump_config.setdefault("config", {}) ( diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py index 85d07c4f8f2a..1cf69734bb5f 100644 --- a/synapse/config/password_auth_providers.py +++ b/synapse/config/password_auth_providers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 Openmarket # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/push.py b/synapse/config/push.py index 7831a2ef7921..6ef8491caf4b 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 3f3997f4e53b..7a8d5851c40b 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict +from typing import Dict, Optional from ._base import Config @@ -21,8 +21,10 @@ class RateLimitConfig: def __init__( self, config: Dict[str, float], - defaults={"per_second": 0.17, "burst_count": 3.0}, + defaults: Optional[Dict[str, float]] = None, ): + defaults = defaults or {"per_second": 0.17, "burst_count": 3.0} + self.per_second = config.get("per_second", defaults["per_second"]) self.burst_count = int(config.get("burst_count", defaults["burst_count"])) diff --git a/synapse/config/redis.py b/synapse/config/redis.py index 1373302335b3..33104af73472 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/registration.py b/synapse/config/registration.py index ead007ba5afb..e6f52b4f40ea 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,74 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
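The DEFAULT/LEGACY provider handling in the oidc.py hunk above (mirrored for SAML further down) is a compact compatibility pattern: keep accepting the pre-rename module path and map it onto the new one. Generalised as a sketch (normalise_provider is a hypothetical helper, not part of Synapse):

    def normalise_provider(ump_config, default, legacy):
        # Old configs naming the pre-rename module keep working unchanged.
        ump_config.setdefault("module", default)
        if ump_config["module"] == legacy:
            ump_config["module"] = default
        return ump_config

    cfg = normalise_provider(
        {"module": "synapse.handlers.oidc_handler.JinjaOidcMappingProvider"},
        default="synapse.handlers.oidc.JinjaOidcMappingProvider",
        legacy="synapse.handlers.oidc_handler.JinjaOidcMappingProvider",
    )
    assert cfg["module"] == "synapse.handlers.oidc.JinjaOidcMappingProvider"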
-import os - -import pkg_resources - from synapse.api.constants import RoomCreationPreset from synapse.config._base import Config, ConfigError from synapse.types import RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool -class AccountValidityConfig(Config): - section = "accountvalidity" - - def __init__(self, config, synapse_config): - if config is None: - return - super().__init__() - self.enabled = config.get("enabled", False) - self.renew_by_email_enabled = "renew_at" in config - - if self.enabled: - if "period" in config: - self.period = self.parse_duration(config["period"]) - else: - raise ConfigError("'period' is required when using account validity") - - if "renew_at" in config: - self.renew_at = self.parse_duration(config["renew_at"]) - - if "renew_email_subject" in config: - self.renew_email_subject = config["renew_email_subject"] - else: - self.renew_email_subject = "Renew your %(app)s account" - - self.startup_job_max_delta = self.period * 10.0 / 100.0 - - if self.renew_by_email_enabled: - if "public_baseurl" not in synapse_config: - raise ConfigError("Can't send renewal emails without 'public_baseurl'") - - template_dir = config.get("template_dir") - - if not template_dir: - template_dir = pkg_resources.resource_filename("synapse", "res/templates") - - if "account_renewed_html_path" in config: - file_path = os.path.join(template_dir, config["account_renewed_html_path"]) - - self.account_renewed_html_content = self.read_file( - file_path, "account_validity.account_renewed_html_path" - ) - else: - self.account_renewed_html_content = ( - "<html><body>Your account has been successfully renewed.</body></html>" - ) - - if "invalid_token_html_path" in config: - file_path = os.path.join(template_dir, config["invalid_token_html_path"]) - - self.invalid_token_html_content = self.read_file( - file_path, "account_validity.invalid_token_html_path" - ) - else: - self.invalid_token_html_content = ( - "<html><body>Invalid renewal token.</body></html>" - ) - - class RegistrationConfig(Config): section = "registration" @@ -93,10 +30,6 @@ def read_config(self, config, **kwargs): str(config["disable_registration"]) ) - self.account_validity = AccountValidityConfig( - config.get("account_validity") or {}, config - ) - self.registrations_require_3pid = config.get("registrations_require_3pid", []) self.allowed_local_3pids = config.get("allowed_local_3pids", []) self.enable_3pid_lookup = config.get("enable_3pid_lookup", True) @@ -208,69 +141,6 @@ def generate_config_section(self, generate_secrets=False, **kwargs): # #enable_registration: false - # Optional account validity configuration. This allows for accounts to be denied - # any request after a given period. - # - # Once this feature is enabled, Synapse will look for registered users without an - # expiration date at startup and will add one to every account it found using the - # current settings at that time. - # This means that, if a validity period is set, and Synapse is restarted (it will - # then derive an expiration date from the current validity period), and some time - # after that the validity period changes and Synapse is restarted, the users' - # expiration dates won't be updated unless their account is manually renewed. This - # date will be randomly selected within a range [now + period - d ; now + period], - # where d is equal to 10%% of the validity period. - # - account_validity: - # The account validity feature is disabled by default. Uncomment the - # following line to enable it.
- # - #enabled: true - - # The period after which an account is valid after its registration. When - # renewing the account, its validity period will be extended by this amount - # of time. This parameter is required when using the account validity - # feature. - # - #period: 6w - - # The amount of time before an account's expiry date at which Synapse will - # send an email to the account's email address with a renewal link. By - # default, no such emails are sent. - # - # If you enable this setting, you will also need to fill out the 'email' and - # 'public_baseurl' configuration sections. - # - #renew_at: 1w - - # The subject of the email sent out with the renewal link. '%%(app)s' can be - # used as a placeholder for the 'app_name' parameter from the 'email' - # section. - # - # Note that the placeholder must be written '%%(app)s', including the - # trailing 's'. - # - # If this is not set, a default value is used. - # - #renew_email_subject: "Renew your %%(app)s account" - - # Directory in which Synapse will try to find templates for the HTML files to - # serve to the user when trying to renew an account. If not set, default - # templates from within the Synapse package will be used. - # - #template_dir: "res/templates" - - # File within 'template_dir' giving the HTML to be displayed to the user after - # they successfully renewed their account. If not set, default text is used. - # - #account_renewed_html_path: "account_renewed.html" - - # File within 'template_dir' giving the HTML to be displayed when the user - # tries to renew an account with an invalid renewal token. If not set, - # default text is used. - # - #invalid_token_html_path: "invalid_token.html" - # Time that a user's session remains valid for, after they log in. # # Note that this is not currently compatible with guest logins. @@ -298,9 +168,9 @@ def generate_config_section(self, generate_secrets=False, **kwargs): # #allowed_local_3pids: # - medium: email - # pattern: '.*@matrix\\.org' + # pattern: '^[^@]+@matrix\\.org$' # - medium: email - # pattern: '.*@vector\\.im' + # pattern: '^[^@]+@vector\\.im$' # - medium: msisdn # pattern: '\\+44' diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 061c4ec83fc7..c78a83abe16e 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014, 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -71,6 +70,7 @@ def parse_thumbnail_requirements(thumbnail_sizes): jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg") png_thumbnail = ThumbnailRequirement(width, height, method, "image/png") requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail) + requirements.setdefault("image/jpg", []).append(jpeg_thumbnail) requirements.setdefault("image/webp", []).append(jpeg_thumbnail) requirements.setdefault("image/gif", []).append(png_thumbnail) requirements.setdefault("image/png", []).append(png_thumbnail) diff --git a/synapse/config/room.py b/synapse/config/room.py index 692d7a19361e..d889d90dbc7b 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 2dd719c388ac..56981cac79c2 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2.py similarity index 97% rename from synapse/config/saml2_config.py rename to synapse/config/saml2.py index 6db9cb5ced3c..3d1218c8d14b 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # @@ -26,7 +25,10 @@ logger = logging.getLogger(__name__) -DEFAULT_USER_MAPPING_PROVIDER = ( +DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.saml.DefaultSamlMappingProvider" +# The module that DefaultSamlMappingProvider is in was renamed; we want to +# handle both names transparently. +LEGACY_USER_MAPPING_PROVIDER = ( "synapse.handlers.saml_handler.DefaultSamlMappingProvider" ) @@ -98,6 +100,8 @@ def read_config(self, config, **kwargs): # Use the default user mapping provider if not set ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) + if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER: + ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER # Ensure a config is present ump_dict["config"] = ump_dict.get("config") or {} diff --git a/synapse/config/server.py b/synapse/config/server.py index 5f8910b6e149..c290a35a9285 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -20,13 +19,14 @@ import os.path import re from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional, Set +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple import attr import yaml from netaddr import AddrFormatError, IPNetwork, IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_server_name from ._base import Config, ConfigError @@ -235,10 +235,27 @@ def read_config(self, config, **kwargs): self.print_pidfile = config.get("print_pidfile") self.user_agent_suffix = config.get("user_agent_suffix") self.use_frozen_dicts = config.get("use_frozen_dicts", False) + self.public_baseurl = config.get("public_baseurl") + if self.public_baseurl is not None: + if self.public_baseurl[-1] != "/": + self.public_baseurl += "/" # Whether to enable user presence. - self.use_presence = config.get("use_presence", True) + presence_config = config.get("presence") or {} + self.use_presence = presence_config.get("enabled") + if self.use_presence is None: + self.use_presence = config.get("use_presence", True) + + # Custom presence router module + self.presence_router_module_class = None + self.presence_router_config = None + presence_router_config = presence_config.get("presence_router") + if presence_router_config: + ( + self.presence_router_module_class, + self.presence_router_config, + ) = load_module(presence_router_config, ("presence", "presence_router")) # Whether to update the user directory or not.
This should be set to # false only if we are updating the user directory in a worker @@ -394,10 +411,6 @@ def read_config(self, config, **kwargs): config_path=("federation_ip_range_blacklist",), ) - if self.public_baseurl is not None: - if self.public_baseurl[-1] != "/": - self.public_baseurl += "/" - # (undocumented) option for torturing the worker-mode replication a bit, # for testing. The value defines the number of milliseconds to pause before # sending out any replication updates. @@ -559,6 +572,7 @@ def read_config(self, config, **kwargs): _warn_if_webclient_configured(self.listeners) self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) + self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None)) @attr.s class LimitRemoteRoomsConfig: @@ -834,9 +848,28 @@ def generate_config_section( # #soft_file_limit: 0 - # Set to false to disable presence tracking on this homeserver. + # Presence tracking allows users to see the state (e.g. online/offline) + # of other local and remote users. # - #use_presence: false + presence: + # Uncomment to disable presence tracking on this homeserver. This option + # replaces the previous top-level 'use_presence' option. + # + #enabled: false + + # Presence routers are third-party modules that can control where presence + # updates from users are routed. + # + presence_router: + # The custom module's class. Uncomment to use a custom presence router module. + # + #module: "my_custom_router.PresenceRouter" + + # Configuration options of the custom module. Refer to your module's + # documentation for available options. + # + #config: + # example_option: 'something' # Whether to require authentication to retrieve profile data (avatars, # display names) of other users through the client API. Defaults to @@ -885,6 +918,16 @@ def generate_config_section( # #gc_thresholds: [700, 10, 10] + # The minimum time in seconds between each GC for a generation, regardless of + # the GC thresholds. This ensures that we don't do GC too frequently. + # + # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive + # generation 0 GCs, etc. + # + # Defaults to `[1s, 10s, 30s]`. + # + #gc_min_interval: [0.5s, 30s, 1m] + # Set the limit on the returned events in the timeline in the get # and sync operations. The default value is 100. -1 means no upper limit.
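The read_config shim shown earlier prefers the new nested `presence.enabled` setting and falls back to the legacy top-level `use_presence`. Isolated from the rest of the config class, the lookup order is just (a standalone sketch, not the actual method):

def read_use_presence(config: dict) -> bool:
    presence_config = config.get("presence") or {}
    enabled = presence_config.get("enabled")
    if enabled is None:
        # Fall back to the deprecated top-level option; default to enabled.
        enabled = config.get("use_presence", True)
    return enabled

assert read_use_presence({}) is True
assert read_use_presence({"use_presence": False}) is False
# The new option wins when both are present:
assert read_use_presence({"presence": {"enabled": True}, "use_presence": False}) is True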
# @@ -1273,6 +1316,24 @@ def add_arguments(parser): help="Turn on the twisted telnet manhole service on the given port.", ) + def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]: + """Reads the three durations for the GC min interval option, returning seconds.""" + if durations is None: + return None + + try: + if len(durations) != 3: + raise ValueError() + return ( + self.parse_duration(durations[0]) / 1000, + self.parse_duration(durations[1]) / 1000, + self.parse_duration(durations[2]) / 1000, + ) + except Exception: + raise ConfigError( + "Value of `gc_min_interval` must be a list of three durations if set" + ) + def is_threepid_reserved(reserved_threepids, threepid): """Check the threepid against the reserved threepid config diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices.py similarity index 99% rename from synapse/config/server_notices_config.py rename to synapse/config/server_notices.py index 57f69dc8e27d..48bf3241b659 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py index 3d05abc1586a..447ba3303b3a 100644 --- a/synapse/config/spam_checker.py +++ b/synapse/config/spam_checker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 243cc681e88d..af645c930d0d 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/stats.py b/synapse/config/stats.py index 2258329a52d8..3d44b5120106 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/third_party_event_rules.py b/synapse/config/third_party_event_rules.py index c04e1c4e077e..f502ff539e27 100644 --- a/synapse/config/third_party_event_rules.py +++ b/synapse/config/third_party_event_rules.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
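read_gc_intervals above normalises `gc_min_interval` into a tuple of seconds, one entry per GC generation. The consumer of `gc_seconds` is outside this diff; purely as a sketch of the intent (the driver loop and names here are assumptions, not Synapse's actual GC code), the tuple would be used to throttle collections like so:

import gc
import time
from typing import List, Tuple

def maybe_gc(last_gc: List[float], min_intervals: Tuple[float, float, float]) -> None:
    # Collect the oldest generation whose GC counter has reached its
    # threshold, but only if that generation's minimum interval has elapsed.
    threshold = gc.get_threshold()
    count = gc.get_count()
    now = time.time()
    for gen in (2, 1, 0):
        if count[gen] >= threshold[gen] and now - last_gc[gen] >= min_intervals[gen]:
            gc.collect(gen)
            # Collecting generation N also collects the younger generations.
            for g in range(gen + 1):
                last_gc[g] = now
            break

last_gc = [0.0, 0.0, 0.0]
maybe_gc(last_gc, (1.0, 10.0, 30.0))  # the documented default of [1s, 10s, 30s]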
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/tls.py b/synapse/config/tls.py index ad37b93c0252..7df4e4c3e6b4 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +17,7 @@ import warnings from datetime import datetime from hashlib import sha256 -from typing import List, Optional +from typing import List, Optional, Pattern from unpaddedbase64 import encode_base64 @@ -125,7 +124,7 @@ def read_config(self, config: dict, config_dir_path: str, **kwargs): fed_whitelist_entries = [] # Support globs (*) in whitelist values - self.federation_certificate_verification_whitelist = [] # type: List[str] + self.federation_certificate_verification_whitelist = [] # type: List[Pattern] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) @@ -270,7 +269,7 @@ def generate_config_section( tls_certificate_path, tls_private_key_path, acme_domain, - **kwargs + **kwargs, ): """If the acme_domain is specified acme will be enabled. If the TLS paths are not specified the default will be certs in the diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 727a1e700838..db22b5b19fb6 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index 8d05ef173c05..4cbf79eeed4c 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/config/workers.py b/synapse/config/workers.py index ac92375a85e6..462630201d2e 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -65,6 +64,14 @@ class WriterLocations: Attributes: events: The instances that write to the event and backfill streams. typing: The instance that writes to the typing stream. + to_device: The instances that write to the to_device stream. Currently + can only be a single instance. + account_data: The instances that write to the account data streams. Currently + can only be a single instance. + receipts: The instances that write to the receipts stream. Currently + can only be a single instance. + presence: The instances that write to the presence stream. Currently + can only be a single instance. """ events = attr.ib( @@ -86,6 +93,11 @@ class WriterLocations: type=List[str], converter=_instance_to_list_converter, ) + presence = attr.ib( + default=["master"], + type=List[str], + converter=_instance_to_list_converter, + ) class WorkerConfig(Config): @@ -189,7 +201,14 @@ def read_config(self, config, **kwargs): # Check that the configured writers for events and typing also appear in # `instance_map`.
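The workers.py hunk that follows extends this check to the new single-writer streams. Distilled out of the config class, the validation amounts to the sketch below (a standalone illustration under assumed names, not the actual method):

from typing import Dict, List

SINGLE_WRITER_STREAMS = ("typing", "to_device", "account_data", "receipts", "presence")

def check_writers(writers: Dict[str, List[str]], instance_map: Dict[str, dict]) -> None:
    # Every writer other than "master" must be routable via `instance_map`.
    for stream, instances in writers.items():
        for instance in instances:
            if instance != "master" and instance not in instance_map:
                raise ValueError(
                    "Instance %r is configured to write %s but does not "
                    "appear in `instance_map` config." % (instance, stream)
                )
    # Only the events stream may currently be sharded across several writers.
    for stream in SINGLE_WRITER_STREAMS:
        if len(writers.get(stream, ["master"])) != 1:
            raise ValueError("Must only specify one instance to handle `%s`." % stream)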
- for stream in ("events", "typing", "to_device", "account_data", "receipts"): + for stream in ( + "events", + "typing", + "to_device", + "account_data", + "receipts", + "presence", + ): instances = _instance_to_list_converter(getattr(self.writers, stream)) for instance in instances: if instance != "master" and instance not in self.instance_map: @@ -216,6 +235,11 @@ def read_config(self, config, **kwargs): if len(self.writers.events) == 0: raise ConfigError("Must specify at least one instance to handle `events`.") + if len(self.writers.presence) != 1: + raise ConfigError( + "Must only specify one instance to handle `presence` messages." + ) + self.events_shard_config = RoutableShardedWorkerHandlingConfig( self.writers.events ) diff --git a/synapse/crypto/__init__.py b/synapse/crypto/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/crypto/__init__.py +++ b/synapse/crypto/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 8fb116ae182c..0f2b632e4738 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d5fb51513b59..5f18ef77489d 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017, 2018 New Vector Ltd # @@ -502,7 +501,7 @@ async def get_keys( class BaseV2KeyFetcher(KeyFetcher): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() - self.config = hs.get_config() + self.config = hs.config async def process_v2_response( self, from_server: str, response_json: JsonDict, time_added_ms: int diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 91ad5b3d3cf0..70c556566e69 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -15,14 +14,14 @@ # limitations under the License. import logging -from typing import List, Optional, Set, Tuple +from typing import Any, Dict, List, Optional, Set, Tuple from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from signedjson.sign import SignatureVerifyException, verify_signed_json from unpaddedbase64 import decode_base64 -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import MAX_PDU_SIZE, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, EventSizeError, SynapseError from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, @@ -162,7 +161,7 @@ def check( logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()]) if event.type == EventTypes.Member: - _is_membership_change_allowed(event, auth_events) + _is_membership_change_allowed(room_version_obj, event, auth_events) logger.debug("Allowing! 
%s", event) return @@ -206,7 +205,7 @@ def too_big(field): too_big("type") if len(event.event_id) > 255: too_big("event_id") - if len(encode_canonical_json(event.get_pdu_json())) > 65536: + if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE: too_big("event") @@ -220,8 +219,19 @@ def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool: def _is_membership_change_allowed( - event: EventBase, auth_events: StateMap[EventBase] + room_version: RoomVersion, event: EventBase, auth_events: StateMap[EventBase] ) -> None: + """ + Confirms that the event which changes membership is an allowed change. + + Args: + room_version: The version of the room. + event: The event to check. + auth_events: The current auth events of the room. + + Raises: + AuthError if the event is not allowed. + """ membership = event.content["membership"] # Check if this is the room creator joining: @@ -315,14 +325,19 @@ def _is_membership_change_allowed( if user_level < invite_level: raise AuthError(403, "You don't have permission to invite users") elif Membership.JOIN == membership: - # Joins are valid iff caller == target and they were: - # invited: They are accepting the invitation - # joined: It's a NOOP + # Joins are valid iff caller == target and: + # * They are not banned. + # * They are accepting a previously sent invitation. + # * They are already joined (it's a NOOP). + # * The room is public or restricted. if event.user_id != target_user_id: raise AuthError(403, "Cannot force another user to join.") elif target_banned: raise AuthError(403, "You are banned from this room") - elif join_rule == JoinRules.PUBLIC: + elif join_rule == JoinRules.PUBLIC or ( + room_version.msc3083_join_rules + and join_rule == JoinRules.MSC3083_RESTRICTED + ): pass elif join_rule == JoinRules.INVITE: if not caller_in_room and not caller_invited: @@ -655,7 +670,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase public_key = public_key_object["public_key"] try: for server, signature_block in signed["signatures"].items(): - for key_name, encoded_signature in signature_block.items(): + for key_name in signature_block.keys(): if not key_name.startswith("ed25519:"): continue verify_key = decode_verify_key_bytes( @@ -673,7 +688,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase return False -def get_public_keys(invite_event): +def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]: public_keys = [] if "public_key" in invite_event.content: o = {"public_key": invite_event.content["public_key"]} diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 8f6b955d17b7..c8b52cbc7a09 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
@@ -330,9 +329,11 @@ def __init__( self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: JsonDict = {}, + internal_metadata_dict: Optional[JsonDict] = None, rejected_reason: Optional[str] = None, ): + internal_metadata_dict = internal_metadata_dict or {} + event_dict = dict(event_dict) # Signatures is a dict of dicts, and this is faster than doing a @@ -386,9 +387,11 @@ def __init__( self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: JsonDict = {}, + internal_metadata_dict: Optional[JsonDict] = None, rejected_reason: Optional[str] = None, ): + internal_metadata_dict = internal_metadata_dict or {} + event_dict = dict(event_dict) # Signatures is a dict of dicts, and this is faster than doing a @@ -507,9 +510,11 @@ def _event_type_from_format_version(format_version: int) -> Type[EventBase]: def make_event_from_dict( event_dict: JsonDict, room_version: RoomVersion = RoomVersions.V1, - internal_metadata_dict: JsonDict = {}, + internal_metadata_dict: Optional[JsonDict] = None, rejected_reason: Optional[str] = None, ) -> EventBase: """Construct an EventBase from the given event dict""" event_type = _event_type_from_format_version(room_version.event_format) - return event_type(event_dict, room_version, internal_metadata_dict, rejected_reason) + return event_type( + event_dict, room_version, internal_metadata_dict or {}, rejected_reason + ) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index c1c0426f6ea0..5793553a8883 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py new file mode 100644 index 000000000000..6c37c8a7a430 --- /dev/null +++ b/synapse/events/presence_router.py @@ -0,0 +1,103 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Dict, Iterable, Set, Union + +from synapse.api.presence import UserPresenceState + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class PresenceRouter: + """ + A module that the homeserver will call upon to help route user presence updates to + additional destinations. If a custom presence router is configured, calls will be + passed to that instead. 
+ """ + + ALL_USERS = "ALL" + + def __init__(self, hs: "HomeServer"): + self.custom_presence_router = None + + # Check whether a custom presence router module has been configured + if hs.config.presence_router_module_class: + # Initialise the module + self.custom_presence_router = hs.config.presence_router_module_class( + config=hs.config.presence_router_config, module_api=hs.get_module_api() + ) + + # Ensure the module has implemented the required methods + required_methods = ["get_users_for_states", "get_interested_users"] + for method_name in required_methods: + if not hasattr(self.custom_presence_router, method_name): + raise Exception( + "PresenceRouter module '%s' must implement all required methods: %s" + % ( + hs.config.presence_router_module_class.__name__, + ", ".join(required_methods), + ) + ) + + async def get_users_for_states( + self, + state_updates: Iterable[UserPresenceState], + ) -> Dict[str, Set[UserPresenceState]]: + """ + Given an iterable of user presence updates, determine where each one + needs to go. + + Args: + state_updates: An iterable of user presence state updates. + + Returns: + A dictionary of user_id -> set of UserPresenceState, indicating which + presence updates each user should receive. + """ + if self.custom_presence_router is not None: + # Ask the custom module + return await self.custom_presence_router.get_users_for_states( + state_updates=state_updates + ) + + # Don't include any extra destinations for presence updates + return {} + + async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]: + """ + Retrieve a list of users that `user_id` is interested in receiving the + presence of. This will be in addition to those they share a room with. + Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate + that this user should receive all incoming local and remote presence updates. + + Note that this method will only be called for local users, but can return users + that are local or remote. + + Args: + user_id: A user requesting presence updates. + + Returns: + A set of user IDs to return presence updates for, or ALL_USERS to return all + known updates. + """ + if self.custom_presence_router is not None: + # Ask the custom module for interested users + return await self.custom_presence_router.get_interested_users( + user_id=user_id + ) + + # A custom presence router is not defined. + # Don't report any additional interested users + return set() diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 7295df74fed6..f8d898c3b1d1 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index a9185987a237..d5fa19509498 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. 
# @@ -16,12 +15,12 @@ import inspect import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour -from synapse.types import Collection +from synapse.types import RoomAlias from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -115,7 +114,9 @@ async def user_may_create_room(self, userid: str) -> bool: return True - async def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool: + async def user_may_create_room_alias( + self, userid: str, room_alias: RoomAlias + ) -> bool: """Checks if a given user may create a room alias If this method returns false, the association request will be rejected. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 9767d2394050..f7944fd8344f 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 0f8a3b5ad839..7d7cd9aaee5a 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/events/validator.py b/synapse/events/validator.py index f8f3b1a31e0b..fa6987d7cbac 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py index f5f0bdfca3ec..46300cba2564 100644 --- a/synapse/federation/__init__.py +++ b/synapse/federation/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 383737520afa..949dcd46141f 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index afdb5bf2fafb..a5b6a611952b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -102,7 +101,7 @@ def __init__(self, hs: "HomeServer"): max_len=1000, expiry_ms=120 * 1000, reset_expiry_on_get=False, - ) + ) # type: ExpiringCache[str, EventBase] def _clear_tried_cache(self): """Clear pdu_destination_tried cache""" @@ -452,6 +451,28 @@ async def get_event_auth( return signed_auth + def _is_unknown_endpoint( + self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None + ) -> bool: + """ + Returns true if the response was due to an endpoint being unimplemented. 
+ + Args: + e: The error response received from the remote server. + synapse_error: The above error converted to a SynapseError. This is + automatically generated if not provided. + + """ + if synapse_error is None: + synapse_error = e.to_synapse_error() + # There is no good way to detect an "unknown" endpoint. + # + # Dendrite returns a 404 (with no body); synapse returns a 400 + # with M_UNRECOGNISED. + return e.code == 404 or ( + e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED + ) + async def _try_destination_list( self, description: str, @@ -469,9 +490,9 @@ async def _try_destination_list( callback: Function to run for each server. Passed a single argument: the server_name to try. - If the callback raises a CodeMessageException with a 300/400 code, - attempts to perform the operation stop immediately and the exception is - reraised. + If the callback raises a CodeMessageException with a 300/400 code or + an UnsupportedRoomVersionError, attempts to perform the operation + stop immediately and the exception is reraised. Otherwise, if the callback raises an Exception the error is logged and the next server tried. Normally the stacktrace is logged but this is @@ -493,8 +514,7 @@ async def _try_destination_list( continue try: - res = await callback(destination) - return res + return await callback(destination) except InvalidResponseError as e: logger.warning("Failed to %s via %s: %s", description, destination, e) except UnsupportedRoomVersionError: @@ -503,17 +523,15 @@ async def _try_destination_list( synapse_error = e.to_synapse_error() failover = False + # Fail over on an internal server error, or if the destination + # doesn't implement the endpoint for some reason. if 500 <= e.code < 600: failover = True - elif failover_on_unknown_endpoint: - # there is no good way to detect an "unknown" endpoint. Dendrite - # returns a 404 (with no body); synapse returns a 400 - # with M_UNRECOGNISED. - if e.code == 404 or ( - e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED - ): - failover = True + elif failover_on_unknown_endpoint and self._is_unknown_endpoint( + e, synapse_error + ): + failover = True if not failover: raise synapse_error from e @@ -571,9 +589,8 @@ async def make_membership_event( UnsupportedRoomVersionError: if remote responds with a room version we don't understand. - SynapseError: if the chosen remote server returns a 300/400 code. - - RuntimeError: if no servers were reachable. + SynapseError: if the chosen remote server returns a 300/400 code, or + no servers successfully handle the request. """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: @@ -643,9 +660,8 @@ async def send_join( ``auth_chain``. Raises: - SynapseError: if the chosen remote server returns a 300/400 code. - - RuntimeError: if no servers were reachable. + SynapseError: if the chosen remote server returns a 300/400 code, or + no servers successfully handle the request. """ async def send_request(destination) -> Dict[str, Any]: @@ -674,7 +690,7 @@ async def send_request(destination) -> Dict[str, Any]: if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. - raise SynapseError(400, "No create event in state") + raise InvalidResponseError("No create event in state") # the room version should be sane.
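The two response shapes _is_unknown_endpoint accepts are easy to lose in the hunk noise; distilled to a standalone predicate (an illustration, not the actual method):

def is_unknown_endpoint(status: int, errcode: str = "") -> bool:
    # Dendrite answers an unimplemented endpoint with a plain 404 (no body);
    # Synapse answers with a 400 and errcode M_UNRECOGNIZED.
    return status == 404 or (status == 400 and errcode == "M_UNRECOGNIZED")

assert is_unknown_endpoint(404)
assert is_unknown_endpoint(400, "M_UNRECOGNIZED")
assert not is_unknown_endpoint(400, "M_FORBIDDEN")  # a legitimate error
assert not is_unknown_endpoint(500)  # handled by the 5xx failover branch instead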
create_room_version = create_event.content.get( @@ -747,16 +763,11 @@ async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict: content=pdu.get_pdu_json(time_now), ) except HttpResponseException as e: - if e.code in [400, 404]: - err = e.to_synapse_error() - - # If we receive an error response that isn't a generic error, or an - # unrecognised endpoint error, we assume that the remote understands - # the v2 invite API and this is a legitimate error. - if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]: - raise err - else: - raise e.to_synapse_error() + # If an error is received that is due to an unrecognised endpoint, + # fall back to the v1 endpoint. Otherwise consider it a legitimate error + # and raise. + if not self._is_unknown_endpoint(e): + raise logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") @@ -803,6 +814,11 @@ async def _do_send_invite( Returns: The event as a dict as returned by the remote server + + Raises: + SynapseError: if the remote server returns an error or if the server + only supports the v1 endpoint and a room version other than "1" + or "2" is requested. """ time_now = self._clock.time_msec() @@ -818,28 +834,19 @@ async def _do_send_invite( }, ) except HttpResponseException as e: - if e.code in [400, 404]: - err = e.to_synapse_error() - - # If we receive an error response that isn't a generic error, we - # assume that the remote understands the v2 invite API and this - # is a legitimate error. - if err.errcode != Codes.UNKNOWN: - raise err - - # Otherwise, we assume that the remote server doesn't understand - # the v2 invite API. That's ok provided the room uses old-style event - # IDs. + # If an error is received that is due to an unrecognised endpoint, + # fall back to the v1 endpoint if the room uses old-style event IDs. + # Otherwise consider it a legitimate error and raise. + err = e.to_synapse_error() + if self._is_unknown_endpoint(e, err): if room_version.event_format != EventFormatVersions.V1: raise SynapseError( 400, "User's homeserver does not support this room version", Codes.UNSUPPORTED_ROOM_VERSION, ) - elif e.code in (403, 429): - raise e.to_synapse_error() else: - raise + raise err # Didn't work, try v1 API. # Note the v1 API returns a tuple of `(200, content)` @@ -866,9 +873,8 @@ async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None: pdu: event to be sent Raises: - SynapseError if the chosen remote server returns a 300/400 code. - - RuntimeError if no servers were reachable. + SynapseError: if the chosen remote server returns a 300/400 code, or + no servers successfully handle the request. """ async def send_request(destination: str) -> None: @@ -890,16 +896,11 @@ async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict: content=pdu.get_pdu_json(time_now), ) except HttpResponseException as e: - if e.code in [400, 404]: - err = e.to_synapse_error() - - # If we receive an error response that isn't a generic error, or an - # unrecognised endpoint error, we assume that the remote understands - # the v2 invite API and this is a legitimate error. - if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]: - raise err - else: - raise e.to_synapse_error() + # If an error is received that is due to an unrecognised endpoint, + # fall back to the v1 endpoint. Otherwise consider it a legitimate error + # and raise.
+ if not self._is_unknown_endpoint(e): + raise logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API") diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d84e362070d9..ace30aa45078 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019 Matrix.org Federation C.I.C @@ -45,7 +44,6 @@ SynapseError, UnsupportedRoomVersionError, ) -from synapse.api.ratelimiting import Ratelimiter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.federation.federation_base import FederationBase, event_from_pdu_json @@ -137,7 +135,7 @@ def __init__(self, hs: "HomeServer"): ) # type: ResponseCache[Tuple[str, str]] self._federation_metrics_domains = ( - hs.get_config().federation.federation_metrics_domains + hs.config.federation.federation_metrics_domains ) async def on_backfill_request( @@ -739,22 +737,20 @@ async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None: await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True) - def __str__(self): + def __str__(self) -> str: return "<ReplicationLayer(%s)>" % self.server_name async def exchange_third_party_invite( self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict - ): - ret = await self.handler.exchange_third_party_invite( + ) -> None: + await self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) - return ret - async def on_exchange_third_party_invite_request(self, event_dict: Dict): - ret = await self.handler.on_exchange_third_party_invite_request(event_dict) - return ret + async def on_exchange_third_party_invite_request(self, event_dict: Dict) -> None: + await self.handler.on_exchange_third_party_invite_request(event_dict) - async def check_server_matches_acl(self, server_name: str, room_id: str): + async def check_server_matches_acl(self, server_name: str, room_id: str) -> None: """Check if the given server is allowed by the server ACLs in the room Args: @@ -868,16 +864,9 @@ def __init__(self, hs: "HomeServer"): # EDU received. self._edu_type_to_instance = {} # type: Dict[str, List[str]] - # A rate limiter for incoming room key requests per origin. - self._room_key_request_rate_limiter = Ratelimiter( - clock=self.clock, - rate_hz=self.config.rc_key_requests.per_second, - burst_count=self.config.rc_key_requests.burst_count, - ) - def register_edu_handler( self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]] - ): + ) -> None: """Sets the handler callable that will be used to handle an incoming federation EDU of the given type.
@@ -914,26 +903,20 @@ def register_query_handler( self.query_handlers[query_type] = handler - def register_instance_for_edu(self, edu_type: str, instance_name: str): + def register_instance_for_edu(self, edu_type: str, instance_name: str) -> None: """Register that the EDU handler is on a different instance than master.""" self._edu_type_to_instance[edu_type] = [instance_name] - def register_instances_for_edu(self, edu_type: str, instance_names: List[str]): + def register_instances_for_edu( + self, edu_type: str, instance_names: List[str] + ) -> None: """Register that the EDU handler is on multiple instances.""" self._edu_type_to_instance[edu_type] = instance_names - async def on_edu(self, edu_type: str, origin: str, content: dict): + async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: if not self.config.use_presence and edu_type == EduTypes.Presence: return - # If the incoming room key requests from a particular origin are over - # the limit, drop them. - if ( - edu_type == EduTypes.RoomKeyRequest - and not self._room_key_request_rate_limiter.can_do_action(origin) - ): - return - # Check if we have a handler on this instance handler = self.edu_handlers.get(edu_type) if handler: diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index ce5fc758f0e6..2f9c9bc2cdc8 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0c18c49abb70..65d76ea97415 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -77,9 +76,6 @@ def __init__(self, hs: "HomeServer"): # Pending presence map user_id -> UserPresenceState self.presence_map = {} # type: Dict[str, UserPresenceState] - # Stream position -> list[user_id] - self.presence_changed = SortedDict() # type: SortedDict[int, List[str]] - # Stores the destinations we need to explicitly send presence to about a # given user. # Stream position -> (user_id, destinations) @@ -97,7 +93,7 @@ def __init__(self, hs: "HomeServer"): self.edus = SortedDict() # type: SortedDict[int, Edu] - # stream ID for the next entry into presence_changed/keyed_edu_changed/edus. + # stream ID for the next entry into keyed_edu_changed/edus. 
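For context on the registration methods being annotated here, a hedged usage sketch (the EDU type "m.example" and the worker names are made up; get_federation_registry is the accessor handlers normally use to reach this registry):

from synapse.types import JsonDict

def setup_example_edu(hs) -> None:  # hs: a synapse.server.HomeServer
    async def on_example_edu(origin: str, content: JsonDict) -> None:
        # Called once per incoming EDU of our custom type.
        print("m.example from %s: %r" % (origin, content))

    hs.get_federation_registry().register_edu_handler("m.example", on_example_edu)

def route_example_edu_to_workers(hs) -> None:
    # On the master process one would instead record that the handler lives
    # on other instances, so incoming EDUs of this type are forwarded there.
    hs.get_federation_registry().register_instances_for_edu(
        "m.example", ["worker1", "worker2"]
    )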
self.pos = 1 # map from stream ID to the time that stream entry was generated, so that we @@ -118,7 +114,6 @@ def register(name: str, queue: Sized) -> None: for queue_name in [ "presence_map", - "presence_changed", "keyed_edu", "keyed_edu_changed", "edus", @@ -156,23 +151,12 @@ def _clear_queue_before_pos(self, position_to_delete: int) -> None: """Clear all the queues from before a given position""" with Measure(self.clock, "send_queue._clear"): # Delete things out of presence maps - keys = self.presence_changed.keys() - i = self.presence_changed.bisect_left(position_to_delete) - for key in keys[:i]: - del self.presence_changed[key] - - user_ids = { - user_id for uids in self.presence_changed.values() for user_id in uids - } - keys = self.presence_destinations.keys() i = self.presence_destinations.bisect_left(position_to_delete) for key in keys[:i]: del self.presence_destinations[key] - user_ids.update( - user_id for user_id, _ in self.presence_destinations.values() - ) + user_ids = {user_id for user_id, _ in self.presence_destinations.values()} to_del = [ user_id for user_id in self.presence_map if user_id not in user_ids @@ -245,23 +229,6 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None: """ # nothing to do here: the replication listener will handle it. - def send_presence(self, states: List[UserPresenceState]) -> None: - """As per FederationSender - - Args: - states - """ - pos = self._next_pos() - - # We only want to send presence for our own users, so lets always just - # filter here just in case. - local_states = [s for s in states if self.is_mine_id(s.user_id)] - - self.presence_map.update({state.user_id: state for state in local_states}) - self.presence_changed[pos] = [state.user_id for state in local_states] - - self.notifier.on_new_replication_data() - def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] ) -> None: @@ -326,18 +293,6 @@ async def get_replication_rows( # of the federation stream. 
rows = [] # type: List[Tuple[int, BaseFederationRow]] - # Fetch changed presence - i = self.presence_changed.bisect_right(from_token) - j = self.presence_changed.bisect_right(to_token) + 1 - dest_user_ids = [ - (pos, user_id) - for pos, user_id_list in self.presence_changed.items()[i:j] - for user_id in user_id_list - ] - - for (key, user_id) in dest_user_ids: - rows.append((key, PresenceRow(state=self.presence_map[user_id]))) - # Fetch presence to send to destinations i = self.presence_destinations.bisect_right(from_token) j = self.presence_destinations.bisect_right(to_token) + 1 @@ -428,22 +383,6 @@ def add_to_buffer(self, buff): raise NotImplementedError() -class PresenceRow( - BaseFederationRow, namedtuple("PresenceRow", ("state",)) # UserPresenceState -): - TypeId = "p" - - @staticmethod - def from_data(data): - return PresenceRow(state=UserPresenceState.from_dict(data)) - - def to_data(self): - return self.state.as_dict() - - def add_to_buffer(self, buff): - buff.presence.append(self.state) - - class PresenceDestinationsRow( BaseFederationRow, namedtuple( @@ -507,7 +446,6 @@ def add_to_buffer(self, buff): _rowtypes = ( - PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow, @@ -519,7 +457,6 @@ def add_to_buffer(self, buff): ParsedFederationStreamData = namedtuple( "ParsedFederationStreamData", ( - "presence", # list(UserPresenceState) "presence_destinations", # list of tuples of UserPresenceState and destinations "keyed_edus", # dict of destination -> { key -> Edu } "edus", # dict of destination -> [Edu] @@ -544,7 +481,6 @@ def process_rows_for_federation( # them into the appropriate collection and then send them off. buff = ParsedFederationStreamData( - presence=[], presence_destinations=[], keyed_edus={}, edus={}, @@ -560,18 +496,15 @@ def process_rows_for_federation( parsed_row = RowType.from_data(row.data) parsed_row.add_to_buffer(buff) - if buff.presence: - transaction_queue.send_presence(buff.presence) - for state, destinations in buff.presence_destinations: transaction_queue.send_presence_to_destinations( states=[state], destinations=destinations ) - for destination, edu_map in buff.keyed_edus.items(): + for edu_map in buff.keyed_edus.values(): for key, edu in edu_map.items(): transaction_queue.send_edu(edu, key) - for destination, edu_list in buff.edus.items(): + for edu_list in buff.edus.values(): for edu in edu_list: transaction_queue.send_edu(edu, None) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 8babb1ebbe4e..deb40f461096 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,12 +26,7 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu -from synapse.handlers.presence import get_interested_remotes -from synapse.logging.context import ( - make_deferred_yieldable, - preserve_fn, - run_in_background, -) +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import ( LaterGauge, event_processing_loop_counter, @@ -41,9 +35,10 @@ ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import JsonDict, ReadReceipt, RoomStreamToken -from synapse.util.metrics import Measure, measure_func +from 
synapse.util.metrics import Measure if TYPE_CHECKING: + from synapse.events.presence_router import PresenceRouter from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -85,15 +80,6 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None: """ raise NotImplementedError() - @abc.abstractmethod - def send_presence(self, states: List[UserPresenceState]) -> None: - """Send the new presence states to the appropriate destinations. - - This actually queues up the presence states ready for sending and - triggers a background task to process them and send out the transactions. - """ - raise NotImplementedError() - @abc.abstractmethod def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] @@ -162,6 +148,7 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id + self._presence_router = None # type: Optional[PresenceRouter] self._transaction_manager = TransactionManager(hs) self._instance_name = hs.get_instance_name() @@ -181,11 +168,6 @@ def __init__(self, hs: "HomeServer"): ), ) - # Map of user_id -> UserPresenceState for all the pending presence - # to be sent out by user_id. Entries here get processed and put in - # pending_presence_by_dest - self.pending_presence = {} # type: Dict[str, UserPresenceState] - LaterGauge( "synapse_federation_transaction_queue_pending_pdus", "", @@ -206,8 +188,6 @@ def __init__(self, hs: "HomeServer"): self._is_processing = False self._last_poked_id = -1 - self._processing_pending_presence = False - # map from room_id to a set of PerDestinationQueues which we believe are # awaiting a call to flush_read_receipts_for_room. The presence of an entry # here for a given room means that we are rate-limiting RR flushes to that room, @@ -517,48 +497,6 @@ def _flush_rrs_for_room(self, room_id: str) -> None: for queue in queues: queue.flush_read_receipts_for_room(room_id) - @preserve_fn # the caller should not yield on this - async def send_presence(self, states: List[UserPresenceState]) -> None: - """Send the new presence states to the appropriate destinations. - - This actually queues up the presence states ready for sending and - triggers a background task to process them and send out the transactions. - """ - if not self.hs.config.use_presence: - # No-op if presence is disabled. - return - - # First we queue up the new presence by user ID, so multiple presence - # updates in quick succession are correctly handled. - # We only want to send presence for our own users, so lets always just - # filter here just in case. - self.pending_presence.update( - {state.user_id: state for state in states if self.is_mine_id(state.user_id)} - ) - - # We then handle the new pending presence in batches, first figuring - # out the destinations we need to send each state to and then poking it - # to attempt a new transaction. 
We linearize this so that we don't - # accidentally mess up the ordering and send multiple presence updates - # in the wrong order - if self._processing_pending_presence: - return - - self._processing_pending_presence = True - try: - while True: - states_map = self.pending_presence - self.pending_presence = {} - - if not states_map: - break - - await self._process_presence_inner(list(states_map.values())) - except Exception: - logger.exception("Error sending presence states to servers") - finally: - self._processing_pending_presence = False - def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] ) -> None: @@ -570,6 +508,10 @@ def send_presence_to_destinations( # No-op if presence is disabled. return + # Ensure we only send out presence states for local users. + for state in states: + assert self.is_mine_id(state.user_id) + for destination in destinations: if destination == self.server_name: continue @@ -579,25 +521,6 @@ def send_presence_to_destinations( continue self._get_per_destination_queue(destination).send_presence(states) - @measure_func("txnqueue._process_presence") - async def _process_presence_inner(self, states: List[UserPresenceState]) -> None: - """Given a list of states populate self.pending_presence_by_dest and - poke to send a new transaction to each destination - """ - hosts_and_states = await get_interested_remotes(self.store, states, self.state) - - for destinations, states in hosts_and_states: - for destination in destinations: - if destination == self.server_name: - continue - - if not self._federation_shard_config.should_handle( - self._instance_name, destination - ): - continue - - self._get_per_destination_queue(destination).send_presence(states) - def build_and_send_edu( self, destination: str, @@ -717,16 +640,18 @@ async def _wake_destinations_needing_catchup(self) -> None: self._catchup_after_startup_timer = None break + last_processed = destinations_to_wake[-1] + destinations_to_wake = [ d for d in destinations_to_wake if self._federation_shard_config.should_handle(self._instance_name, d) ] - for last_processed in destinations_to_wake: + for destination in destinations_to_wake: logger.info( "Destination %s has outstanding catch-up, waking up.", destination, ) - self.wake_destination(last_processed) + self.wake_destination(destination) await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC) diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 89df9a619b74..3a2efd56eea9 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # @@ -29,6 +28,8 @@ from synapse.events import EventBase from synapse.federation.units import Edu from synapse.handlers.presence import format_user_presence_state +from synapse.logging import issue9533_logger +from synapse.logging.opentracing import SynapseTags, set_tag from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ReadReceipt @@ -557,6 +558,13 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int] contents, stream_id = await self._store.get_new_device_msgs_for_remote( self._destination, last_device_stream_id, to_device_stream_id, limit ) + for content in contents: + message_id = content.get("message_id") +
if not message_id: + continue + + set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) + edus = [ Edu( origin=self._server_name, @@ -567,6 +575,14 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int] for content in contents ] + if edus: + issue9533_logger.debug( + "Sending %i to-device messages to %s, up to stream id %i", + len(edus), + self._destination, + stream_id, + ) + return (edus, stream_id) def _start_catching_up(self) -> None: diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 07b740c2f2fc..72a635830b9a 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -57,7 +56,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): self._transport_layer = hs.get_federation_transport_client() self._federation_metrics_domains = ( - hs.get_config().federation.federation_metrics_domains + hs.config.federation.federation_metrics_domains ) # HACK to get unique tx id diff --git a/synapse/federation/transport/__init__.py b/synapse/federation/transport/__init__.py index 5db733af98ca..3c9a0f694482 100644 --- a/synapse/federation/transport/__init__.py +++ b/synapse/federation/transport/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 6aee47c43116..497848a2b75b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -996,6 +995,7 @@ async def get_space_summary( returned per space exclude_rooms: a list of any rooms we can skip """ + # TODO When switching to the stable endpoint, use GET instead of POST. path = _create_path( FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id ) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 84e39c5a468d..e1b746247469 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. 
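The GET handler added to the space summary servlet below reads exclude_rooms as a repeated query parameter (query[b"exclude_rooms"] is a list of raw values). For reference, a caller would encode such a query string like this (plain stdlib, illustrative values):

from urllib.parse import urlencode

query = urlencode(
    {
        "suggested_only": "true",
        "max_rooms_per_space": 10,
        "exclude_rooms": ["!abc:example.com", "!def:example.com"],
    },
    doseq=True,  # repeat the key once per list element
)
# suggested_only=true&max_rooms_per_space=10&exclude_rooms=%21abc%3Aexample.com&exclude_rooms=%21def%3Aexample.com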
@@ -425,13 +424,9 @@ async def on_PUT(self, origin, content, query, transaction_id):
             logger.exception(e)
             return 400, {"error": "Invalid transaction"}
 
-        try:
-            code, response = await self.handler.on_incoming_transaction(
-                origin, transaction_data
-            )
-        except Exception:
-            logger.exception("on_incoming_transaction failed")
-            raise
+        code, response = await self.handler.on_incoming_transaction(
+            origin, transaction_data
+        )
 
         return code, response
 
@@ -620,8 +615,8 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
     PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
 
     async def on_PUT(self, origin, content, query, room_id):
-        content = await self.handler.on_exchange_third_party_invite_request(content)
-        return 200, content
+        await self.handler.on_exchange_third_party_invite_request(content)
+        return 200, {}
 
 
 class FederationClientKeysQueryServlet(BaseFederationServlet):
@@ -1381,6 +1376,32 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
     PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
     PATH = "/spaces/(?P<room_id>[^/]*)"
 
+    async def on_GET(
+        self,
+        origin: str,
+        content: JsonDict,
+        query: Mapping[bytes, Sequence[bytes]],
+        room_id: str,
+    ) -> Tuple[int, JsonDict]:
+        suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
+        max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
+
+        exclude_rooms = []
+        if b"exclude_rooms" in query:
+            try:
+                exclude_rooms = [
+                    room_id.decode("ascii") for room_id in query[b"exclude_rooms"]
+                ]
+            except Exception:
+                raise SynapseError(
+                    400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM
+                )
+
+        return 200, await self.handler.federation_space_summary(
+            room_id, suggested_only, max_rooms_per_space, exclude_rooms
+        )
+
+    # TODO When switching to the stable endpoint, remove the POST handler.
     async def on_POST(
         self,
         origin: str,
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index b662c4262120..c83a261918c0 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +17,7 @@
 """
 
 import logging
+from typing import Optional
 
 import attr
 
@@ -98,7 +98,7 @@ class Transaction(JsonEncodedObject):
         "pdus",
     ]
 
-    def __init__(self, transaction_id=None, pdus=[], **kwargs):
+    def __init__(self, transaction_id=None, pdus: Optional[list] = None, **kwargs):
         """If we include a list of pdus then we decode then as PDU's automatically.
""" @@ -107,7 +107,7 @@ def __init__(self, transaction_id=None, pdus=[], **kwargs): if "edus" in kwargs and not kwargs["edus"]: del kwargs["edus"] - super().__init__(transaction_id=transaction_id, pdus=pdus, **kwargs) + super().__init__(transaction_id=transaction_id, pdus=pdus or [], **kwargs) @staticmethod def create_new(pdus, **kwargs): diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 368c44708dac..d2fc8be5f571 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 4b16a4ac29ea..a06d060ebf4a 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # Copyright 2019 Michael Telatynski <7t3chguy@gmail.com> diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index aade2c4a3ad4..d800e1691280 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +48,7 @@ def __init__(self, hs: "HomeServer"): # The rate_hz and burst_count are overridden on a per-user basis self.request_ratelimiter = Ratelimiter( - clock=self.clock, rate_hz=0, burst_count=0 + store=self.store, clock=self.clock, rate_hz=0, burst_count=0 ) self._rc_message = self.hs.config.rc_message @@ -57,6 +56,7 @@ def __init__(self, hs: "HomeServer"): # by the presence of rate limits in the config if self.hs.config.rc_admin_redaction: self.admin_redaction_ratelimiter = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=self.hs.config.rc_admin_redaction.per_second, burst_count=self.hs.config.rc_admin_redaction.burst_count, @@ -91,11 +91,6 @@ async def ratelimit(self, requester, update=True, is_admin_redaction=False): if app_service is not None: return # do not ratelimit app service senders - # Disable rate limiting of users belonging to any AS that is configured - # not to be rate limited in its registration file (rate_limited: true|false). 
- if requester.app_service and not requester.app_service.is_rate_limited(): - return - messages_per_second = self._rc_message.per_second burst_count = self._rc_message.burst_count @@ -113,11 +108,11 @@ async def ratelimit(self, requester, update=True, is_admin_redaction=False): if is_admin_redaction and self.admin_redaction_ratelimiter: # If we have separate config for admin redactions, use a separate # ratelimiter as to not have user_ids clash - self.admin_redaction_ratelimiter.ratelimit(user_id, update=update) + await self.admin_redaction_ratelimiter.ratelimit(requester, update=update) else: # Override rate and burst count per-user - self.request_ratelimiter.ratelimit( - user_id, + await self.request_ratelimiter.ratelimit( + requester, rate_hz=messages_per_second, burst_count=burst_count, update=update, diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 1ce6d697ed30..affb54e0ee4f 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index d781bb251de8..5b927f10b3a9 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +17,7 @@ import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import StoreError, SynapseError from synapse.logging.context import make_deferred_yieldable @@ -40,28 +39,44 @@ def __init__(self, hs: "HomeServer"): self.sendmail = self.hs.get_sendmail() self.clock = self.hs.get_clock() - self._account_validity = self.hs.config.account_validity + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) + self._account_validity_renew_by_email_enabled = ( + hs.config.account_validity.account_validity_renew_by_email_enabled + ) + + self._account_validity_period = None + if self._account_validity_enabled: + self._account_validity_period = ( + hs.config.account_validity.account_validity_period + ) if ( - self._account_validity.enabled - and self._account_validity.renew_by_email_enabled + self._account_validity_enabled + and self._account_validity_renew_by_email_enabled ): # Don't do email-specific configuration if renewal by email is disabled. 
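The ratelimiting changes in this hunk switch callers to an awaitable API and hand each Ratelimiter a store, so that per-user and per-app-service overrides can be looked up at check time instead of being special-cased by every caller. A rough sketch of the underlying token-bucket mechanics (a simplified stand-in, not Synapse's implementation; the store lookup is omitted):

    import time
    from typing import Dict, Optional, Tuple

    class TokenBucketRatelimiter:
        """Each key accrues "actions" at rate_hz, capped at burst_count."""

        def __init__(self, rate_hz: float, burst_count: int):
            self.rate_hz = rate_hz
            self.burst_count = burst_count
            # key -> (current action count, time of last update)
            self.actions: Dict[str, Tuple[float, float]] = {}

        async def can_do_action(
            self,
            key: str,
            rate_hz: Optional[float] = None,
            burst_count: Optional[int] = None,
            update: bool = True,
        ) -> bool:
            # Per-call overrides, as used for per-user message limits above.
            rate_hz = rate_hz if rate_hz is not None else self.rate_hz
            burst_count = burst_count if burst_count is not None else self.burst_count

            now = time.monotonic()
            count, last_time = self.actions.get(key, (0.0, now))
            # Tokens leak away at rate_hz, freeing up room in the bucket.
            count = max(0.0, count - (now - last_time) * rate_hz)
            allowed = count + 1.0 <= burst_count
            if update and allowed:
                self.actions[key] = (count + 1.0, now)
            return allowed

The per-call rate_hz/burst_count parameters are what allow the message ratelimiter above to apply per-user overrides through one shared limiter.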
- self._template_html = self.config.account_validity_template_html - self._template_text = self.config.account_validity_template_text + self._template_html = ( + hs.config.account_validity.account_validity_template_html + ) + self._template_text = ( + hs.config.account_validity.account_validity_template_text + ) + account_validity_renew_email_subject = ( + hs.config.account_validity.account_validity_renew_email_subject + ) try: - app_name = self.hs.config.email_app_name + app_name = hs.config.email_app_name - self._subject = self._account_validity.renew_email_subject % { - "app": app_name - } + self._subject = account_validity_renew_email_subject % {"app": app_name} - self._from_string = self.hs.config.email_notif_from % {"app": app_name} + self._from_string = hs.config.email_notif_from % {"app": app_name} except Exception: # If substitution failed, fall back to the bare strings. - self._subject = self._account_validity.renew_email_subject - self._from_string = self.hs.config.email_notif_from + self._subject = account_validity_renew_email_subject + self._from_string = hs.config.email_notif_from self._raw_from = email.utils.parseaddr(self._from_string)[1] @@ -221,47 +236,87 @@ async def _get_renewal_token(self, user_id: str) -> str: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") - async def renew_account(self, renewal_token: str) -> bool: + async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]: """Renews the account attached to a given renewal token by pushing back the expiration date by the current validity period in the server's configuration. + If it turns out that the token is valid but has already been used, then the + token is considered stale. A token is stale if the 'token_used_ts_ms' db column + is non-null. + Args: renewal_token: Token sent with the renewal request. Returns: - Whether the provided token is valid. + A tuple containing: + * A bool representing whether the token is valid and unused. + * A bool which is `True` if the token is valid, but stale. + * An int representing the user's expiry timestamp as milliseconds since the + epoch, or 0 if the token was invalid. """ try: - user_id = await self.store.get_user_from_renewal_token(renewal_token) + ( + user_id, + current_expiration_ts, + token_used_ts, + ) = await self.store.get_user_from_renewal_token(renewal_token) except StoreError: - return False + return False, False, 0 + + # Check whether this token has already been used. + if token_used_ts: + logger.info( + "User '%s' attempted to use previously used token '%s' to renew account", + user_id, + renewal_token, + ) + return False, True, current_expiration_ts logger.debug("Renewing an account for user %s", user_id) - await self.renew_account_for_user(user_id) - return True + # Renew the account. Pass the renewal_token here so that it is not cleared. + # We want to keep the token around in case the user attempts to renew their + # account with the same token twice (clicking the email link twice). + # + # In that case, the token will be accepted, but the account's expiration ts + # will remain unchanged. 
+ new_expiration_ts = await self.renew_account_for_user( + user_id, renewal_token=renewal_token + ) + + return True, False, new_expiration_ts async def renew_account_for_user( - self, user_id: str, expiration_ts: int = None, email_sent: bool = False + self, + user_id: str, + expiration_ts: Optional[int] = None, + email_sent: bool = False, + renewal_token: Optional[str] = None, ) -> int: """Renews the account attached to a given user by pushing back the expiration date by the current validity period in the server's configuration. Args: - renewal_token: Token sent with the renewal request. + user_id: The ID of the user to renew. expiration_ts: New expiration date. Defaults to now + validity period. - email_sen: Whether an email has been sent for this validity period. - Defaults to False. + email_sent: Whether an email has been sent for this validity period. + renewal_token: Token sent with the renewal request. The user's token + will be cleared if this is None. Returns: New expiration date for this account, as a timestamp in milliseconds since epoch. """ + now = self.clock.time_msec() if expiration_ts is None: - expiration_ts = self.clock.time_msec() + self._account_validity.period + expiration_ts = now + self._account_validity_period await self.store.set_account_validity_for_user( - user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent + user_id=user_id, + expiration_ts=expiration_ts, + email_sent=email_sent, + renewal_token=renewal_token, + token_used_ts=now, ) return expiration_ts diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index 2a25af62880c..16ab93f58048 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/acme_issuing_service.py b/synapse/handlers/acme_issuing_service.py index ae2a9dd9c219..a972d3fa0af6 100644 --- a/synapse/handlers/acme_issuing_service.py +++ b/synapse/handlers/acme_issuing_service.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index c494de49a35b..f72ded038e83 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 996f9e5debc8..177310f0beaa 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
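renew_account now reports three things instead of one, so callers can give a friendlier response when a renewal email link is clicked twice. A sketch of how a caller might consume the tuple (the handler wiring and messages here are illustrative):

    from typing import Tuple

    async def handle_renewal_request(handler, renewal_token: str) -> Tuple[int, str]:
        token_valid, token_stale, expiration_ts = await handler.renew_account(
            renewal_token
        )

        if token_valid:
            # Fresh token: the expiry was pushed back to expiration_ts.
            return 200, "Account renewed until %d ms since the epoch." % expiration_ts
        if token_stale:
            # Valid but already used (e.g. the email link was clicked twice);
            # the expiry is unchanged, so report success rather than an error.
            return 200, "Account was already renewed; expires at %d." % expiration_ts
        # Unknown or malformed token.
        return 404, "Unknown renewal token."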
import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Union from prometheus_client import Counter @@ -34,7 +33,7 @@ wrap_as_background_process, ) from synapse.storage.databases.main.directory import RoomAliasMapping -from synapse.types import Collection, JsonDict, RoomAlias, RoomStreamToken, UserID +from synapse.types import JsonDict, RoomAlias, RoomStreamToken, UserID from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -182,7 +181,7 @@ def notify_interested_services_ephemeral( self, stream_key: str, new_token: Optional[int], - users: Collection[Union[str, UserID]] = [], + users: Optional[Collection[Union[str, UserID]]] = None, ): """This is called by the notifier in the background when a ephemeral event handled by the homeserver. @@ -215,7 +214,7 @@ def notify_interested_services_ephemeral( # We only start a new background process if necessary rather than # optimistically (to cut down on overhead). self._notify_interested_services_ephemeral( - services, stream_key, new_token, users + services, stream_key, new_token, users or [] ) @wrap_as_background_process("notify_interested_services_ephemeral") diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d537ea813785..8a6666a4ade6 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2019 - 2020 The Matrix.org Foundation C.I.C. @@ -18,6 +17,7 @@ import time import unicodedata import urllib.parse +from binascii import crc32 from typing import ( TYPE_CHECKING, Any, @@ -35,6 +35,7 @@ import attr import bcrypt import pymacaroons +import unpaddedbase64 from twisted.web.server import Request @@ -67,6 +68,7 @@ from synapse.util.async_helpers import maybe_awaitable from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email if TYPE_CHECKING: @@ -238,6 +240,7 @@ def __init__(self, hs: "HomeServer"): # Ratelimiter for failed auth during UIA. Uses same ratelimit config # as per `rc_login.failed_attempts`. self._failed_uia_attempts_ratelimiter = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=self.hs.config.rc_login_failed_attempts.per_second, burst_count=self.hs.config.rc_login_failed_attempts.burst_count, @@ -248,6 +251,7 @@ def __init__(self, hs: "HomeServer"): # Ratelimitier for failed /login attempts self._failed_login_attempts_ratelimiter = Ratelimiter( + store=self.store, clock=hs.get_clock(), rate_hz=self.hs.config.rc_login_failed_attempts.per_second, burst_count=self.hs.config.rc_login_failed_attempts.burst_count, @@ -352,7 +356,7 @@ async def validate_user_via_ui_auth( requester_user_id = requester.user.to_string() # Check if we should be ratelimited due to too many previous failed attempts - self._failed_uia_attempts_ratelimiter.ratelimit(requester_user_id, update=False) + await self._failed_uia_attempts_ratelimiter.ratelimit(requester, update=False) # build a list of supported flows supported_ui_auth_types = await self._get_available_ui_auth_types( @@ -373,7 +377,9 @@ def get_new_session_data() -> JsonDict: ) except LoginError: # Update the ratelimiter to say we failed (`can_do_action` doesn't raise). 
- self._failed_uia_attempts_ratelimiter.can_do_action(requester_user_id) + await self._failed_uia_attempts_ratelimiter.can_do_action( + requester, + ) raise # find the completed login type @@ -805,10 +811,12 @@ async def get_access_token_for_user_id( logger.info( "Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry ) + target_user_id_obj = UserID.from_string(puppets_user_id) else: logger.info( "Logging in user %s on device %s%s", user_id, device_id, fmt_expiry ) + target_user_id_obj = UserID.from_string(user_id) if ( not is_appservice_ghost @@ -816,7 +824,7 @@ async def get_access_token_for_user_id( ): await self.auth.check_auth_blocking(user_id) - access_token = self.macaroon_gen.generate_access_token(user_id) + access_token = self.generate_access_token(target_user_id_obj) await self.store.add_access_token_to_user( user_id=user_id, token=access_token, @@ -982,8 +990,8 @@ async def validate_login( # We also apply account rate limiting using the 3PID as a key, as # otherwise using 3PID bypasses the ratelimiting based on user ID. if ratelimit: - self._failed_login_attempts_ratelimiter.ratelimit( - (medium, address), update=False + await self._failed_login_attempts_ratelimiter.ratelimit( + None, (medium, address), update=False ) # Check for login providers that support 3pid login types @@ -1016,8 +1024,8 @@ async def validate_login( # this code path, which is fine as then the per-user ratelimit # will kick in below. if ratelimit: - self._failed_login_attempts_ratelimiter.can_do_action( - (medium, address) + await self._failed_login_attempts_ratelimiter.can_do_action( + None, (medium, address) ) raise LoginError(403, "", errcode=Codes.FORBIDDEN) @@ -1039,8 +1047,8 @@ async def validate_login( # Check if we've hit the failed ratelimit (but don't update it) if ratelimit: - self._failed_login_attempts_ratelimiter.ratelimit( - qualified_user_id.lower(), update=False + await self._failed_login_attempts_ratelimiter.ratelimit( + None, qualified_user_id.lower(), update=False ) try: @@ -1051,8 +1059,8 @@ async def validate_login( # exception and masking the LoginError. The actual ratelimiting # should have happened above. 
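Note the pattern around these login checks: the limiter is consulted with update=False before the attempt, and quota is only consumed via can_do_action when the attempt fails. A condensed sketch of that shape, reusing the TokenBucketRatelimiter from the earlier sketch (the login callable is a stand-in):

    from typing import Awaitable, Callable, TypeVar

    T = TypeVar("T")

    async def guarded_login(
        limiter: "TokenBucketRatelimiter",
        key: str,
        do_login: Callable[[], Awaitable[T]],
    ) -> T:
        # Peek at the limit without spending quota; deny if already over it.
        if not await limiter.can_do_action(key, update=False):
            raise RuntimeError("Too many failed login attempts; try again later.")
        try:
            return await do_login()
        except PermissionError:
            # Only failures consume quota, so successful logins are never
            # throttled by their own history.
            await limiter.can_do_action(key, update=True)
            raise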
             if ratelimit:
-                self._failed_login_attempts_ratelimiter.can_do_action(
-                    qualified_user_id.lower()
+                await self._failed_login_attempts_ratelimiter.can_do_action(
+                    None, qualified_user_id.lower()
                 )
             raise
 
@@ -1189,6 +1197,19 @@ async def _check_local_password(self, user_id: str, password: str) -> Optional[s
             return None
         return user_id
 
+    def generate_access_token(self, for_user: UserID) -> str:
+        """Generates an opaque string, for use as an access token"""
+
+        # we use the following format for access tokens:
+        #    syt_<base64 local part>_<random string>_<base62 crc check>
+
+        b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
+        random_string = stringutils.random_string(20)
+        base = f"syt_{b64local}_{random_string}"
+
+        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+        return f"{base}_{crc}"
+
     async def validate_short_term_login_token(
         self, login_token: str
     ) -> LoginTokenAttributes:
@@ -1245,7 +1266,7 @@ async def delete_access_tokens_for_user(
 
         # see if any of our auth providers want to know about this
         for provider in self.password_providers:
-            for token, token_id, device_id in tokens_and_devices:
+            for token, _, device_id in tokens_and_devices:
                 await provider.on_logged_out(
                     user_id=user_id, device_id=device_id, access_token=token
                 )
@@ -1582,10 +1603,7 @@ class MacaroonGenerator:
 
     hs = attr.ib()
 
-    def generate_access_token(
-        self, user_id: str, extra_caveats: Optional[List[str]] = None
-    ) -> str:
-        extra_caveats = extra_caveats or []
+    def generate_guest_access_token(self, user_id: str) -> str:
         macaroon = self._generate_base_macaroon(user_id)
         macaroon.add_first_party_caveat("type = access")
         # Include a nonce, to make sure that each login gets a different
@@ -1593,8 +1611,7 @@ def generate_access_token(
         macaroon.add_first_party_caveat(
             "nonce = %s" % (stringutils.random_string_with_symbols(16),)
         )
-        for caveat in extra_caveats:
-            macaroon.add_first_party_caveat(caveat)
+        macaroon.add_first_party_caveat("guest = true")
         return macaroon.serialize()
 
     def generate_short_term_login_token(
diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas.py
similarity index 99%
rename from synapse/handlers/cas_handler.py
rename to synapse/handlers/cas.py
index 5060936f943a..7346ccfe9396 100644
--- a/synapse/handlers/cas_handler.py
+++ b/synapse/handlers/cas.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 2bcd8f5435a1..45d2404ddebf 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017, 2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
@@ -50,7 +49,9 @@ def __init__(self, hs: "HomeServer"):
         if hs.config.run_background_tasks:
             hs.get_reactor().callWhenRunning(self._start_user_parting)
 
-        self._account_validity_enabled = hs.config.account_validity.enabled
+        self._account_validity_enabled = (
+            hs.config.account_validity.account_validity_enabled
+        )
 
     async def deactivate_account(
         self,
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 54293d0b9c83..95bdc5902a2b 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 # Copyright 2019,2020 The Matrix.org Foundation C.I.C.
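Because the new token format ends in a CRC over everything before it, a consumer can cheaply reject mistyped or truncated tokens before touching the database. A sketch of the validating side; the base62 alphabet and zero-padding here are assumptions standing in for synapse.util.stringutils.base62_encode:

    from binascii import crc32

    _BASE62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

    def base62_encode(num: int, minwidth: int = 1) -> str:
        # Stand-in for synapse.util.stringutils.base62_encode (alphabet assumed).
        digits = ""
        while num:
            num, rem = divmod(num, 62)
            digits = _BASE62[rem] + digits
        return digits.rjust(minwidth, "0")

    def has_valid_checksum(token: str) -> bool:
        # Split off the trailing base62 CRC and recompute it over the rest.
        base, _, crc = token.rpartition("_")
        if not base:
            return False
        return base62_encode(crc32(base.encode("ascii")), minwidth=6) == crc

The checksum carries no security value; it only guards against transcription errors, much like the check digit in a credit card number.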
@@ -15,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -29,7 +28,6 @@ from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( - Collection, JsonDict, StreamToken, UserID, @@ -157,8 +155,7 @@ async def get_user_ids_changed( # The user may have left the room # TODO: Check if they actually did or if we were just invited. if room_id not in room_ids: - for key, event_id in current_state_ids.items(): - etype, state_key = key + for etype, state_key in current_state_ids.keys(): if etype != EventTypes.Member: continue possibly_left.add(state_key) @@ -180,8 +177,7 @@ async def get_user_ids_changed( log_kv( {"event": "encountered empty previous state", "room_id": room_id} ) - for key, event_id in current_state_ids.items(): - etype, state_key = key + for etype, state_key in current_state_ids.keys(): if etype != EventTypes.Member: continue possibly_changed.add(state_key) @@ -199,8 +195,7 @@ async def get_user_ids_changed( for state_dict in prev_state_ids.values(): member_event = state_dict.get((EventTypes.Member, user_id), None) if not member_event or member_event != current_member_id: - for key, event_id in current_state_ids.items(): - etype, state_key = key + for etype, state_key in current_state_ids.keys(): if etype != EventTypes.Member: continue possibly_changed.add(state_key) @@ -631,7 +626,7 @@ def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): max_len=10000, expiry_ms=30 * 60 * 1000, iterable=True, - ) + ) # type: ExpiringCache[str, Set[str]] # Attempt to resync out of sync device lists every 30s. self._resync_retry_in_progress = False @@ -715,7 +710,7 @@ async def _handle_device_updates(self, user_id: str) -> None: # This can happen since we batch updates return - for device_id, stream_id, prev_ids, content in pending_updates: + for device_id, stream_id, prev_ids, _ in pending_updates: logger.debug( "Handling update %r/%r, ID: %r, prev: %r ", user_id, @@ -741,7 +736,7 @@ async def _handle_device_updates(self, user_id: str) -> None: else: # Simply update the single device, since we know that is the only # change (because of the single prev_id matching the current cache) - for device_id, stream_id, prev_ids, content in pending_updates: + for device_id, stream_id, _, content in pending_updates: await self.store.update_remote_device_list_cache_entry( user_id, device_id, content, stream_id ) @@ -760,7 +755,7 @@ async def _need_to_do_resync( """Given a list of updates for a user figure out if we need to do a full resync, or whether we have enough data that we can just apply the delta. 
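The resync decision this docstring describes boils down to: incremental updates are only safe when their prev_ids connect to stream ids we have already processed. A condensed illustration (the exact rules in the handler are more involved; this shape is an assumption):

    from typing import Iterable, Set, Tuple

    def need_full_resync(
        pending_updates: Iterable[Tuple[str, int, Set[int], dict]],
        seen_stream_ids: Set[int],
        cache_extremity: int,
    ) -> bool:
        # pending_updates entries are (device_id, stream_id, prev_ids, content).
        for _device_id, _stream_id, prev_ids, _content in pending_updates:
            if not prev_ids:
                # No prev ids: the remote assumes we know nothing yet.
                return True
            if any(
                p != cache_extremity and p not in seen_stream_ids for p in prev_ids
            ):
                # An update builds on a stream id we never saw; a delta is unsafe.
                return True
        return False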
""" - seen_updates = self._seen_updates.get(user_id, set()) + seen_updates = self._seen_updates.get(user_id, set()) # type: Set[str] extremity = await self.store.get_device_list_last_stream_id_for_remote(user_id) @@ -930,6 +925,10 @@ async def user_device_resync( else: cached_devices = await self.store.get_cached_devices_for_user(user_id) if cached_devices == {d["device_id"]: d for d in devices}: + logging.info( + "Skipping device list resync for %s, as our cache matches already", + user_id, + ) devices = [] ignore_devices = True @@ -945,6 +944,9 @@ async def user_device_resync( await self.store.update_remote_device_list_cache( user_id, devices, stream_id ) + # mark the cache as valid, whether or not we actually processed any device + # list updates. + await self.store.mark_remote_user_device_cache_as_valid(user_id) device_ids = [device["device_id"] for device in devices] # Handle cross-signing keys. diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index eb547743be9f..580b941595cf 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,15 +15,15 @@ import logging from typing import TYPE_CHECKING, Any, Dict -from synapse.api.constants import EduTypes +from synapse.api.constants import ToDeviceEventTypes from synapse.api.errors import SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( + SynapseTags, get_active_span_text_map, log_kv, set_tag, - start_active_span, ) from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.types import JsonDict, Requester, UserID, get_domain_from_id @@ -80,7 +79,10 @@ def __init__(self, hs: "HomeServer"): ReplicationUserDevicesResyncRestServlet.make_client(hs) ) + # a rate limiter for room key requests. The keys are + # (sending_user_id, sending_device_id). self._ratelimiter = Ratelimiter( + store=self.store, clock=hs.get_clock(), rate_hz=hs.config.rc_key_requests.per_second, burst_count=hs.config.rc_key_requests.burst_count, @@ -100,12 +102,25 @@ async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None: for user_id, by_device in content["messages"].items(): # we use UserID.from_string to catch invalid user ids if not self.is_mine(UserID.from_string(user_id)): - logger.warning("Request for keys for non-local user %s", user_id) + logger.warning("To-device message to non-local user %s", user_id) raise SynapseError(400, "Not a user here") if not by_device: continue + # Ratelimit key requests by the sending user. 
+ if message_type == ToDeviceEventTypes.RoomKeyRequest: + allowed, _ = await self._ratelimiter.can_do_action( + None, (sender_user_id, None) + ) + if not allowed: + logger.info( + "Dropping room_key_request from %s to %s due to rate limit", + sender_user_id, + user_id, + ) + continue + messages_by_device = { device_id: { "content": message_content, @@ -182,20 +197,29 @@ async def send_device_message( ) -> None: sender_user_id = requester.user.to_string() - set_tag("number_of_messages", len(messages)) + message_id = random_string(16) + set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) + + log_kv({"number_of_to_device_messages": len(messages)}) set_tag("sender", sender_user_id) local_messages = {} remote_messages = {} # type: Dict[str, Dict[str, Dict[str, JsonDict]]] for user_id, by_device in messages.items(): # Ratelimit local cross-user key requests by the sending device. if ( - message_type == EduTypes.RoomKeyRequest + message_type == ToDeviceEventTypes.RoomKeyRequest and user_id != sender_user_id - and self._ratelimiter.can_do_action( - (sender_user_id, requester.device_id) - ) ): - continue + allowed, _ = await self._ratelimiter.can_do_action( + requester, (sender_user_id, requester.device_id) + ) + if not allowed: + logger.info( + "Dropping room_key_request from %s to %s due to rate limit", + sender_user_id, + user_id, + ) + continue # we use UserID.from_string to catch invalid user ids if self.is_mine(UserID.from_string(user_id)): @@ -204,32 +228,35 @@ async def send_device_message( "content": message_content, "type": message_type, "sender": sender_user_id, + "message_id": message_id, } for device_id, message_content in by_device.items() } if messages_by_device: local_messages[user_id] = messages_by_device + log_kv( + { + "user_id": user_id, + "device_id": list(messages_by_device), + } + ) else: destination = get_domain_from_id(user_id) remote_messages.setdefault(destination, {})[user_id] = by_device - message_id = random_string(16) - context = get_active_span_text_map() remote_edu_contents = {} for destination, messages in remote_messages.items(): - with start_active_span("to_device_for_user"): - set_tag("destination", destination) - remote_edu_contents[destination] = { - "messages": messages, - "sender": sender_user_id, - "type": message_type, - "message_id": message_id, - "org.matrix.opentracing_context": json_encoder.encode(context), - } + log_kv({"destination": destination}) + remote_edu_contents[destination] = { + "messages": messages, + "sender": sender_user_id, + "type": message_type, + "message_id": message_id, + "org.matrix.opentracing_context": json_encoder.encode(context), + } - log_kv({"local_messages": local_messages}) stream_id = await self.store.add_messages_to_device_inbox( local_messages, remote_edu_contents ) @@ -238,7 +265,6 @@ async def send_device_message( "to_device_key", stream_id, users=local_messages.keys() ) - log_kv({"remote_messages": remote_messages}) if self.federation_sender: for destination in remote_messages.keys(): # Enqueue a new federation transaction to send the new diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index abcf86352dad..4064a2b85913 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,7 @@ import logging import string -from typing import Iterable, List, Optional +from typing import TYPE_CHECKING, Iterable, List, Optional 
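send_device_message above splits the requested messages into a local bucket and a per-destination remote bucket, so that each remote homeserver receives a single EDU covering all of its users. A simplified sketch of that fan-out (Synapse itself goes through UserID parsing and get_domain_from_id rather than raw string splitting):

    from collections import defaultdict
    from typing import Callable, Dict, Tuple

    def split_by_destination(
        messages: Dict[str, dict], is_mine: Callable[[str], bool]
    ) -> Tuple[Dict[str, dict], Dict[str, Dict[str, dict]]]:
        local: Dict[str, dict] = {}
        remote: Dict[str, Dict[str, dict]] = defaultdict(dict)
        for user_id, by_device in messages.items():
            if is_mine(user_id):
                local[user_id] = by_device
            else:
                # Matrix user IDs look like @localpart:domain.
                destination = user_id.split(":", 1)[1]
                remote[destination][user_id] = by_device
        return local, dict(remote)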
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( @@ -28,15 +27,19 @@ SynapseError, ) from synapse.appservice import ApplicationService -from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id +from synapse.storage.databases.main.directory import RoomAliasMapping +from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.state = hs.get_state_handler() @@ -61,7 +64,7 @@ async def _create_association( room_id: str, servers: Optional[Iterable[str]] = None, creator: Optional[str] = None, - ): + ) -> None: # general association creation for both human users and app services for wchar in string.whitespace: @@ -75,7 +78,7 @@ async def _create_association( # TODO(erikj): Add transactions. # TODO(erikj): Check if there is a current association. if not servers: - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) servers = {get_domain_from_id(u) for u in users} if not servers: @@ -105,8 +108,9 @@ async def create_association( """ user_id = requester.user.to_string() + room_alias_str = room_alias.to_string() - if len(room_alias.to_string()) > MAX_ALIAS_LENGTH: + if len(room_alias_str) > MAX_ALIAS_LENGTH: raise SynapseError( 400, "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH, @@ -115,7 +119,7 @@ async def create_association( service = requester.app_service if service: - if not service.is_interested_in_alias(room_alias.to_string()): + if not service.is_interested_in_alias(room_alias_str): raise SynapseError( 400, "This application service has not reserved this kind of alias.", @@ -139,7 +143,7 @@ async def create_association( raise AuthError(403, "This user is not permitted to create this alias") if not self.config.is_alias_creation_allowed( - user_id, room_id, room_alias.to_string() + user_id, room_id, room_alias_str ): # Lets just return a generic message, as there may be all sorts of # reasons why we said no. 
TODO: Allow configurable error messages @@ -212,7 +216,7 @@ async def delete_association( async def delete_appservice_association( self, service: ApplicationService, room_alias: RoomAlias - ): + ) -> None: if not service.is_interested_in_alias(room_alias.to_string()): raise SynapseError( 400, @@ -221,7 +225,7 @@ async def delete_appservice_association( ) await self._delete_association(room_alias) - async def _delete_association(self, room_alias: RoomAlias): + async def _delete_association(self, room_alias: RoomAlias) -> str: if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") @@ -229,17 +233,19 @@ async def _delete_association(self, room_alias: RoomAlias): return room_id - async def get_association(self, room_alias: RoomAlias): + async def get_association(self, room_alias: RoomAlias) -> JsonDict: room_id = None if self.hs.is_mine(room_alias): - result = await self.get_association_from_room_alias(room_alias) + result = await self.get_association_from_room_alias( + room_alias + ) # type: Optional[RoomAliasMapping] if result: room_id = result.room_id servers = result.servers else: try: - result = await self.federation.make_query( + fed_result = await self.federation.make_query( destination=room_alias.domain, query_type="directory", args={"room_alias": room_alias.to_string()}, @@ -249,13 +255,13 @@ async def get_association(self, room_alias: RoomAlias): except CodeMessageException as e: logging.warning("Error retrieving alias") if e.code == 404: - result = None + fed_result = None else: raise - if result and "room_id" in result and "servers" in result: - room_id = result["room_id"] - servers = result["servers"] + if fed_result and "room_id" in fed_result and "servers" in fed_result: + room_id = fed_result["room_id"] + servers = fed_result["servers"] if not room_id: raise SynapseError( @@ -264,7 +270,7 @@ async def get_association(self, room_alias: RoomAlias): Codes.NOT_FOUND, ) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) extra_servers = {get_domain_from_id(u) for u in users} servers = set(extra_servers) | set(servers) @@ -276,7 +282,7 @@ async def get_association(self, room_alias: RoomAlias): return {"room_id": room_id, "servers": servers} - async def on_directory_query(self, args): + async def on_directory_query(self, args: JsonDict) -> JsonDict: room_alias = RoomAlias.from_string(args["room_alias"]) if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room Alias is not hosted on this homeserver") @@ -294,7 +300,7 @@ async def on_directory_query(self, args): async def _update_canonical_alias( self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias - ): + ) -> None: """ Send an updated canonical alias event if the removed alias was set as the canonical alias or listed in the alt_aliases field. 
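The alias lookup being reworked in this file follows a try-local-then-federate shape. A condensed sketch of the flow (the store and federation client below are stand-ins for the handler's dependencies):

    async def resolve_alias(store, federation, room_alias) -> dict:
        # Local aliases resolve straight out of the database.
        result = await store.get_association_from_room_alias(room_alias)
        if result:
            return {"room_id": result.room_id, "servers": result.servers}

        # Otherwise ask the alias's own domain over federation.
        fed_result = await federation.make_query(
            destination=room_alias.domain,
            query_type="directory",
            args={"room_alias": room_alias.to_string()},
        )
        if fed_result and "room_id" in fed_result and "servers" in fed_result:
            return {"room_id": fed_result["room_id"], "servers": fed_result["servers"]}

        raise KeyError("Room alias %s not found" % room_alias.to_string())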
@@ -345,7 +351,9 @@ async def _update_canonical_alias( ratelimit=False, ) - async def get_association_from_room_alias(self, room_alias: RoomAlias): + async def get_association_from_room_alias( + self, room_alias: RoomAlias + ) -> Optional[RoomAliasMapping]: result = await self.store.get_association_from_room_alias(room_alias) if not result: # Query AS to see if it exists @@ -373,7 +381,7 @@ def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> b # either no interested services, or no service with an exclusive lock return True - async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): + async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool: """Determine whether a user can delete an alias. One of the following must be true: @@ -395,14 +403,13 @@ async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): if not room_id: return False - res = await self.auth.check_can_change_room_list( + return await self.auth.check_can_change_room_list( room_id, UserID.from_string(user_id) ) - return res async def edit_published_room_list( self, requester: Requester, room_id: str, visibility: str - ): + ) -> None: """Edit the entry of the room in the published room list. requester @@ -470,7 +477,7 @@ async def edit_published_room_list( async def edit_published_appservice_room_list( self, appservice_id: str, network_id: str, room_id: str, visibility: str - ): + ) -> None: """Add or remove a room from the appservice/network specific public room list. @@ -500,5 +507,4 @@ async def get_aliases_for_room( room_id, requester.user.to_string() ) - aliases = await self.store.get_aliases_for_room(room_id) - return aliases + return await self.store.get_aliases_for_room(room_id) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 2ad9b6d930e7..974487800da5 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -38,7 +37,6 @@ ) from synapse.util import json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer -from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -1008,7 +1006,7 @@ async def _process_other_signatures( return signature_list, failures async def _get_e2e_cross_signing_verify_key( - self, user_id: str, key_type: str, from_user_id: str = None + self, user_id: str, key_type: str, from_user_id: Optional[str] = None ) -> Tuple[JsonDict, str, VerifyKey]: """Fetch locally or remotely query for a cross-signing public key. @@ -1292,17 +1290,6 @@ def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler): # user_id -> list of updates waiting to be handled. self._pending_updates = {} # type: Dict[str, List[Tuple[JsonDict, JsonDict]]] - # Recently seen stream ids. We don't bother keeping these in the DB, - # but they're useful to have them about to reduce the number of spurious - # resyncs. 
- self._seen_updates = ExpiringCache( - cache_name="signing_key_update_edu", - clock=self.clock, - max_len=10000, - expiry_ms=30 * 60 * 1000, - iterable=True, - ) - async def incoming_signing_key_update( self, origin: str, edu_content: JsonDict ) -> None: diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index a910d246d692..31742236a94d 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017, 2018 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py new file mode 100644 index 000000000000..eff639f40760 --- /dev/null +++ b/synapse/handlers/event_auth.py @@ -0,0 +1,86 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from synapse.api.constants import EventTypes, JoinRules +from synapse.api.room_versions import RoomVersion +from synapse.types import StateMap + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class EventAuthHandler: + """ + This class contains methods for authenticating events added to room graphs. + """ + + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + + async def can_join_without_invite( + self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str + ) -> bool: + """ + Check whether a user can join a room without an invite. + + When joining a room with restricted joined rules (as defined in MSC3083), + the membership of spaces must be checked during join. + + Args: + state_ids: The state of the room as it currently is. + room_version: The room version of the room being joined. + user_id: The user joining the room. + + Returns: + True if the user can join the room, false otherwise. + """ + # This only applies to room versions which support the new join rule. + if not room_version.msc3083_join_rules: + return True + + # If there's no join rule, then it defaults to invite (so this doesn't apply). + join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + if not join_rules_event_id: + return True + + # If the join rule is not restricted, this doesn't apply. + join_rules_event = await self._store.get_event(join_rules_event_id) + if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: + return True + + # If allowed is of the wrong form, then only allow invited users. + allowed_spaces = join_rules_event.content.get("allow", []) + if not isinstance(allowed_spaces, list): + return False + + # Get the list of joined rooms and see if there's an overlap. + joined_rooms = await self._store.get_rooms_for_user(user_id) + + # Pull out the other room IDs, invalid data gets filtered. 
+ for space in allowed_spaces: + if not isinstance(space, dict): + continue + + space_id = space.get("space") + if not isinstance(space_id, str): + continue + + # The user was joined to one of the spaces specified, they can join + # this room! + if space_id in joined_rooms: + return True + + # The user was not in any of the required spaces. + return False diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index f46cab73251c..f134f1e234b6 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -104,7 +103,7 @@ async def get_stream( # Send down presence. if event.state_key == auth_user_id: # Send down presence for everyone in the room. - users = await self.state.get_current_users_in_room( + users = await self.store.get_users_in_room( event.room_id ) # type: Iterable[str] else: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 598a66f74cf4..798ed75b30fa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -21,7 +20,17 @@ import logging from collections.abc import Container from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + List, + Optional, + Sequence, + Set, + Tuple, + Union, +) import attr from signedjson.key import decode_verify_key_bytes @@ -94,7 +103,7 @@ @attr.s(slots=True) class _NewEventInfo: - """Holds information about a received event, ready for passing to _handle_new_events + """Holds information about a received event, ready for passing to _auth_and_persist_events Attributes: event: the received event @@ -137,6 +146,7 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() + self._event_auth_handler = hs.get_event_auth_handler() self._message_handler = hs.get_message_handler() self._server_notices_mxid = hs.config.server_notices_mxid self.config = hs.config @@ -171,15 +181,17 @@ def __init__(self, hs: "HomeServer"): self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages - async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: + async def on_receive_pdu( + self, origin: str, pdu: EventBase, sent_to_us_directly: bool = False + ) -> None: """Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events Args: - origin (str): server which initiated the /send/ transaction. Will + origin: server which initiated the /send/ transaction. Will be used to fetch missing events or state. - pdu (FrozenEvent): received PDU - sent_to_us_directly (bool): True if this event was pushed to us; False if + pdu: received PDU + sent_to_us_directly: True if this event was pushed to us; False if we pulled it as the result of a missing prev_event. 
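For reference, the shape of join-rules content that can_join_without_invite in the new event_auth.py is written to handle (the room and space IDs here are invented):

    # The checker walks "allow", silently skipping malformed entries, and
    # admits the user if they are joined to any listed space.
    join_rules_content = {
        "join_rule": "restricted",  # JoinRules.MSC3083_RESTRICTED
        "allow": [
            {"space": "!space:example.org"},  # allowed if user is in this space
            "not-a-dict",                     # skipped: entry is not a dict
            {"space": 12345},                 # skipped: "space" is not a string
        ],
    }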
""" @@ -411,13 +423,15 @@ async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: await self._process_received_pdu(origin, pdu, state=state) - async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): + async def _get_missing_events_for_pdu( + self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int + ) -> None: """ Args: - origin (str): Origin of the pdu. Will be called to get the missing events + origin: Origin of the pdu. Will be called to get the missing events pdu: received pdu - prevs (set(str)): List of event ids which we are missing - min_depth (int): Minimum depth of events to return. + prevs: List of event ids which we are missing + min_depth: Minimum depth of events to return. """ room_id = pdu.room_id @@ -538,8 +552,12 @@ async def _get_state_for_room( destination: str, room_id: str, event_id: str, - ) -> Tuple[List[EventBase], List[EventBase]]: - """Requests all of the room state at a given event from a remote homeserver. + ) -> List[EventBase]: + """Requests all of the room state at a given event from a remote + homeserver. + + Will also fetch any missing events reported in the `auth_chain_ids` + section of `/state_ids`. Args: destination: The remote homeserver to query for the state. @@ -547,8 +565,7 @@ async def _get_state_for_room( event_id: The id of the event we want the state at. Returns: - A list of events in the state, not including the event itself, and - a list of events in the auth chain for the given event. + A list of events in the state, not including the event itself. """ ( state_event_ids, @@ -557,68 +574,53 @@ async def _get_state_for_room( destination, room_id, event_id=event_id ) - desired_events = set(state_event_ids + auth_event_ids) + # Fetch the state events from the DB, and check we have the auth events. + event_map = await self.store.get_events(state_event_ids, allow_rejected=True) + auth_events_in_store = await self.store.have_seen_events(auth_event_ids) - event_map = await self._get_events_from_store_or_dest( - destination, room_id, desired_events - ) - - failed_to_fetch = desired_events - event_map.keys() - if failed_to_fetch: - logger.warning( - "Failed to fetch missing state/auth events for %s %s", - event_id, - failed_to_fetch, + # Check for missing events. We handle state and auth event seperately, + # as we want to pull the state from the DB, but we don't for the auth + # events. (Note: we likely won't use the majority of the auth chain, and + # it can be *huge* for large rooms, so it's worth ensuring that we don't + # unnecessarily pull it from the DB). + missing_state_events = set(state_event_ids) - set(event_map) + missing_auth_events = set(auth_event_ids) - set(auth_events_in_store) + if missing_state_events or missing_auth_events: + await self._get_events_and_persist( + destination=destination, + room_id=room_id, + events=missing_state_events | missing_auth_events, ) - remote_state = [ - event_map[e_id] for e_id in state_event_ids if e_id in event_map - ] - - auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] - auth_chain.sort(key=lambda e: e.depth) - - return remote_state, auth_chain - - async def _get_events_from_store_or_dest( - self, destination: str, room_id: str, event_ids: Iterable[str] - ) -> Dict[str, EventBase]: - """Fetch events from a remote destination, checking if we already have them. - - Persists any events we don't already have as outliers. 
- - If we fail to fetch any of the events, a warning will be logged, and the event - will be omitted from the result. Likewise, any events which turn out not to - be in the given room. - - This function *does not* automatically get missing auth events of the - newly fetched events. Callers must include the full auth chain of - of the missing events in the `event_ids` argument, to ensure that any - missing auth events are correctly fetched. + if missing_state_events: + new_events = await self.store.get_events( + missing_state_events, allow_rejected=True + ) + event_map.update(new_events) - Returns: - map from event_id to event - """ - fetched_events = await self.store.get_events(event_ids, allow_rejected=True) + missing_state_events.difference_update(new_events) - missing_events = set(event_ids) - fetched_events.keys() + if missing_state_events: + logger.warning( + "Failed to fetch missing state events for %s %s", + event_id, + missing_state_events, + ) - if missing_events: - logger.debug( - "Fetching unknown state/auth events %s for room %s", - missing_events, - room_id, - ) + if missing_auth_events: + auth_events_in_store = await self.store.have_seen_events( + missing_auth_events + ) + missing_auth_events.difference_update(auth_events_in_store) - await self._get_events_and_persist( - destination=destination, room_id=room_id, events=missing_events - ) + if missing_auth_events: + logger.warning( + "Failed to fetch missing auth events for %s %s", + event_id, + missing_auth_events, + ) - # we need to make sure we re-load from the database to get the rejected - # state correct. - fetched_events.update( - (await self.store.get_events(missing_events, allow_rejected=True)) - ) + remote_state = list(event_map.values()) # check for events which were in the wrong room. # @@ -626,8 +628,8 @@ async def _get_events_from_store_or_dest( # auth_events at an event in room A are actually events in room B bad_events = [ - (event_id, event.room_id) - for event_id, event in fetched_events.items() + (event.event_id, event.room_id) + for event in remote_state if event.room_id != room_id ] @@ -644,9 +646,10 @@ async def _get_events_from_store_or_dest( room_id, ) - del fetched_events[bad_event_id] + if bad_events: + remote_state = [e for e in remote_state if e.room_id == room_id] - return fetched_events + return remote_state async def _get_state_after_missing_prev_event( self, @@ -778,7 +781,7 @@ async def _process_received_pdu( origin: str, event: EventBase, state: Optional[Iterable[EventBase]], - ): + ) -> None: """Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. @@ -794,7 +797,10 @@ async def _process_received_pdu( logger.debug("Processing event: %s", event) try: - await self._handle_new_event(origin, event, state=state) + context = await self.state_handler.compute_event_context( + event, old_state=state + ) + await self._auth_and_persist_event(origin, event, context, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) @@ -887,7 +893,9 @@ async def _resync_device(self, sender: str) -> None: logger.exception("Failed to resync device for %s", sender) @log_function - async def backfill(self, dest, room_id, limit, extremities): + async def backfill( + self, dest: str, room_id: str, limit: int, extremities: List[str] + ) -> List[EventBase]: """Trigger a backfill request to `dest` for the given `room_id` This will attempt to get more events from the remote. 
If the other side @@ -944,27 +952,23 @@ async def backfill(self, dest, room_id, limit, extremities): # For each edge get the current state. - auth_events = {} state_events = {} events_to_state = {} for e_id in edges: - state, auth = await self._get_state_for_room( + state = await self._get_state_for_room( destination=dest, room_id=room_id, event_id=e_id, ) - auth_events.update({a.event_id: a for a in auth}) - auth_events.update({s.event_id: s for s in state}) state_events.update({s.event_id: s for s in state}) events_to_state[e_id] = state required_auth = { a_id - for event in events - + list(state_events.values()) - + list(auth_events.values()) + for event in events + list(state_events.values()) for a_id in event.auth_event_ids() } + auth_events = await self.store.get_events(required_auth, allow_rejected=True) auth_events.update( {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} ) @@ -995,7 +999,9 @@ async def backfill(self, dest, room_id, limit, extremities): ) if ev_infos: - await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) + await self._auth_and_persist_events( + dest, room_id, ev_infos, backfilled=True + ) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -1008,10 +1014,12 @@ async def backfill(self, dest, room_id, limit, extremities): # non-outliers assert not event.internal_metadata.is_outlier() + context = await self.state_handler.compute_event_context(event) + # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. - await self._handle_new_event(dest, event, backfilled=True) + await self._auth_and_persist_event(dest, event, context, backfilled=True) return events @@ -1142,16 +1150,15 @@ async def maybe_backfill( curr_state = await self.state_handler.get_current_state(room_id) - def get_domains_from_state(state): + def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]: """Get joined domains from state Args: - state (dict[tuple, FrozenEvent]): State map from type/state - key to event. + state: State map from type/state key to event. Returns: - list[tuple[str, int]]: Returns a list of servers with the - lowest depth of their joins. Sorted by lowest depth first. + Returns a list of servers with the lowest depth of their joins. + Sorted by lowest depth first. """ joined_users = [ (state_key, int(event.depth)) @@ -1179,7 +1186,7 @@ def get_domains_from_state(state): domain for domain, depth in curr_domains if domain != self.server_name ] - async def try_backfill(domains): + async def try_backfill(domains: List[str]) -> bool: # TODO: Should we try multiple of these at a time? for dom in domains: try: @@ -1258,21 +1265,25 @@ async def try_backfill(domains): } for e_id, _ in sorted_extremeties_tuple: - likely_domains = get_domains_from_state(states[e_id]) + likely_extremeties_domains = get_domains_from_state(states[e_id]) success = await try_backfill( - [dom for dom, _ in likely_domains if dom not in tried_domains] + [ + dom + for dom, _ in likely_extremeties_domains + if dom not in tried_domains + ] ) if success: return True - tried_domains.update(dom for dom, _ in likely_domains) + tried_domains.update(dom for dom, _ in likely_extremeties_domains) return False async def _get_events_and_persist( self, destination: str, room_id: str, events: Iterable[str] - ): + ) -> None: """Fetch the given events from a server, and persist them as outliers. 
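get_domains_from_state's ranking can be summarised as: one entry per server, keyed by the shallowest join we have seen from it, shallowest first, since long-standing members are the most promising backfill sources. A self-contained sketch of the same idea:

    from typing import Dict, Iterable, List, Tuple

    def rank_backfill_domains(
        joined_members: Iterable[Tuple[str, int]]
    ) -> List[Tuple[str, int]]:
        # joined_members: (user_id, depth) pairs for m.room.member joins.
        best: Dict[str, int] = {}
        for user_id, depth in joined_members:
            domain = user_id.split(":", 1)[1]
            best[domain] = min(best.get(domain, depth), depth)
        # Servers that joined earliest (lowest depth) come first.
        return sorted(best.items(), key=lambda entry: entry[1])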
This function *does not* recursively get missing auth events of the @@ -1342,13 +1353,13 @@ async def get_event(event_id: str): event_infos.append(_NewEventInfo(event, None, auth)) - await self._handle_new_events( + await self._auth_and_persist_events( destination, room_id, event_infos, ) - def _sanity_check_event(self, ev): + def _sanity_check_event(self, ev: EventBase) -> None: """ Do some early sanity checks of a received event @@ -1357,9 +1368,7 @@ def _sanity_check_event(self, ev): or cascade of event fetches. Args: - ev (synapse.events.EventBase): event to be checked - - Returns: None + ev: event to be checked Raises: SynapseError if the event does not pass muster @@ -1380,7 +1389,7 @@ def _sanity_check_event(self, ev): ) raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events") - async def send_invite(self, target_host, event): + async def send_invite(self, target_host: str, event: EventBase) -> EventBase: """Sends the invite to the remote server for signing. Invites must be signed by the invitee's server before distribution. @@ -1528,12 +1537,13 @@ async def do_invite_join( run_in_background(self._handle_queued_pdus, room_queue) - async def _handle_queued_pdus(self, room_queue): + async def _handle_queued_pdus( + self, room_queue: List[Tuple[EventBase, str]] + ) -> None: """Process PDUs which got queued up while we were busy send_joining. Args: - room_queue (list[FrozenEvent, str]): list of PDUs to be processed - and the servers that sent them + room_queue: list of PDUs to be processed and the servers that sent them """ for p, origin in room_queue: try: @@ -1612,7 +1622,7 @@ async def on_make_join_request( return event - async def on_send_join_request(self, origin, pdu): + async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: """We have received a join event for a room. Fully process it and respond with the current state and auth chains. """ @@ -1649,16 +1659,47 @@ async def on_send_join_request(self, origin, pdu): # would introduce the danger of backwards-compatibility problems. event.internal_metadata.send_on_behalf_of = origin - context = await self._handle_new_event(origin, event) + # Calculate the event context. + context = await self.state_handler.compute_event_context(event) + + # Get the state before the new event. + prev_state_ids = await context.get_prev_state_ids() + + # Check if the user is already in the room or invited to the room. + user_id = event.state_key + prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) + newly_joined = True + user_is_invited = False + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + newly_joined = prev_member_event.membership != Membership.JOIN + user_is_invited = prev_member_event.membership == Membership.INVITE + + # If the member is not already in the room, and not invited, check if + # they should be allowed access via membership in a space. + if ( + newly_joined + and not user_is_invited + and not await self._event_auth_handler.can_join_without_invite( + prev_state_ids, + event.room_version, + user_id, + ) + ): + raise AuthError( + 403, + "You do not belong to any of the required spaces to join this room.", + ) + + # Persist the event. 
+        await self._auth_and_persist_event(origin, event, context)

         logger.debug(
-            "on_send_join_request: After _handle_new_event: %s, sigs: %s",
+            "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s",
             event.event_id,
             event.signatures,
         )

-        prev_state_ids = await context.get_prev_state_ids()
-
         state_ids = list(prev_state_ids.values())
         auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)
@@ -1668,7 +1709,7 @@ async def on_send_join_request(self, origin, pdu):

     async def on_invite_request(
         self, origin: str, event: EventBase, room_version: RoomVersion
-    ):
+    ) -> EventBase:
         """We've got an invite event. Process and persist it. Sign it.

         Respond with the now signed event.
@@ -1711,7 +1752,7 @@ async def on_invite_request(
         member_handler = self.hs.get_room_member_handler()
         # We don't rate limit based on room ID, as that should be done by
         # sending server.
-        member_handler.ratelimit_invite(None, event.state_key)
+        await member_handler.ratelimit_invite(None, None, event.state_key)

         # keep a record of the room version, if we don't yet know it.
         # (this may get overwritten if we later get a different room version in a
@@ -1772,7 +1813,7 @@ async def _make_and_verify_event(
         room_id: str,
         user_id: str,
         membership: str,
-        content: JsonDict = {},
+        content: JsonDict,
         params: Optional[Dict[str, Union[str, Iterable[str]]]] = None,
     ) -> Tuple[str, EventBase, RoomVersion]:
         (
@@ -1841,7 +1882,7 @@ async def on_make_leave_request(

         return event

-    async def on_send_leave_request(self, origin, pdu):
+    async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None:
         """ We have received a leave event for a room. Fully process it."""
         event = pdu
@@ -1861,10 +1902,11 @@ async def on_send_leave_request(self, origin, pdu):

         event.internal_metadata.outlier = False

-        await self._handle_new_event(origin, event)
+        context = await self.state_handler.compute_event_context(event)
+        await self._auth_and_persist_event(origin, event, context)

         logger.debug(
-            "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
+            "on_send_leave_request: After _auth_and_persist_event: %s, sigs: %s",
             event.event_id,
             event.signatures,
         )
@@ -1969,14 +2011,47 @@ async def get_persisted_pdu(
         else:
             return None

-    async def get_min_depth_for_context(self, context):
+    async def get_min_depth_for_context(self, context: str) -> int:
         return await self.store.get_min_depth(context)

-    async def _handle_new_event(
-        self, origin, event, state=None, auth_events=None, backfilled=False
-    ):
-        context = await self._prep_event(
-            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
+    async def _auth_and_persist_event(
+        self,
+        origin: str,
+        event: EventBase,
+        context: EventContext,
+        state: Optional[Iterable[EventBase]] = None,
+        auth_events: Optional[MutableStateMap[EventBase]] = None,
+        backfilled: bool = False,
+    ) -> None:
+        """
+        Process an event by performing auth checks and then persisting to the database.
+
+        Args:
+            origin: The host the event originates from.
+            event: The event itself.
+            context:
+                The event context.
+
+                NB that this function potentially modifies it.
+            state:
+                The state events used to check the event for soft-fail. If this is
+                not provided the current state events will be used.
+            auth_events:
+                Map from (event_type, state_key) to event
+
+                Normally these are our calculated auth_events based on the state
+                of the room at the event's position in the DAG, though
+                occasionally (e.g. if the event is an outlier) they may be the
+                auth events claimed by the remote server.
+ backfilled: True if the event was backfilled. + """ + context = await self._check_event_auth( + origin, + event, + context, + state=state, + auth_events=auth_events, + backfilled=backfilled, ) try: @@ -1998,9 +2073,7 @@ async def _handle_new_event( ) raise - return context - - async def _handle_new_events( + async def _auth_and_persist_events( self, origin: str, room_id: str, @@ -2018,9 +2091,13 @@ async def _handle_new_events( async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): - res = await self._prep_event( + res = await self.state_handler.compute_event_context( + event, old_state=ev_info.state + ) + res = await self._check_event_auth( origin, event, + res, state=ev_info.state, auth_events=ev_info.auth_events, backfilled=backfilled, @@ -2155,49 +2232,6 @@ async def _persist_auth_tree( room_id, [(event, new_event_context)] ) - async def _prep_event( - self, - origin: str, - event: EventBase, - state: Optional[Iterable[EventBase]], - auth_events: Optional[MutableStateMap[EventBase]], - backfilled: bool, - ) -> EventContext: - context = await self.state_handler.compute_event_context(event, old_state=state) - - if not auth_events: - prev_state_ids = await context.get_prev_state_ids() - auth_events_ids = self.auth.compute_auth_events( - event, prev_state_ids, for_verification=True - ) - auth_events_x = await self.store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} - - # This is a hack to fix some old rooms where the initial join event - # didn't reference the create event in its auth events. - if event.type == EventTypes.Member and not event.auth_event_ids(): - if len(event.prev_event_ids()) == 1 and event.depth < 5: - c = await self.store.get_event( - event.prev_event_ids()[0], allow_none=True - ) - if c and c.type == EventTypes.Create: - auth_events[(c.type, c.state_key)] = c - - context = await self.do_auth(origin, event, context, auth_events=auth_events) - - if not context.rejected: - await self._check_for_soft_fail(event, state, backfilled) - - if event.type == EventTypes.GuestAccess and not context.rejected: - await self.maybe_kick_guest_users(event) - - # If we are going to send this event over federation we precaclculate - # the joined hosts. - if event.internal_metadata.get_send_on_behalf_of(): - await self.event_creation_handler.cache_joined_hosts_for_event(event) - - return context - async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool ) -> None: @@ -2280,40 +2314,14 @@ async def _check_for_soft_fail( logger.warning("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True - async def on_query_auth( - self, origin, event_id, room_id, remote_auth_chain, rejects, missing - ): - in_room = await self.auth.check_host_in_room(room_id, origin) - if not in_room: - raise AuthError(403, "Host not in room.") - - event = await self.store.get_event(event_id, check_room_id=room_id) - - # Just go through and process each event in `remote_auth_chain`. We - # don't want to fall into the trap of `missing` being wrong. - for e in remote_auth_chain: - try: - await self._handle_new_event(origin, e) - except AuthError: - pass - - # Now get the current auth_chain for the event. - local_auth_chain = await self.store.get_auth_chain( - room_id, list(event.auth_event_ids()), include_given=True - ) - - # TODO: Check if we would now reject event_id. If so we need to tell - # everyone. 
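To summarise the refactor in these hunks: `_handle_new_event`/`_prep_event` are gone, and callers now compute the event context themselves before handing over. A hedged sketch of the new calling convention, using the handler attributes that appear in this diff:

    async def receive_pdu(handler, origin: str, event, state=None) -> None:
        # Step 1: compute the event context up front (previously done
        # inside _prep_event).
        context = await handler.state_handler.compute_event_context(
            event, old_state=state
        )
        # Step 2: auth-check and persist. _check_event_auth may mark the
        # context as rejected or soft-failed rather than raising.
        await handler._auth_and_persist_event(origin, event, context, state=state)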
-
-        ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain)
-
-        logger.debug("on_query_auth returning: %s", ret)
-
-        return ret
-
     async def on_get_missing_events(
-        self, origin, room_id, earliest_events, latest_events, limit
-    ):
+        self,
+        origin: str,
+        room_id: str,
+        earliest_events: List[str],
+        latest_events: List[str],
+        limit: int,
+    ) -> List[EventBase]:
         in_room = await self.auth.check_host_in_room(room_id, origin)
         if not in_room:
             raise AuthError(403, "Host not in room.")
@@ -2334,19 +2342,28 @@ async def on_get_missing_events(

         return missing_events

-    async def do_auth(
+    async def _check_event_auth(
         self,
         origin: str,
         event: EventBase,
         context: EventContext,
-        auth_events: MutableStateMap[EventBase],
+        state: Optional[Iterable[EventBase]],
+        auth_events: Optional[MutableStateMap[EventBase]],
+        backfilled: bool,
     ) -> EventContext:
         """
+        Checks whether an event should be rejected (for failing auth checks).

         Args:
-            origin:
-            event:
+            origin: The host the event originates from.
+            event: The event itself.
             context:
+                The event context.
+
+                NB that this function potentially modifies it.
+            state:
+                The state events used to check the event for soft-fail. If this is
+                not provided the current state events will be used.
             auth_events:
                 Map from (event_type, state_key) to event
@@ -2356,12 +2373,34 @@ async def do_auth(
                 server.

                 Also NB that this function adds entries to it.
+
+                If this is not provided, it is calculated from the previous state IDs.
+
+            backfilled: True if the event was backfilled.
+
         Returns:
-            updated context object
+            The updated context object.
         """
         room_version = await self.store.get_room_version_id(event.room_id)
         room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

+        if not auth_events:
+            prev_state_ids = await context.get_prev_state_ids()
+            auth_events_ids = self.auth.compute_auth_events(
+                event, prev_state_ids, for_verification=True
+            )
+            auth_events_x = await self.store.get_events(auth_events_ids)
+            auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
+
+            # This is a hack to fix some old rooms where the initial join event
+            # didn't reference the create event in its auth events.
+            if event.type == EventTypes.Member and not event.auth_event_ids():
+                if len(event.prev_event_ids()) == 1 and event.depth < 5:
+                    c = await self.store.get_event(
+                        event.prev_event_ids()[0], allow_none=True
+                    )
+                    if c and c.type == EventTypes.Create:
+                        auth_events[(c.type, c.state_key)] = c
+
         try:
             context = await self._update_auth_events_and_context_for_auth(
                 origin, event, context, auth_events
@@ -2383,6 +2422,19 @@ async def do_auth(
             logger.warning("Failed auth resolution for %r because %s", event, e)
             context.rejected = RejectedReason.AUTH_ERROR

+        if not context.rejected:
+            await self._check_for_soft_fail(event, state, backfilled)
+
+        if event.type == EventTypes.GuestAccess and not context.rejected:
+            await self.maybe_kick_guest_users(event)
+
+        # If we are going to send this event over federation we precalculate
+        # the joined hosts.
+        if event.internal_metadata.get_send_on_behalf_of():
+            await self.event_creation_handler.cache_joined_hosts_for_event(
+                event, context
+            )
+
         return context

     async def _update_auth_events_and_context_for_auth(
@@ -2392,7 +2444,7 @@ async def _update_auth_events_and_context_for_auth(
         context: EventContext,
         auth_events: MutableStateMap[EventBase],
     ) -> EventContext:
-        """Helper for do_auth. See there for docs.
+        """Helper for _check_event_auth. See there for docs.

         Checks whether a given event has the expected auth events.
If it doesn't then we talk to the remote server to compare state to see if @@ -2472,9 +2524,14 @@ async def _update_auth_events_and_context_for_auth( e.internal_metadata.outlier = True logger.debug( - "do_auth %s missing_auth: %s", event.event_id, e.event_id + "_check_event_auth %s missing_auth: %s", + event.event_id, + e.event_id, + ) + context = await self.state_handler.compute_event_context(e) + await self._auth_and_persist_event( + origin, e, context, auth_events=auth ) - await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e @@ -2617,8 +2674,8 @@ async def construct_auth_difference( assumes that we have already processed all events in remote_auth Params: - local_auth (list) - remote_auth (list) + local_auth + remote_auth Returns: dict @@ -2742,8 +2799,8 @@ def get_next(it, opt=None): @log_function async def exchange_third_party_invite( - self, sender_user_id, target_user_id, room_id, signed - ): + self, sender_user_id: str, target_user_id: str, room_id: str, signed: JsonDict + ) -> None: third_party_invite = {"signed": signed} event_dict = { @@ -2835,8 +2892,12 @@ async def on_exchange_third_party_invite_request( await member_handler.send_membership_event(None, event, context) async def add_display_name_to_third_party_invite( - self, room_version, event_dict, event, context - ): + self, + room_version: str, + event_dict: JsonDict, + event: EventBase, + context: EventContext, + ) -> Tuple[EventBase, EventContext]: key = ( EventTypes.ThirdPartyInvite, event.content["third_party_invite"]["signed"]["token"], @@ -2872,13 +2933,13 @@ async def add_display_name_to_third_party_invite( EventValidator().validate_new(event, self.config) return (event, context) - async def _check_signature(self, event, context): + async def _check_signature(self, event: EventBase, context: EventContext) -> None: """ Checks that the signature in the event is consistent with its invite. Args: - event (Event): The m.room.member event to check - context (EventContext): + event: The m.room.member event to check + context: Raises: AuthError: if signature didn't match any keys, or key has been @@ -2908,7 +2969,7 @@ async def _check_signature(self, event, context): try: # for each sig on the third_party_invite block of the actual invite for server, signature_block in signed["signatures"].items(): - for key_name, encoded_signature in signature_block.items(): + for key_name in signature_block.keys(): if not key_name.startswith("ed25519:"): continue @@ -2964,13 +3025,13 @@ async def _check_signature(self, event, context): raise last_exception - async def _check_key_revocation(self, public_key, url): + async def _check_key_revocation(self, public_key: str, url: str) -> None: """ Checks whether public_key has been revoked. Args: - public_key (str): base-64 encoded public key. - url (str): Key revocation URL. + public_key: base-64 encoded public key. + url: Key revocation URL. Raises: AuthError: if they key has been revoked. 
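The join gating added to `on_send_join_request` earlier in this file boils down to a small predicate, which may be easier to review in isolation. This is a sketch rather than part of the patch; `EventTypes` and `Membership` are the constants the patch itself uses, while `store` and `prev_state_ids` stand in for the real objects:

    from synapse.api.constants import EventTypes, Membership

    async def must_check_space_membership(store, prev_state_ids, user_id: str) -> bool:
        # The space-membership check has to run only if the user is neither
        # already joined nor holding an invite.
        prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
        if prev_member_event_id is None:
            # No prior membership event at all: treat as newly joining.
            return True
        prev_member_event = await store.get_event(prev_member_event_id)
        newly_joined = prev_member_event.membership != Membership.JOIN
        user_is_invited = prev_member_event.membership == Membership.INVITE
        return newly_joined and not user_is_invited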
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index a41ca5df9c9c..157f2ff2189b 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 5f346f6d6d28..33d16fbf9c36 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd @@ -16,10 +15,9 @@ # limitations under the License. """Utilities for interacting with Identity Servers""" - import logging import urllib.parse -from typing import Awaitable, Callable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple from synapse.api.errors import ( CodeMessageException, @@ -35,17 +33,24 @@ from synapse.types import JsonDict, Requester from synapse.util import json_decoder from synapse.util.hash import sha256_and_url_safe_base64 -from synapse.util.stringutils import assert_valid_client_secret, random_string +from synapse.util.stringutils import ( + assert_valid_client_secret, + random_string, + valid_id_server_location, +) from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) id_server_scheme = "https://" class IdentityHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) # An HTTP client for contacting trusted URLs. @@ -61,22 +66,24 @@ def __init__(self, hs): # Ratelimiters for `/requestToken` endpoints. self._3pid_validation_ratelimiter_ip = Ratelimiter( + store=self.store, clock=hs.get_clock(), rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second, burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count, ) self._3pid_validation_ratelimiter_address = Ratelimiter( + store=self.store, clock=hs.get_clock(), rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second, burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count, ) - def ratelimit_request_token_requests( + async def ratelimit_request_token_requests( self, request: SynapseRequest, medium: str, address: str, - ): + ) -> None: """Used to ratelimit requests to `/requestToken` by IP and address. Args: @@ -85,8 +92,12 @@ def ratelimit_request_token_requests( address: The actual threepid ID, e.g. the phone number or email address """ - self._3pid_validation_ratelimiter_ip.ratelimit((medium, request.getClientIP())) - self._3pid_validation_ratelimiter_address.ratelimit((medium, address)) + await self._3pid_validation_ratelimiter_ip.ratelimit( + None, (medium, request.getClientIP()) + ) + await self._3pid_validation_ratelimiter_address.ratelimit( + None, (medium, address) + ) async def threepid_from_creds( self, id_server: str, creds: Dict[str, str] @@ -167,6 +178,11 @@ async def bind_threepid( server with, if necessary. Required if use_v2 is true use_v2: Whether to use v2 Identity Service API endpoints. 
Defaults to True + Raises: + SynapseError: On any of the following conditions + - the supplied id_server is not a valid identity server name + - we failed to contact the supplied identity server + Returns: The response from the identity server """ @@ -176,6 +192,12 @@ async def bind_threepid( if id_access_token is None: use_v2 = False + if not valid_id_server_location(id_server): + raise SynapseError( + 400, + "id_server must be a valid hostname with optional port and path components", + ) + # Decide which API endpoint URLs to use headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} @@ -264,12 +286,21 @@ async def try_unbind_threepid_with_id_server( id_server: Identity server to unbind from Raises: - SynapseError: If we failed to contact the identity server + SynapseError: On any of the following conditions + - the supplied id_server is not a valid identity server name + - we failed to contact the supplied identity server Returns: True on success, otherwise False if the identity server doesn't support unbinding """ + + if not valid_id_server_location(id_server): + raise SynapseError( + 400, + "id_server must be a valid hostname with optional port and path components", + ) + url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii") diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 13f8152283f2..76242865ae28 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1b7c065b34e1..9f365eb5ad5a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -16,10 +15,11 @@ # limitations under the License. 
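`valid_id_server_location` itself is not shown in this diff; from the call sites above it evidently accepts a bare hostname with optional port and path components. The following is a rough approximation of such a check, explicitly an assumption rather than the real implementation:

    from urllib.parse import urlparse

    def looks_like_valid_id_server(id_server: str) -> bool:
        # Reject anything carrying its own scheme.
        if "://" in id_server:
            return False
        # Prepend a dummy scheme so urlparse treats the value as netloc+path.
        try:
            parsed = urlparse("https://" + id_server)
            hostname = parsed.hostname
        except ValueError:
            return False
        # Require a host; disallow query strings and fragments.
        return bool(hostname) and not parsed.query and not parsed.fragment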
import logging import random -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple from canonicaljson import encode_canonical_json +from twisted.internet import defer from twisted.internet.interfaces import IDelayedCall from synapse import event_auth @@ -44,14 +44,15 @@ from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator -from synapse.logging.context import run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester -from synapse.util import json_decoder, json_encoder -from synapse.util.async_helpers import Linearizer +from synapse.util import json_decoder, json_encoder, log_failure +from synapse.util.async_helpers import Linearizer, unwrapFirstError +from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client @@ -67,7 +68,7 @@ class MessageHandler: """Contains some read only APIs to get state about a room""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -92,7 +93,7 @@ async def get_room_data( room_id: str, event_type: str, state_key: str, - ) -> dict: + ) -> Optional[EventBase]: """Get data from a room. Args: @@ -116,6 +117,10 @@ async def get_room_data( data = await self.state.get_current_state(room_id, event_type, state_key) elif membership == Membership.LEAVE: key = (event_type, state_key) + # If the membership is not JOIN, then the event ID should exist. + assert ( + membership_event_id is not None + ), "check_user_in_room_or_world_readable returned invalid data" room_state = await self.state_store.get_state_for_events( [membership_event_id], StateFilter.from_types([key]) ) @@ -137,7 +142,7 @@ async def get_state_events( self, user_id: str, room_id: str, - state_filter: StateFilter = StateFilter.all(), + state_filter: Optional[StateFilter] = None, at_token: Optional[StreamToken] = None, is_guest: bool = False, ) -> List[dict]: @@ -164,6 +169,8 @@ async def get_state_events( AuthError (403) if the user doesn't have permission to view members of this room. """ + state_filter = state_filter or StateFilter.all() + if at_token: # FIXME this claims to get the state at a stream position, but # get_recent_events_for_room operates by topo ordering. 
This therefore @@ -185,10 +192,12 @@ async def get_state_events( event = last_events[0] if visible_events: - room_state = await self.state_store.get_state_for_events( + room_state_events = await self.state_store.get_state_for_events( [event.event_id], state_filter=state_filter ) - room_state = room_state[event.event_id] + room_state = room_state_events[ + event.event_id + ] # type: Mapping[Any, EventBase] else: raise AuthError( 403, @@ -209,10 +218,14 @@ async def get_state_events( ) room_state = await self.store.get_events(state_ids.values()) elif membership == Membership.LEAVE: - room_state = await self.state_store.get_state_for_events( + # If the membership is not JOIN, then the event ID should exist. + assert ( + membership_event_id is not None + ), "check_user_in_room_or_world_readable returned invalid data" + room_state_events = await self.state_store.get_state_for_events( [membership_event_id], state_filter=state_filter ) - room_state = room_state[membership_event_id] + room_state = room_state_events[membership_event_id] now = self.clock.time_msec() events = await self._event_serializer.serialize_events( @@ -247,7 +260,7 @@ async def get_joined_members(self, requester: Requester, room_id: str) -> dict: "Getting joined members after leaving is not implemented" ) - users_with_profile = await self.state.get_current_users_in_room(room_id) + users_with_profile = await self.store.get_users_in_room_with_profiles(room_id) # If this is an AS, double check that they are allowed to see the members. # This can either be because the AS user is in the room or because there @@ -385,7 +398,7 @@ def __init__(self, hs: "HomeServer"): self._events_shard_config = self.config.worker.events_shard_config self._instance_name = hs.get_instance_name() - self.room_invite_state_types = self.hs.config.room_invite_state_types + self.room_invite_state_types = self.hs.config.api.room_prejoin_state self.membership_types_to_include_profile_data_in = ( {Membership.JOIN, Membership.INVITE} @@ -446,6 +459,19 @@ def __init__(self, hs: "HomeServer"): self._external_cache = hs.get_external_cache() + # Stores the state groups we've recently added to the joined hosts + # external cache. Note that the timeout must be significantly less than + # the TTL on the external cache. + self._external_cache_joined_hosts_updates = ( + None + ) # type: Optional[ExpiringCache] + if self._external_cache.is_enabled(): + self._external_cache_joined_hosts_updates = ExpiringCache( + "_external_cache_joined_hosts_updates", + self.clock, + expiry_ms=30 * 60 * 1000, + ) + async def create_event( self, requester: Requester, @@ -874,7 +900,7 @@ async def handle_new_client_event( event: EventBase, context: EventContext, ratelimit: bool = True, - extra_users: List[UserID] = [], + extra_users: Optional[List[UserID]] = None, ignore_shadow_ban: bool = False, ) -> EventBase: """Processes a new event. @@ -902,6 +928,7 @@ async def handle_new_client_event( Raises: ShadowBanError if the requester has been shadow-banned. """ + extra_users = extra_users or [] # we don't apply shadow-banning to membership events here. Invites are blocked # higher up the stack, and we allow shadow-banned users to send join and leave @@ -953,9 +980,43 @@ async def handle_new_client_event( logger.exception("Failed to encode content: %r", event.content) raise - await self.action_generator.handle_push_actions_for_event(event, context) + # We now persist the event (and update the cache in parallel, since we + # don't want to block on it). 
+ result = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._persist_event, + requester=requester, + event=event, + context=context, + ratelimit=ratelimit, + extra_users=extra_users, + ), + run_in_background( + self.cache_joined_hosts_for_event, event, context + ).addErrback(log_failure, "cache_joined_hosts_for_event failed"), + ], + consumeErrors=True, + ) + ).addErrback(unwrapFirstError) - await self.cache_joined_hosts_for_event(event) + return result[0] + + async def _persist_event( + self, + requester: Requester, + event: EventBase, + context: EventContext, + ratelimit: bool = True, + extra_users: Optional[List[UserID]] = None, + ) -> EventBase: + """Actually persists the event. Should only be called by + `handle_new_client_event`, and see its docstring for documentation of + the arguments. + """ + + await self.action_generator.handle_push_actions_for_event(event, context) try: # If we're a worker we need to hit out to the master. @@ -996,7 +1057,9 @@ async def handle_new_client_event( await self.store.remove_push_actions_from_staging(event.event_id) raise - async def cache_joined_hosts_for_event(self, event: EventBase) -> None: + async def cache_joined_hosts_for_event( + self, event: EventBase, context: EventContext + ) -> None: """Precalculate the joined hosts at the event, when using Redis, so that external federation senders don't have to recalculate it themselves. """ @@ -1004,6 +1067,9 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: if not self._external_cache.is_enabled(): return + # If external cache is enabled we should always have this. + assert self._external_cache_joined_hosts_updates is not None + # We actually store two mappings, event ID -> prev state group, # state group -> joined hosts, which is much more space efficient # than event ID -> joined hosts. @@ -1011,22 +1077,28 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: # Note: We have to cache event ID -> prev state group, as we don't # store that in the DB. # - # Note: We always set the state group -> joined hosts cache, even if - # we already set it, so that the expiry time is reset. + # Note: We set the state group -> joined hosts cache if it hasn't been + # set for a while, so that the expiry time is reset. state_entry = await self.state.resolve_state_groups_for_events( event.room_id, event_ids=event.prev_event_ids() ) if state_entry.state_group: - joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry) - await self._external_cache.set( "event_to_prev_state_group", event.event_id, state_entry.state_group, expiry_ms=60 * 60 * 1000, ) + + if state_entry.state_group in self._external_cache_joined_hosts_updates: + return + + joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry) + + # Note that the expiry times must be larger than the expiry time in + # _external_cache_joined_hosts_updates. 
await self._external_cache.set( "get_joined_hosts", str(state_entry.state_group), @@ -1034,6 +1106,8 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: expiry_ms=60 * 60 * 1000, ) + self._external_cache_joined_hosts_updates[state_entry.state_group] = None + async def _validate_canonical_alias( self, directory_handler, room_alias_str: str, expected_room_id: str ) -> None: @@ -1071,7 +1145,7 @@ async def persist_and_notify_client_event( event: EventBase, context: EventContext, ratelimit: bool = True, - extra_users: List[UserID] = [], + extra_users: Optional[List[UserID]] = None, ) -> EventBase: """Called when we have fully built the event, have already calculated the push actions for the event, and checked auth. @@ -1083,6 +1157,8 @@ async def persist_and_notify_client_event( it was de-duplicated (e.g. because we had already persisted an event with the same transaction ID.) """ + extra_users = extra_users or [] + assert self.storage.persistence is not None assert self._events_shard_config.should_handle( self._instance_name, event.room_id diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc.py similarity index 98% rename from synapse/handlers/oidc_handler.py rename to synapse/handlers/oidc.py index 6624212d6ff2..ee6e41c0e4d9 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # Copyright 2021 The Matrix.org Foundation C.I.C. # @@ -16,7 +15,7 @@ import inspect import logging from typing import TYPE_CHECKING, Dict, Generic, List, Optional, TypeVar, Union -from urllib.parse import urlencode +from urllib.parse import urlencode, urlparse import attr import pymacaroons @@ -38,10 +37,7 @@ from twisted.web.http_headers import Headers from synapse.config import ConfigError -from synapse.config.oidc_config import ( - OidcProviderClientSecretJwtKey, - OidcProviderConfig, -) +from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig from synapse.handlers.sso import MappingException, UserAttributes from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable @@ -72,8 +68,8 @@ # # Here we have the names of the cookies, and the options we use to set them. _SESSION_COOKIES = [ - (b"oidc_session", b"Path=/_synapse/client/oidc; HttpOnly; Secure; SameSite=None"), - (b"oidc_session_no_samesite", b"Path=/_synapse/client/oidc; HttpOnly"), + (b"oidc_session", b"HttpOnly; Secure; SameSite=None"), + (b"oidc_session_no_samesite", b"HttpOnly"), ] #: A token exchanged from the token endpoint, as per RFC6749 sec 5.1. and @@ -283,6 +279,13 @@ def __init__( self._config = provider self._callback_url = hs.config.oidc_callback_url # type: str + # Calculate the prefix for OIDC callback paths based on the public_baseurl. + # We'll insert this into the Path= parameter of any session cookies we set. 
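Concretely, the path computation this comment introduces (the code follows in the next hunk) behaves as below. The example base URL is hypothetical, and this assumes public_baseurl is normalised elsewhere in the config to end with a trailing slash:

    from urllib.parse import urlparse

    def oidc_cookie_path_prefix(public_baseurl: str) -> bytes:
        # With a trailing "/" on public_baseurl's path, plain concatenation
        # yields a well-formed cookie Path attribute.
        path = urlparse(public_baseurl).path
        return path.encode("utf-8") + b"_synapse/client/oidc"

    # e.g. oidc_cookie_path_prefix("https://example.com/matrix/")
    #      == b"/matrix/_synapse/client/oidc"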
+ public_baseurl_path = urlparse(hs.config.server.public_baseurl).path + self._callback_path_prefix = ( + public_baseurl_path.encode("utf-8") + b"_synapse/client/oidc" + ) + self._oidc_attribute_requirements = provider.attribute_requirements self._scopes = provider.scopes self._user_profile_method = provider.user_profile_method @@ -783,8 +786,13 @@ async def handle_redirect_request( for cookie_name, options in _SESSION_COOKIES: request.cookies.append( - b"%s=%s; Max-Age=3600; %s" - % (cookie_name, cookie.encode("utf-8"), options) + b"%s=%s; Max-Age=3600; Path=%s; %s" + % ( + cookie_name, + cookie.encode("utf-8"), + self._callback_path_prefix, + options, + ) ) metadata = await self.load_metadata() @@ -961,6 +969,11 @@ async def grandfather_existing_users() -> Optional[str]: # and attempt to match it. attributes = await oidc_response_to_user_attributes(failures=0) + if attributes.localpart is None: + # If no localpart is returned then we will generate one, so + # there is no need to search for existing users. + return None + user_id = UserID(attributes.localpart, self._server_name).to_string() users = await self._store.get_users_by_id_case_insensitive(user_id) if users: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 66dc886c8100..1e1186c29e7d 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2017 - 2018 New Vector Ltd # diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py index 92cefa11aadc..cd21efdcc642 100644 --- a/synapse/handlers/password_policy.py +++ b/synapse/handlers/password_policy.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index da92feacc9eb..6fd1f34289f8 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
# @@ -23,9 +22,23 @@ - should_notify """ import abc +import contextlib import logging +from bisect import bisect from contextlib import contextmanager -from typing import TYPE_CHECKING, Dict, Iterable, List, Set, Tuple +from typing import ( + TYPE_CHECKING, + Callable, + Collection, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) from prometheus_client import Counter from typing_extensions import ContextManager @@ -34,15 +47,22 @@ from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState +from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.state import StateHandler +from synapse.replication.http.presence import ( + ReplicationBumpPresenceActiveTime, + ReplicationPresenceSetState, +) +from synapse.replication.http.streams import ReplicationGetStreamUpdates +from synapse.replication.tcp.commands import ClearUserSyncsCommand +from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream from synapse.storage.databases.main import DataStore -from synapse.types import Collection, JsonDict, UserID, get_domain_from_id +from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -94,15 +114,29 @@ # are dead. EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000 +# Delay before a worker tells the presence handler that a user has stopped +# syncing. +UPDATE_SYNCING_USERS_MS = 10 * 1000 + assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER class BasePresenceHandler(abc.ABC): - """Parts of the PresenceHandler that are shared between workers and master""" + """Parts of the PresenceHandler that are shared between workers and presence + writer""" def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.store = hs.get_datastore() + self.presence_router = hs.get_presence_router() + self.state = hs.get_state_handler() + self.is_mine_id = hs.is_mine_id + + self._federation = None + if hs.should_send_federation(): + self._federation = hs.get_federation_sender() + + self._federation_queue = PresenceFederationQueue(hs, self) self._busy_presence_enabled = hs.config.experimental.msc3026_enabled @@ -198,17 +232,306 @@ async def bump_presence_active_time(self, user: UserID): with the app. """ + async def update_external_syncs_row( + self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + ) -> None: + """Update the syncing users for an external process as a delta. + + This is a no-op when presence is handled by a different worker. + + Args: + process_id: An identifier for the process the users are + syncing against. This allows synapse to process updates + as user start and stop syncing against a given process. + user_id: The user who has started or stopped syncing + is_syncing: Whether or not the user is now syncing + sync_time_msec: Time in ms when the user was last syncing + """ + pass + + async def update_external_syncs_clear(self, process_id: str) -> None: + """Marks all users that had been marked as syncing by a given process + as offline. 
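A structural note on the hunks above: the `update_external_syncs_*` methods are deliberate no-ops on the base class so that every process can call them unconditionally, with only the presence writer overriding them. Reduced to its skeleton (hypothetical class names, not the patch's):

    import abc

    class Base(abc.ABC):
        async def update_external_syncs_clear(self, process_id: str) -> None:
            # No-op unless this process is the presence writer.
            pass

    class Writer(Base):
        def __init__(self) -> None:
            self._syncs_by_process: dict = {}

        async def update_external_syncs_clear(self, process_id: str) -> None:
            # The writer actually forgets the process's syncing users.
            self._syncs_by_process.pop(process_id, None)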
+ + Used when the process has stopped/disappeared. + + This is a no-op when presence is handled by a different worker. + """ + pass + + async def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: list + ): + """Process streams received over replication.""" + await self._federation_queue.process_replication_rows( + stream_name, instance_name, token, rows + ) + + def get_federation_queue(self) -> "PresenceFederationQueue": + """Get the presence federation queue.""" + return self._federation_queue + + async def maybe_send_presence_to_interested_destinations( + self, states: List[UserPresenceState] + ): + """If this instance is a federation sender, send the states to all + destinations that are interested. Filters out any states for remote + users. + """ + + if not self._federation: + return + + states = [s for s in states if self.is_mine_id(s.user_id)] + + if not states: + return + + hosts_and_states = await get_interested_remotes( + self.store, + self.presence_router, + states, + ) + + for destinations, states in hosts_and_states: + self._federation.send_presence_to_destinations(states, destinations) + + +class _NullContextManager(ContextManager[None]): + """A context manager which does nothing.""" + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + +class WorkerPresenceHandler(BasePresenceHandler): + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + self.hs = hs + + self._presence_writer_instance = hs.config.worker.writers.presence[0] + + self._presence_enabled = hs.config.use_presence + + # Route presence EDUs to the right worker + hs.get_federation_registry().register_instances_for_edu( + "m.presence", + hs.config.worker.writers.presence, + ) + + # The number of ongoing syncs on this process, by user id. + # Empty if _presence_enabled is false. + self._user_to_num_current_syncs = {} # type: Dict[str, int] + + self.notifier = hs.get_notifier() + self.instance_id = hs.get_instance_id() + + # user_id -> last_sync_ms. Lists the users that have stopped syncing but + # we haven't notified the presence writer of that yet + self.users_going_offline = {} # type: Dict[str, int] + + self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) + self._set_state_client = ReplicationPresenceSetState.make_client(hs) + + self._send_stop_syncing_loop = self.clock.looping_call( + self.send_stop_syncing, UPDATE_SYNCING_USERS_MS + ) + + self._busy_presence_enabled = hs.config.experimental.msc3026_enabled + + hs.get_reactor().addSystemEventTrigger( + "before", + "shutdown", + run_as_background_process, + "generic_presence.on_shutdown", + self._on_shutdown, + ) + + def _on_shutdown(self) -> None: + if self._presence_enabled: + self.hs.get_tcp_replication().send_command( + ClearUserSyncsCommand(self.instance_id) + ) + + def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: + if self._presence_enabled: + self.hs.get_tcp_replication().send_user_sync( + self.instance_id, user_id, is_syncing, last_sync_ms + ) + + def mark_as_coming_online(self, user_id: str) -> None: + """A user has started syncing. Send a UserSync to the presence writer, + unless they had recently stopped syncing. + """ + going_offline = self.users_going_offline.pop(user_id, None) + if not going_offline: + # Safe to skip because we haven't yet told the presence writer they + # were offline + self.send_user_sync(user_id, True, self.clock.time_msec()) + + def mark_as_going_offline(self, user_id: str) -> None: + """A user has stopped syncing. 
We wait before notifying the presence
+        writer as it's likely they'll come back soon. This allows us to avoid
+        sending a stopped syncing immediately followed by a started syncing
+        notification to the presence writer
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self) -> None:
+        """Check if there are any users who have stopped syncing a while ago and
+        haven't come back yet. If there are, poke the presence writer about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in list(self.users_going_offline.items()):
+            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)
+
+    async def user_syncing(
+        self, user_id: str, affect_presence: bool
+    ) -> ContextManager[None]:
+        """Record that a user is syncing.
+
+        Called by the sync and events servlets to record that a user has connected to
+        this worker and is waiting for some events.
+        """
+        if not affect_presence or not self._presence_enabled:
+            return _NullContextManager()
+
+        curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
+        self._user_to_num_current_syncs[user_id] = curr_sync + 1
+
+        # If we went from no in flight sync to some, notify replication
+        if self._user_to_num_current_syncs[user_id] == 1:
+            self.mark_as_coming_online(user_id)
+
+        def _end():
+            # We check that the user_id is in user_to_num_current_syncs because
+            # user_to_num_current_syncs may have been cleared if we are
+            # shutting down.
+            if user_id in self._user_to_num_current_syncs:
+                self._user_to_num_current_syncs[user_id] -= 1
+
+                # If we went from one in flight sync to none, notify replication
+                if self._user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
+        @contextlib.contextmanager
+        def _user_syncing():
+            try:
+                yield
+            finally:
+                _end()
+
+        return _user_syncing()
+
+    async def notify_from_replication(
+        self, states: List[UserPresenceState], stream_id: int
+    ) -> None:
+        parties = await get_interested_parties(self.store, self.presence_router, states)
+        room_ids_to_states, users_to_states = parties
+
+        self.notifier.on_new_event(
+            "presence_key",
+            stream_id,
+            rooms=room_ids_to_states.keys(),
+            users=users_to_states.keys(),
+        )
+
+        # If this is a federation sender, notify about presence updates.
+ await self.maybe_send_presence_to_interested_destinations(states) + + async def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: list + ): + await super().process_replication_rows(stream_name, instance_name, token, rows) + + if stream_name != PresenceStream.NAME: + return + + states = [ + UserPresenceState( + row.user_id, + row.state, + row.last_active_ts, + row.last_federation_update_ts, + row.last_user_sync_ts, + row.status_msg, + row.currently_active, + ) + for row in rows + ] + + for state in states: + self.user_to_current_state[state.user_id] = state + + stream_id = token + await self.notify_from_replication(states, stream_id) + + def get_currently_syncing_users_for_replication(self) -> Iterable[str]: + return [ + user_id + for user_id, count in self._user_to_num_current_syncs.items() + if count > 0 + ] + + async def set_state( + self, + target_user: UserID, + state: JsonDict, + ignore_status_msg: bool = False, + ) -> None: + """Set the presence state of the user.""" + presence = state["presence"] + + valid_presence = ( + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + PresenceState.BUSY, + ) + + if presence not in valid_presence or ( + presence == PresenceState.BUSY and not self._busy_presence_enabled + ): + raise SynapseError(400, "Invalid presence state") + + user_id = target_user.to_string() + + # If presence is disabled, no-op + if not self.hs.config.use_presence: + return + + # Proxy request to instance that writes presence + await self._set_state_client( + instance_name=self._presence_writer_instance, + user_id=user_id, + state=state, + ignore_status_msg=ignore_status_msg, + ) + + async def bump_presence_active_time(self, user: UserID) -> None: + """We've seen the user do something that indicates they're interacting + with the app. + """ + # If presence is disabled, no-op + if not self.hs.config.use_presence: + return + + # Proxy request to instance that writes presence + user_id = user.to_string() + await self._bump_active_client( + instance_name=self._presence_writer_instance, user_id=user_id + ) + class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs - self.is_mine_id = hs.is_mine_id self.server_name = hs.hostname self.wheel_timer = WheelTimer() self.notifier = hs.get_notifier() - self.federation = hs.get_federation_sender() - self.state = hs.get_state_handler() self._presence_enabled = hs.config.use_presence federation_registry = hs.get_federation_registry() @@ -271,8 +594,8 @@ def __init__(self, hs: "HomeServer"): # we assume that all the sync requests on that process have stopped. # Stored as a dict from process_id to set of user_id, and a dict of # process_id to millisecond timestamp last updated. - self.external_process_to_current_syncs = {} # type: Dict[int, Set[str]] - self.external_process_last_updated_ms = {} # type: Dict[int, int] + self.external_process_to_current_syncs = {} # type: Dict[str, Set[str]] + self.external_process_last_updated_ms = {} # type: Dict[str, int] self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") @@ -312,7 +635,7 @@ def run_persister(): self._event_pos = self.store.get_current_events_token() self._event_processing = False - async def _on_shutdown(self): + async def _on_shutdown(self) -> None: """Gets called when shutting down. This lets us persist any updates that we haven't yet persisted, e.g. updates that only changes some internal timers. 
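The worker-side `set_state` above illustrates the new split well: validate locally, then proxy the write to the presence writer. Sketched with the patch's own names (the replication client's call shape is taken from the hunk above):

    from synapse.api.constants import PresenceState
    from synapse.api.errors import SynapseError

    VALID_PRESENCE = (
        PresenceState.ONLINE,
        PresenceState.UNAVAILABLE,
        PresenceState.OFFLINE,
        PresenceState.BUSY,
    )

    async def worker_set_state(
        handler, target_user, state: dict, ignore_status_msg: bool = False
    ) -> None:
        presence = state["presence"]
        # BUSY is only accepted behind the experimental MSC3026 flag.
        if presence not in VALID_PRESENCE or (
            presence == PresenceState.BUSY and not handler._busy_presence_enabled
        ):
            raise SynapseError(400, "Invalid presence state")
        if not handler.hs.config.use_presence:
            return  # presence disabled: silently no-op
        # Ship the write to the single presence-writer instance.
        await handler._set_state_client(
            instance_name=handler._presence_writer_instance,
            user_id=target_user.to_string(),
            state=state,
            ignore_status_msg=ignore_status_msg,
        )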
This allows changes to persist across startup without having to @@ -341,7 +664,7 @@ async def _on_shutdown(self): ) logger.info("Finished _on_shutdown") - async def _persist_unpersisted_changes(self): + async def _persist_unpersisted_changes(self) -> None: """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. """ @@ -415,6 +738,13 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: self.unpersisted_users_changes |= {s.user_id for s in new_states} self.unpersisted_users_changes -= set(to_notify.keys()) + # Check if we need to resend any presence states to remote hosts. We + # only do this for states that haven't been updated in a while to + # ensure that the remote host doesn't time the presence state out. + # + # Note that since these are states that have *not* been updated, + # they won't get sent down the normal presence replication stream, + # and so we have to explicitly send them via the federation stream. to_federation_ping = { user_id: state for user_id, state in to_federation_ping.items() @@ -423,9 +753,18 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: if to_federation_ping: federation_presence_out_counter.inc(len(to_federation_ping)) - self._push_to_remotes(to_federation_ping.values()) + hosts_and_states = await get_interested_remotes( + self.store, + self.presence_router, + list(to_federation_ping.values()), + ) - async def _handle_timeouts(self): + for destinations, states in hosts_and_states: + self._federation_queue.send_presence_to_destinations( + states, destinations + ) + + async def _handle_timeouts(self) -> None: """Checks the presence of users that have timed out and updates as appropriate. """ @@ -477,7 +816,7 @@ async def _handle_timeouts(self): return await self._update_states(changes) - async def bump_presence_active_time(self, user): + async def bump_presence_active_time(self, user: UserID) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -574,17 +913,17 @@ def get_currently_syncing_users_for_replication(self) -> Iterable[str]: return [] async def update_external_syncs_row( - self, process_id, user_id, is_syncing, sync_time_msec - ): + self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + ) -> None: """Update the syncing users for an external process as a delta. Args: - process_id (str): An identifier for the process the users are + process_id: An identifier for the process the users are syncing against. This allows synapse to process updates as user start and stop syncing against a given process. - user_id (str): The user who has started or stopped syncing - is_syncing (bool): Whether or not the user is now syncing - sync_time_msec(int): Time in ms when the user was last syncing + user_id: The user who has started or stopped syncing + is_syncing: Whether or not the user is now syncing + sync_time_msec: Time in ms when the user was last syncing """ with (await self.external_sync_linearizer.queue(process_id)): prev_state = await self.current_state_for_user(user_id) @@ -621,7 +960,7 @@ async def update_external_syncs_row( self.external_process_last_updated_ms[process_id] = self.clock.time_msec() - async def update_external_syncs_clear(self, process_id): + async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process as offline. 
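The federation "ping" above deserves a gloss: presence rows whose content has not changed never traverse the replication stream, so remote hosts could eventually expire them. The filter condition itself is elided between hunks; it presumably keeps only rows that have gone unrefreshed for a while. A sketch, with the threshold constant assumed:

    from typing import Dict

    # Assumed keep-alive threshold; the real constant lives in this module.
    FEDERATION_PING_INTERVAL_MS = 25 * 60 * 1000

    def stale_states_to_ping(states: Dict[str, object], now_ms: int) -> Dict[str, object]:
        # Re-send only rows old enough that remotes may be about to time
        # them out; fresher rows reach remotes through the normal paths.
        return {
            user_id: state
            for user_id, state in states.items()
            if now_ms - state.last_federation_update_ts > FEDERATION_PING_INTERVAL_MS
        }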
@@ -642,18 +981,18 @@ async def update_external_syncs_clear(self, process_id): ) self.external_process_last_updated_ms.pop(process_id, None) - async def current_state_for_user(self, user_id): + async def current_state_for_user(self, user_id: str) -> UserPresenceState: """Get the current presence state for a user.""" res = await self.current_state_for_users([user_id]) return res[user_id] - async def _persist_and_notify(self, states): + async def _persist_and_notify(self, states: List[UserPresenceState]) -> None: """Persist states in the database, poke the notifier and send to interested remote servers """ stream_id, max_token = await self.store.update_presence(states) - parties = await get_interested_parties(self.store, states) + parties = await get_interested_parties(self.store, self.presence_router, states) room_ids_to_states, users_to_states = parties self.notifier.on_new_event( @@ -663,17 +1002,12 @@ async def _persist_and_notify(self, states): users=[UserID.from_string(u) for u in users_to_states], ) - self._push_to_remotes(states) - - def _push_to_remotes(self, states): - """Sends state updates to remote servers. - - Args: - states (list(UserPresenceState)) - """ - self.federation.send_presence(states) + # We only want to poke the local federation sender, if any, as other + # workers will receive the presence updates via the presence replication + # stream (which is updated by `store.update_presence`). + await self.maybe_send_presence_to_interested_destinations(states) - async def incoming_presence(self, origin, content): + async def incoming_presence(self, origin: str, content: JsonDict) -> None: """Called when we receive a `m.presence` EDU from a remote server.""" if not self._presence_enabled: return @@ -723,7 +1057,9 @@ async def incoming_presence(self, origin, content): federation_presence_counter.inc(len(updates)) await self._update_states(updates) - async def set_state(self, target_user, state, ignore_status_msg=False): + async def set_state( + self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False + ) -> None: """Set the presence state of the user.""" status_msg = state.get("status_msg", None) presence = state["presence"] @@ -757,7 +1093,7 @@ async def set_state(self, target_user, state, ignore_status_msg=False): await self._update_states([prev_state.copy_and_replace(**new_fields)]) - async def is_visible(self, observed_user, observer_user): + async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool: """Returns whether a user can see another user's presence.""" observer_room_ids = await self.store.get_rooms_for_user( observer_user.to_string() @@ -812,7 +1148,7 @@ async def get_all_presence_updates( ) return rows - def notify_new_event(self): + def notify_new_event(self) -> None: """Called when new events have happened. Handles users and servers joining rooms and require being sent presence. 
""" @@ -831,7 +1167,7 @@ async def _process_presence(): run_as_background_process("presence.notify_new_event", _process_presence) - async def _unsafe_process(self): + async def _unsafe_process(self) -> None: # Loop round handling deltas until we're up to date while True: with Measure(self.clock, "presence_delta"): @@ -847,7 +1183,16 @@ async def _unsafe_process(self): max_pos, deltas = await self.store.get_current_state_deltas( self._event_pos, room_max_stream_ordering ) - await self._handle_state_delta(deltas) + + # We may get multiple deltas for different rooms, but we want to + # handle them on a room by room basis, so we batch them up by + # room. + deltas_by_room: Dict[str, List[JsonDict]] = {} + for delta in deltas: + deltas_by_room.setdefault(delta["room_id"], []).append(delta) + + for room_id, deltas_for_room in deltas_by_room.items(): + await self._handle_state_delta(room_id, deltas_for_room) self._event_pos = max_pos @@ -856,17 +1201,21 @@ async def _unsafe_process(self): max_pos ) - async def _handle_state_delta(self, deltas): - """Process current state deltas to find new joins that need to be - handled. + async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None: + """Process current state deltas for the room to find new joins that need + to be handled. """ - # A map of destination to a set of user state that they should receive - presence_destinations = {} # type: Dict[str, Set[UserPresenceState]] + + # Sets of newly joined users. Note that if the local server is + # joining a remote room for the first time we'll see both the joining + # user and all remote users as newly joined. + newly_joined_users = set() for delta in deltas: + assert room_id == delta["room_id"] + typ = delta["type"] state_key = delta["state_key"] - room_id = delta["room_id"] event_id = delta["event_id"] prev_event_id = delta["prev_event_id"] @@ -895,72 +1244,55 @@ async def _handle_state_delta(self, deltas): # Ignore changes to join events. continue - # Retrieve any user presence state updates that need to be sent as a result, - # and the destinations that need to receive it - destinations, user_presence_states = await self._on_user_joined_room( - room_id, state_key - ) - - # Insert the destinations and respective updates into our destinations dict - for destination in destinations: - presence_destinations.setdefault(destination, set()).update( - user_presence_states - ) - - # Send out user presence updates for each destination - for destination, user_state_set in presence_destinations.items(): - self.federation.send_presence_to_destinations( - destinations=[destination], states=user_state_set - ) - - async def _on_user_joined_room( - self, room_id: str, user_id: str - ) -> Tuple[List[str], List[UserPresenceState]]: - """Called when we detect a user joining the room via the current state - delta stream. Returns the destinations that need to be updated and the - presence updates to send to them. + newly_joined_users.add(state_key) - Args: - room_id: The ID of the room that the user has joined. - user_id: The ID of the user that has joined the room. - - Returns: - A tuple of destinations and presence updates to send to them. - """ - if self.is_mine_id(user_id): - # If this is a local user then we need to send their presence - # out to hosts in the room (who don't already have it) - - # TODO: We should be able to filter the hosts down to those that - # haven't previously seen the user - - remote_hosts = await self.state.get_current_hosts_in_room(room_id) - - # Filter out ourselves. 
- filtered_remote_hosts = [ - host for host in remote_hosts if host != self.server_name - ] - - state = await self.current_state_for_user(user_id) - return filtered_remote_hosts, [state] - else: - # A remote user has joined the room, so we need to: - # 1. Check if this is a new server in the room - # 2. If so send any presence they don't already have for - # local users in the room. - - # TODO: We should be able to filter the users down to those that - # the server hasn't previously seen - - # TODO: Check that this is actually a new server joining the - # room. - - remote_host = get_domain_from_id(user_id) + if not newly_joined_users: + # If nobody has joined then there's nothing to do. + return - users = await self.state.get_current_users_in_room(room_id) - user_ids = list(filter(self.is_mine_id, users)) + # We want to send: + # 1. presence states of all local users in the room to newly joined + # remote servers + # 2. presence states of newly joined users to all remote servers in + # the room. + # + # TODO: Only send presence states to remote hosts that don't already + # have them (because they already share rooms). + + # Get all the users who were already in the room, by fetching the + # current users in the room and removing the newly joined users. + users = await self.store.get_users_in_room(room_id) + prev_users = set(users) - newly_joined_users + + # Construct sets for all the local users and remote hosts that were + # already in the room + prev_local_users = [] + prev_remote_hosts = set() + for user_id in prev_users: + if self.is_mine_id(user_id): + prev_local_users.append(user_id) + else: + prev_remote_hosts.add(get_domain_from_id(user_id)) + + # Similarly, construct sets for all the local users and remote hosts + # that were *not* already in the room. Care needs to be taken when + # calculating the remote hosts, as a host may have already been in the + # room even if there is a newly joined user from that host. + newly_joined_local_users = [] + newly_joined_remote_hosts = set() + for user_id in newly_joined_users: + if self.is_mine_id(user_id): + newly_joined_local_users.append(user_id) + else: + host = get_domain_from_id(user_id) + if host not in prev_remote_hosts: + newly_joined_remote_hosts.add(host) - states_d = await self.current_state_for_users(user_ids) + # Send presence states of all local users in the room to newly joined + # remote servers. (We actually only send states for local users already + # in the room, as we'll send states for newly joined local users below.) + if prev_local_users and newly_joined_remote_hosts: + local_states = await self.current_state_for_users(prev_local_users) # Filter out old presence, i.e. offline presence states where # the user hasn't been active for a week.
We can change this @@ -970,16 +1302,30 @@ async def _on_user_joined_room( now = self.clock.time_msec() states = [ state - for state in states_d.values() + for state in local_states.values() if state.state != PresenceState.OFFLINE or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000 or state.status_msg is not None ] - return [remote_host], states + self._federation_queue.send_presence_to_destinations( + destinations=newly_joined_remote_hosts, + states=states, + ) + + # Send presence states of newly joined users to all remote servers in + # the room + if newly_joined_local_users and ( + prev_remote_hosts or newly_joined_remote_hosts + ): + local_states = await self.current_state_for_users(newly_joined_local_users) + self._federation_queue.send_presence_to_destinations( + destinations=prev_remote_hosts | newly_joined_remote_hosts, + states=list(local_states.values()), + ) -def should_notify(old_state, new_state): +def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool: """Decides if a presence state change should be sent to interested parties.""" if old_state == new_state: return False @@ -1015,7 +1361,9 @@ def should_notify(old_state, new_state): return False -def format_user_presence_state(state, now, include_user_id=True): +def format_user_presence_state( + state: UserPresenceState, now: int, include_user_id: bool = True +) -> JsonDict: """Convert UserPresenceState to a format that can be sent down to clients and to other servers. @@ -1041,21 +1389,25 @@ def __init__(self, hs: "HomeServer"): # # Presence -> Notifier -> PresenceEventSource -> Presence # + # Same with get_module_api, get_presence_router + # + # AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler self.get_presence_handler = hs.get_presence_handler + self.get_module_api = hs.get_module_api + self.get_presence_router = hs.get_presence_router self.clock = hs.get_clock() self.store = hs.get_datastore() - self.state = hs.get_state_handler() @log_function async def get_new_events( self, - user, - from_key, - room_ids=None, - include_offline=True, - explicit_room_id=None, - **kwargs - ): + user: UserID, + from_key: Optional[int], + room_ids: Optional[List[str]] = None, + include_offline: bool = True, + explicit_room_id: Optional[str] = None, + **kwargs, + ) -> Tuple[List[UserPresenceState], int]: # The process for getting presence events are: # 1. Get the rooms the user is in. # 2. Get the list of user in the rooms. @@ -1068,7 +1420,17 @@ async def get_new_events( # We don't try and limit the presence updates by the current token, as # sending down the rare duplicate is not a concern. + user_id = user.to_string() + stream_change_cache = self.store.presence_stream_cache + with Measure(self.clock, "presence.get_new_events"): + if user_id in self.get_module_api()._send_full_presence_to_local_users: + # This user has been specified by a module to receive all current, online + # user presence. Removing from_key and setting include_offline to false + # will do effectively this. + from_key = None + include_offline = False + if from_key is not None: from_key = int(from_key) @@ -1091,59 +1453,209 @@ async def get_new_events( # doesn't return. C.f. #5503. 
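The week-long cut-off in the filter above is easier to see in isolation. A sketch with a toy presence state (the real code uses UserPresenceState; the dataclass and values here are invented):

from dataclasses import dataclass
from typing import Iterable, List, Optional

WEEK_MS = 7 * 24 * 60 * 60 * 1000

@dataclass
class ToyPresence:
    state: str
    last_active_ts: int
    status_msg: Optional[str] = None

def filter_stale(states: Iterable[ToyPresence], now: int) -> List[ToyPresence]:
    # Keep anything that is online, was active in the last week, or carries
    # a status message worth forwarding.
    return [
        s
        for s in states
        if s.state != "offline"
        or now - s.last_active_ts < WEEK_MS
        or s.status_msg is not None
    ]

now = 10 * WEEK_MS
recent = ToyPresence("offline", now - 1)
stale = ToyPresence("offline", 0)
chatty = ToyPresence("offline", 0, "away sailing")
assert filter_stale([recent, stale, chatty], now) == [recent, chatty]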
return [], max_token - presence = self.get_presence_handler() - stream_change_cache = self.store.presence_stream_cache - + # Figure out which other users this user should receive updates for users_interested_in = await self._get_interested_in(user, explicit_room_id) - user_ids_changed = set() # type: Collection[str] - changed = None - if from_key: - changed = stream_change_cache.get_all_entities_changed(from_key) + # We have a set of users that we're interested in the presence of. We want to + # cross-reference that with the users that have actually changed their presence. - if changed is not None and len(changed) < 500: - assert isinstance(user_ids_changed, set) + # Check whether this user should see all user updates - # For small deltas, its quicker to get all changes and then - # work out if we share a room or they're in our presence list - get_updates_counter.labels("stream").inc() - for other_user_id in changed: - if other_user_id in users_interested_in: - user_ids_changed.add(other_user_id) - else: - # Too many possible updates. Find all users we can see and check - # if any of them have changed. - get_updates_counter.labels("full").inc() + if users_interested_in == PresenceRouter.ALL_USERS: + # Provide presence state for all users + presence_updates = await self._filter_all_presence_updates_for_user( + user_id, include_offline, from_key + ) - if from_key: - user_ids_changed = stream_change_cache.get_entities_changed( - users_interested_in, from_key + # Remove the user from the list of users to receive all presence + if user_id in self.get_module_api()._send_full_presence_to_local_users: + self.get_module_api()._send_full_presence_to_local_users.remove( + user_id ) + + return presence_updates, max_token + + # Make mypy happy. users_interested_in should now be a set + assert not isinstance(users_interested_in, str) + + # The set of users that we're interested in and that have had a presence update. + # We'll actually pull the presence updates for these users at the end. + interested_and_updated_users = ( + set() + ) # type: Union[Set[str], FrozenSet[str]] + + if from_key: + # First get all users that have had a presence update + updated_users = stream_change_cache.get_all_entities_changed(from_key) + + # Cross-reference users we're interested in with those that have had updates. + # Use a slightly-optimised method for processing smaller sets of updates. + if updated_users is not None and len(updated_users) < 500: + # For small deltas, it's quicker to get all changes and then + # cross-reference with the users we're interested in + get_updates_counter.labels("stream").inc() + for other_user_id in updated_users: + if other_user_id in users_interested_in: + # mypy thinks this variable could be a FrozenSet as it's possibly set + # to one in the `get_entities_changed` call below, and `add()` is not a + # method on a FrozenSet. That doesn't affect us here though, as + # `interested_and_updated_users` is clearly a set() above. + interested_and_updated_users.add(other_user_id) # type: ignore else: - user_ids_changed = users_interested_in + # Too many possible updates. Find all users we can see and check + # if any of them have changed. + get_updates_counter.labels("full").inc() + + interested_and_updated_users = ( + stream_change_cache.get_entities_changed( + users_interested_in, from_key + ) + ) + else: + # No from_key has been specified.
Return the presence for all users + # this user is interested in + interested_and_updated_users = users_interested_in + + # Retrieve the current presence state for each user + users_to_state = await self.get_presence_handler().current_state_for_users( + interested_and_updated_users + ) + presence_updates = list(users_to_state.values()) + + # Remove the user from the list of users to receive all presence + if user_id in self.get_module_api()._send_full_presence_to_local_users: + self.get_module_api()._send_full_presence_to_local_users.remove(user_id) + + if not include_offline: + # Filter out offline presence states + presence_updates = self._filter_offline_presence_state(presence_updates) - updates = await presence.current_state_for_users(user_ids_changed) + return presence_updates, max_token - if include_offline: - return (list(updates.values()), max_token) + async def _filter_all_presence_updates_for_user( + self, + user_id: str, + include_offline: bool, + from_key: Optional[int] = None, + ) -> List[UserPresenceState]: + """ + Computes the presence updates a user should receive. + + First pulls presence updates from the database. Then consults PresenceRouter + for whether any updates should be excluded by user ID. + + Args: + user_id: The User ID of the user to compute presence updates for. + include_offline: Whether to include offline presence states from the results. + from_key: The minimum stream ID of updates to pull from the database + before filtering. + + Returns: + A list of presence states for the given user to receive. + """ + if from_key: + # Only return updates since the last sync + updated_users = self.store.presence_stream_cache.get_all_entities_changed( + from_key + ) + if not updated_users: + updated_users = [] + + # Get the actual presence update for each change + users_to_state = await self.get_presence_handler().current_state_for_users( + updated_users + ) + presence_updates = list(users_to_state.values()) + + if not include_offline: + # Filter out offline states + presence_updates = self._filter_offline_presence_state(presence_updates) else: - return ( - [s for s in updates.values() if s.state != PresenceState.OFFLINE], - max_token, + users_to_state = await self.store.get_presence_for_all_users( + include_offline=include_offline ) - def get_current_key(self): + presence_updates = list(users_to_state.values()) + + # TODO: This feels wildly inefficient, and it's unfortunate we need to ask the + # module for information on a number of users when we then only take the info + # for a single user + + # Filter through the presence router + users_to_state_set = await self.get_presence_router().get_users_for_states( + presence_updates + ) + + # We only want the mapping for the syncing user + presence_updates = list(users_to_state_set[user_id]) + + # Return presence information for all users + return presence_updates + + def _filter_offline_presence_state( + self, presence_updates: Iterable[UserPresenceState] + ) -> List[UserPresenceState]: + """Given an iterable containing user presence updates, return a list with any offline + presence states removed. + + Args: + presence_updates: Presence states to filter + + Returns: + A new list with any offline presence states removed. 
+ """ + return [ + update + for update in presence_updates + if update.state != PresenceState.OFFLINE + ] + + def get_current_key(self) -> int: return self.store.get_current_presence_token() @cached(num_args=2, cache_context=True) - async def _get_interested_in(self, user, explicit_room_id, cache_context): + async def _get_interested_in( + self, + user: UserID, + explicit_room_id: Optional[str] = None, + cache_context: Optional[_CacheContext] = None, + ) -> Union[Set[str], str]: """Returns the set of users that the given user should see presence - updates for + updates for. + + Args: + user: The user to retrieve presence updates for. + explicit_room_id: The users that are in the room will be returned. + + Returns: + A set of user IDs to return presence updates for, or "ALL" to return all + known updates. """ user_id = user.to_string() users_interested_in = set() users_interested_in.add(user_id) # So that we receive our own presence + # cache_context isn't likely to ever be None due to the @cached decorator, + # but we can't have a non-optional argument after the optional argument + # explicit_room_id either. Assert cache_context is not None so we can use it + # without mypy complaining. + assert cache_context + + # Check with the presence router whether we should poll additional users for + # their presence information + additional_users = await self.get_presence_router().get_interested_users( + user.to_string() + ) + if additional_users == PresenceRouter.ALL_USERS: + # If the module requested that this user see the presence updates of *all* + # users, then simply return that instead of calculating what rooms this + # user shares + return PresenceRouter.ALL_USERS + + # Add the additional users from the router + users_interested_in.update(additional_users) + + # Find the users who share a room with this user users_who_share_room = await self.store.get_users_who_share_room_with_user( user_id, on_invalidate=cache_context.invalidate ) @@ -1158,15 +1670,20 @@ async def _get_interested_in(self, user, explicit_room_id, cache_context): return users_interested_in -def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): +def handle_timeouts( + user_states: List[UserPresenceState], + is_mine_fn: Callable[[str], bool], + syncing_user_ids: Set[str], + now: int, +) -> List[UserPresenceState]: """Checks the presence of users that have timed out and updates as appropriate. Args: - user_states(list): List of UserPresenceState's to check. - is_mine_fn (fn): Function that returns if a user_id is ours - syncing_user_ids (set): Set of user_ids with active syncs. - now (int): Current time in ms. + user_states: List of UserPresenceState's to check. + is_mine_fn: Function that returns if a user_id is ours + syncing_user_ids: Set of user_ids with active syncs. + now: Current time in ms. Returns: List of UserPresenceState updates @@ -1183,14 +1700,16 @@ def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): return list(changes.values()) -def handle_timeout(state, is_mine, syncing_user_ids, now): +def handle_timeout( + state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int +) -> Optional[UserPresenceState]: """Checks the presence of the user to see if any of the timers have elapsed Args: - state (UserPresenceState) - is_mine (bool): Whether the user is ours - syncing_user_ids (set): Set of user_ids with active syncs. - now (int): Current time in ms. + state + is_mine: Whether the user is ours + syncing_user_ids: Set of user_ids with active syncs. 
+ now: Current time in ms. Returns: A UserPresenceState update or None if no update. @@ -1242,23 +1761,29 @@ def handle_timeout(state, is_mine, syncing_user_ids, now): return state if changed else None -def handle_update(prev_state, new_state, is_mine, wheel_timer, now): +def handle_update( + prev_state: UserPresenceState, + new_state: UserPresenceState, + is_mine: bool, + wheel_timer: WheelTimer, + now: int, +) -> Tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. 2. Check if we should notify anyone. Args: - prev_state (UserPresenceState) - new_state (UserPresenceState) - is_mine (bool): Whether the user is ours - wheel_timer (WheelTimer) - now (int): Time now in ms + prev_state + new_state + is_mine: Whether the user is ours + wheel_timer + now: Time now in ms Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: - new_state: is the state to actually persist - - persist_and_notify (bool): whether to persist and notify people - - federation_ping (bool): whether we should send a ping over federation + - persist_and_notify: whether to persist and notify people + - federation_ping: whether we should send a ping over federation """ user_id = new_state.user_id @@ -1314,14 +1839,15 @@ def handle_update(prev_state, new_state, is_mine, wheel_timer, now): async def get_interested_parties( - store: DataStore, states: List[UserPresenceState] + store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState] ) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]: """Given a list of states return which entities (rooms, users) are interested in the given states. Args: - store - states + store: The homeserver's data store. + presence_router: A module for augmenting the destinations for presence updates. + states: A list of incoming user presence updates. Returns: A 2-tuple of `(room_ids_to_states, users_to_states)`, @@ -1337,11 +1863,21 @@ async def get_interested_parties( # Always notify self users_to_states.setdefault(state.user_id, []).append(state) + # Ask a presence routing module for any additional parties if one + # is loaded. + router_users_to_states = await presence_router.get_users_for_states(states) + + # Update the dictionaries with additional destinations and state to send + for user_id, user_states in router_users_to_states.items(): + users_to_states.setdefault(user_id, []).extend(user_states) + return room_ids_to_states, users_to_states async def get_interested_remotes( - store: DataStore, states: List[UserPresenceState], state_handler: StateHandler + store: DataStore, + presence_router: PresenceRouter, + states: List[UserPresenceState], ) -> List[Tuple[Collection[str], List[UserPresenceState]]]: """Given a list of presence states figure out which remote servers should be sent which. @@ -1349,9 +1885,9 @@ async def get_interested_remotes( All the presence states should be for local users only. Args: - store - states - state_handler + store: The homeserver's data store. + presence_router: A module for augmenting the destinations for presence updates. + states: A list of incoming user presence updates. Returns: A list of 2-tuples of destinations and states, where for @@ -1363,10 +1899,13 @@ async def get_interested_remotes( # First we look up the rooms each user is in (as well as any explicit # subscriptions), then for each distinct room we look up the remote # hosts in those rooms. 
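The next hunk swaps `state_handler.get_current_hosts_in_room` for deriving hosts directly from the room's membership. The derivation itself is plain domain extraction; a sketch, with a naive split standing in for the real `get_domain_from_id`:

from typing import Iterable, Set

def hosts_in_room(user_ids: Iterable[str]) -> Set[str]:
    # A Matrix user ID is "@localpart:server.name"; everything after the
    # first colon is the server name.
    return {user_id.split(":", 1)[1] for user_id in user_ids}

assert hosts_in_room(["@a:one.example", "@b:two.example", "@c:one.example"]) == {
    "one.example",
    "two.example",
}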
- room_ids_to_states, users_to_states = await get_interested_parties(store, states) + room_ids_to_states, users_to_states = await get_interested_parties( + store, presence_router, states + ) for room_id, states in room_ids_to_states.items(): - hosts = await state_handler.get_current_hosts_in_room(room_id) + user_ids = await store.get_users_in_room(room_id) + hosts = {get_domain_from_id(user_id) for user_id in user_ids} hosts_and_states.append((hosts, states)) for user_id, states in users_to_states.items(): @@ -1374,3 +1913,220 @@ hosts_and_states.append(([host], states)) return hosts_and_states + + +class PresenceFederationQueue: + """Handles sending ad hoc presence updates over federation, which are *not* + due to state updates (that get handled via the presence stream), e.g. + federation pings and sending existing presence states to newly joined hosts. + + Only the last N minutes will be queued, so if a federation sender instance + is down for longer than this, some updates will be dropped. This is OK as + presence is ephemeral, and so it will self-correct eventually. + + On workers the class tracks the last received position of the stream from + replication, and handles querying for missed updates over HTTP replication, + c.f. `get_current_token` and `get_replication_rows`. + """ + + # How long to keep entries in the queue for. Workers that are down for + # longer than this duration will miss out on older updates. + _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000 + + # How often to check if we can expire entries from the queue. + _CLEAR_ITEMS_EVERY_MS = 60 * 1000 + + def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): + self._clock = hs.get_clock() + self._notifier = hs.get_notifier() + self._instance_name = hs.get_instance_name() + self._presence_handler = presence_handler + self._repl_client = ReplicationGetStreamUpdates.make_client(hs) + + # Should we keep a queue of recent presence updates? We only bother if + # another process may be handling federation sending. + self._queue_presence_updates = True + + # Whether this instance is a presence writer. + self._presence_writer = self._instance_name in hs.config.worker.writers.presence + + # The FederationSender instance, if this process sends federation traffic directly. + self._federation = None + + if hs.should_send_federation(): + self._federation = hs.get_federation_sender() + + # We don't bother queuing up presence states if only this instance + # is sending federation. + if hs.config.worker.federation_shard_config.instances == [ + self._instance_name + ]: + self._queue_presence_updates = False + + # The queue of recently queued updates as tuples of: `(timestamp, + # stream_id, destinations, user_ids)`. We don't store the full states + # for efficiency, and remote workers will already have the full states + # cached. + self._queue = [] # type: List[Tuple[int, int, Collection[str], Set[str]]] + + self._next_id = 1 + + # Map from instance name to current token + self._current_tokens = {} # type: Dict[str, int] + + if self._queue_presence_updates: + self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS)
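The `_clear_queue` and `get_replication_rows` methods below rely on two compact tricks: expiring old entries by bisecting on a 1-tuple, and locating a stream ID by counting back from the end of the queue. A toy illustration of both (the queue shape mirrors the real one, the values are invented):

from bisect import bisect

# Entries are (timestamp_ms, stream_id, destinations, user_ids), sorted by
# timestamp. A 1-tuple key compares less-than any real entry with the same
# timestamp, so bisect() finds the first entry at or after the cutoff.
queue = [
    (1000, 1, ["a.example"], {"@u:x"}),
    (2000, 2, ["b.example"], {"@v:x"}),
    (3000, 3, ["c.example"], {"@w:x"}),
]
queue = queue[bisect(queue, (2500,)):]
assert [e[1] for e in queue] == [3]

# Stream IDs are dense and the last entry always has ID next_id - 1, so the
# entry holding stream ID from_token + 1 sits at a fixed negative offset;
# clamping to -len(queue) covers entries already expired from the front.
def start_index(from_token: int, next_id: int, queue_len: int) -> int:
    return max(from_token + 1 - next_id, -queue_len)

# A queue holding stream IDs 2 and 3 (next_id is 4):
assert start_index(from_token=1, next_id=4, queue_len=2) == -2  # IDs 2 and 3
assert start_index(from_token=0, next_id=4, queue_len=2) == -2  # ID 1 expired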
+ def _clear_queue(self): + """Clear out older entries from the queue.""" + clear_before = self._clock.time_msec() - self._KEEP_ITEMS_IN_QUEUE_FOR_MS + + # The queue is sorted by timestamp, so we can bisect to find the right + # place to purge before. Note that we are searching using a 1-tuple with + # the time, which does The Right Thing since each entry in the queue is a + # tuple whose first item is a timestamp. + index = bisect(self._queue, (clear_before,)) + self._queue = self._queue[index:] + + def send_presence_to_destinations( + self, states: Collection[UserPresenceState], destinations: Collection[str] + ) -> None: + """Send the presence states to the given destinations. + + Will forward to the local federation sender (if there is one) and queue + to send over replication (if there are other federation sender instances). + + Must only be called on the presence writer process. + """ + + # This should only be called on a presence writer. + assert self._presence_writer + + if self._federation: + self._federation.send_presence_to_destinations( + states=states, + destinations=destinations, + ) + + if not self._queue_presence_updates: + return + + now = self._clock.time_msec() + + stream_id = self._next_id + self._next_id += 1 + + self._queue.append((now, stream_id, destinations, {s.user_id for s in states})) + + self._notifier.notify_replication() + + def get_current_token(self, instance_name: str) -> int: + """Get the current position of the stream. + + On workers this returns the last stream ID received from replication. + """ + if instance_name == self._instance_name: + return self._next_id - 1 + else: + return self._current_tokens.get(instance_name, 0) + + async def get_replication_rows( + self, + instance_name: str, + from_token: int, + upto_token: int, + target_row_count: int, + ) -> Tuple[List[Tuple[int, Tuple[str, str]]], int, bool]: + """Get all the updates between the two tokens. + + We return rows in the form of `(destination, user_id)` to keep the size + of each row bounded (rather than returning the whole sets in a single row). + + On workers this will query the presence writer process via HTTP replication. + """ + if instance_name != self._instance_name: + # If not local we query over http replication from the presence + # writer + result = await self._repl_client( + instance_name=instance_name, + stream_name=PresenceFederationStream.NAME, + from_token=from_token, + upto_token=upto_token, + ) + return result["updates"], result["upto_token"], result["limited"] + + # If the from_token is the current token then there's nothing to return + # and we can trivially no-op. + if from_token == self._next_id - 1: + return [], upto_token, False + + # We can find the correct position in the queue by noting that there is + # exactly one entry per stream ID, and that the last entry has an ID of + # `self._next_id - 1`, so we can count backwards from the end. + # + # Since we are returning all states in the range `from_token < stream_id + # <= upto_token` we look for the index with a `stream_id` of `from_token + # + 1`. + # + # Since the start of the queue is periodically truncated we need to + # handle the case where the `from_token` stream ID has already been dropped. + start_idx = max(from_token + 1 - self._next_id, -len(self._queue)) + + to_send = [] # type: List[Tuple[int, Tuple[str, str]]] + limited = False + new_id = upto_token + for _, stream_id, destinations, user_ids in self._queue[start_idx:]: + if stream_id <= from_token: + # Paranoia check that we are actually only sending states that + # have a stream_id strictly greater than from_token. We should + # never hit this.
+ logger.warning( + "Tried returning presence federation stream ID: %d less than from_token: %d (next_id: %d, len: %d)", + stream_id, + from_token, + self._next_id, + len(self._queue), + ) + continue + + if stream_id > upto_token: + break + + new_id = stream_id + + to_send.extend( + (stream_id, (destination, user_id)) + for destination in destinations + for user_id in user_ids + ) + + if len(to_send) > target_row_count: + limited = True + break + + return to_send, new_id, limited + + async def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: list + ): + if stream_name != PresenceFederationStream.NAME: + return + + # We keep track of the current tokens (so that we can catch up with anything we missed after a disconnect) + self._current_tokens[instance_name] = token + + # If we're a federation sender we pull out the presence states to send + # and forward them on. + if not self._federation: + return + + hosts_to_users = {} # type: Dict[str, Set[str]] + for row in rows: + hosts_to_users.setdefault(row.destination, set()).add(row.user_id) + + for host, user_ids in hosts_to_users.items(): + states = await self._presence_handler.current_state_for_users(user_ids) + self._federation.send_presence_to_destinations( + states=states.values(), + destinations=[host], + ) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a755363c3f44..05b4a97b590b 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index a54fe1968e3f..c679a8303ed4 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index dbfe9bfacadc..f782d9db3205 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 0fc2bf15d520..4ceef3fab3d9 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -169,7 +168,7 @@ async def register_user( user_type: Optional[str] = None, default_display_name: Optional[str] = None, address: Optional[str] = None, - bind_emails: Iterable[str] = [], + bind_emails: Optional[Iterable[str]] = None, by_admin: bool = False, user_agent_ips: Optional[List[Tuple[str, str]]] = None, auth_provider_id: Optional[str] = None, @@ -204,7 +203,9 @@ async def register_user( Raises: SynapseError if there was a problem registering. 
""" - self.check_registration_ratelimit(address) + bind_emails = bind_emails or [] + + await self.check_registration_ratelimit(address) result = await self.spam_checker.check_registration_for_spam( threepid, @@ -583,7 +584,7 @@ def check_user_id_not_appservice_exclusive( errcode=Codes.EXCLUSIVE, ) - def check_registration_ratelimit(self, address: Optional[str]) -> None: + async def check_registration_ratelimit(self, address: Optional[str]) -> None: """A simple helper method to check whether the registration rate limit has been hit for a given IP address @@ -597,7 +598,7 @@ def check_registration_ratelimit(self, address: Optional[str]) -> None: if not address: return - self.ratelimiter.ratelimit(address) + await self.ratelimiter.ratelimit(None, address) async def register_with_store( self, @@ -721,9 +722,7 @@ class and RegisterDeviceReplicationServlet. ) if is_guest: assert valid_until_ms is None - access_token = self.macaroon_gen.generate_access_token( - user_id, ["guest = true"] - ) + access_token = self.macaroon_gen.generate_guest_access_token(user_id) else: access_token = await self._auth_handler.get_access_token_for_user_id( user_id, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 4b3d0d72e387..835d874ceedb 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -33,7 +32,14 @@ RoomCreationPreset, RoomEncryptionAlgorithms, ) -from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + LimitExceededError, + NotFoundError, + StoreError, + SynapseError, +) from synapse.api.filtering import Filter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase @@ -127,10 +133,6 @@ def __init__(self, hs: "HomeServer"): self.third_party_event_rules = hs.get_third_party_event_rules() - self._invite_burst_count = ( - hs.config.ratelimiting.rc_invites_per_room.burst_count - ) - async def upgrade_room( self, requester: Requester, old_room_id: str, new_version: RoomVersion ) -> str: @@ -677,8 +679,18 @@ async def create_room( invite_3pid_list = [] invite_list = [] - if len(invite_list) + len(invite_3pid_list) > self._invite_burst_count: - raise SynapseError(400, "Cannot invite so many users at once") + if invite_list or invite_3pid_list: + try: + # If there are invites in the request, see if the ratelimiting settings + # allow that number of invites to be sent from the current user. 
+ await self.room_member_handler.ratelimit_multiple_invites( + requester, + room_id=None, + n_invites=len(invite_list) + len(invite_3pid_list), + update=False, + ) + except LimitExceededError: + raise SynapseError(400, "Cannot invite so many users at once") await self.event_creation_handler.assert_accepted_privacy_policy(requester) @@ -1328,7 +1340,7 @@ async def shutdown_room( new_room_id = None logger.info("Shutting down room %r", room_id) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) kicked_users = [] failed_to_kick_users = [] for user_id in users: diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 924b81db7c1d..141c9c044400 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 4d20ed835764..9a092da71597 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016-2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -64,6 +63,7 @@ def __init__(self, hs: "HomeServer"): self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() self.account_data_handler = hs.get_account_data_handler() + self.event_auth_handler = hs.get_event_auth_handler() self.member_linearizer = Linearizer(name="member") @@ -75,22 +75,26 @@ def __init__(self, hs: "HomeServer"): self.allow_per_room_profiles = self.config.allow_per_room_profiles self._join_rate_limiter_local = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=hs.config.ratelimiting.rc_joins_local.per_second, burst_count=hs.config.ratelimiting.rc_joins_local.burst_count, ) self._join_rate_limiter_remote = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second, burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count, ) self._invites_per_room_limiter = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second, burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count, ) self._invites_per_user_limiter = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second, burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count, @@ -159,15 +163,45 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None: async def forget(self, user: UserID, room_id: str) -> None: raise NotImplementedError() - def ratelimit_invite(self, room_id: Optional[str], invitee_user_id: str): + async def ratelimit_multiple_invites( + self, + requester: Optional[Requester], + room_id: Optional[str], + n_invites: int, + update: bool = True, + ): + """Ratelimit more than one invite sent by the given requester in the given room. + + Args: + requester: The requester sending the invites. + room_id: The room the invites are being sent in. + n_invites: The amount of invites to ratelimit for. + update: Whether to update the ratelimiter's cache. + + Raises: + LimitExceededError: The requester can't send that many invites in the room. 
+ """ + await self._invites_per_room_limiter.ratelimit( + requester, + room_id, + update=update, + n_actions=n_invites, + ) + + async def ratelimit_invite( + self, + requester: Optional[Requester], + room_id: Optional[str], + invitee_user_id: str, + ): """Ratelimit invites by room and by target user. If room ID is missing then we just rate limit by target user. """ if room_id: - self._invites_per_room_limiter.ratelimit(room_id) + await self._invites_per_room_limiter.ratelimit(requester, room_id) - self._invites_per_user_limiter.ratelimit(invitee_user_id) + await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id) async def _local_membership_update( self, @@ -226,9 +260,25 @@ async def _local_membership_update( if event.membership == Membership.JOIN: newly_joined = True + user_is_invited = False if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) newly_joined = prev_member_event.membership != Membership.JOIN + user_is_invited = prev_member_event.membership == Membership.INVITE + + # If the member is not already in the room and is not accepting an invite, + # check if they should be allowed access via membership in a space. + if ( + newly_joined + and not user_is_invited + and not await self.event_auth_handler.can_join_without_invite( + prev_state_ids, event.room_version, user_id + ) + ): + raise AuthError( + 403, + "You do not belong to any of the required spaces to join this room.", + ) # Only rate-limit if the user actually joined the room, otherwise we'll end # up blocking profile updates. @@ -237,7 +287,7 @@ async def _local_membership_update( ( allowed, time_allowed, - ) = self._join_rate_limiter_local.can_requester_do_action(requester) + ) = await self._join_rate_limiter_local.can_do_action(requester) if not allowed: raise LimitExceededError( @@ -421,9 +471,7 @@ async def update_membership_locked( if effective_membership_state == Membership.INVITE: target_id = target.to_string() if ratelimit: - # Don't ratelimit application services. - if not requester.app_service or requester.app_service.is_rate_limited(): - self.ratelimit_invite(room_id, target_id) + await self.ratelimit_invite(requester, room_id, target_id) # block any attempts to invite the server notices mxid if target_id == self._server_notices_mxid: @@ -534,7 +582,7 @@ async def update_membership_locked( ( allowed, time_allowed, - ) = self._join_rate_limiter_remote.can_requester_do_action( + ) = await self._join_rate_limiter_remote.can_do_action( requester, ) @@ -1021,7 +1069,7 @@ async def _is_server_notice_room(self, room_id: str) -> bool: class RoomMemberMasterHandler(RoomMemberHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.distributor = hs.get_distributor() diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 3a90fc0c16d3..3e89dd2315f8 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml.py similarity index 99% rename from synapse/handlers/saml_handler.py rename to synapse/handlers/saml.py index ec2ba11c7584..80ba65b9e019 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index d742dfbd5333..4e718d3f633b 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index f98a338ec5f5..a63fac828342 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 5d9418969d84..e35d91832b42 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +14,7 @@ import itertools import logging +import re from collections import deque from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple, cast @@ -227,6 +227,23 @@ async def _summarize_local_room( suggested_only: bool, max_children: Optional[int], ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]: + """ + Generate a room entry and a list of event entries for a given room. + + Args: + requester: The requesting user, or None if this is over federation. + room_id: The room ID to summarize. + suggested_only: True if only suggested children should be returned. + Otherwise, all children are returned. + max_children: The maximum number of children to return for this node. + + Returns: + A tuple of: + An iterable of a single value of the room. + + An iterable of the sorted children events. This may be limited + to a maximum size or may include all children. + """ if not await self._is_room_accessible(room_id, requester): return (), () @@ -289,6 +306,7 @@ async def _summarize_remote_room( ev.data for ev in res.events if ev.event_type == EventTypes.MSC1772_SPACE_CHILD + or ev.event_type == EventTypes.SpaceChild ) async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool: @@ -332,7 +350,9 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: ) # TODO: update once MSC1772 lands - room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + if not room_type: + room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) entry = { "room_id": stats["room_id"], @@ -345,6 +365,7 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: stats["history_visibility"] == HistoryVisibility.WORLD_READABLE ), "guest_can_join": stats["guest_access"] == "can_join", + "creation_ts": create_event.origin_server_ts, "room_type": room_type, } @@ -354,6 +375,18 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: return room_entry async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: + """ + Get the child events for a given room. + + The returned results are sorted for stability. + + Args: + room_id: The room id to get the children of. + + Returns: + An iterable of sorted child events. + """ + # look for child rooms/spaces. 
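Further down this file the diff adds `_child_events_comparison_key`, whose rules (invalid or missing `order` values sort last, ties break on room ID) compress into a small key function. A sketch with toy room IDs, mirroring the regex added below:

import re
from typing import Optional, Tuple

_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]")

def comparison_key(order: Optional[str], room_id: str) -> Tuple[bool, Optional[str], str]:
    # Orders that are not strings, are too long, or contain characters
    # outside \x20-\x7F are treated as absent.
    if not isinstance(order, str) or len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order):
        order = None
    # Entries without a valid order come last; ties break on room ID.
    return (order is None, order, room_id)

children = [("!c:x", None), ("!a:x", "bbb"), ("!b:x", "aaa"), ("!d:x", "x" * 51)]
children.sort(key=lambda c: comparison_key(c[1], c[0]))
assert [c[0] for c in children] == ["!b:x", "!a:x", "!c:x", "!d:x"]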
current_state_ids = await self._store.get_current_state_ids(room_id) @@ -361,13 +394,15 @@ async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: [ event_id for key, event_id in current_state_ids.items() - # TODO: update once MSC1772 lands + # TODO: update once MSC1772 has been FCP for a period of time. if key[0] == EventTypes.MSC1772_SPACE_CHILD + or key[0] == EventTypes.SpaceChild ] ) - # filter out any events without a "via" (which implies it has been redacted) - return (e for e in events if _has_valid_via(e)) + # filter out any events without a "via" (which implies it has been redacted), + # and order to ensure we return stable results. + return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key) @attr.s(frozen=True, slots=True) @@ -393,3 +428,39 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool: return True logger.debug("Ignorning not-suggested child %s", edge_event.state_key) return False + + +# Order may only contain characters in the range of \x20 (space) to \x7F (~). +_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]") + + +def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]: + """ + Generate a value for comparing two child events for ordering. + + The rules for ordering are supposed to be: + + 1. The 'order' key, if it is valid. + 2. The 'origin_server_ts' of the 'm.room.create' event. + 3. The 'room_id'. + + But we skip step 2 since we may not have any state from the room. + + Args: + child: The event for generating a comparison key. + + Returns: + The comparison key as a tuple of: + False if the ordering is valid. + The ordering field. + The room ID. + """ + order = child.content.get("order") + # If order is not a string or doesn't meet the requirements, ignore it. + if not isinstance(order, str): + order = None + elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order): + order = None + + # Items without an order come last. + return (order is None, order, child.room_id) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 415b1c2d17c7..044ff06d8402 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +18,7 @@ Any, Awaitable, Callable, + Collection, Dict, Iterable, List, @@ -41,7 +41,7 @@ from synapse.http import get_request_user_agent from synapse.http.server import respond_with_html, respond_with_redirect from synapse.http.site import SynapseRequest -from synapse.types import Collection, JsonDict, UserID, contains_invalid_mxid_characters +from synapse.types import JsonDict, UserID, contains_invalid_mxid_characters from synapse.util.async_helpers import Linearizer from synapse.util.stringutils import random_string diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index ee8f87e59a36..077c7c064965 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 8730f99d03ba..383e34026e9b 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ee607e6e6576..0fcc1532da8d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018, 2019 New Vector Ltd # @@ -15,7 +14,17 @@ # limitations under the License. import itertools import logging -from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Collection, + Dict, + FrozenSet, + List, + Optional, + Set, + Tuple, +) import attr from prometheus_client import Counter @@ -24,11 +33,11 @@ from synapse.api.filtering import FilterCollection from synapse.events import EventBase from synapse.logging.context import current_context +from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( - Collection, JsonDict, MutableStateMap, Requester, @@ -251,13 +260,13 @@ def __init__(self, hs: "HomeServer"): self.storage = hs.get_storage() self.state_store = self.storage.state - # ExpiringCache((User, Device)) -> LruCache(state_key => event_id) + # ExpiringCache((User, Device)) -> LruCache(user_id => event_id) self.lazy_loaded_members_cache = ExpiringCache( "lazy_loaded_members_cache", self.clock, max_len=0, expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE, - ) + ) # type: ExpiringCache[Tuple[str, Optional[str]], LruCache[str, str]] async def wait_for_sync_for_user( self, @@ -340,7 +349,14 @@ async def current_sync_for_user( full_state: bool = False, ) -> SyncResult: """Get the sync for client needed to match what the server has now.""" - return await self.generate_sync_result(sync_config, since_token, full_state) + with start_active_span("current_sync_for_user"): + log_kv({"since_token": since_token}) + sync_result = await self.generate_sync_result( + sync_config, since_token, full_state + ) + + set_tag(SynapseTags.SYNC_RESULT, bool(sync_result)) + return sync_result async def push_rules_for_user(self, user: UserID) -> JsonDict: user_id = user.to_string() @@ -540,7 +556,7 @@ async def _load_filtered_recents( ) 
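Several hunks in this diff (`bind_emails` earlier, `state_filter` just below, `treq_args` and `headers` in http/client.py) replace mutable or shared default arguments with `Optional[...] = None` resolved at call time. A minimal illustration of the bug that pattern avoids:

from typing import List, Optional

def broken(acc: List[int] = []) -> List[int]:
    # The default list is created once, at function definition time, and
    # shared by every call that omits the argument.
    acc.append(1)
    return acc

def fixed(acc: Optional[List[int]] = None) -> List[int]:
    acc = acc if acc is not None else []
    acc.append(1)
    return acc

assert broken() == [1]
assert broken() == [1, 1]  # state leaks between calls
assert fixed() == [1]
assert fixed() == [1]  # each call gets a fresh list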
async def get_state_after_event( - self, event: EventBase, state_filter: StateFilter = StateFilter.all() + self, event: EventBase, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: """ Get the room state after the given event @@ -550,7 +566,7 @@ async def get_state_after_event( state_filter: The state filter used to fetch state from the database. """ state_ids = await self.state_store.get_state_ids_for_event( - event.event_id, state_filter=state_filter + event.event_id, state_filter=state_filter or StateFilter.all() ) if event.is_state(): state_ids = dict(state_ids) @@ -561,7 +577,7 @@ async def get_state_at( self, room_id: str, stream_position: StreamToken, - state_filter: StateFilter = StateFilter.all(), + state_filter: Optional[StateFilter] = None, ) -> StateMap[str]: """Get the room state at a particular stream position @@ -581,7 +597,7 @@ async def get_state_at( if last_events: last_event = last_events[-1] state = await self.get_state_after_event( - last_event, state_filter=state_filter + last_event, state_filter=state_filter or StateFilter.all() ) else: @@ -725,8 +741,10 @@ async def compute_summary( def get_lazy_loaded_members_cache( self, cache_key: Tuple[str, Optional[str]] - ) -> LruCache: - cache = self.lazy_loaded_members_cache.get(cache_key) + ) -> LruCache[str, str]: + cache = self.lazy_loaded_members_cache.get( + cache_key + ) # type: Optional[LruCache[str, str]] if cache is None: logger.debug("creating LruCache for %r", cache_key) cache = LruCache(LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE) @@ -964,6 +982,7 @@ async def generate_sync_result( # to query up to a given point. # Always use the `now_token` in `SyncResultBuilder` now_token = self.event_sources.get_current_token() + log_kv({"now_token": now_token}) logger.debug( "Calculating sync response for %r between %s and %s", @@ -1171,7 +1190,7 @@ async def _generate_sync_entry_for_device_list( # Step 1b, check for newly joined rooms for room_id in newly_joined_rooms: - joined_users = await self.state.get_current_users_in_room(room_id) + joined_users = await self.store.get_users_in_room(room_id) newly_joined_or_invited_users.update(joined_users) # TODO: Check that these users are actually new, i.e. either they @@ -1187,7 +1206,7 @@ async def _generate_sync_entry_for_device_list( # Now find users that we no longer track for room_id in newly_left_rooms: - left_users = await self.state.get_current_users_in_room(room_id) + left_users = await self.store.get_users_in_room(room_id) newly_left_users.update(left_users) # Remove any users that we still share a room with. 
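The device-list bookkeeping above reduces to set arithmetic: users from newly joined rooms become tracked, and users from newly left rooms stop being tracked unless another shared room remains. A toy model (user IDs invented):

from typing import Set, Tuple

def device_list_changes(
    users_in_newly_joined_rooms: Set[str],
    users_in_newly_left_rooms: Set[str],
    still_shared: Set[str],
) -> Tuple[Set[str], Set[str]]:
    newly_tracked = set(users_in_newly_joined_rooms)
    # Don't report a user as gone if we still share another room with them.
    newly_untracked = set(users_in_newly_left_rooms) - still_shared
    return newly_tracked, newly_untracked

tracked, untracked = device_list_changes({"@a:x"}, {"@b:x", "@c:x"}, {"@c:x"})
assert tracked == {"@a:x"} and untracked == {"@b:x"}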
@@ -1225,6 +1244,13 @@ async def _generate_sync_entry_for_to_device( user_id, device_id, since_stream_id, now_token.to_device_key ) + for message in messages: + # We pop here as we shouldn't be sending the message ID down + # `/sync` + message_id = message.pop("message_id", None) + if message_id: + set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id) + logger.debug( "Returning %d to-device messages between %d and %d (current token: %d)", len(messages), @@ -1335,7 +1361,7 @@ async def _generate_sync_entry_for_presence( extra_users_ids = set(newly_joined_or_invited_users) for room_id in newly_joined_rooms: - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) extra_users_ids.update(users) extra_users_ids.discard(user.to_string()) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 096d199f4cf1..e22393adc48d 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +18,10 @@ from synapse.api.errors import AuthError, ShadowBanError, SynapseError from synapse.appservice import ApplicationService -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.replication.tcp.streams import TypingStream from synapse.types import JsonDict, Requester, UserID, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -86,6 +88,7 @@ def _reset(self) -> None: self._member_last_federation_poke = {} self.wheel_timer = WheelTimer(bucket_size=5000) + @wrap_as_background_process("typing._handle_timeouts") def _handle_timeouts(self) -> None: logger.debug("Checking for typing timeouts") diff --git a/synapse/handlers/ui_auth/__init__.py b/synapse/handlers/ui_auth/__init__.py index a68d5e790e3a..4c3b669faeef 100644 --- a/synapse/handlers/ui_auth/__init__.py +++ b/synapse/handlers/ui_auth/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 3d66bf305e7f..5414ce77d83c 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,7 @@ # limitations under the License. 
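The checkers in the next file adopt the standard TYPE_CHECKING pattern for their `hs` annotations. A minimal sketch of why it works: the import only runs under a type checker, so no import cycle happens at runtime, and the quoted annotation is never evaluated there either:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only imported while type checking; at runtime this never executes,
    # which avoids the server <-> handlers import cycle.
    from synapse.server import HomeServer

class ExampleChecker:
    def __init__(self, hs: "HomeServer"):  # quoted, so evaluated lazily
        self._hs = hs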
import logging -from typing import Any +from typing import TYPE_CHECKING, Any from twisted.web.client import PartialDownloadError @@ -23,13 +22,16 @@ from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class UserInteractiveAuthChecker: """Abstract base class for an interactive auth checker""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): pass def is_enabled(self) -> bool: @@ -58,10 +60,10 @@ async def check_auth(self, authdict: dict, clientip: str) -> Any: class DummyAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.DUMMY - def is_enabled(self): + def is_enabled(self) -> bool: return True - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return True @@ -71,24 +73,24 @@ class TermsAuthChecker(UserInteractiveAuthChecker): def is_enabled(self): return True - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return True class RecaptchaAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.RECAPTCHA - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self._enabled = bool(hs.config.recaptcha_private_key) self._http_client = hs.get_proxied_http_client() self._url = hs.config.recaptcha_siteverify_api self._secret = hs.config.recaptcha_private_key - def is_enabled(self): + def is_enabled(self) -> bool: return self._enabled - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: try: user_response = authdict["response"] except KeyError: @@ -133,11 +135,11 @@ async def check_auth(self, authdict, clientip): class _BaseThreepidAuthChecker: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.hs = hs self.store = hs.get_datastore() - async def _check_threepid(self, medium, authdict): + async def _check_threepid(self, medium: str, authdict: dict) -> dict: if "threepid_creds" not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) @@ -207,31 +209,31 @@ async def _check_threepid(self, medium, authdict): class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): AUTH_TYPE = LoginType.EMAIL_IDENTITY - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) - def is_enabled(self): + def is_enabled(self) -> bool: return self.hs.config.threepid_behaviour_email in ( ThreepidBehaviour.REMOTE, ThreepidBehaviour.LOCAL, ) - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): AUTH_TYPE = LoginType.MSISDN - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) - def is_enabled(self): + def is_enabled(self) -> bool: return bool(self.hs.config.account_threepid_delegate_msisdn) - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("msisdn", authdict) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 
b121286d9563..dacc4f3076e7 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,7 +44,6 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.store = hs.get_datastore() - self.state = hs.get_state_handler() self.server_name = hs.hostname self.clock = hs.get_clock() self.notifier = hs.get_notifier() @@ -303,10 +301,12 @@ async def _handle_room_publicity_change( # ignore the change return - users_with_profile = await self.state.get_current_users_in_room(room_id) + other_users_in_room_with_profiles = ( + await self.store.get_users_in_room_with_profiles(room_id) + ) # Remove every user from the sharing tables for that room. - for user_id in users_with_profile.keys(): + for user_id in other_users_in_room_with_profiles.keys(): await self.store.remove_user_who_share_room(user_id, room_id) # Then, re-add them to the tables. @@ -315,7 +315,7 @@ async def _handle_room_publicity_change( # which when ran over an entire room, will result in the same values # being added multiple times. The batching upserts shouldn't make this # too bad, though. - for user_id, profile in users_with_profile.items(): + for user_id, profile in other_users_in_room_with_profiles.items(): await self._handle_new_user(room_id, user_id, profile) async def _handle_new_user( @@ -337,7 +337,7 @@ async def _handle_new_user( room_id ) # Now we update users who share rooms with users. - users_with_profile = await self.state.get_current_users_in_room(room_id) + other_users_in_room = await self.store.get_users_in_room(room_id) if is_public: await self.store.add_users_in_public_rooms(room_id, (user_id,)) @@ -353,14 +353,14 @@ async def _handle_new_user( # We don't care about appservice users. 
if not is_appservice: - for other_user_id in users_with_profile: + for other_user_id in other_users_in_room: if user_id == other_user_id: continue to_insert.add((user_id, other_user_id)) # Next we need to update for every local user in the room - for other_user_id in users_with_profile: + for other_user_id in other_users_in_room: if user_id == other_user_id: continue diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 142b007d010e..ed4671b7deee 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 479746c9c56c..55ea97a07f29 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/client.py b/synapse/http/client.py index a0caba84e4ba..5f40f16e24d6 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -34,6 +33,7 @@ from canonicaljson import encode_canonical_json from netaddr import AddrFormatError, IPAddress, IPSet from prometheus_client import Counter +from typing_extensions import Protocol from zope.interface import implementer, provider from OpenSSL import SSL @@ -297,7 +297,7 @@ class SimpleHttpClient: def __init__( self, hs: "HomeServer", - treq_args: Dict[str, Any] = {}, + treq_args: Optional[Dict[str, Any]] = None, ip_whitelist: Optional[IPSet] = None, ip_blacklist: Optional[IPSet] = None, use_proxy: bool = False, @@ -317,7 +317,7 @@ def __init__( self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist - self._extra_treq_args = treq_args + self._extra_treq_args = treq_args or {} self.user_agent = hs.version_string self.clock = hs.get_clock() @@ -590,7 +590,7 @@ async def put_json( uri: str, json_body: Any, args: Optional[QueryParams] = None, - headers: RawHeaders = None, + headers: Optional[RawHeaders] = None, ) -> Any: """Puts some json to the given URI. @@ -755,6 +755,16 @@ def _timeout_to_request_timed_out_error(f: Failure): return f +class ByteWriteable(Protocol): + """The type of object which must be passed into read_body_with_max_size. + + Typically this is a file object. + """ + + def write(self, data: bytes) -> int: + pass + + class BodyExceededMaxSize(Exception): """The maximum allowed size of the HTTP body was exceeded.""" @@ -791,7 +801,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): transport = None # type: Optional[ITCPTransport] def __init__( - self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int] + self, stream: ByteWriteable, deferred: defer.Deferred, max_size: Optional[int] ): self.stream = stream self.deferred = deferred @@ -831,7 +841,7 @@ def connectionLost(self, reason: Failure = connectionDone) -> None: def read_body_with_max_size( - response: IResponse, stream: BinaryIO, max_size: Optional[int] + response: IResponse, stream: ByteWriteable, max_size: Optional[int] ) -> defer.Deferred: """ Read a HTTP response body to a file-object. Optionally enforcing a maximum file size. 
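The `ByteWriteable` Protocol introduced above replaces `BinaryIO` in `read_body_with_max_size`, so callers can pass anything exposing a `write(bytes) -> int` method rather than a real file. A self-contained illustration of the structural typing involved (`SizeCounter` and `drain` are hypothetical names, not part of the diff):

from io import BytesIO
from typing_extensions import Protocol

class ByteWriteable(Protocol):
    # Restated from the diff above so the example stands alone.
    def write(self, data: bytes) -> int:
        ...

class SizeCounter:
    # A sink that counts bytes without buffering them; it satisfies
    # ByteWriteable purely by having a compatible write() method.
    def __init__(self) -> None:
        self.size = 0

    def write(self, data: bytes) -> int:
        self.size += len(data)
        return len(data)

def drain(stream: ByteWriteable, chunks) -> None:
    # Mimics what a body-reading protocol does with its `stream` argument.
    for chunk in chunks:
        stream.write(chunk)

drain(BytesIO(), [b"hello", b" world"])  # file-like objects qualify
counter = SizeCounter()
drain(counter, [b"hello", b" world"])    # as does any object with write()
assert counter.size == 11

Because Protocol checks are structural, neither BytesIO nor SizeCounter needs to inherit from ByteWriteable for a type checker to accept them.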
diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index b797e3ce80b3..17e1c5abb13d 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/federation/__init__.py b/synapse/http/federation/__init__.py index 1453d045718f..743fb9904a8f 100644 --- a/synapse/http/federation/__init__.py +++ b/synapse/http/federation/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 5935a125fd60..950770201a79 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index d9620032d2d7..b8ed4ec905d4 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index ce4079f15c4c..20d39a4ea64f 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 5f01ebd3d472..bb837b7b1979 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1,6 +1,4 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2014-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. import cgi +import codecs import logging import random import sys +import typing import urllib.parse -from io import BytesIO +from io import BytesIO, StringIO from typing import Callable, Dict, List, Optional, Tuple, Union import attr @@ -73,6 +73,9 @@ "synapse_http_matrixfederationclient_responses", "", ["method", "code"] ) +# a federation response can be rather large (eg a big state_ids is 50M or so), so we +# need a generous limit here. +MAX_RESPONSE_SIZE = 100 * 1024 * 1024 MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 @@ -168,12 +171,27 @@ async def _handle_json_response( try: check_content_type_is_json(response.headers) - # Use the custom JSON decoder (partially re-implements treq.json_content). 
- d = treq.text_content(response, encoding="utf-8") - d.addCallback(json_decoder.decode) + buf = StringIO() + d = read_body_with_max_size(response, BinaryIOWrapper(buf), MAX_RESPONSE_SIZE) d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) + def parse(_len: int): + return json_decoder.decode(buf.getvalue()) + + d.addCallback(parse) + body = await make_deferred_yieldable(d) + except BodyExceededMaxSize as e: + # The response was too big. + logger.warning( + "{%s} [%s] JSON response exceeded max size %i - %s %s", + request.txn_id, + request.destination, + MAX_RESPONSE_SIZE, + request.method, + request.uri.decode("ascii"), + ) + raise RequestSendFailed(e, can_retry=False) from e except ValueError as e: # The JSON content was invalid. logger.warning( @@ -219,6 +237,18 @@ async def _handle_json_response( return body +class BinaryIOWrapper: + """A wrapper for a TextIO which converts from bytes on the fly.""" + + def __init__(self, file: typing.TextIO, encoding="utf-8", errors="strict"): + self.decoder = codecs.getincrementaldecoder(encoding)(errors) + self.file = file + + def write(self, b: Union[bytes, bytearray]) -> int: + self.file.write(self.decoder.decode(b)) + return len(b) + + class MatrixFederationHttpClient: """HTTP client used to talk to other homeservers over the federation protocol. Send client certificates and signs requests. @@ -272,7 +302,7 @@ async def _send_request_with_optional_trailing_slash( self, request: MatrixFederationRequest, try_trailing_slash_on_400: bool = False, - **send_request_args + **send_request_args, ) -> IResponse: """Wrapper for _send_request which can optionally retry the request upon receiving a combination of a 400 HTTP response code and a diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 16ec850064dd..7dfae8b786b9 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +26,7 @@ from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent +from twisted.web.iweb import IAgent, IPolicyForHTTPS from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint @@ -88,12 +87,14 @@ def __init__( self, reactor, proxy_reactor=None, - contextFactory=BrowserLikePolicyForHTTPS(), + contextFactory: Optional[IPolicyForHTTPS] = None, connectTimeout=None, bindAddress=None, pool=None, use_proxy=False, ): + contextFactory = contextFactory or BrowserLikePolicyForHTTPS() + _AgentBase.__init__(self, reactor, pool) if proxy_reactor is None: diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 0ec5d941b8fe..602f93c49710 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/server.py b/synapse/http/server.py index fa89260850e6..845651e60634 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 0e637f47016f..31897546a967 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/site.py b/synapse/http/site.py index 47754aff43ca..671fd3fbcc29 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -14,19 +14,24 @@ import contextlib import logging import time -from typing import Optional, Type, Union +from typing import Optional, Tuple, Union import attr from zope.interface import implementer -from twisted.internet.interfaces import IAddress +from twisted.internet.interfaces import IAddress, IReactorTime from twisted.python.failure import Failure +from twisted.web.resource import IResource from twisted.web.server import Request, Site from synapse.config.server import ListenerConfig from synapse.http import get_request_user_agent, redact_uri from synapse.http.request_metrics import RequestMetrics, requests_counter -from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.logging.context import ( + ContextRequest, + LoggingContext, + PreserveLoggingContext, +) from synapse.types import Requester logger = logging.getLogger(__name__) @@ -45,6 +50,7 @@ class SynapseRequest(Request): * Redaction of access_token query-params in __repr__ * Logging at start and end * Metrics to record CPU, wallclock and DB time by endpoint. + * A limit to the size of request which will be accepted It also provides a method `processing`, which returns a context manager. 
If this method is called, the request won't be logged until the context manager is closed; @@ -55,15 +61,16 @@ class SynapseRequest(Request): logcontext: the log context for this request """ - def __init__(self, channel, *args, **kw): + def __init__(self, channel, *args, max_request_body_size=1024, **kw): Request.__init__(self, channel, *args, **kw) + self._max_request_body_size = max_request_body_size self.site = channel.site # type: SynapseSite self._channel = channel # this is used by the tests self.start_time = 0.0 # The requester, if authenticated. For federation requests this is the # server name, for client requests this is the Requester object. - self.requester = None # type: Optional[Union[Requester, str]] + self._requester = None # type: Optional[Union[Requester, str]] # we can't yet create the logcontext, as we don't know the method. self.logcontext = None # type: Optional[LoggingContext] @@ -93,6 +100,43 @@ def __repr__(self): self.site.site_tag, ) + def handleContentChunk(self, data): + # we should have a `content` by now. + assert self.content, "handleContentChunk() called before gotLength()" + if self.content.tell() + len(data) > self._max_request_body_size: + logger.warning( + "Aborting connection from %s because the request exceeds maximum size", + self.client, + ) + self.transport.abortConnection() + return + super().handleContentChunk(data) + + @property + def requester(self) -> Optional[Union[Requester, str]]: + return self._requester + + @requester.setter + def requester(self, value: Union[Requester, str]) -> None: + # Store the requester, and update some properties based on it. + + # This should only be called once. + assert self._requester is None + + self._requester = value + + # A logging context should exist by now (and have a ContextRequest). + assert self.logcontext is not None + assert self.logcontext.request is not None + + ( + requester, + authenticated_entity, + ) = self.get_authenticated_entity() + self.logcontext.request.requester = requester + # If there's no authenticated entity, it was the requester. + self.logcontext.request.authenticated_entity = authenticated_entity or requester + def get_request_id(self): return "%s-%i" % (self.get_method(), self.request_seq) @@ -126,13 +170,60 @@ def get_method(self) -> str: return self.method.decode("ascii") return method + def get_authenticated_entity(self) -> Tuple[Optional[str], Optional[str]]: + """ + Get the "authenticated" entity of the request, which might be the user + performing the action, or a user being puppeted by a server admin. + + Returns: + A tuple: + The first item is a string representing the user making the request. + + The second item is a string or None representing the user who + authenticated when making this request. See + Requester.authenticated_entity. + """ + # Convert the requester into a string that we can log + if isinstance(self._requester, str): + return self._requester, None + elif isinstance(self._requester, Requester): + requester = self._requester.user.to_string() + authenticated_entity = self._requester.authenticated_entity + + # If this is a request where the target user doesn't match the user who + # authenticated (e.g. an admin is puppeting a user) then we return both. + if self._requester.user.to_string() != authenticated_entity: + return requester, authenticated_entity + + return requester, None + elif self._requester is not None: + # This shouldn't happen, but we log it so we don't lose information + # and can see that we're doing something wrong.
+ return repr(self._requester), None # type: ignore[unreachable] + + return None, None + def render(self, resrc): # this is called once a Resource has been found to serve the request; in our # case the Resource in question will normally be a JsonResource. # create a LogContext for this request request_id = self.get_request_id() - self.logcontext = LoggingContext(request_id, request=request_id) + self.logcontext = LoggingContext( + request_id, + request=ContextRequest( + request_id=request_id, + ip_address=self.getClientIP(), + site_tag=self.site.site_tag, + # The requester is going to be unknown at this point. + requester=None, + authenticated_entity=None, + method=self.get_method(), + url=self.get_redacted_uri(), + protocol=self.clientproto.decode("ascii", errors="replace"), + user_agent=get_request_user_agent(self), + ), + ) # override the Server header which is set by twisted self.setHeader("Server", self.site.server_version_string) @@ -277,25 +368,6 @@ def _finished_processing(self): # to the client (nb may be negative) response_send_time = self.finish_time - self._processing_finished_time - # Convert the requester into a string that we can log - authenticated_entity = None - if isinstance(self.requester, str): - authenticated_entity = self.requester - elif isinstance(self.requester, Requester): - authenticated_entity = self.requester.authenticated_entity - - # If this is a request where the target user doesn't match the user who - # authenticated (e.g. and admin is puppetting a user) then we log both. - if self.requester.user.to_string() != authenticated_entity: - authenticated_entity = "{},{}".format( - authenticated_entity, - self.requester.user.to_string(), - ) - elif self.requester is not None: - # This shouldn't happen, but we log it so we don't lose information - # and can see that we're doing something wrong. - authenticated_entity = repr(self.requester) # type: ignore[unreachable] - user_agent = get_request_user_agent(self, "-") code = str(self.code) @@ -305,6 +377,13 @@ def _finished_processing(self): code += "!" log_level = logging.INFO if self._should_log_request() else logging.DEBUG + + # If this is a request where the target user doesn't match the user who + # authenticated (e.g. an admin is puppeting a user) then we log both. + requester, authenticated_entity = self.get_authenticated_entity() + if authenticated_entity: + requester = "{}.{}".format(authenticated_entity, requester) + self.site.access_logger.log( log_level, "%s - %s - {%s}" @@ -312,7 +391,7 @@ ' %sB %s "%s %s %s" "%s" [%d dbevts]', self.getClientIP(), self.site.site_tag, - authenticated_entity, + requester, processing_time, response_send_time, usage.ru_utime, @@ -421,29 +500,55 @@ class _XForwardedForAddress: class SynapseSite(Site): """ - Subclass of a twisted http Site that does access logging with python's - standard logging + Synapse-specific twisted http Site + + This does two main things. + + First, it replaces the requestFactory in use so that we build SynapseRequests + instead of regular t.w.server.Requests. All of the constructor params are really + just parameters for SynapseRequest. + + Second, it inhibits the log() method called by Request.finish, since SynapseRequest + does its own logging.
""" def __init__( self, - logger_name, - site_tag, + logger_name: str, + site_tag: str, config: ListenerConfig, - resource, + resource: IResource, server_version_string, - *args, - **kwargs + max_request_body_size: int, + reactor: IReactorTime, ): - Site.__init__(self, resource, *args, **kwargs) + """ + + Args: + logger_name: The name of the logger to use for access logs. + site_tag: A tag to use for this site - mostly in access logs. + config: Configuration for the HTTP listener corresponding to this site + resource: The base of the resource tree to be used for serving requests on + this site + server_version_string: A string to present for the Server header + max_request_body_size: Maximum request body length to allow before + dropping the connection + reactor: reactor to be used to manage connection timeouts + """ + Site.__init__(self, resource, reactor=reactor) self.site_tag = site_tag assert config.http_options is not None proxied = config.http_options.x_forwarded - self.requestFactory = ( - XForwardedForRequest if proxied else SynapseRequest - ) # type: Type[Request] + request_class = XForwardedForRequest if proxied else SynapseRequest + + def request_factory(channel, queued) -> Request: + return request_class( + channel, max_request_body_size=max_request_body_size, queued=queued + ) + + self.requestFactory = request_factory # type: ignore self.access_logger = logging.getLogger(logger_name) self.server_version_string = server_version_string.encode("ascii") diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py index b28b7b2ef761..b50a4f95eb3a 100644 --- a/synapse/logging/__init__.py +++ b/synapse/logging/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -# These are imported to allow for nicer logging configuration files. +import logging + from synapse.logging._remote import RemoteHandler from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter +# These are imported to allow for nicer logging configuration files. __all__ = ["RemoteHandler", "JsonFormatter", "TerseJsonFormatter"] + +# Debug logger for https://github.com/matrix-org/synapse/issues/9533 etc +issue9533_logger = logging.getLogger("synapse.9533_debug") diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index 643492ceaf83..c515690b38f0 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -227,11 +226,11 @@ def _handle_pressure(self) -> None: old_buffer = self._buffer self._buffer = deque() - for i in range(buffer_split): + for _ in range(buffer_split): self._buffer.append(old_buffer.popleft()) end_buffer = [] - for i in range(buffer_split): + for _ in range(buffer_split): end_buffer.append(old_buffer.pop()) self._buffer.extend(reversed(end_buffer)) diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index 3e054f615c48..c7a971a9d60b 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 2fbf5549a1fb..8002a250a268 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 03cf3c2b8ed3..7fc11a9ac2f8 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -22,7 +22,6 @@ See doc/log_contexts.rst for details on how this works. """ - import inspect import logging import threading @@ -30,6 +29,7 @@ import warnings from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union +import attr from typing_extensions import Literal from twisted.internet import defer, threads @@ -181,6 +181,29 @@ def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage": return res +@attr.s(slots=True) +class ContextRequest: + """ + A bundle of attributes from the SynapseRequest object. + + This exists to: + + * Avoid a cycle between LoggingContext and SynapseRequest. + * Be a single variable that can be passed from parent LoggingContexts to + their children. + """ + + request_id = attr.ib(type=str) + ip_address = attr.ib(type=str) + site_tag = attr.ib(type=str) + requester = attr.ib(type=Optional[str]) + authenticated_entity = attr.ib(type=Optional[str]) + method = attr.ib(type=str) + url = attr.ib(type=str) + protocol = attr.ib(type=str) + user_agent = attr.ib(type=str) + + LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"] @@ -235,7 +258,8 @@ class LoggingContext: child to the parent Args: - name (str): Name for the context for debugging. + name: Name for the context for logging. If this is omitted, it is + inherited from the parent context. parent_context (LoggingContext|None): The parent of the new context """ @@ -256,10 +280,9 @@ def __init__( self, name: Optional[str] = None, parent_context: "Optional[LoggingContext]" = None, - request: Optional[str] = None, + request: Optional[ContextRequest] = None, ) -> None: self.previous_context = current_context() - self.name = name # track the resources used by this context so far self._resource_usage = ContextResourceUsage() @@ -281,16 +304,27 @@ def __init__( self.parent_context = parent_context if self.parent_context is not None: - self.parent_context.copy_to(self) + # we track the current request_id + self.request = self.parent_context.request + + # we also track the current scope: + self.scope = self.parent_context.scope if request is not None: # the request param overrides the request from the parent context self.request = request + # if we don't have a `name`, but do have a parent context, use its name. + if self.parent_context and name is None: + name = str(self.parent_context) + if name is None: + raise ValueError( + "LoggingContext must be given either a name or a parent context" + ) + self.name = name + def __str__(self) -> str: - if self.request: - return str(self.request) - return "%s@%x" % (self.name, id(self)) + return self.name @classmethod def current_context(cls) -> LoggingContextOrSentinel: @@ -556,8 +590,23 @@ def filter(self, record: logging.LogRecord) -> Literal[True]: # we end up in a death spiral of infinite loops, so let's check, for # robustness' sake. if context is not None: - # Logging is interested in the request. 
- record.request = context.request # type: ignore + # Logging is interested in the request ID. Note that for backwards + # compatibility this is stored as the "request" on the record. + record.request = str(context) # type: ignore + + # Add some data from the HTTP request. + request = context.request + if request is None: + return True + + record.ip_address = request.ip_address # type: ignore + record.site_tag = request.site_tag # type: ignore + record.requester = request.requester # type: ignore + record.authenticated_entity = request.authenticated_entity # type: ignore + record.method = request.method # type: ignore + record.url = request.url # type: ignore + record.protocol = request.protocol # type: ignore + record.user_agent = request.user_agent # type: ignore return True @@ -630,8 +679,8 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe def nested_logging_context(suffix: str) -> LoggingContext: """Creates a new logging context as a child of another. - The nested logging context will have a 'request' made up of the parent context's - request, plus the given suffix. + The nested logging context will have a 'name' made up of the parent context's + name, plus the given suffix. CPU/db usage stats will be added to the parent context's on exit. @@ -641,7 +690,7 @@ def nested_logging_context(suffix: str) -> LoggingContext: # ... do stuff Args: - suffix: suffix to add to the parent context's 'request'. + suffix: suffix to add to the parent context's 'name'. Returns: LoggingContext: new logging context. @@ -652,12 +701,14 @@ def nested_logging_context(suffix: str) -> LoggingContext: "Starting nested logging context from sentinel context: metrics will be lost" ) parent_context = None - prefix = "" else: assert isinstance(curr_context, LoggingContext) parent_context = curr_context - prefix = str(parent_context.request) - return LoggingContext(parent_context=parent_context, request=prefix + "-" + suffix) + prefix = str(curr_context) + return LoggingContext( + prefix + "-" + suffix, + parent_context=parent_context, + ) def preserve_fn(f): @@ -847,7 +898,7 @@ def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): parent_context = curr_context def g(): - with LoggingContext(parent_context=parent_context): + with LoggingContext(str(curr_context), parent_context=parent_context): return f(*args, **kwargs) return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) diff --git a/synapse/logging/filter.py b/synapse/logging/filter.py index 1baf8dd67934..ed51a4726cda 100644 --- a/synapse/logging/filter.py +++ b/synapse/logging/filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py index 11f60a77f795..c0f12ecd15b8 100644 --- a/synapse/logging/formatter.py +++ b/synapse/logging/formatter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index aa146e8bb8b2..fba2fa390434 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -259,6 +258,14 @@ def report_span(self, span): logger = logging.getLogger(__name__) +class SynapseTags: + # The message ID of any to_device message processed + TO_DEVICE_MESSAGE_ID = "to_device.message_id" + + # Whether the sync response has new data to be returned to the client. + SYNC_RESULT = "sync.new_data" + + # Block everything by default # A regex which matches the server_names to expose traces for. # None means 'block everything'. @@ -478,7 +485,7 @@ def start_active_span_from_request( def start_active_span_from_edu( edu_content, operation_name, - references=[], + references: Optional[list] = None, tags=None, start_time=None, ignore_active_span=False, @@ -493,6 +500,7 @@ def start_active_span_from_edu( For the other args see opentracing.tracer """ + references = references or [] if opentracing is None: return noop_context_manager() diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index 7b9c65745627..b1e8e08fe96f 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py index fd3543ab0428..08895e72eedd 100644 --- a/synapse/logging/utils.py +++ b/synapse/logging/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 3b499efc07d3..fef28466694a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -214,7 +213,12 @@ class GaugeBucketCollector: Prometheus, and optimise for that case. """ - __slots__ = ("_name", "_documentation", "_bucket_bounds", "_metric") + __slots__ = ( + "_name", + "_documentation", + "_bucket_bounds", + "_metric", + ) def __init__( self, @@ -242,11 +246,16 @@ def __init__( if self._bucket_bounds[-1] != float("inf"): self._bucket_bounds.append(float("inf")) - self._metric = self._values_to_metric([]) + # We initially set this to None. We won't report metrics until + # this has been initialised after a successful data update + self._metric = None # type: Optional[GaugeHistogramMetricFamily] + registry.register(self) def collect(self): - yield self._metric + # Don't report metrics unless we've already collected some data + if self._metric is not None: + yield self._metric def update_data(self, values: Iterable[float]): """Update the data to be reported by the metric @@ -526,6 +535,13 @@ def collect(self): REGISTRY.register(ReactorLastSeenMetric()) +# The minimum time in seconds between GCs for each generation, regardless of the current GC +# thresholds and counts. +MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0) + +# The time (in seconds since the epoch) of the last time we did a GC for each generation. +_last_gc = [0.0, 0.0, 0.0] + def runUntilCurrentTimer(reactor, func): @functools.wraps(func) @@ -566,11 +582,16 @@ def f(*args, **kwargs): return ret # Check if we need to do a manual GC (since its been disabled), and do - # one if necessary. + # one if necessary. Note we go in reverse order as e.g. 
a gen 1 GC may + promote an object into gen 2, and we don't want to handle the same + object multiple times. threshold = gc.get_threshold() counts = gc.get_count() for i in (2, 1, 0): - if threshold[i] < counts[i]: + # We check if we need to do one based on a straightforward + # comparison between the threshold and count. We also do an extra + # check to make sure that we don't do a GC too often. + if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]: if i == 0: logger.debug("Collecting gc %d", i) else: @@ -580,6 +601,8 @@ def f(*args, **kwargs): unreachable = gc.collect(i) end = time.time() + _last_gc[i] = end + gc_time.labels(i).observe(end - start) gc_unreachable.labels(i).set(unreachable) @@ -606,6 +629,7 @@ def f(*args, **kwargs): except AttributeError: pass + __all__ = [ "MetricsResource", "generate_latest", diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 71320a140223..8002be56e0e8 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015-2019 Prometheus Python Client Developers # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index b56986d8e753..714caf84c379 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +15,7 @@ import logging import threading from functools import wraps -from typing import TYPE_CHECKING, Dict, Optional, Set +from typing import TYPE_CHECKING, Dict, Optional, Set, Union from prometheus_client.core import REGISTRY, Counter, Gauge @@ -199,11 +198,11 @@ async def run(): _background_process_start_count.labels(desc).inc() _background_process_in_flight_count.labels(desc).inc() - with BackgroundProcessLoggingContext(desc, "%s-%i" % (desc, count)) as context: + with BackgroundProcessLoggingContext(desc, count) as context: try: ctx = noop_context_manager() if bg_start_span: - ctx = start_active_span(desc, tags={"request_id": context.request}) + ctx = start_active_span(desc, tags={"request_id": str(context)}) with ctx: return await maybe_awaitable(func(*args, **kwargs)) except Exception: @@ -244,9 +243,20 @@ class BackgroundProcessLoggingContext(LoggingContext): __slots__ = ["_proc"] - def __init__(self, name: str, request: Optional[str] = None): - super().__init__(name, request=request) + def __init__(self, name: str, instance_id: Optional[Union[int, str]] = None): + """ + Args: + name: The name of the background process. Each distinct `name` gets a + separate prometheus time series. + + instance_id: an identifier to add to `name` to distinguish this instance of + the named background process in the logs. If this is `None`, one is + made up based on id(self). + """ + if instance_id is None: + instance_id = id(self) + super().__init__("%s-%s" % (name, instance_id)) self._proc = _BackgroundProcess(name, self) def start(self, rusage: "Optional[resource._RUsage]"): diff --git a/synapse/metrics/jemalloc.py b/synapse/metrics/jemalloc.py new file mode 100644 index 000000000000..29ab6c0229df --- /dev/null +++ b/synapse/metrics/jemalloc.py @@ -0,0 +1,196 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ctypes +import logging +import os +import re +from typing import Optional + +from synapse.metrics import REGISTRY, GaugeMetricFamily + +logger = logging.getLogger(__name__) + + +def _setup_jemalloc_stats(): + """Checks to see if jemalloc is loaded, and hooks up a collector to record + statistics exposed by jemalloc. + """ + + # Try to find the loaded jemalloc shared library, if any. We need to + # introspect into what is loaded, rather than loading whatever is on the + # path, as if we load a *different* jemalloc version things will seg fault. + + # We look in `/proc/self/maps`, which only exists on linux. + if not os.path.exists("/proc/self/maps"): + logger.debug("Not looking for jemalloc as no /proc/self/maps exist") + return + + # We're looking for a path at the end of the line that includes + # "libjemalloc". + regex = re.compile(r"/\S+/libjemalloc.*$") + + jemalloc_path = None + with open("/proc/self/maps") as f: + for line in f: + match = regex.search(line.strip()) + if match: + jemalloc_path = match.group() + + if not jemalloc_path: + # No loaded jemalloc was found. + logger.debug("jemalloc not found") + return + + logger.debug("Found jemalloc at %s", jemalloc_path) + + jemalloc = ctypes.CDLL(jemalloc_path) + + def _mallctl( + name: str, read: bool = True, write: Optional[int] = None + ) -> Optional[int]: + """Wrapper around `mallctl` for reading and writing integers to + jemalloc. + + Args: + name: The name of the option to read from/write to. + read: Whether to try and read the value. + write: The value to write, if given. + + Returns: + The value read if `read` is True, otherwise None. + + Raises: + An exception if `mallctl` returns a non-zero error code. + """ + + input_var = None + input_var_ref = None + input_len_ref = None + if read: + input_var = ctypes.c_size_t(0) + input_len = ctypes.c_size_t(ctypes.sizeof(input_var)) + + input_var_ref = ctypes.byref(input_var) + input_len_ref = ctypes.byref(input_len) + + write_var_ref = None + write_len = ctypes.c_size_t(0) + if write is not None: + write_var = ctypes.c_size_t(write) + write_len = ctypes.c_size_t(ctypes.sizeof(write_var)) + + write_var_ref = ctypes.byref(write_var) + + # The interface is: + # + # int mallctl( + # const char *name, + # void *oldp, + # size_t *oldlenp, + # void *newp, + # size_t newlen + # ) + # + # Where oldp/oldlenp is a buffer where the old value will be written to + # (if not null), and newp/newlen is the buffer with the new value to set + # (if not null). Note that they're all references *except* newlen. + result = jemalloc.mallctl( + name.encode("ascii"), + input_var_ref, + input_len_ref, + write_var_ref, + write_len, + ) + + if result != 0: + raise Exception("Failed to call mallctl") + + if input_var is None: + return None + + return input_var.value + + def _jemalloc_refresh_stats() -> None: + """Request that jemalloc updates its internal statistics. This needs to + be called before querying for stats, otherwise it will return stale + values. 
+ """ + try: + _mallctl("epoch", read=False, write=1) + except Exception as e: + logger.warning("Failed to reload jemalloc stats: %s", e) + + class JemallocCollector: + """Metrics for internal jemalloc stats.""" + + def collect(self): + _jemalloc_refresh_stats() + + g = GaugeMetricFamily( + "jemalloc_stats_app_memory_bytes", + "The stats reported by jemalloc", + labels=["type"], + ) + + # Read the relevant global stats from jemalloc. Note that these may + # not be accurate if python is configured to use its internal small + # object allocator (which is on by default, disable by setting the + # env `PYTHONMALLOC=malloc`). + # + # See the jemalloc manpage for details about what each value means, + # roughly: + # - allocated ─ Total number of bytes allocated by the app + # - active ─ Total number of bytes in active pages allocated by + # the application, this is bigger than `allocated`. + # - resident ─ Maximum number of bytes in physically resident data + # pages mapped by the allocator, comprising all pages dedicated + # to allocator metadata, pages backing active allocations, and + # unused dirty pages. This is bigger than `active`. + # - mapped ─ Total number of bytes in active extents mapped by the + # allocator. + # - metadata ─ Total number of bytes dedicated to jemalloc + # metadata. + for t in ( + "allocated", + "active", + "resident", + "mapped", + "metadata", + ): + try: + value = _mallctl(f"stats.{t}") + except Exception as e: + # There was an error fetching the value, skip. + logger.warning("Failed to read jemalloc stats.%s: %s", t, e) + continue + + g.add_metric([t], value=value) + + yield g + + REGISTRY.register(JemallocCollector()) + + logger.debug("Added jemalloc stats") + + +def setup_jemalloc_stats(): + """Try to setup jemalloc stats, if jemalloc is loaded.""" + + try: + _setup_jemalloc_stats() + except Exception as e: + # This should only happen if we find the loaded jemalloc library, but + # fail to load it somehow (e.g. we somehow picked the wrong version). + logger.info("Failed to setup collector to record jemalloc stats: %s", e) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 781e02fbbb31..a1a2b9aeccd3 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -14,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Generator, Iterable, Optional, Tuple +from typing import TYPE_CHECKING, Any, Generator, Iterable, List, Optional, Tuple from twisted.internet import defer @@ -50,11 +49,21 @@ def __init__(self, hs, auth_handler): self._auth = hs.get_auth() self._auth_handler = auth_handler self._server_name = hs.hostname + self._presence_stream = hs.get_event_sources().sources["presence"] + self._state = hs.get_state_handler() # We expose these as properties below in order to attach a helpful docstring. self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient self._public_room_list_manager = PublicRoomListManager(hs) + # The next time these users sync, they will receive the current presence + # state of all local users. Users are added by send_local_online_presence_to, + # and removed after a successful sync. + # + # We make this a private variable to deter modules from accessing it directly, + # though other classes in Synapse will still do so. 
+ self._send_full_presence_to_local_users = set() + @property def http_client(self): """Allows making outbound HTTP requests to remote resources. @@ -118,7 +127,7 @@ def check_user_exists(self, user_id): return defer.ensureDeferred(self._auth_handler.check_user_exists(user_id)) @defer.inlineCallbacks - def register(self, localpart, displayname=None, emails=[]): + def register(self, localpart, displayname=None, emails: Optional[List[str]] = None): """Registers a new user with given localpart and optional displayname, emails. Also returns an access token for the new user. @@ -138,11 +147,13 @@ def register(self, localpart, displayname=None, emails=[]): logger.warning( "Using deprecated ModuleApi.register which creates a dummy user device." ) - user_id = yield self.register_user(localpart, displayname, emails) + user_id = yield self.register_user(localpart, displayname, emails or []) _, access_token = yield self.register_device(user_id) return user_id, access_token - def register_user(self, localpart, displayname=None, emails=[]): + def register_user( + self, localpart, displayname=None, emails: Optional[List[str]] = None + ): """Registers a new user with given localpart and optional displayname, emails. Args: @@ -161,7 +172,7 @@ def register_user(self, localpart, displayname=None, emails=[]): self._hs.get_registration_handler().register_user( localpart=localpart, default_display_name=displayname, - bind_emails=emails, + bind_emails=emails or [], ) ) @@ -385,6 +396,49 @@ async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBa return event + async def send_local_online_presence_to(self, users: Iterable[str]) -> None: + """ + Forces the equivalent of a presence initial_sync for a set of local or remote + users. The users will receive presence for all currently online users that they + are considered interested in. + + Updates to remote users will be sent immediately, whereas local users will receive + them on their next sync attempt. + + Note that this method can only be run on the main or federation_sender worker + processes. + """ + if not self._hs.should_send_federation(): + raise Exception( + "send_local_online_presence_to can only be run " + "on processes that send federation", + ) + + for user in users: + if self._hs.is_mine_id(user): + # Modify SyncHandler._generate_sync_entry_for_presence to call + # presence_source.get_new_events with an empty `from_key` if + # that user's ID were in a list modified by ModuleApi somewhere. + # That user would then get all presence state on next incremental sync. + + # Force a presence initial_sync for this user next time + self._send_full_presence_to_local_users.add(user) + else: + # Retrieve presence state for currently online users that this user + # is considered interested in + presence_events, _ = await self._presence_stream.get_new_events( + UserID.from_string(user), from_key=None, include_offline=False + ) + + # Send to remote destinations. + + # We pull out the presence handler here to break a cyclic + # dependency between the presence router and module API. 
+ presence_handler = self._hs.get_presence_handler() + await presence_handler.maybe_send_presence_to_interested_destinations( + presence_events + ) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/module_api/errors.py b/synapse/module_api/errors.py index b15441772c26..d24864c5492a 100644 --- a/synapse/module_api/errors.py +++ b/synapse/module_api/errors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/notifier.py b/synapse/notifier.py index 1374aae49051..24b4e6649f18 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +17,7 @@ from typing import ( Awaitable, Callable, + Collection, Dict, Iterable, List, @@ -38,17 +38,13 @@ from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state +from synapse.logging import issue9533_logger from synapse.logging.context import PreserveLoggingContext +from synapse.logging.opentracing import log_kv, start_active_span from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig -from synapse.types import ( - Collection, - PersistedEventPosition, - RoomStreamToken, - StreamToken, - UserID, -) +from synapse.types import PersistedEventPosition, RoomStreamToken, StreamToken, UserID from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -136,6 +132,15 @@ def notify( self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred + log_kv( + { + "notify": self.user_id, + "stream": stream_key, + "stream_id": stream_id, + "listeners": self.count_listeners(), + } + ) + users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): @@ -266,7 +271,7 @@ def on_new_room_event( event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, - extra_users: Collection[UserID] = [], + extra_users: Optional[Collection[UserID]] = None, ): """Unwraps event and calls `on_new_room_event_args`.""" self.on_new_room_event_args( @@ -276,7 +281,7 @@ def on_new_room_event( state_key=event.get("state_key"), membership=event.content.get("membership"), max_room_stream_token=max_room_stream_token, - extra_users=extra_users, + extra_users=extra_users or [], ) def on_new_room_event_args( @@ -287,7 +292,7 @@ def on_new_room_event_args( membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, - extra_users: Collection[UserID] = [], + extra_users: Optional[Collection[UserID]] = None, ): """Used by handlers to inform the notifier something has happened in the room, room event wise. 
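These notifier signatures, like several others in this change, replace mutable default arguments (`extra_users: Collection[UserID] = []`) with `Optional[...] = None` plus an `extra_users or []` fallback. A standalone demonstration of the bug class this avoids (illustrative only, not Synapse code):

def broken(users=[]):
    # The default list is created once, when the function is defined,
    # and is then shared by every call that omits `users`.
    users.append("@alice:example.org")
    return users

print(broken())  # ['@alice:example.org']
print(broken())  # ['@alice:example.org', '@alice:example.org']

def fixed(users=None):
    users = users or []  # a fresh list on every call
    users.append("@alice:example.org")
    return users

print(fixed())  # ['@alice:example.org']
print(fixed())  # ['@alice:example.org']

The `or []` idiom also normalises an explicitly passed empty collection, which is harmless here since an empty default and an empty argument are treated identically.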
@@ -303,7 +308,7 @@ def on_new_room_event_args( self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, - extra_users=extra_users, + extra_users=extra_users or [], room_id=room_id, type=event_type, state_key=state_key, @@ -372,14 +377,14 @@ def _notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int, RoomStreamToken], - users: Collection[Union[str, UserID]] = [], + users: Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token = None if isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( - stream_key, stream_token, users + stream_key, stream_token, users or [] ) except Exception: logger.exception("Error notifying application services of event") @@ -394,16 +399,26 @@ def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], - users: Collection[Union[str, UserID]] = [], - rooms: Collection[str] = [], + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[Collection[str]] = None, ): """Used to inform listeners that something has happened event wise. Will wake up all listeners for the given users and rooms. """ + users = users or [] + rooms = rooms or [] + with Measure(self.clock, "on_new_event"): user_streams = set() + log_kv( + { + "waking_up_explicit_users": len(users), + "waking_up_explicit_rooms": len(rooms), + } + ) + for user in users: user_stream = self.user_to_user_stream.get(str(user)) if user_stream is not None: @@ -412,6 +427,13 @@ def on_new_event( for room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) + if stream_key == "to_device_key": + issue9533_logger.debug( + "to-device messages stream id %s, awaking streams for %s", + new_token, + users, + ) + time_now_ms = self.clock.time_msec() for user_stream in user_streams: try: @@ -476,12 +498,34 @@ async def wait_for_events( (end_time - now) / 1000.0, self.hs.get_reactor(), ) - with PreserveLoggingContext(): - await listener.deferred + + with start_active_span("wait_for_events.deferred"): + log_kv( + { + "wait_for_events": "sleep", + "token": prev_token, + } + ) + + with PreserveLoggingContext(): + await listener.deferred + + log_kv( + { + "wait_for_events": "woken", + "token": user_stream.current_token, + } + ) current_token = user_stream.current_token result = await callback(prev_token, current_token) + log_kv( + { + "wait_for_events": "result", + "result": bool(result), + } + ) if result: break @@ -489,8 +533,10 @@ async def wait_for_events( # has happened between the old prev_token and the current_token prev_token = current_token except defer.TimeoutError: + log_kv({"wait_for_events": "timeout"}) break except defer.CancelledError: + log_kv({"wait_for_events": "cancelled"}) break if result is None: @@ -507,7 +553,7 @@ async def get_events_for( pagination_config: PaginationConfig, timeout: int, is_guest: bool = False, - explicit_room_id: str = None, + explicit_room_id: Optional[str] = None, ) -> EventStreamResult: """For the given user and rooms, return any new events for them. 
If there are no new events wait for up to `timeout` milliseconds for any diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9fc3da49a281..2c23afe8e3ab 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 38a47a600f8e..60758df01664 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 1897f5915390..350646f45888 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # Copyright 2017 New Vector Ltd # @@ -107,6 +106,10 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self.auth = hs.get_auth() + # Used by `RulesForRoom` to ensure only one thing mutates the cache at a + # time. Keyed off room_id. + self._rules_linearizer = Linearizer(name="rules_for_room") + self.room_push_rule_cache_metrics = register_cache( "cache", "room_push_rule_cache", @@ -124,7 +127,16 @@ async def _get_rules_for_event( dict of user_id -> push_rules """ room_id = event.room_id - rules_for_room = self._get_rules_for_room(room_id) + + rules_for_room_data = self._get_rules_for_room(room_id) + rules_for_room = RulesForRoom( + hs=self.hs, + room_id=room_id, + rules_for_room_cache=self._get_rules_for_room.cache, + room_push_rule_cache_metrics=self.room_push_rule_cache_metrics, + linearizer=self._rules_linearizer, + cached_data=rules_for_room_data, + ) rules_by_user = await rules_for_room.get_rules(event, context) @@ -143,17 +155,12 @@ async def _get_rules_for_event( return rules_by_user @lru_cache() - def _get_rules_for_room(self, room_id: str) -> "RulesForRoom": - """Get the current RulesForRoom object for the given room id""" - # It's important that RulesForRoom gets added to self._get_rules_for_room.cache + def _get_rules_for_room(self, room_id: str) -> "RulesForRoomData": + """Get the current RulesForRoomData object for the given room id""" + # It's important that the RulesForRoomData object gets added to self._get_rules_for_room.cache # before any lookup methods get called on it as otherwise there may be # a race if invalidate_all gets called (which assumes its in the cache) - return RulesForRoom( - self.hs, - room_id, - self._get_rules_for_room.cache, - self.room_push_rule_cache_metrics, - ) + return RulesForRoomData() async def _get_power_levels_and_sender_level( self, event: EventBase, context: EventContext @@ -283,11 +290,49 @@ def _condition_checker( return True +@attr.s(slots=True) +class RulesForRoomData: + """The data stored in the cache by `RulesForRoom`. + + We don't store `RulesForRoom` directly in the cache as we want our caches to + *only* include data, and not references to e.g. the data stores. + """ + + # event_id -> (user_id, state) + member_map = attr.ib(type=Dict[str, Tuple[str, str]], factory=dict) + # user_id -> rules + rules_by_user = attr.ib(type=Dict[str, List[Dict[str, dict]]], factory=dict) + + # The last state group we updated the caches for. 
If the state_group of + # a new event comes along, we know that we can just return the cached + # result. + # On invalidation of the rules themselves (if the user changes them), + # we invalidate everything and set state_group to `object()` + state_group = attr.ib(type=Union[object, int], factory=object) + + # A sequence number to keep track of when we're allowed to update the + # cache. We bump the sequence number when we invalidate the cache. If + # the sequence number changes while we're calculating stuff we should + # not update the cache with it. + sequence = attr.ib(type=int, default=0) + + # A cache of user_ids that we *know* aren't interesting, e.g. user_ids + # owned by AS's, or remote users, etc. (I.e. users we will never need to + # calculate push for) + # These never need to be invalidated as we will never set up push for + # them. + uninteresting_user_set = attr.ib(type=Set[str], factory=set) + + class RulesForRoom: """Caches push rules for users in a room. This efficiently handles users joining/leaving the room by not invalidating the entire cache for the room. + + A new instance is constructed for each call to + `BulkPushRuleEvaluator._get_rules_for_event`, with the cached data from + previous calls passed in. """ def __init__( @@ -296,6 +341,8 @@ def __init__( room_id: str, rules_for_room_cache: LruCache, room_push_rule_cache_metrics: CacheMetric, + linearizer: Linearizer, + cached_data: RulesForRoomData, ): """ Args: @@ -304,38 +351,21 @@ def __init__( rules_for_room_cache: The cache object that caches these RoomsForUser objects. room_push_rule_cache_metrics: The metrics object + linearizer: The linearizer used to ensure only one thing mutates + the cache at a time. Keyed off room_id + cached_data: Cached data from previous calls to `self.get_rules`, + can be mutated. """ self.room_id = room_id self.is_mine_id = hs.is_mine_id self.store = hs.get_datastore() self.room_push_rule_cache_metrics = room_push_rule_cache_metrics - self.linearizer = Linearizer(name="rules_for_room") - - # event_id -> (user_id, state) - self.member_map = {} # type: Dict[str, Tuple[str, str]] - # user_id -> rules - self.rules_by_user = {} # type: Dict[str, List[Dict[str, dict]]] - - # The last state group we updated the caches for. If the state_group of - # a new event comes along, we know that we can just return the cached - # result. - # On invalidation of the rules themselves (if the user changes them), - # we invalidate everything and set state_group to `object()` - self.state_group = object() - - # A sequence number to keep track of when we're allowed to update the - # cache. We bump the sequence number when we invalidate the cache. If - # the sequence number changes while we're calculating stuff we should - # not update the cache with it. - self.sequence = 0 - - # A cache of user_ids that we *know* aren't interesting, e.g. user_ids - # owned by AS's, or remote users, etc. (I.e. users we will never need to - # calculate push for) - # These never need to be invalidated as we will never set up push for - # them. - self.uninteresting_user_set = set() # type: Set[str] + # Used to ensure only one thing mutates the cache at a time. Keyed off + # room_id. 
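# Editor's note: a hedged sketch (illustrative names, not Synapse's) of the
# pattern `RulesForRoomData` introduces above: the cache stores *only* plain
# data, and a short-lived worker object is rebuilt around that data for each
# call, so cache entries never pin references to stores or handlers.
import attr

@attr.s(slots=True)
class CachedRules:
    rules_by_user = attr.ib(type=dict, factory=dict)
    sequence = attr.ib(type=int, default=0)

class RulesWorker:
    def __init__(self, store, data: CachedRules):
        self._store = store  # heavyweight dependency, *not* cached
        self.data = data     # shared, mutable cache entry

cache = {}  # room_id -> CachedRules; stand-in for the LruCache
data = cache.setdefault("!room:example.org", CachedRules())
worker = RulesWorker(store=object(), data=data)
worker.data.rules_by_user["@alice:example.org"] = []
assert cache["!room:example.org"].rules_by_user == {"@alice:example.org": []}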
+ self.linearizer = linearizer + + self.data = cached_data # We need to be clever on the invalidating caches callbacks, as # otherwise the invalidation callback holds a reference to the object, @@ -353,25 +383,25 @@ async def get_rules( """ state_group = context.state_group - if state_group and self.state_group == state_group: + if state_group and self.data.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - return self.rules_by_user + return self.data.rules_by_user - with (await self.linearizer.queue(())): - if state_group and self.state_group == state_group: + with (await self.linearizer.queue(self.room_id)): + if state_group and self.data.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - return self.rules_by_user + return self.data.rules_by_user self.room_push_rule_cache_metrics.inc_misses() ret_rules_by_user = {} missing_member_event_ids = {} - if state_group and self.state_group == context.prev_group: + if state_group and self.data.state_group == context.prev_group: # If we have a simple delta then we can reuse most of the previous # results. - ret_rules_by_user = self.rules_by_user + ret_rules_by_user = self.data.rules_by_user current_state_ids = context.delta_ids push_rules_delta_state_cache_metric.inc_hits() @@ -394,24 +424,24 @@ async def get_rules( if typ != EventTypes.Member: continue - if user_id in self.uninteresting_user_set: + if user_id in self.data.uninteresting_user_set: continue if not self.is_mine_id(user_id): - self.uninteresting_user_set.add(user_id) + self.data.uninteresting_user_set.add(user_id) continue if self.store.get_if_app_services_interested_in_user(user_id): - self.uninteresting_user_set.add(user_id) + self.data.uninteresting_user_set.add(user_id) continue event_id = current_state_ids[key] - res = self.member_map.get(event_id, None) + res = self.data.member_map.get(event_id, None) if res: user_id, state = res if state == Membership.JOIN: - rules = self.rules_by_user.get(user_id, None) + rules = self.data.rules_by_user.get(user_id, None) if rules: ret_rules_by_user[user_id] = rules continue @@ -431,7 +461,7 @@ async def get_rules( else: # The push rules didn't change but lets update the cache anyway self.update_cache( - self.sequence, + self.data.sequence, members={}, # There were no membership changes rules_by_user=ret_rules_by_user, state_group=state_group, @@ -462,7 +492,7 @@ async def _update_rules_with_member_event_ids( for. Used when updating the cache. event: The event we are currently computing push rules for. """ - sequence = self.sequence + sequence = self.data.sequence rows = await self.store.get_membership_from_event_ids(member_event_ids.values()) @@ -502,23 +532,11 @@ async def _update_rules_with_member_event_ids( self.update_cache(sequence, members, ret_rules_by_user, state_group) - def invalidate_all(self) -> None: - # Note: Don't hand this function directly to an invalidation callback - # as it keeps a reference to self and will stop this instance from being - # GC'd if it gets dropped from the rules_to_user cache. 
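# Editor's note: standalone illustration (simplified, assumed names) of the GC
# concern in the comment above: registering a bound method as an invalidation
# callback would keep the whole object alive, so a small frozen callable that
# re-looks the entry up by key -- like the `_Invalidation` class shown just
# below -- is registered instead.
import attr

@attr.s(slots=True, frozen=True)
class Invalidation:
    cache = attr.ib()
    key = attr.ib()

    def __call__(self) -> None:
        entry = self.cache.get(self.key)
        if entry is not None:
            entry.clear()  # reset the cached *data*; no heavyweight refs held

cache = {"!room:example.org": {"@alice:example.org": ["rule"]}}
cb = Invalidation(cache, "!room:example.org")
cb()
assert cache["!room:example.org"] == {}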
Instead use - # `self.invalidate_all_cb` - logger.debug("Invalidating RulesForRoom for %r", self.room_id) - self.sequence += 1 - self.state_group = object() - self.member_map = {} - self.rules_by_user = {} - push_rules_invalidation_counter.inc() - def update_cache(self, sequence, members, rules_by_user, state_group) -> None: - if sequence == self.sequence: - self.member_map.update(members) - self.rules_by_user = rules_by_user - self.state_group = state_group + if sequence == self.data.sequence: + self.data.member_map.update(members) + self.data.rules_by_user = rules_by_user + self.data.state_group = state_group @attr.attrs(slots=True, frozen=True) @@ -536,6 +554,10 @@ class _Invalidation: room_id = attr.ib(type=str) def __call__(self) -> None: - rules = self.cache.get(self.room_id, None, update_metrics=False) - if rules: - rules.invalidate_all() + rules_data = self.cache.get(self.room_id, None, update_metrics=False) + if rules_data: + rules_data.sequence += 1 + rules_data.state_group = object() + rules_data.member_map = {} + rules_data.rules_by_user = {} + push_rules_invalidation_counter.inc() diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 0cadba761afe..2ee0ccd58aa9 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index c0968dc7a141..99a18874d1cf 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,8 +19,9 @@ from twisted.internet.interfaces import IDelayedCall from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.push import Pusher, PusherConfig, ThrottleParams +from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams from synapse.push.mailer import Mailer +from synapse.util.threepids import validate_email if TYPE_CHECKING: from synapse.server import HomeServer @@ -72,6 +72,12 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig, mailer: Mailer self._is_processing = False + # Make sure that the email is valid. + try: + validate_email(self.email) + except ValueError: + raise PusherConfigException("Invalid email") + def on_started(self, should_check_for_notifs: bool) -> None: """Called when this pusher has been started. 
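# Editor's note: illustrative sketch of the validate-on-construction pattern
# added to EmailPusher above. `looks_like_email` is a stand-in for Synapse's
# `validate_email` helper (whose exact checks are not shown in this diff).
class PusherConfigException(Exception):
    pass

def looks_like_email(address: str) -> None:
    if "@" not in address.strip() or address.startswith("@"):
        raise ValueError("not an email address")

class EmailPusherSketch:
    def __init__(self, email: str):
        # Fail fast at setup time, rather than when the first notification
        # email is attempted.
        try:
            looks_like_email(email)
        except ValueError:
            raise PusherConfigException("Invalid email")
        self.email = email

EmailPusherSketch("user@example.org")  # accepted
try:
    EmailPusherSketch("not-an-email")
except PusherConfigException:
    pass  # rejected at construction, as intended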
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 26af5309c1bb..06bf5f8ada84 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 2e5161de2c5b..c4b43b0d3fc8 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 04c2c1482ce2..412941393fe3 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index ba1877adcd96..98b90a4f516e 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # @@ -20,6 +19,7 @@ from synapse.events import EventBase from synapse.types import UserID +from synapse.util import glob_to_regex, re_word_boundary from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -184,7 +184,7 @@ def _contains_display_name(self, display_name: str) -> bool: r = regex_cache.get((display_name, False, True), None) if not r: r1 = re.escape(display_name) - r1 = _re_word_boundary(r1) + r1 = re_word_boundary(r1) r = re.compile(r1, flags=re.IGNORECASE) regex_cache[(display_name, False, True)] = r @@ -213,7 +213,7 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: try: r = regex_cache.get((glob, True, word_boundary), None) if not r: - r = _glob_to_re(glob, word_boundary) + r = glob_to_regex(glob, word_boundary) regex_cache[(glob, True, word_boundary)] = r return bool(r.search(value)) except re.error: @@ -221,56 +221,6 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: return False -def _glob_to_re(glob: str, word_boundary: bool) -> Pattern: - """Generates regex for a given glob. - - Args: - glob - word_boundary: Whether to match against word boundaries or entire string. - """ - if IS_GLOB.search(glob): - r = re.escape(glob) - - r = r.replace(r"\*", ".*?") - r = r.replace(r"\?", ".") - - # handle [abc], [a-z] and [!a-z] style ranges. - r = GLOB_REGEX.sub( - lambda x: ( - "[%s%s]" % (x.group(1) and "^" or "", x.group(2).replace(r"\\\-", "-")) - ), - r, - ) - if word_boundary: - r = _re_word_boundary(r) - - return re.compile(r, flags=re.IGNORECASE) - else: - r = "^" + r + "$" - - return re.compile(r, flags=re.IGNORECASE) - elif word_boundary: - r = re.escape(glob) - r = _re_word_boundary(r) - - return re.compile(r, flags=re.IGNORECASE) - else: - r = "^" + re.escape(glob) + "$" - return re.compile(r, flags=re.IGNORECASE) - - -def _re_word_boundary(r: str) -> str: - """ - Adds word boundary characters to the start and end of an - expression to require that the match occur as a whole word, - but do so respecting the fact that strings starting or ending - with non-word characters will change word boundaries. - """ - # we can't use \b as it chokes on unicode. however \W seems to be okay - # as shorthand for [^0-9A-Za-z_]. 
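# Editor's note: a quick, standalone demonstration of the word-boundary helper
# this diff relocates to `synapse.util.re_word_boundary` (see the import change
# above): `\b` chokes on some unicode, so `(^|\W)...(\W|$)` is used instead.
import re

def re_word_boundary(r: str) -> str:
    return r"(^|\W)%s(\W|$)" % (r,)

pattern = re.compile(re_word_boundary(re.escape("alice")), flags=re.IGNORECASE)
assert pattern.search("hey Alice!")        # matches as a whole word
assert not pattern.search("malice afoot")  # no match inside a longer word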
- return r"(^|\W)%s(\W|$)" % (r,) - - def _flatten_dict( d: Union[EventBase, dict], prefix: Optional[List[str]] = None, diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index df34103224fe..9c85200c0fb4 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index cb9412785060..c51938b8cf48 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 4c7f5fecee98..579fcdf47250 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,7 +62,9 @@ def __init__(self, hs: "HomeServer"): self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() - self._account_validity = hs.config.account_validity + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) # We shard the handling of push notifications by user ID. self._pusher_shard_config = hs.config.push.pusher_shard_config @@ -237,7 +238,7 @@ async def _on_new_notifications(self, max_token: RoomStreamToken) -> None: for u in users_affected: # Don't push if the user account has expired - if self._account_validity.enabled: + if self._account_validity_enabled: expired = await self.store.is_account_expired( u, self.clock.time_msec() ) @@ -267,7 +268,7 @@ async def on_new_receipts( for u in users_affected: # Don't push if the user account has expired - if self._account_validity.enabled: + if self._account_validity_enabled: expired = await self.store.is_account_expired( u, self.clock.time_msec() ) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2a1c925ee8b9..989523c82374 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -78,14 +78,15 @@ # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note: # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33 # is out in November.) - "attrs>=19.1.0", + # Note: 21.1.0 broke `/sync`, see #9936 + "attrs>=19.1.0,!=21.1.0", "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", "typing-extensions>=3.7.4", # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. - "cryptography>=3.4.7;python_version>='3.6'", + "cryptography>=3.4.7", ] CONDITIONAL_REQUIREMENTS = { @@ -100,14 +101,9 @@ # that use the protocol, such as Let's Encrypt. "acme": [ "txacme>=0.9.2", - # txacme depends on eliot. Eliot 1.8.0 is incompatible with - # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418 - "eliot<1.8.0;python_version<'3.5.3'", ], "saml2": [ - # pysaml2 6.4.0 is incompatible with Python 3.5 (see https://github.com/IdentityPython/pysaml2/issues/749) - "pysaml2>=4.5.0,<6.4.0;python_version<'3.6'", - "pysaml2>=4.5.0;python_version>='3.6'", + "pysaml2>=4.5.0", ], "oidc": ["authlib>=0.14.0"], # systemd-python is necessary for logging to the systemd journal via @@ -121,6 +117,8 @@ # hiredis is not a *strict* dependency, but it makes things much faster. 
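# Editor's note: a quick check, using the third-party `packaging` library (an
# assumption -- it is not part of this diff), that the `attrs>=19.1.0,!=21.1.0`
# pin added above excludes exactly the release that broke `/sync` (#9936).
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=19.1.0,!=21.1.0")
assert "19.1.0" in spec
assert "21.1.0" not in spec  # the broken release is ruled out
assert "21.2.0" in spec      # later releases are allowed again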
# (if it is not installed, we fall back to slow code.) "redis": ["txredisapi>=1.4.7", "hiredis"], + # Required to use experimental `caches.track_memory_usage` config option. + "cache_memory": ["pympler"], } ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] diff --git a/synapse/replication/__init__.py b/synapse/replication/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/synapse/replication/__init__.py +++ b/synapse/replication/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index cb4a52dbe9b4..ba8114ac9e13 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index b7aa0c280fe5..5685cf212144 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -159,7 +158,10 @@ async def _handle_request(self, request, **kwargs): def make_client(cls, hs): """Create a client that makes requests. - Returns a callable that accepts the same parameters as `_serialize_payload`. + Returns a callable that accepts the same parameters as + `_serialize_payload`, and also accepts an optional `instance_name` + parameter to specify which instance to hit (the instance must be in + the `instance_map` config). """ clock = hs.get_clock() client = hs.get_simple_http_client() diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index 60899b6ad622..70e951af6376 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 807b85d2e124..5a5818ef61e2 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 82ea3b895f9a..79cadb7b574c 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 4ec1bfa6eaf9..c2e8c0029312 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index c10992ff51e5..289a397d6885 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index bc9aa82cb495..f25307620d55 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index 054ed64d34dd..139427cb1f29 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index d005f3876717..d6dd7242eb20 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -77,7 +76,7 @@ async def _serialize_payload( async def _handle_request(self, request, user_id): content = parse_json_object_from_request(request) - self.registration_handler.check_registration_ratelimit(content["address"]) + await self.registration_handler.check_registration_ratelimit(content["address"]) await self.registration_handler.register_with_store( user_id=user_id, diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index a4c5b4429207..fae5ffa451d3 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py index 309159e3048b..9afa147d00c1 100644 --- a/synapse/replication/http/streams.py +++ b/synapse/replication/http/streams.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/synapse/replication/slave/__init__.py +++ b/synapse/replication/slave/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/synapse/replication/slave/storage/__init__.py +++ b/synapse/replication/slave/storage/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 693c9ab901cd..faa99387a745 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py index 0d39a93ed229..2cb7489047f7 100644 --- a/synapse/replication/slave/storage/_slaved_id_tracker.py +++ b/synapse/replication/slave/storage/_slaved_id_tracker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index 21afe5f15551..ee74ee7d8547 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/appservice.py 
b/synapse/replication/slave/storage/appservice.py index 0f8d7037bde1..29f50c0add5f 100644 --- a/synapse/replication/slave/storage/appservice.py +++ b/synapse/replication/slave/storage/appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 0f5b7adef781..8730966380de 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 1260f6d1412f..e94075108465 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index e0d86240dd19..70207420a6b0 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py index 1945bcf9a8d8..71fde0c96cc6 100644 --- a/synapse/replication/slave/storage/directory.py +++ b/synapse/replication/slave/storage/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index fbffe6d85c28..d4d3f8c44876 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 6a232528610b..37875bc9730f 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 30955bcbfe0f..e9bdc3847006 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py index 961579751cdf..a00b38c512a0 100644 --- a/synapse/replication/slave/storage/keys.py +++ b/synapse/replication/slave/storage/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py deleted file mode 
100644 index 55620c03d8c3..000000000000 --- a/synapse/replication/slave/storage/presence.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.replication.tcp.streams import PresenceStream -from synapse.storage import DataStore -from synapse.storage.database import DatabasePool -from synapse.storage.databases.main.presence import PresenceStore -from synapse.util.caches.stream_change_cache import StreamChangeCache - -from ._base import BaseSlavedStore -from ._slaved_id_tracker import SlavedIdTracker - - -class SlavedPresenceStore(BaseSlavedStore): - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id") - - self._presence_on_startup = self._get_active_presence(db_conn) # type: ignore - - self.presence_stream_cache = StreamChangeCache( - "PresenceStreamChangeCache", self._presence_id_gen.get_current_token() - ) - - _get_active_presence = DataStore._get_active_presence - take_presence_startup_info = DataStore.take_presence_startup_info - _get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"] - get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"] - - def get_current_presence_token(self): - return self._presence_id_gen.get_current_token() - - def process_replication_rows(self, stream_name, instance_name, token, rows): - if stream_name == PresenceStream.NAME: - self._presence_id_gen.advance(instance_name, token) - for row in rows: - self.presence_stream_cache.entity_has_changed(row.user_id, token) - self._get_presence_for_user.invalidate((row.user_id,)) - return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/slave/storage/profile.py b/synapse/replication/slave/storage/profile.py index f85b20a07177..99f4a2264287 100644 --- a/synapse/replication/slave/storage/profile.py +++ b/synapse/replication/slave/storage/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index de904c943cc0..4d5f86286213 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index 93161c3dfb97..2672a2c94b15 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py index 
3dfdd9961d26..3826b87decaf 100644 --- a/synapse/replication/slave/storage/receipts.py +++ b/synapse/replication/slave/storage/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py index a40f064e2b63..5dae35a9605f 100644 --- a/synapse/replication/slave/storage/registration.py +++ b/synapse/replication/slave/storage/registration.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py index 109ac6bea141..8cc6de3f4698 100644 --- a/synapse/replication/slave/storage/room.py +++ b/synapse/replication/slave/storage/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py index 2091ac0df67d..a59e54392441 100644 --- a/synapse/replication/slave/storage/transactions.py +++ b/synapse/replication/slave/storage/transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/__init__.py b/synapse/replication/tcp/__init__.py index 1b8718b11daa..1fa60af8e6bc 100644 --- a/synapse/replication/tcp/__init__.py +++ b/synapse/replication/tcp/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 3455839d672f..62d780917500 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,22 +14,35 @@ """A replication client for use by synapse workers. 
""" import logging -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple from twisted.internet.defer import Deferred from twisted.internet.protocol import ReconnectingClientFactory from synapse.api.constants import EventTypes +from synapse.federation import send_queue +from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol -from synapse.replication.tcp.streams import TypingStream +from synapse.replication.tcp.streams import ( + AccountDataStream, + DeviceListsStream, + GroupServerStream, + PushersStream, + PushRulesStream, + ReceiptsStream, + TagAccountDataStream, + ToDeviceStream, + TypingStream, +) from synapse.replication.tcp.streams.events import ( EventsStream, EventsStreamEventRow, EventsStreamRow, ) -from synapse.types import PersistedEventPosition, UserID -from synapse.util.async_helpers import timeout_deferred +from synapse.types import PersistedEventPosition, ReadReceipt, UserID +from synapse.util.async_helpers import Linearizer, timeout_deferred from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -39,7 +51,6 @@ logger = logging.getLogger(__name__) - # How long we allow callers to wait for replication updates before timing out. _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 30 @@ -106,6 +117,14 @@ def __init__(self, hs: "HomeServer"): self._instance_name = hs.get_instance_name() self._typing_handler = hs.get_typing_handler() + self._notify_pushers = hs.config.start_pushers + self._pusher_pool = hs.get_pusherpool() + self._presence_handler = hs.get_presence_handler() + + self.send_handler = None # type: Optional[FederationSenderHandler] + if hs.should_send_federation(): + self.send_handler = FederationSenderHandler(hs) + # Map from stream to list of deferreds waiting for the stream to # arrive at a particular position. The lists are sorted by stream position. 
self._streams_to_waiters = {} # type: Dict[str, List[Tuple[int, Deferred]]] @@ -126,13 +145,51 @@ async def on_rdata( """ self.store.process_replication_rows(stream_name, instance_name, token, rows) + if self.send_handler: + await self.send_handler.process_replication_rows(stream_name, token, rows) + if stream_name == TypingStream.NAME: self._typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( "typing_key", token, rooms=[row.room_id for row in rows] ) - - if stream_name == EventsStream.NAME: + elif stream_name == PushRulesStream.NAME: + self.notifier.on_new_event( + "push_rules_key", token, users=[row.user_id for row in rows] + ) + elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): + self.notifier.on_new_event( + "account_data_key", token, users=[row.user_id for row in rows] + ) + elif stream_name == ReceiptsStream.NAME: + self.notifier.on_new_event( + "receipt_key", token, rooms=[row.room_id for row in rows] + ) + await self._pusher_pool.on_new_receipts( + token, token, {row.room_id for row in rows} + ) + elif stream_name == ToDeviceStream.NAME: + entities = [row.entity for row in rows if row.entity.startswith("@")] + if entities: + self.notifier.on_new_event("to_device_key", token, users=entities) + elif stream_name == DeviceListsStream.NAME: + all_room_ids = set() # type: Set[str] + for row in rows: + if row.entity.startswith("@"): + room_ids = await self.store.get_rooms_for_user(row.entity) + all_room_ids.update(room_ids) + self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) + elif stream_name == GroupServerStream.NAME: + self.notifier.on_new_event( + "groups_key", token, users=[row.user_id for row in rows] + ) + elif stream_name == PushersStream.NAME: + for row in rows: + if row.deleted: + self.stop_pusher(row.user_id, row.app_id, row.pushkey) + else: + await self.start_pusher(row.user_id, row.app_id, row.pushkey) + elif stream_name == EventsStream.NAME: # We shouldn't get multiple rows per token for events stream, so # we don't need to optimise this for multiple rows. for row in rows: @@ -160,6 +217,10 @@ async def on_rdata( membership=row.data.membership, ) + await self._presence_handler.process_replication_rows( + stream_name, instance_name, token, rows + ) + # Notify any waiting deferreds. The list is ordered by position so we # just iterate through the list until we reach a position that is # greater than the received row position. @@ -191,7 +252,7 @@ async def on_rdata( waiting_list[:] = waiting_list[index_of_first_deferred_not_called:] async def on_position(self, stream_name: str, instance_name: str, token: int): - self.store.process_replication_rows(stream_name, instance_name, token, []) + await self.on_rdata(stream_name, instance_name, token, []) # We poke the generic "replication" notifier to wake anything up that # may be streaming. @@ -200,6 +261,11 @@ async def on_position(self, stream_name: str, instance_name: str, token: int): def on_remote_server_up(self, server: str): """Called when get a new REMOTE_SERVER_UP command.""" + # Let's wake up the transaction queue for the server in case we have + # pending stuff to send to it. 
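# Editor's note: schematic of the stream dispatch in `on_rdata` above, with
# made-up row shapes -- each replication stream name maps to the notifier key
# to wake and a way to pull the affected user/room IDs out of its rows.
ROUTES = {
    "push_rules": ("push_rules_key", "users", lambda row: row["user_id"]),
    "receipts": ("receipt_key", "rooms", lambda row: row["room_id"]),
}

def dispatch(stream_name, token, rows, notify):
    route = ROUTES.get(stream_name)
    if route is None:
        return  # streams with no listeners to wake fall through
    key, kind, extract = route
    notify(key, token, **{kind: [extract(row) for row in rows]})

calls = []
dispatch("receipts", 7, [{"room_id": "!a:x"}],
         lambda key, token, **kw: calls.append((key, token, kw)))
assert calls == [("receipt_key", 7, {"rooms": ["!a:x"]})]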
+ if self.send_handler: + self.send_handler.wake_destination(server) + async def wait_for_stream_position( self, instance_name: str, stream_name: str, position: int ): @@ -236,3 +302,153 @@ async def wait_for_stream_position( logger.info( "Finished waiting for repl stream %r to reach %s", stream_name, position ) + + def stop_pusher(self, user_id, app_id, pushkey): + if not self._notify_pushers: + return + + key = "%s:%s" % (app_id, pushkey) + pushers_for_user = self._pusher_pool.pushers.get(user_id, {}) + pusher = pushers_for_user.pop(key, None) + if pusher is None: + return + logger.info("Stopping pusher %r / %r", user_id, key) + pusher.on_stop() + + async def start_pusher(self, user_id, app_id, pushkey): + if not self._notify_pushers: + return + + key = "%s:%s" % (app_id, pushkey) + logger.info("Starting pusher %r / %r", user_id, key) + return await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) + + +class FederationSenderHandler: + """Processes the federation replication stream + + This class is only instantiated on the worker responsible for sending outbound + federation transactions. It receives rows from the replication stream and forwards + the appropriate entries to the FederationSender class. + """ + + def __init__(self, hs: "HomeServer"): + assert hs.should_send_federation() + + self.store = hs.get_datastore() + self._is_mine_id = hs.is_mine_id + self._hs = hs + + # We need to make a temporary value to ensure that mypy picks up the + # right type. We know we should have a federation sender instance since + # `should_send_federation` is True. + sender = hs.get_federation_sender() + assert isinstance(sender, FederationSender) + self.federation_sender = sender + + # Stores the latest position in the federation stream we've gotten up + # to. This is always set before we use it. + self.federation_position = None # type: Optional[int] + + self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") + + def wake_destination(self, server: str): + self.federation_sender.wake_destination(server) + + async def process_replication_rows(self, stream_name, token, rows): + # The federation stream contains things that we want to send out, e.g. + # presence, typing, etc. + if stream_name == "federation": + send_queue.process_rows_for_federation(self.federation_sender, rows) + await self.update_token(token) + + # ... and when new receipts happen + elif stream_name == ReceiptsStream.NAME: + await self._on_new_receipts(rows) + + # ... as well as device updates and messages + elif stream_name == DeviceListsStream.NAME: + # The entities are either user IDs (starting with '@') whose devices + # have changed, or remote servers that we need to tell about + # changes. + hosts = {row.entity for row in rows if not row.entity.startswith("@")} + for host in hosts: + self.federation_sender.send_device_messages(host) + + elif stream_name == ToDeviceStream.NAME: + # The to_device stream includes stuff to be pushed to both local + # clients and remote servers, so we ignore entities that start with + # '@' (since they'll be local users rather than destinations).
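# Editor's note: standalone illustration of the entity split used in the
# branches above -- to-device stream rows carry either local user IDs (which
# start with '@') or remote server names, and the federation sender only
# forwards to the latter.
entities = ["@alice:example.org", "matrix.org", "@bob:example.org", "chat.example.com"]

local_users = [e for e in entities if e.startswith("@")]
remote_hosts = {e for e in entities if not e.startswith("@")}

assert local_users == ["@alice:example.org", "@bob:example.org"]
assert remote_hosts == {"matrix.org", "chat.example.com"}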
+ hosts = {row.entity for row in rows if not row.entity.startswith("@")} + for host in hosts: + self.federation_sender.send_device_messages(host) + + async def _on_new_receipts(self, rows): + """ + Args: + rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): + new receipts to be processed + """ + for receipt in rows: + # we only want to send on receipts for our own users + if not self._is_mine_id(receipt.user_id): + continue + receipt_info = ReadReceipt( + receipt.room_id, + receipt.receipt_type, + receipt.user_id, + [receipt.event_id], + receipt.data, + ) + await self.federation_sender.send_read_receipt(receipt_info) + + async def update_token(self, token): + """Update the record of where we have processed to in the federation stream. + + Called after we have processed an update received over replication. Sends + a FEDERATION_ACK back to the master, and stores the token that we have processed + in `federation_stream_position` so that we can restart where we left off. + """ + self.federation_position = token + + # We save and send the ACK to master asynchronously, so we don't block + # processing on persistence. We don't need to do this operation for + # every single RDATA we receive, we just need to do it periodically. + + if self._fed_position_linearizer.is_queued(None): + # There is already a task queued up to save and send the token, so + # no need to queue up another task. + return + + run_as_background_process("_save_and_send_ack", self._save_and_send_ack) + + async def _save_and_send_ack(self): + """Save the current federation position in the database and send an ACK + to master with where we're up to. + """ + # We should only be calling this once we've got a token. + assert self.federation_position is not None + + try: + # We linearize here to ensure we don't have races updating the token + # + # XXX this appears to be redundant, since the ReplicationCommandHandler + # has a linearizer which ensures that we only process one line of + # replication data at a time. Should we remove it, or is it doing useful + # service for robustness? Or could we replace it with an assertion that + # we're not being re-entered? + + with (await self._fed_position_linearizer.queue(None)): + # We persist and ack the same position, so we take a copy of it + # here as otherwise it can get modified from underneath us. + current_position = self.federation_position + + await self.store.update_federation_out_pos( + "federation", current_position + ) + + # We ACK this token over replication so that the master can drop + # its in memory queues + self._hs.get_tcp_replication().send_federation_ack(current_position) + except Exception: + logger.exception("Error updating federation stream position") diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 8abed1f52d3e..505d450e1991 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index d89a36f25a59..b402f82810fa 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C.
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +15,7 @@ import logging from typing import TYPE_CHECKING, Any, Optional -from prometheus_client import Counter +from prometheus_client import Counter, Histogram from synapse.logging.context import make_deferred_yieldable from synapse.util import json_decoder, json_encoder @@ -36,6 +35,20 @@ labelnames=["cache_name", "hit"], ) +response_timer = Histogram( + "synapse_external_cache_response_time_seconds", + "Time taken to get a response from Redis for a cache get/set request", + labelnames=["method"], + buckets=( + 0.001, + 0.002, + 0.005, + 0.01, + 0.02, + 0.05, + ), +) + logger = logging.getLogger(__name__) @@ -73,13 +86,14 @@ async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> No logger.debug("Caching %s %s: %r", cache_name, key, encoded_value) - return await make_deferred_yieldable( - self._redis_connection.set( - self._get_redis_key(cache_name, key), - encoded_value, - pexpire=expiry_ms, + with response_timer.labels("set").time(): + return await make_deferred_yieldable( + self._redis_connection.set( + self._get_redis_key(cache_name, key), + encoded_value, + pexpire=expiry_ms, + ) ) - ) async def get(self, cache_name: str, key: str) -> Optional[Any]: """Look up a key/value in the named cache.""" @@ -87,9 +101,10 @@ async def get(self, cache_name: str, key: str) -> Optional[Any]: if self._redis_connection is None: return None - result = await make_deferred_yieldable( - self._redis_connection.get(self._get_redis_key(cache_name, key)) - ) + with response_timer.labels("get").time(): + result = await make_deferred_yieldable( + self._redis_connection.get(self._get_redis_key(cache_name, key)) + ) logger.debug("Got cache result %s %s: %r", cache_name, key, result) diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index a8894beadfd1..7ced4c543c26 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -56,6 +55,8 @@ CachesStream, EventsStream, FederationStream, + PresenceFederationStream, + PresenceStream, ReceiptsStream, Stream, TagAccountDataStream, @@ -100,6 +101,10 @@ def __init__(self, hs: "HomeServer"): self._instance_id = hs.get_instance_id() self._instance_name = hs.get_instance_name() + self._is_presence_writer = ( + hs.get_instance_name() in hs.config.worker.writers.presence + ) + self._streams = { stream.NAME: stream(hs) for stream in STREAMS_MAP.values() } # type: Dict[str, Stream] @@ -154,6 +159,14 @@ def __init__(self, hs: "HomeServer"): continue + if isinstance(stream, (PresenceStream, PresenceFederationStream)): + # Only add PresenceStream as a source on the instance in charge + # of presence. + if self._is_presence_writer: + self._streams_to_replicate.append(stream) + + continue + # Only add any other streams if we're on master. 
if hs.config.worker_app is not None: continue @@ -351,7 +364,7 @@ def on_USER_SYNC( ) -> Optional[Awaitable[None]]: user_sync_counter.inc() - if self._is_master: + if self._is_presence_writer: return self._presence_handler.update_external_syncs_row( cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms ) @@ -361,7 +374,7 @@ def on_USER_SYNC( def on_CLEAR_USER_SYNC( self, conn: IReplicationConnection, cmd: ClearUserSyncsCommand ) -> Optional[Awaitable[None]]: - if self._is_master: + if self._is_presence_writer: return self._presence_handler.update_external_syncs_clear(cmd.instance_id) else: return None diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index e829add2578d..6e3705364f47 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,7 +49,7 @@ import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, Collection, List, Optional from prometheus_client import Counter from zope.interface import Interface, implementer @@ -77,7 +76,6 @@ ServerCommand, parse_command_from_line, ) -from synapse.types import Collection from synapse.util import Clock from synapse.util.stringutils import random_string @@ -184,8 +182,9 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. - ctx_name = "replication-conn-%s" % self.conn_id - self._logging_context = BackgroundProcessLoggingContext(ctx_name, ctx_name) + self._logging_context = BackgroundProcessLoggingContext( + "replication-conn", self.conn_id + ) def connectionMade(self): logger.info("[%s] Connection established", self.id()) diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 2f4d407f9482..6a2c2655e4bd 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,7 +59,7 @@ class ConstantProperty(Generic[T, V]): constant = attr.ib() # type: V - def __get__(self, obj: Optional[T], objtype: Type[T] = None) -> V: + def __get__(self, obj: Optional[T], objtype: Optional[Type[T]] = None) -> V: return self.constant def __set__(self, obj: Optional[T], value: V): diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 2018f9f29ed5..bd47d8425814 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index d1a61c331480..4c0023c68aee 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # @@ -31,6 +30,7 @@ CachesStream, DeviceListsStream, GroupServerStream, + PresenceFederationStream, PresenceStream, PublicRoomsStream, PushersStream, @@ -51,6 +51,7 @@ EventsStream, BackfillStream, PresenceStream, + PresenceFederationStream, TypingStream, ReceiptsStream, PushRulesStream, @@ -72,6 +73,7 @@ "Stream", "BackfillStream", "PresenceStream", + "PresenceFederationStream", "TypingStream", "ReceiptsStream", "PushRulesStream", diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 3dfee7674344..b03824925a91 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # @@ -273,15 +272,22 @@ class PresenceStream(Stream): NAME = "presence" ROW_TYPE = PresenceStreamRow - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): store = hs.get_datastore() - if hs.config.worker_app is None: - # on the master, query the presence handler + if hs.get_instance_name() in hs.config.worker.writers.presence: + # on the presence writer, query the presence handler presence_handler = hs.get_presence_handler() - update_function = presence_handler.get_all_presence_updates + + from synapse.handlers.presence import PresenceHandler + + assert isinstance(presence_handler, PresenceHandler) + + update_function = ( + presence_handler.get_all_presence_updates + ) # type: UpdateFunction else: - # Query master process + # Query presence writer process update_function = make_http_update_function(hs, self.NAME) super().__init__( @@ -291,6 +297,30 @@ def __init__(self, hs): ) +class PresenceFederationStream(Stream): + """A stream used to send ad hoc presence updates over federation. + + Streams the remote destination and the user ID of the presence state to + send. 
+ """ + + @attr.s(slots=True, auto_attribs=True) + class PresenceFederationStreamRow: + destination: str + user_id: str + + NAME = "presence_federation" + ROW_TYPE = PresenceFederationStreamRow + + def __init__(self, hs: "HomeServer"): + federation_queue = hs.get_presence_handler().get_federation_queue() + super().__init__( + hs.get_instance_name(), + federation_queue.get_current_token, + federation_queue.get_replication_rows, + ) + + class TypingStream(Stream): TypingStreamRow = namedtuple( "TypingStreamRow", ("room_id", "user_ids") # str # list(str) diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index fa5e37ba7bc5..e7e87bac9241 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 9bb8e9e17731..096a85d36306 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html new file mode 100644 index 000000000000..b751359bdfb7 --- /dev/null +++ b/synapse/res/templates/account_previously_renewed.html @@ -0,0 +1 @@ +Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html index 894da030afb7..e8c0f52f0542 100644 --- a/synapse/res/templates/account_renewed.html +++ b/synapse/res/templates/account_renewed.html @@ -1 +1 @@ -Your account has been successfully renewed. +Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 40f5c32db2df..79d52d2dcb43 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 8457db1e2275..9cb9a9f6aa93 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2020, 2021 The Matrix.org Foundation C.I.C. 
@@ -54,6 +53,7 @@ AccountValidityRenewServlet, DeactivateAccountRestServlet, PushersRestServlet, + RateLimitRestServlet, ResetPasswordRestServlet, SearchUsersRestServlet, ShadowBanRestServlet, @@ -62,7 +62,6 @@ UserMembershipRestServlet, UserRegisterServlet, UserRestServletV2, - UsersRestServlet, UsersRestServletV2, UserTokenRestServlet, WhoisRestServlet, @@ -240,6 +239,7 @@ def register_servlets(hs, http_server): ShadowBanRestServlet(hs).register(http_server) ForwardExtremitiesRestServlet(hs).register(http_server) RoomEventContextServlet(hs).register(http_server) + RateLimitRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): @@ -248,7 +248,6 @@ def register_servlets_for_client_rest_resource(hs, http_server): PurgeHistoryStatusRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) PurgeHistoryRestServlet(hs).register(http_server) - UsersRestServlet(hs).register(http_server) ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) ShutdownRoomRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index 7681e55b5832..f203f6fdc6e5 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 5996de11c3ed..5715190a78cc 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index 381c3fe6853d..bbfcaf723b7b 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py index ebc587aa0603..3b3ffde0b65c 100644 --- a/synapse/rest/admin/groups.py +++ b/synapse/rest/admin/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 40646ef241cc..24dd46113a29 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py index 49966ee3e044..2365ff7a0f26 100644 --- a/synapse/rest/admin/purge_room_servlet.py +++ b/synapse/rest/admin/purge_room_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index cfe1bebb91b2..f289ffe3d084 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019-2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,9 +37,11 @@ from synapse.util import json_decoder if TYPE_CHECKING: + from synapse.api.auth import Auth + from synapse.handlers.pagination import PaginationHandler + from synapse.handlers.room import RoomShutdownHandler from synapse.server import HomeServer - logger = logging.getLogger(__name__) @@ -147,50 +148,14 @@ def __init__(self, hs: "HomeServer"): async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) - - content = parse_json_object_from_request(request) - - block = content.get("block", False) - if not isinstance(block, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'block' must be a boolean, if given", - Codes.BAD_JSON, - ) - - purge = content.get("purge", True) - if not isinstance(purge, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'purge' must be a boolean, if given", - Codes.BAD_JSON, - ) - - force_purge = content.get("force_purge", False) - if not isinstance(force_purge, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'force_purge' must be a boolean, if given", - Codes.BAD_JSON, - ) - - ret = await self.room_shutdown_handler.shutdown_room( - room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, + return await _delete_room( + request, + room_id, + self.auth, + self.room_shutdown_handler, + self.pagination_handler, ) - # Purge room - if purge: - await self.pagination_handler.purge_room(room_id, force=force_purge) - - return (200, ret) - class ListRoomRestServlet(RestServlet): """ @@ -283,7 +248,22 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: class RoomRestServlet(RestServlet): - """Get room details. + """Manage a room. + + On GET : Get details of a room. + + On DELETE : Delete a room from the server. + + It combines and improves on the earlier shutdown and purge-room admin APIs. + + Shuts down a room by removing all local users from the room. + Blocking all future invites and joins to the room is optional. + + If desired, any local aliases will be repointed to a new room + created by `new_room_user_id`, and kicked users will be + auto-joined to the new room. + + If 'purge' is true, all traces of the room will be removed from the database.
TODO: Add on_POST to allow room creation without joining the room """ @@ -294,6 +274,8 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() + self.room_shutdown_handler = hs.get_room_shutdown_handler() + self.pagination_handler = hs.get_pagination_handler() async def on_GET( self, request: SynapseRequest, room_id: str @@ -309,6 +291,17 @@ async def on_GET( return (200, ret) + async def on_DELETE( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + return await _delete_room( + request, + room_id, + self.auth, + self.room_shutdown_handler, + self.pagination_handler, + ) + class RoomMembersRestServlet(RestServlet): """ @@ -695,3 +688,55 @@ async def on_GET( ) return 200, results + + +async def _delete_room( + request: SynapseRequest, + room_id: str, + auth: "Auth", + room_shutdown_handler: "RoomShutdownHandler", + pagination_handler: "PaginationHandler", +) -> Tuple[int, JsonDict]: + requester = await auth.get_user_by_req(request) + await assert_user_is_admin(auth, requester.user) + + content = parse_json_object_from_request(request) + + block = content.get("block", False) + if not isinstance(block, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'block' must be a boolean, if given", + Codes.BAD_JSON, + ) + + purge = content.get("purge", True) + if not isinstance(purge, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'purge' must be a boolean, if given", + Codes.BAD_JSON, + ) + + force_purge = content.get("force_purge", False) + if not isinstance(force_purge, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'force_purge' must be a boolean, if given", + Codes.BAD_JSON, + ) + + ret = await room_shutdown_handler.shutdown_room( + room_id=room_id, + new_room_user_id=content.get("new_room_user_id"), + new_room_name=content.get("room_name"), + message=content.get("message"), + requester_user_id=requester.user.to_string(), + block=block, + ) + + # Purge room + if purge: + await pagination_handler.purge_room(room_id, force=force_purge) + + return (200, ret) diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index f495666f4a71..cc3ab5854b0b 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index f2490e382dcf..948de94ccd6a 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 309bd2771bdb..8c9d21d3ea1b 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
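# [Editorial sketch, not part of the patch] Request shape accepted by _delete_room
# above; all fields are optional, with defaults taken from the code. The endpoint
# path is assumed from the admin API conventions, not shown in this hunk:
#
#   DELETE /_synapse/admin/v1/rooms/<room_id>
#   {
#       "new_room_user_id": "@admin:example.com",
#       "room_name": "Content Violation",
#       "message": "This room has been shut down.",
#       "block": false,        # default: false
#       "purge": true,         # default: true
#       "force_purge": false   # default: false; only consulted when purging
#   }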
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +14,7 @@ import hashlib import hmac import logging +import secrets from http import HTTPStatus from typing import TYPE_CHECKING, Dict, List, Optional, Tuple @@ -36,6 +36,7 @@ ) from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.databases.main.media_repository import MediaSortOrder +from synapse.storage.databases.main.stats import UserSortOrder from synapse.types import JsonDict, UserID if TYPE_CHECKING: @@ -44,29 +45,6 @@ logger = logging.getLogger(__name__) -class UsersRestServlet(RestServlet): - PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)$") - - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.store = hs.get_datastore() - self.auth = hs.get_auth() - self.admin_handler = hs.get_admin_handler() - - async def on_GET( - self, request: SynapseRequest, user_id: str - ) -> Tuple[int, List[JsonDict]]: - target_user = UserID.from_string(user_id) - await assert_requester_is_admin(self.auth, request) - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "Can only users a local user") - - ret = await self.store.get_users() - - return 200, ret - - class UsersRestServletV2(RestServlet): PATTERNS = admin_patterns("/users$", "v2") @@ -117,8 +95,26 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: guests = parse_boolean(request, "guests", default=True) deactivated = parse_boolean(request, "deactivated", default=False) + order_by = parse_string( + request, + "order_by", + default=UserSortOrder.NAME.value, + allowed_values=( + UserSortOrder.NAME.value, + UserSortOrder.DISPLAYNAME.value, + UserSortOrder.GUEST.value, + UserSortOrder.ADMIN.value, + UserSortOrder.DEACTIVATED.value, + UserSortOrder.USER_TYPE.value, + UserSortOrder.AVATAR_URL.value, + UserSortOrder.SHADOW_BANNED.value, + ), + ) + + direction = parse_string(request, "dir", default="f", allowed_values=("f", "b")) + users, total = await self.store.get_users_paginate( - start, limit, user_id, name, guests, deactivated + start, limit, user_id, name, guests, deactivated, order_by, direction ) ret = {"users": users, "total": total} if (start + limit) < total: @@ -380,7 +376,7 @@ def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: """ self._clear_old_nonces() - nonce = self.hs.get_secrets().token_hex(64) + nonce = secrets.token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) return 200, {"nonce": nonce} @@ -985,3 +981,114 @@ async def on_POST( await self.store.set_shadow_banned(UserID.from_string(user_id), True) return 200, {} + + +class RateLimitRestServlet(RestServlet): + """An admin API to override ratelimiting for a user.
+ + Example: + POST /_synapse/admin/v1/users/@test:example.com/override_ratelimit + { + "messages_per_second": 0, + "burst_count": 0 + } + 200 OK + { + "messages_per_second": 0, + "burst_count": 0 + } + """ + + PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/override_ratelimit") + + def __init__(self, hs: "HomeServer"): + self.hs = hs + self.store = hs.get_datastore() + self.auth = hs.get_auth() + + async def on_GET( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self.auth, request) + + if not self.hs.is_mine_id(user_id): + raise SynapseError(400, "Can only look up local users") + + if not await self.store.get_user_by_id(user_id): + raise NotFoundError("User not found") + + ratelimit = await self.store.get_ratelimit_for_user(user_id) + + if ratelimit: + # convert `null` to `0` for consistency + # both values behave the same in the ratelimit handler + ret = { + "messages_per_second": 0 + if ratelimit.messages_per_second is None + else ratelimit.messages_per_second, + "burst_count": 0 + if ratelimit.burst_count is None + else ratelimit.burst_count, + } + else: + ret = {} + + return 200, ret + + async def on_POST( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self.auth, request) + + if not self.hs.is_mine_id(user_id): + raise SynapseError(400, "Only local users can be ratelimited") + + if not await self.store.get_user_by_id(user_id): + raise NotFoundError("User not found") + + body = parse_json_object_from_request(request, allow_empty_body=True) + + messages_per_second = body.get("messages_per_second", 0) + burst_count = body.get("burst_count", 0) + + if not isinstance(messages_per_second, int) or messages_per_second < 0: + raise SynapseError( + 400, + "%r parameter must be a positive int" % ("messages_per_second",), + errcode=Codes.INVALID_PARAM, + ) + + if not isinstance(burst_count, int) or burst_count < 0: + raise SynapseError( + 400, + "%r parameter must be a positive int" % ("burst_count",), + errcode=Codes.INVALID_PARAM, + ) + + await self.store.set_ratelimit_for_user( + user_id, messages_per_second, burst_count + ) + ratelimit = await self.store.get_ratelimit_for_user(user_id) + assert ratelimit is not None + + ret = { + "messages_per_second": ratelimit.messages_per_second, + "burst_count": ratelimit.burst_count, + } + + return 200, ret + + async def on_DELETE( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self.auth, request) + + if not self.hs.is_mine_id(user_id): + raise SynapseError(400, "Only local users can be ratelimited") + + if not await self.store.get_user_by_id(user_id): + raise NotFoundError("User not found") + + await self.store.delete_ratelimit_for_user(user_id) + + return 200, {} diff --git a/synapse/rest/client/__init__.py b/synapse/rest/client/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/synapse/rest/client/__init__.py +++ b/synapse/rest/client/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 7be5c0fb8807..94ff3719ce88 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git
a/synapse/rest/client/v1/__init__.py b/synapse/rest/client/v1/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/rest/client/v1/__init__.py +++ b/synapse/rest/client/v1/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index e5af26b176dc..ae92a3df8e35 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 6de4078290ba..ee7454996e5a 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 91da0ee57303..bef1edc838ab 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index e4c352f572a2..42e709ec14bd 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -74,11 +73,13 @@ def __init__(self, hs: "HomeServer"): self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( + store=hs.get_datastore(), clock=hs.get_clock(), rate_hz=self.hs.config.rc_login_address.per_second, burst_count=self.hs.config.rc_login_address.burst_count, ) self._account_ratelimiter = Ratelimiter( + store=hs.get_datastore(), clock=hs.get_clock(), rate_hz=self.hs.config.rc_login_account.per_second, burst_count=self.hs.config.rc_login_account.burst_count, @@ -141,20 +142,22 @@ async def on_POST(self, request: SynapseRequest): appservice = self.auth.get_appservice_by_req(request) if appservice.is_rate_limited(): - self._address_ratelimiter.ratelimit(request.getClientIP()) + await self._address_ratelimiter.ratelimit( + None, request.getClientIP() + ) result = await self._do_appservice_login(login_submission, appservice) elif self.jwt_enabled and ( login_submission["type"] == LoginRestServlet.JWT_TYPE or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED ): - self._address_ratelimiter.ratelimit(request.getClientIP()) + await self._address_ratelimiter.ratelimit(None, request.getClientIP()) result = await self._do_jwt_login(login_submission) elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE: - self._address_ratelimiter.ratelimit(request.getClientIP()) + await self._address_ratelimiter.ratelimit(None, request.getClientIP()) result = await self._do_token_login(login_submission) else: - self._address_ratelimiter.ratelimit(request.getClientIP()) + await self._address_ratelimiter.ratelimit(None, request.getClientIP()) result = await self._do_other_login(login_submission) except KeyError: raise SynapseError(400, "Missing JSON keys.") @@ -258,7 +261,7 @@ async def _complete_login( # too often. 
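# [Editorial sketch, not part of the patch] Companion calls to the POST example in
# the RateLimitRestServlet docstring above, with responses read off the handlers:
#   GET    /_synapse/admin/v1/users/@test:example.com/override_ratelimit
#          -> 200 {"messages_per_second": 0, "burst_count": 0}  (or {} if no override)
#   DELETE /_synapse/admin/v1/users/@test:example.com/override_ratelimit
#          -> 200 {}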
This happens here rather than before as we don't # necessarily know the user before now. if ratelimit: - self._account_ratelimiter.ratelimit(user_id.lower()) + await self._account_ratelimiter.ratelimit(None, user_id.lower()) if create_non_existent_users: canonical_uid = await self.auth_handler.check_user_exists(user_id) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index ad8cea49c6ed..5aa7908d73a6 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 23a529f8e3d3..2b24fe5aa65f 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,10 +35,15 @@ def __init__(self, hs): self.clock = hs.get_clock() self.auth = hs.get_auth() + self._use_presence = hs.config.server.use_presence + async def on_GET(self, request, user_id): requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) + if not self._use_presence: + return 200, {"presence": "offline"} + if requester.user != user: allowed = await self.presence_handler.is_visible( observed_user=user, observer_user=requester.user @@ -81,7 +85,7 @@ async def on_PUT(self, request, user_id): except Exception: raise SynapseError(400, "Unable to parse state") - if self.hs.config.use_presence: + if self._use_presence: await self.presence_handler.set_state(user, state) return 200, {} diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 717c5f2b108a..f42f4b35674f 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 241e535917a7..be29a0b39ec6 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 0c148a213db4..18102eca6c1b 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 525efdf22137..51813cccbe72 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -1021,6 +1020,7 @@ async def on_GET( max_rooms_per_space=parse_integer(request, "max_rooms_per_space"), ) + # TODO When switching to the stable endpoint, remove the POST handler. 
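# [Editorial sketch, not part of the patch] The Ratelimiter API change threaded
# through the login handlers above: the constructor now takes the datastore, and
# ratelimit() is a coroutine whose first argument is the (optional) requester.
# The import path and the raise-on-limit behaviour are assumptions from context:
from synapse.api.ratelimiting import Ratelimiter

async def limit_by_client_ip(hs, request):
    limiter = Ratelimiter(
        store=hs.get_datastore(),
        clock=hs.get_clock(),
        rate_hz=hs.config.rc_login_address.per_second,
        burst_count=hs.config.rc_login_address.burst_count,
    )
    # Raises when the bucket keyed by this client IP is exhausted.
    await limiter.ratelimit(None, request.getClientIP())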
async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index d07ca2c47cd2..c780ffded5e9 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/__init__.py b/synapse/rest/client/v2_alpha/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/rest/client/v2_alpha/__init__.py +++ b/synapse/rest/client/v2_alpha/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index f016b4f1bd41..0443f4571c1c 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c2ba790babde..085561d3e9d8 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd @@ -40,7 +39,7 @@ from synapse.push.mailer import Mailer from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string -from synapse.util.threepids import canonicalise_email, check_3pid_allowed +from synapse.util.threepids import check_3pid_allowed, validate_email from ._base import client_patterns, interactive_auth_handler @@ -93,7 +92,7 @@ async def on_POST(self, request): # Stored in the database "foo@bar.com" # User requests with "FOO@bar.com" would raise a Not Found error try: - email = canonicalise_email(body["email"]) + email = validate_email(body["email"]) except ValueError as e: raise SynapseError(400, str(e)) send_attempt = body["send_attempt"] @@ -103,7 +102,9 @@ async def on_POST(self, request): # Raise if the provided next_link value isn't valid assert_valid_next_link(self.hs, next_link) - self.identity_handler.ratelimit_request_token_requests(request, "email", email) + await self.identity_handler.ratelimit_request_token_requests( + request, "email", email + ) # The email will be sent to the stored address. # This avoids a potential account hijack by requesting a password reset to @@ -246,7 +247,7 @@ async def on_POST(self, request): # We store all email addresses canonicalised in the DB. # (See add_threepid in synapse/handlers/auth.py) try: - threepid["address"] = canonicalise_email(threepid["address"]) + threepid["address"] = validate_email(threepid["address"]) except ValueError as e: raise SynapseError(400, str(e)) # if using email, we must know about the email they're authing with! @@ -374,7 +375,7 @@ async def on_POST(self, request): # Otherwise the email will be sent to "FOO@bar.com" and stored as # "foo@bar.com" in database. 
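# [Editorial note, not part of the patch] canonicalise_email is renamed to
# validate_email throughout this hunk; the behaviour described by the comments
# above is assumed unchanged:
#   validate_email("FOO@bar.com")  -> "foo@bar.com"  (stored canonicalised)
#   validate_email("not an email") -> raises ValueError, mapped to a 400 above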
try: - email = canonicalise_email(body["email"]) + email = validate_email(body["email"]) except ValueError as e: raise SynapseError(400, str(e)) send_attempt = body["send_attempt"] @@ -387,7 +388,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - self.identity_handler.ratelimit_request_token_requests(request, "email", email) + await self.identity_handler.ratelimit_request_token_requests( + request, "email", email + ) if next_link: # Raise if the provided next_link value isn't valid @@ -468,7 +471,7 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - self.identity_handler.ratelimit_request_token_requests( + await self.identity_handler.ratelimit_request_token_requests( request, "msisdn", msisdn ) diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 3f28c0bc3eb6..7517e9304e8d 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index bd7f9ae2039b..2d1ad3d3fbf0 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,24 +36,40 @@ def __init__(self, hs): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() - self.success_html = hs.config.account_validity.account_renewed_html_content - self.failure_html = hs.config.account_validity.invalid_token_html_content + self.account_renewed_template = ( + hs.config.account_validity.account_validity_account_renewed_template + ) + self.account_previously_renewed_template = ( + hs.config.account_validity.account_validity_account_previously_renewed_template + ) + self.invalid_token_template = ( + hs.config.account_validity.account_validity_invalid_token_template + ) async def on_GET(self, request): if b"token" not in request.args: raise SynapseError(400, "Missing renewal token") renewal_token = request.args[b"token"][0] - token_valid = await self.account_activity_handler.renew_account( + ( + token_valid, + token_stale, + expiration_ts, + ) = await self.account_activity_handler.renew_account( renewal_token.decode("utf8") ) if token_valid: status_code = 200 - response = self.success_html + response = self.account_renewed_template.render(expiration_ts=expiration_ts) + elif token_stale: + status_code = 200 + response = self.account_previously_renewed_template.render( + expiration_ts=expiration_ts + ) else: status_code = 404 - response = self.failure_html + response = self.invalid_token_template.render(expiration_ts=expiration_ts) respond_with_html(request, status_code, response) @@ -72,10 +87,12 @@ def __init__(self, hs): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() - self.account_validity = self.hs.config.account_validity + self.account_validity_renew_by_email_enabled = ( + hs.config.account_validity.account_validity_renew_by_email_enabled + ) async def on_POST(self, request): - if not self.account_validity.renew_by_email_enabled: + if not self.account_validity_renew_by_email_enabled: raise AuthError( 403, "Account renewal via email is disabled on this 
server." ) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 75ece1c911d0..6ea1b50a625e 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index 44ccf10ed45e..6a240214845d 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 3d07aadd39ba..9af05f9b11a4 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 7cc692643b1d..411667a9c8d9 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 08fb6b2b06ad..6285680c00c8 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index f092e5b3a2de..a57ccbb5e5d5 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 87063ec8b1e1..0ede643c2d91 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 5b996e2d6318..d3322acc384b 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py index 68b27ff23a46..a83927aee641 100644 --- a/synapse/rest/client/v2_alpha/password_policy.py +++ b/synapse/rest/client/v2_alpha/password_policy.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index 55c6688f529f..5988fa47e5e9 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 6f7246a39429..8cf4aebdbec3 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 8f68d8dfc8fa..a30a5df1b195 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 - 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # @@ -13,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import hmac import logging import random @@ -22,7 +20,7 @@ import synapse import synapse.api.auth import synapse.types -from synapse.api.constants import LoginType +from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import ( Codes, InteractiveAuthIncompleteError, @@ -32,7 +30,7 @@ ) from synapse.config import ConfigError from synapse.config.captcha import CaptchaConfig -from synapse.config.consent_config import ConsentConfig +from synapse.config.consent import ConsentConfig from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.registration import RegistrationConfig @@ -51,7 +49,11 @@ from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import assert_valid_client_secret, random_string -from synapse.util.threepids import canonicalise_email, check_3pid_allowed +from synapse.util.threepids import ( + canonicalise_email, + check_3pid_allowed, + validate_email, +) from ._base import client_patterns, interactive_auth_handler @@ -113,7 +115,7 @@ async def on_POST(self, request): # (See on_POST in EmailThreepidRequestTokenRestServlet # in synapse/rest/client/v2_alpha/account.py) try: - email = canonicalise_email(body["email"]) + email = validate_email(body["email"]) except ValueError as e: raise SynapseError(400, str(e)) send_attempt = body["send_attempt"] @@ -126,7 +128,9 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - self.identity_handler.ratelimit_request_token_requests(request, "email", email) + await self.identity_handler.ratelimit_request_token_requests( + request, "email", email + ) existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid( "email", email @@ -208,7 +212,7 @@ async def on_POST(self, request): Codes.THREEPID_DENIED, ) - self.identity_handler.ratelimit_request_token_requests( + await self.identity_handler.ratelimit_request_token_requests( request, "msisdn", msisdn ) @@ -406,7 +410,7 @@ async def on_POST(self, request): client_addr = request.getClientIP() - 
self.ratelimiter.ratelimit(client_addr, update=False) + await self.ratelimiter.ratelimit(None, client_addr, update=False) kind = b"user" if b"kind" in request.args: @@ -428,15 +432,20 @@ async def on_POST(self, request): raise SynapseError(400, "Invalid username") desired_username = body["username"] - appservice = None - if self.auth.has_access_token(request): - appservice = self.auth.get_appservice_by_req(request) - # fork off as soon as possible for ASes which have completely # different registration flows to normal users # == Application Service Registration == - if appservice: + if body.get("type") == APP_SERVICE_REGISTRATION_TYPE: + if not self.auth.has_access_token(request): + raise SynapseError( + 400, + "Appservice token must be provided when using a type of m.login.application_service", + ) + + # Verify the AS + self.auth.get_appservice_by_req(request) + # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. @@ -457,6 +466,11 @@ async def on_POST(self, request): ) return 200, result + elif self.auth.has_access_token(request): + raise SynapseError( + 400, + "An access token should not be provided on requests to /register (except if type is m.login.application_service)", + ) # == Normal User Registration == (everyone else) if not self._registration_enabled: diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index fe765da23c5b..c7da6759dbf8 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 215d619ca102..2c169abbf313 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 53de97923fa1..263596be8658 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 147920767fd2..6d1b083acb47 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 79c1b526eedd..f8dcee603ce8 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/shared_rooms.py b/synapse/rest/client/v2_alpha/shared_rooms.py index c866d5151c99..d2e7f04b406c 100644 --- 
a/synapse/rest/client/v2_alpha/shared_rooms.py +++ b/synapse/rest/client/v2_alpha/shared_rooms.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Half-Shot # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 3481770c83e2..95ee3f1b84f3 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index a97cd66c5234..c14f83be1878 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 0c127a1b5fd8..b5c67c9bb67e 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 79317c74bae1..b2f858545cbe 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index ad598cefe00e..7e8912f0b919 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 3e3d8839f494..4582c274c7ad 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 8b9ef26cf2c0..b19cd8afc53e 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,14 +32,6 @@ logger = logging.getLogger(__name__) -# use hmac.compare_digest if we have it (python 2.7.7), else just use equality -if hasattr(hmac, "compare_digest"): - compare_digest = hmac.compare_digest -else: - - def compare_digest(a, b): - return a == b - class ConsentResource(DirectServeHtmlResource): """A twisted Resource to display a privacy policy and gather consent to it @@ -210,5 +201,5 @@ def _check_hash(self, userid, userhmac): .encode("ascii") ) - if not compare_digest(want_mac, userhmac): + if not hmac.compare_digest(want_mac, userhmac): raise SynapseError(HTTPStatus.FORBIDDEN, "HMAC incorrect") diff --git a/synapse/rest/health.py b/synapse/rest/health.py index 
0170950bf382..4487b54abf39 100644 --- a/synapse/rest/health.py +++ b/synapse/rest/health.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/__init__.py b/synapse/rest/key/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/synapse/rest/key/__init__.py +++ b/synapse/rest/key/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py index cb5abcf826be..c6c63073eac0 100644 --- a/synapse/rest/key/v2/__init__.py +++ b/synapse/rest/key/v2/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index d8e8e48c1c1e..e8dbe240d81e 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index c57ac22e5894..f648678b0903 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -144,7 +144,7 @@ async def query_keys(self, request, query, query_remote_on_cache_miss=False): # Note that the value is unused. cache_misses = {} # type: Dict[str, Dict[str, int]] - for (server_name, key_id, from_server), results in cached.items(): + for (server_name, key_id, _), results in cached.items(): results = [(result["ts_added_ms"], result) for result in results] if not results and key_id is not None: @@ -206,7 +206,7 @@ async def query_keys(self, request, query, query_remote_on_cache_miss=False): # Cast to bytes since postgresql returns a memoryview. json_results.add(bytes(most_recent_result["key_json"])) else: - for ts_added, result in results: + for _, result in results: # Cast to bytes since postgresql returns a memoryview. json_results.add(bytes(result["key_json"])) diff --git a/synapse/rest/media/v1/__init__.py b/synapse/rest/media/v1/__init__.py index 3b8c96e267fe..d20186bbd095 100644 --- a/synapse/rest/media/v1/__init__.py +++ b/synapse/rest/media/v1/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 636694707117..0fb4cd81f1a2 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py index c41a7ab412c9..a1d36e5cf189 100644 --- a/synapse/rest/media/v1/config_resource.py +++ b/synapse/rest/media/v1/config_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 Will Hunt # Copyright 2020-2021 The Matrix.org Foundation C.I.C. 
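# [Editorial note, not part of the patch] With Python 2 long gone, the
# compare_digest fallback removed in the consent_resource hunk above is
# unnecessary: hmac.compare_digest is always available and does a constant-time
# comparison, unlike plain == which can leak how many leading bytes match.
import hmac

ok = hmac.compare_digest(b"expected-mac", b"supplied-mac")  # placeholder MACs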
# @@ -31,7 +30,7 @@ class MediaConfigResource(DirectServeJsonResource): def __init__(self, hs: "HomeServer"): super().__init__() - config = hs.get_config() + config = hs.config self.clock = hs.get_clock() self.auth = hs.get_auth() self.limits_dict = {"m.upload.size": config.max_upload_size} diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index 5dadaeaf5701..cd2468f9c59a 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 7792f26e7846..09531ebf548d 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # @@ -22,7 +21,7 @@ NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d") -def _wrap_in_base_path(func: "Callable[..., str]") -> "Callable[..., str]": +def _wrap_in_base_path(func: Callable[..., str]) -> Callable[..., str]: """Takes a function that returns a relative path and turns it into an absolute path based on the location of the primary media store """ diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 0c041b542d4d..e8a875b90093 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2021 The Matrix.org Foundation C.I.C. # @@ -468,6 +467,9 @@ async def _download_remote_file( return media_info def _get_thumbnail_requirements(self, media_type): + scpos = media_type.find(";") + if scpos > 0: + media_type = media_type[:scpos] return self.thumbnail_requirements.get(media_type, ()) def _generate_thumbnail( diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index b1b1c9e6ecc9..c7fd97c46c9d 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index c4ed9dfdb40f..0adfb1a70f73 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # @@ -175,7 +174,7 @@ def __init__( clock=self.clock, # don't spider URLs more often than once an hour expiry_ms=ONE_HOUR, - ) + ) # type: ExpiringCache[str, ObservableDeferred] if self._worker_run_media_background_jobs: self._cleaner_loop = self.clock.looping_call( diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 031947557d8b..0ff6ad3c0c22 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018-2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index af802bc0b125..a029d426f0b6 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 988f52c78f73..37fe582390e3 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 0138b2e2d127..024a105bf209 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # @@ -52,8 +51,6 @@ async def _async_render_OPTIONS(self, request: Request) -> None: async def _async_render_POST(self, request: SynapseRequest) -> None: requester = await self.auth.get_user_by_req(request) - # TODO: The checks here are a bit late. The content will have - # already been uploaded to a tmp file at this point content_length = request.getHeader("Content-Length") if content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) diff --git a/synapse/rest/synapse/__init__.py b/synapse/rest/synapse/__init__.py index c0b733488b5f..6ef4fbe8f77b 100644 --- a/synapse/rest/synapse/__init__.py +++ b/synapse/rest/synapse/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index 9eeb970580ed..47a2f72b3273 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/new_user_consent.py b/synapse/rest/synapse/client/new_user_consent.py index 78ee0b5e88eb..488b97b32e02 100644 --- a/synapse/rest/synapse/client/new_user_consent.py +++ b/synapse/rest/synapse/client/new_user_consent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -62,6 +61,15 @@ async def _async_render_GET(self, request: Request) -> None: self._sso_handler.render_error(request, "bad_session", e.msg, code=e.code) return + # It should be impossible to get here without having first been through + # the pick-a-username step, which ensures chosen_localpart gets set. 
+ if not session.chosen_localpart: + logger.warning("Session has no user name selected") + self._sso_handler.render_error( + request, "no_user", "No user name has been selected.", code=400 + ) + return + user_id = UserID(session.chosen_localpart, self._server_name) user_profile = { "display_name": session.display_name, diff --git a/synapse/rest/synapse/client/oidc/__init__.py b/synapse/rest/synapse/client/oidc/__init__.py index 64c0deb75dde..36ba40165621 100644 --- a/synapse/rest/synapse/client/oidc/__init__.py +++ b/synapse/rest/synapse/client/oidc/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/oidc/callback_resource.py b/synapse/rest/synapse/client/oidc/callback_resource.py index 1af33f0a45ba..7785f17e9094 100644 --- a/synapse/rest/synapse/client/oidc/callback_resource.py +++ b/synapse/rest/synapse/client/oidc/callback_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index d26ce46efcfe..f2800bf2db1d 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/pick_idp.py b/synapse/rest/synapse/client/pick_idp.py index 9550b829980d..d3a94a9349be 100644 --- a/synapse/rest/synapse/client/pick_idp.py +++ b/synapse/rest/synapse/client/pick_idp.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py index d9ffe84489af..9b002cc15ebe 100644 --- a/synapse/rest/synapse/client/pick_username.py +++ b/synapse/rest/synapse/client/pick_username.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/saml2/__init__.py b/synapse/rest/synapse/client/saml2/__init__.py index 3e8235ee1e37..781ccb237c89 100644 --- a/synapse/rest/synapse/client/saml2/__init__.py +++ b/synapse/rest/synapse/client/saml2/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/saml2/metadata_resource.py b/synapse/rest/synapse/client/saml2/metadata_resource.py index 1e8526e22e71..b37c7083dc7f 100644 --- a/synapse/rest/synapse/client/saml2/metadata_resource.py +++ b/synapse/rest/synapse/client/saml2/metadata_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/saml2/response_resource.py b/synapse/rest/synapse/client/saml2/response_resource.py index 4dfadf1bfb07..774ccd870fa3 100644 --- a/synapse/rest/synapse/client/saml2/response_resource.py +++ b/synapse/rest/synapse/client/saml2/response_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/synapse/client/sso_register.py b/synapse/rest/synapse/client/sso_register.py index f2acce2437ea..70cd148a760e 100644 --- a/synapse/rest/synapse/client/sso_register.py +++ b/synapse/rest/synapse/client/sso_register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index f591cc6c5c7b..19ac3af33753 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/secrets.py b/synapse/secrets.py deleted file mode 100644 index 7939db75e781..000000000000 --- a/synapse/secrets.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Injectable secrets module for Synapse. - -See https://docs.python.org/3/library/secrets.html#module-secrets for the API -used in Python 3.6, and the API emulated in Python 2.7. 
-""" -import sys - -# secrets is available since python 3.6 -if sys.version_info[0:2] >= (3, 6): - import secrets - - class Secrets: - def token_bytes(self, nbytes: int = 32) -> bytes: - return secrets.token_bytes(nbytes) - - def token_hex(self, nbytes: int = 32) -> str: - return secrets.token_hex(nbytes) - - -else: - import binascii - import os - - class Secrets: - def token_bytes(self, nbytes: int = 32) -> bytes: - return os.urandom(nbytes) - - def token_hex(self, nbytes: int = 32) -> str: - return binascii.hexlify(self.token_bytes(nbytes)).decode("ascii") diff --git a/synapse/server.py b/synapse/server.py index e85b9391faae..2337d2d9b450 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -51,6 +50,7 @@ from synapse.crypto.context_factory import RegularPolicyForHTTPS from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory +from synapse.events.presence_router import PresenceRouter from synapse.events.spamcheck import SpamChecker from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.events.utils import EventClientSerializer @@ -70,13 +70,14 @@ from synapse.handlers.admin import AdminHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, MacaroonGenerator -from synapse.handlers.cas_handler import CasHandler +from synapse.handlers.cas import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.directory import DirectoryHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler +from synapse.handlers.event_auth import EventAuthHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.federation import FederationHandler from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler @@ -85,7 +86,11 @@ from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler from synapse.handlers.password_policy import PasswordPolicyHandler -from synapse.handlers.presence import PresenceHandler +from synapse.handlers.presence import ( + BasePresenceHandler, + PresenceHandler, + WorkerPresenceHandler, +) from synapse.handlers.profile import ProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler @@ -121,7 +126,6 @@ MediaRepository, MediaRepositoryResource, ) -from synapse.secrets import Secrets from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import ( @@ -141,8 +145,8 @@ if TYPE_CHECKING: from txredisapi import RedisProtocol - from synapse.handlers.oidc_handler import OidcHandler - from synapse.handlers.saml_handler import SamlHandler + from synapse.handlers.oidc import OidcHandler + from synapse.handlers.saml import SamlHandler T = TypeVar("T", bound=Callable[..., Any]) @@ -282,6 +286,14 @@ def setup(self) -> None: if self.config.run_background_tasks: self.setup_background_tasks() + def 
start_listening(self) -> None: + """Start the HTTP, manhole, metrics, etc listeners + + Does nothing in this base class; overridden in derived classes to start the + appropriate listeners. + """ + pass + def setup_background_tasks(self) -> None: """ Some handlers have side effects on instantiation (like registering @@ -319,9 +331,6 @@ def get_datastores(self) -> Databases: return self.datastores - def get_config(self) -> HomeServerConfig: - return self.config - @cache_in_self def get_distributor(self) -> Distributor: return Distributor() @@ -329,6 +338,7 @@ def get_distributor(self) -> Distributor: @cache_in_self def get_registration_ratelimiter(self) -> Ratelimiter: return Ratelimiter( + store=self.get_datastore(), clock=self.get_clock(), rate_hz=self.config.rc_registration.per_second, burst_count=self.config.rc_registration.burst_count, @@ -414,8 +424,11 @@ def get_state_resolution_handler(self) -> StateResolutionHandler: return StateResolutionHandler(self) @cache_in_self - def get_presence_handler(self) -> PresenceHandler: - return PresenceHandler(self) + def get_presence_handler(self) -> BasePresenceHandler: + if self.get_instance_name() in self.config.worker.writers.presence: + return PresenceHandler(self) + else: + return WorkerPresenceHandler(self) @cache_in_self def get_typing_writer_handler(self) -> TypingWriterHandler: @@ -424,6 +437,10 @@ def get_typing_writer_handler(self) -> TypingWriterHandler: else: raise Exception("Workers cannot write typing") + @cache_in_self + def get_presence_router(self) -> PresenceRouter: + return PresenceRouter(self) + @cache_in_self def get_typing_handler(self) -> FollowerTypingHandler: if self.config.worker.writers.typing == self.get_instance_name(): @@ -623,10 +640,6 @@ def get_groups_attestation_signing(self) -> GroupAttestationSigning: def get_groups_attestation_renewer(self) -> GroupAttestionRenewer: return GroupAttestionRenewer(self) - @cache_in_self - def get_secrets(self) -> Secrets: - return Secrets() - @cache_in_self def get_stats_handler(self) -> StatsHandler: return StatsHandler(self) @@ -687,13 +700,13 @@ def get_cas_handler(self) -> CasHandler: @cache_in_self def get_saml_handler(self) -> "SamlHandler": - from synapse.handlers.saml_handler import SamlHandler + from synapse.handlers.saml import SamlHandler return SamlHandler(self) @cache_in_self def get_oidc_handler(self) -> "OidcHandler": - from synapse.handlers.oidc_handler import OidcHandler + from synapse.handlers.oidc import OidcHandler return OidcHandler(self) @@ -737,6 +750,10 @@ def get_account_data_handler(self) -> AccountDataHandler: def get_space_summary_handler(self) -> SpaceSummaryHandler: return SpaceSummaryHandler(self) + @cache_in_self + def get_event_auth_handler(self) -> EventAuthHandler: + return EventAuthHandler(self) + @cache_in_self def get_external_cache(self) -> ExternalCache: return ExternalCache(self) diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index a9349bf9a17a..e65f6f88fe74 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index a18a2e76c999..e4b0bc5c7235 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ 
b/synapse/server_notices/resource_limits_server_notices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 144e1da78e2e..f19075b76050 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py index 965c6458893d..c875b15b326d 100644 --- a/synapse/server_notices/server_notices_sender.py +++ b/synapse/server_notices/server_notices_sender.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/worker_server_notices_sender.py b/synapse/server_notices/worker_server_notices_sender.py index c76bd5746044..cc5331849102 100644 --- a/synapse/server_notices/worker_server_notices_sender.py +++ b/synapse/server_notices/worker_server_notices_sender.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index 3ce25bb012a0..73018f2d002e 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index c3d6e80c49f7..a1770f620e59 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -20,8 +19,10 @@ Any, Awaitable, Callable, + Collection, DefaultDict, Dict, + FrozenSet, Iterable, List, Optional, @@ -46,7 +47,7 @@ from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo -from synapse.types import Collection, StateMap +from synapse.types import StateMap from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import Measure, measure_func @@ -212,19 +213,23 @@ async def get_current_state_ids( return ret.state async def get_current_users_in_room( - self, room_id: str, latest_event_ids: Optional[List[str]] = None + self, room_id: str, latest_event_ids: List[str] ) -> Dict[str, ProfileInfo]: """ Get the users who are currently in a room. + Note: This is much slower than using the equivalent method + `DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`, + so this should only be used when wanting the users at a particular point + in the room. + Args: room_id: The ID of the room. latest_event_ids: Precomputed list of latest event IDs. Will be computed if None. Returns: Dictionary of user IDs to their profileinfo. 
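# A hypothetical call site for the tightened signature above: latest_event_ids
# is now a required argument, so callers perform the extremities lookup
# themselves rather than relying on the old fall-back inside
# get_current_users_in_room.
async def current_users_sketch(store, state_handler, room_id: str):
    latest_event_ids = await store.get_latest_event_ids_in_room(room_id)
    return await state_handler.get_current_users_in_room(room_id, latest_event_ids)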
""" - if not latest_event_ids: - latest_event_ids = await self.store.get_latest_event_ids_in_room(room_id) + assert latest_event_ids is not None logger.debug("calling resolve_state_groups from get_current_users_in_room") @@ -515,7 +520,7 @@ def __init__(self, hs): expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000, iterable=True, reset_expiry_on_get=True, - ) + ) # type: ExpiringCache[FrozenSet[int], _StateCacheEntry] # # stuff for tracking time spent on state-res by room @@ -536,7 +541,7 @@ async def resolve_state_groups( state_groups_ids: Dict[int, StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_res_store: "StateResolutionStore", - ): + ) -> _StateCacheEntry: """Resolves conflicts between a set of state groups Always generates a new state group (unless we hit the cache), so should diff --git a/synapse/state/v1.py b/synapse/state/v1.py index ce255da6fd07..318e9988130f 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/state/v2.py b/synapse/state/v2.py index e73a548ee412..008644cd9862 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +18,7 @@ from typing import ( Any, Callable, + Collection, Dict, Generator, Iterable, @@ -38,7 +38,7 @@ from synapse.api.errors import AuthError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase -from synapse.types import Collection, MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap from synapse.util import Clock logger = logging.getLogger(__name__) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 0b9007e51fd3..105e4e1fec1b 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018,2019 New Vector Ltd # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 240905329fb7..3d98d3f5f822 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. 
@@ -17,13 +16,13 @@ import logging import random from abc import ABCMeta -from typing import TYPE_CHECKING, Any, Iterable, Optional, Union +from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union from synapse.storage.database import LoggingTransaction # noqa: F401 from synapse.storage.database import make_in_list_sql_clause # noqa: F401 from synapse.storage.database import DatabasePool from synapse.storage.types import Connection -from synapse.types import Collection, StreamToken, get_domain_from_id +from synapse.types import StreamToken, get_domain_from_id from synapse.util import json_decoder if TYPE_CHECKING: @@ -70,6 +69,7 @@ def _invalidate_state_caches( self._attempt_to_invalidate_cache("is_host_joined", (room_id, host)) self._attempt_to_invalidate_cache("get_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,)) @@ -115,7 +115,7 @@ def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: db_content = db_content.tobytes() # Decode it to a Unicode string before feeding it to the JSON decoder, since - # Python 3.5 does not support deserializing bytes. + # it only supports handling strings if isinstance(db_content, (bytes, bytearray)): db_content = db_content.decode("utf8") diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index ccb06aab391d..142787fdfd1c 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 94590e7b458d..a761ad603bbb 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -21,6 +20,7 @@ from typing import ( Any, Callable, + Collection, Dict, Iterable, Iterator, @@ -49,7 +49,6 @@ from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor -from synapse.types import Collection # python 3 does not have a maximum int value MAX_TXN_ID = 2 ** 63 - 1 @@ -172,10 +171,7 @@ def __getattr__(self, name): # The type of entry which goes on our after_callbacks and exception_callbacks lists. -# -# Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so -# that mypy sees the type but the runtime python doesn't. -_CallbackListEntry = Tuple["Callable[..., None]", Iterable[Any], Dict[str, Any]] +_CallbackListEntry = Tuple[Callable[..., None], Iterable[Any], Dict[str, Any]] R = TypeVar("R") @@ -222,7 +218,7 @@ def __init__( self.after_callbacks = after_callbacks self.exception_callbacks = exception_callbacks - def call_after(self, callback: "Callable[..., None]", *args: Any, **kwargs: Any): + def call_after(self, callback: Callable[..., None], *args: Any, **kwargs: Any): """Call the given callback on the main twisted thread after the transaction has finished. Used to invalidate the caches on the correct thread. 
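# The comment corrected in the db_to_json hunk above describes logic that is
# easy to miss in diff form; a self-contained sketch of that helper, stdlib
# only (the real one lives in synapse/storage/_base.py):
import json
from typing import Any, Union

def db_to_json_sketch(db_content: Union[memoryview, bytes, bytearray, str]) -> Any:
    if isinstance(db_content, memoryview):
        db_content = db_content.tobytes()
    # the JSON decoder wants str, so normalise bytes/bytearray first
    if isinstance(db_content, (bytes, bytearray)):
        db_content = db_content.decode("utf8")
    return json.loads(db_content)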
@@ -234,7 +230,7 @@ def call_after(self, callback: "Callable[..., None]", *args: Any, **kwargs: Any) self.after_callbacks.append((callback, args, kwargs)) def call_on_exception( - self, callback: "Callable[..., None]", *args: Any, **kwargs: Any + self, callback: Callable[..., None], *args: Any, **kwargs: Any ): # if self.exception_callbacks is None, that means that whatever constructed the # LoggingTransaction isn't expecting there to be any callbacks; assert that @@ -486,9 +482,9 @@ def new_transaction( desc: str, after_callbacks: List[_CallbackListEntry], exception_callbacks: List[_CallbackListEntry], - func: "Callable[..., R]", + func: Callable[..., R], *args: Any, - **kwargs: Any + **kwargs: Any, ) -> R: """Start a new database transaction with the given connection. @@ -619,10 +615,10 @@ def new_transaction( async def runInteraction( self, desc: str, - func: "Callable[..., R]", + func: Callable[..., R], *args: Any, db_autocommit: bool = False, - **kwargs: Any + **kwargs: Any, ) -> R: """Starts a transaction on the database and runs a given function @@ -679,10 +675,10 @@ async def runInteraction( async def runWithConnection( self, - func: "Callable[..., R]", + func: Callable[..., R], *args: Any, db_autocommit: bool = False, - **kwargs: Any + **kwargs: Any, ) -> R: """Wraps the .runWithConnection() method on the underlying db_pool. @@ -719,7 +715,9 @@ def inner_func(conn, *args, **kwargs): # pool). assert not self.engine.in_transaction(conn) - with LoggingContext("runWithConnection", parent_context) as context: + with LoggingContext( + str(curr_context), parent_context=parent_context + ) as context: sched_duration_sec = monotonic_time() - start_time sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) @@ -775,7 +773,7 @@ async def execute( desc: str, decoder: Optional[Callable[[Cursor], R]], query: str, - *args: Any + *args: Any, ) -> R: """Runs a single query for a result set. @@ -900,7 +898,7 @@ async def simple_upsert( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, desc: str = "simple_upsert", lock: bool = True, ) -> Optional[bool]: @@ -927,6 +925,8 @@ async def simple_upsert( Native upserts always return None. Emulated upserts return True if a new entry was created, False if an existing one was updated. """ + insertion_values = insertion_values or {} + attempts = 0 while True: try: @@ -964,7 +964,7 @@ def simple_upsert_txn( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, lock: bool = True, ) -> Optional[bool]: """ @@ -982,6 +982,8 @@ def simple_upsert_txn( Native upserts always return None. Emulated upserts return True if a new entry was created, False if an existing one was updated. """ + insertion_values = insertion_values or {} + if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: self.simple_upsert_txn_native_upsert( txn, table, keyvalues, values, insertion_values=insertion_values @@ -1003,7 +1005,7 @@ def simple_upsert_txn_emulated( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, lock: bool = True, ) -> bool: """ @@ -1017,6 +1019,8 @@ def simple_upsert_txn_emulated( Returns True if a new entry was created, False if an existing one was updated. 
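# The repeated `insertion_values: Dict[str, Any] = {}` ->
# `Optional[Dict[str, Any]] = None` rewrites in these hunks all fix the same
# Python pitfall: a mutable default is built once, at definition time, and
# shared by every call that omits the argument. A stdlib-only demonstration:
from typing import Any, Dict, Optional

def bad(values: Dict[str, Any] = {}) -> Dict[str, Any]:
    values.setdefault("hits", 0)
    values["hits"] += 1
    return values  # every default call mutates the same shared dict

def good(values: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    values = values or {}  # fresh dict per call, as in the diff
    values.setdefault("hits", 0)
    values["hits"] += 1
    return values

assert bad()["hits"] == 1 and bad()["hits"] == 2   # state leaks across calls
assert good()["hits"] == 1 and good()["hits"] == 1  # independent calls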
""" + insertion_values = insertion_values or {} + # We need to lock the table :(, unless we're *really* careful if lock: self.engine.lock_table(txn, table) @@ -1077,7 +1081,7 @@ def simple_upsert_txn_native_upsert( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, ) -> None: """ Use the native UPSERT functionality in recent PostgreSQL versions. @@ -1090,7 +1094,7 @@ def simple_upsert_txn_native_upsert( """ allvalues = {} # type: Dict[str, Any] allvalues.update(keyvalues) - allvalues.update(insertion_values) + allvalues.update(insertion_values or {}) if not values: latter = "NOTHING" @@ -1513,7 +1517,7 @@ async def simple_select_many_batch( column: str, iterable: Iterable[Any], retcols: Iterable[str], - keyvalues: Dict[str, Any] = {}, + keyvalues: Optional[Dict[str, Any]] = None, desc: str = "simple_select_many_batch", batch_size: int = 100, ) -> List[Any]: @@ -1531,6 +1535,8 @@ async def simple_select_many_batch( desc: description of the transaction, for logging and metrics batch_size: the number of rows for each select query """ + keyvalues = keyvalues or {} + results = [] # type: List[Dict[str, Any]] if not iterable: @@ -2059,69 +2065,18 @@ def make_in_list_sql_clause( KV = TypeVar("KV") -def make_tuple_comparison_clause( - database_engine: BaseDatabaseEngine, keys: List[Tuple[str, KV]] -) -> Tuple[str, List[KV]]: +def make_tuple_comparison_clause(keys: List[Tuple[str, KV]]) -> Tuple[str, List[KV]]: """Returns a tuple comparison SQL clause - Depending what the SQL engine supports, builds a SQL clause that looks like either - "(a, b) > (?, ?)", or "(a > ?) OR (a == ? AND b > ?)". + Builds a SQL clause that looks like "(a, b) > (?, ?)" Args: - database_engine keys: A set of (column, value) pairs to be compared. Returns: A tuple of SQL query and the args """ - if database_engine.supports_tuple_comparison: - return ( - "(%s) > (%s)" % (",".join(k[0] for k in keys), ",".join("?" for _ in keys)), - [k[1] for k in keys], - ) - - # we want to build a clause - # (a > ?) OR - # (a == ? AND b > ?) OR - # (a == ? AND b == ? AND c > ?) - # ... - # (a == ? AND b == ? AND ... AND z > ?) - # - # or, equivalently: - # - # (a > ? OR (a == ? AND - # (b > ? OR (b == ? AND - # ... - # (y > ? OR (y == ? AND - # z > ? - # )) - # ... - # )) - # )) - # - # which itself is equivalent to (and apparently easier for the query optimiser): - # - # (a >= ? AND (a > ? OR - # (b >= ? AND (b > ? OR - # ... - # (y >= ? AND (y > ? OR - # z > ? - # )) - # ... - # )) - # )) - # - # - - clause = "" - args = [] # type: List[KV] - for k, v in keys[:-1]: - clause = clause + "(%s >= ? AND (%s > ? OR " % (k, k) - args.extend([v, v]) - - (k, v) = keys[-1] - clause += "%s > ?" % (k,) - args.append(v) - - clause += "))" * (len(keys) - 1) - return clause, args + return ( + "(%s) > (%s)" % (",".join(k[0] for k in keys), ",".join("?" for _ in keys)), + [k[1] for k in keys], + ) diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index 379c78bb83bb..20b755056b7f 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 1d44c3aa2c29..49c7606d5138 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019-2021 The Matrix.org Foundation C.I.C. @@ -18,9 +17,9 @@ import logging from typing import List, Optional, Tuple -from synapse.api.constants import PresenceState from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import DatabasePool +from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( IdGenerator, @@ -51,7 +50,7 @@ from .metrics import ServerMetricsStore from .monthly_active_users import MonthlyActiveUsersStore from .openid import OpenIdStore -from .presence import PresenceStore, UserPresenceState +from .presence import PresenceStore from .profile import ProfileStore from .purge_events import PurgeEventsStore from .push_rule import PushRuleStore @@ -126,9 +125,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._clock = hs.get_clock() self.database_engine = database.engine - self._presence_id_gen = StreamIdGenerator( - db_conn, "presence_stream", "stream_id" - ) self._public_room_id_gen = StreamIdGenerator( db_conn, "public_room_list_stream", "stream_id" ) @@ -177,21 +173,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._presence_on_startup = self._get_active_presence(db_conn) - - presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict( - db_conn, - "presence_stream", - entity_column="user_id", - stream_column="stream_id", - max_value=self._presence_id_gen.get_current_token(), - ) - self.presence_stream_cache = StreamChangeCache( - "PresenceStreamChangeCache", - min_presence_val, - prefilled_cache=presence_cache_prefill, - ) - device_list_max = self._device_list_id_gen.get_current_token() self._device_list_stream_cache = StreamChangeCache( "DeviceListStreamChangeCache", device_list_max @@ -238,32 +219,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - def take_presence_startup_info(self): - active_on_startup = self._presence_on_startup - self._presence_on_startup = None - return active_on_startup - - def _get_active_presence(self, db_conn): - """Fetch non-offline presence from the database so that we can register - the appropriate time outs. - """ - - sql = ( - "SELECT user_id, state, last_active_ts, last_federation_update_ts," - " last_user_sync_ts, status_msg, currently_active FROM presence_stream" - " WHERE state != ?" - ) - - txn = db_conn.cursor() - txn.execute(sql, (PresenceState.OFFLINE,)) - rows = self.db_pool.cursor_to_dict(txn) - txn.close() - - for row in rows: - row["currently_active"] = bool(row["currently_active"]) - - return [UserPresenceState(**row) for row in rows] - async def get_users(self) -> List[JsonDict]: """Function to retrieve a list of users in users table. 
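# make_tuple_comparison_clause, rewritten at the end of the database.py hunks
# above, no longer needs the emulated `(a > ?) OR (a == ? AND b > ?)` form:
# every supported database engine now understands SQL row-value comparisons.
# A sketch of the simplified helper and what it emits:
from typing import List, Tuple, TypeVar

KV = TypeVar("KV")

def make_tuple_comparison_clause(keys: List[Tuple[str, KV]]) -> Tuple[str, List[KV]]:
    return (
        "(%s) > (%s)" % (",".join(k for k, _ in keys), ",".join("?" for _ in keys)),
        [v for _, v in keys],
    )

clause, args = make_tuple_comparison_clause([("user_id", "@a:hs"), ("device_id", "D1")])
assert clause == "(user_id,device_id) > (?,?)"
assert args == ["@a:hs", "D1"]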
@@ -292,6 +247,8 @@ async def get_users_paginate( name: Optional[str] = None, guests: bool = True, deactivated: bool = False, + order_by: UserSortOrder = UserSortOrder.USER_ID.value, + direction: str = "f", ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the @@ -304,6 +261,8 @@ async def get_users_paginate( name: search for local part of user_id or display name guests: whether to in include guest users deactivated: whether to include deactivated users + order_by: the sort order of the returned list + direction: sort ascending or descending Returns: A tuple of a list of mappings from user to information and a count of total users. """ @@ -312,6 +271,14 @@ def get_users_paginate_txn(txn): filters = [] args = [self.hs.config.server_name] + # Set ordering + order_by_column = UserSortOrder(order_by).value + + if direction == "b": + order = "DESC" + else: + order = "ASC" + # `name` is in database already in lower case if name: filters.append("(name LIKE ? OR LOWER(displayname) LIKE ?)") @@ -339,10 +306,15 @@ def get_users_paginate_txn(txn): txn.execute(sql, args) count = txn.fetchone()[0] - sql = ( - "SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url " - + sql_base - + " ORDER BY u.name LIMIT ? OFFSET ?" + sql = """ + SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url + {sql_base} + ORDER BY {order_by_column} {order}, u.name ASC + LIMIT ? OFFSET ? + """.format( + sql_base=sql_base, + order_by_column=order_by_column, + order=order, ) args += [limit, start] txn.execute(sql, args) diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index a277a1ef1349..1d02795f4346 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 85bb853d33cf..9f182c2a890f 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 1e7637a6f5c4..ecc1f935e228 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 3e26d5ba8797..f22c1f241b65 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
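# get_users_paginate above interpolates the ORDER BY column with str.format,
# which is only safe because the value is first round-tripped through the
# UserSortOrder enum. A sketch of that validation step; the member values
# other than USER_ID are assumptions for illustration:
from enum import Enum

class UserSortOrder(Enum):
    USER_ID = "name"             # named in the diff
    DISPLAYNAME = "displayname"  # assumed member

def order_clause(order_by: str, direction: str) -> str:
    column = UserSortOrder(order_by).value  # raises ValueError for unknown columns
    order = "DESC" if direction == "b" else "ASC"
    return "ORDER BY {} {}, u.name ASC".format(column, order)

assert order_clause("name", "f") == "ORDER BY name ASC, u.name ASC"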
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 6d18e692b0a9..d60010e942ab 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -298,7 +297,6 @@ def _devices_last_seen_update_txn(txn): # times, which is fine. where_clause, where_args = make_tuple_comparison_clause( - self.database_engine, [("user_id", last_user_id), ("device_id", last_device_id)], ) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 691080ce742b..50e7ddd7355b 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +15,7 @@ import logging from typing import List, Optional, Tuple +from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.replication.tcp.streams import ToDeviceStream from synapse.storage._base import SQLBaseStore, db_to_json @@ -405,6 +405,13 @@ def add_messages_txn(txn, now_ms, stream_id): ], ) + if remote_messages_by_destination: + issue9533_logger.debug( + "Queued outgoing to-device messages with stream_id %i for %s", + stream_id, + list(remote_messages_by_destination.keys()), + ) + async with self._device_inbox_id_gen.get_next() as stream_id: now_ms = self.clock.time_msec() await self.db_pool.runInteraction( @@ -534,6 +541,16 @@ def _add_messages_to_local_device_inbox_txn( ], ) + issue9533_logger.debug( + "Stored to-device messages with stream_id %i for %s", + stream_id, + [ + (user_id, device_id) + for (user_id, messages_by_device) in local_by_user_then_device.items() + for device_id in messages_by_device.keys() + ], + ) + class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d327e9aa0b8c..c9346de31673 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2019,2020 The Matrix.org Foundation C.I.C. @@ -16,7 +15,7 @@ # limitations under the License. 
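# The deviceinbox hunks above route to-device writes through issue9533_logger,
# a logger dedicated to a single investigation so that its output can be
# enabled in the logging config without flipping the whole server to DEBUG.
# The pattern is plain stdlib logging; the logger name below is an assumption:
import logging

issue9533_logger = logging.getLogger("synapse.9533_debug")

issue9533_logger.debug(
    "Stored to-device messages with stream_id %i for %s", 42, [("@u:hs", "DEVICE1")]
)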
import abc import logging -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple from synapse.api.errors import Codes, StoreError from synapse.logging.opentracing import ( @@ -32,7 +31,7 @@ LoggingTransaction, make_tuple_comparison_clause, ) -from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key +from synapse.types import JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import LruCache @@ -718,7 +717,15 @@ async def mark_remote_user_device_cache_as_stale(self, user_id: str) -> None: keyvalues={"user_id": user_id}, values={}, insertion_values={"added_ts": self._clock.time_msec()}, - desc="make_remote_user_device_cache_as_stale", + desc="mark_remote_user_device_cache_as_stale", + ) + + async def mark_remote_user_device_cache_as_valid(self, user_id: str) -> None: + # Remove the database entry that says we need to resync devices, after a resync + await self.db_pool.simple_delete( + table="device_lists_remote_resync", + keyvalues={"user_id": user_id}, + desc="mark_remote_user_device_cache_as_valid", ) async def mark_remote_user_device_list_as_unsubscribed(self, user_id: str) -> None: @@ -985,7 +992,7 @@ async def _remove_duplicate_outbound_pokes(self, progress, batch_size): def _txn(txn): clause, args = make_tuple_comparison_clause( - self.db_pool.engine, [(x, last_row[x]) for x in KEY_COLS] + [(x, last_row[x]) for x in KEY_COLS] ) sql = """ SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts @@ -1290,15 +1297,6 @@ def _update_remote_device_list_cache_txn( lock=False, ) - # If we're replacing the remote user's device list cache presumably - # we've done a full resync, so we remove the entry that says we need - # to resync - self.db_pool.simple_delete_txn( - txn, - table="device_lists_remote_resync", - keyvalues={"user_id": user_id}, - ) - async def add_device_change_to_streams( self, user_id: str, device_ids: Collection[str], hosts: List[str] ): diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 267b948397be..86075bc55b23 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 12cecceec2a4..b15fb71e6258 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index f1e7859d26e7..398d6b6acb85 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2019,2020 The Matrix.org Foundation C.I.C. 
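# The import churn at the top of devices.py above follows one rule applied
# throughout this diff: abstract container types now come from the stdlib
# typing module, since typing.Collection (added in Python 3.6) makes the old
# synapse.types.Collection backport unnecessary. Stdlib-only sketch:
from typing import Collection

def notify_users(user_ids: Collection[str]) -> None:
    # Collection promises len(), iteration and `in`; list, set, frozenset
    # and tuple arguments all satisfy it
    for user_id in user_ids:
        print("notifying", user_id)

notify_users({"@a:hs", "@b:hs"})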
@@ -85,7 +84,9 @@ async def get_e2e_device_keys_for_federation_query( if keys: result["keys"] = keys - device_display_name = device.display_name + device_display_name = None + if self.hs.config.allow_device_name_lookup_over_federation: + device_display_name = device.display_name if device_display_name: result["device_display_name"] = device_display_name diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index a956be491a6e..ff81d5cd1768 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,7 @@ import itertools import logging from queue import Empty, PriorityQueue -from typing import Dict, Iterable, List, Set, Tuple +from typing import Collection, Dict, Iterable, List, Set, Tuple from synapse.api.errors import StoreError from synapse.events import EventBase @@ -26,7 +25,6 @@ from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine from synapse.storage.types import Cursor -from synapse.types import Collection from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 78245ad5bd30..584532211837 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 98dac19a9525..fd25c8112d73 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -171,7 +170,7 @@ async def _persist_events_and_state_updates( ) async with stream_ordering_manager as stream_orderings: - for (event, context), stream in zip(events_and_contexts, stream_orderings): + for (event, _), stream in zip(events_and_contexts, stream_orderings): event.internal_metadata.stream_ordering = stream await self.db_pool.runInteraction( @@ -298,7 +297,7 @@ def _get_prevs_before_rejected_txn(txn, batch): txn.execute(sql + clause, args) to_recursively_check = [] - for event_id, prev_event_id, metadata, rejected in txn: + for _, prev_event_id, metadata, rejected in txn: if prev_event_id in existing_prevs: continue @@ -320,8 +319,8 @@ def _persist_events_txn( txn: LoggingTransaction, events_and_contexts: List[Tuple[EventBase, EventContext]], backfilled: bool, - state_delta_for_room: Dict[str, DeltaState] = {}, - new_forward_extremeties: Dict[str, List[str]] = {}, + state_delta_for_room: Optional[Dict[str, DeltaState]] = None, + new_forward_extremeties: Optional[Dict[str, List[str]]] = None, ): """Insert some number of room events into the necessary database tables. @@ -342,6 +341,9 @@ def _persist_events_txn( extremities. 
""" + state_delta_for_room = state_delta_for_room or {} + new_forward_extremeties = new_forward_extremeties or {} + all_events_and_contexts = events_and_contexts min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering @@ -1125,7 +1127,7 @@ def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str): def _update_forward_extremities_txn( self, txn, new_forward_extremities, max_stream_order ): - for room_id, new_extrem in new_forward_extremities.items(): + for room_id in new_forward_extremities.keys(): self.db_pool.simple_delete_txn( txn, table="event_forward_extremities", keyvalues={"room_id": room_id} ) @@ -1376,24 +1378,28 @@ def get_internal_metadata(event): ], ) - for event, _ in events_and_contexts: - if not event.internal_metadata.is_redacted(): - # If we're persisting an unredacted event we go and ensure - # that we mark any redactions that reference this event as - # requiring censoring. - self.db_pool.simple_update_txn( - txn, - table="redactions", - keyvalues={"redacts": event.event_id}, - updatevalues={"have_censored": False}, + # If we're persisting an unredacted event we go and ensure + # that we mark any redactions that reference this event as + # requiring censoring. + sql = "UPDATE redactions SET have_censored = ? WHERE redacts = ?" + txn.execute_batch( + sql, + ( + ( + False, + event.event_id, ) + for event, _ in events_and_contexts + if not event.internal_metadata.is_redacted() + ), + ) state_events_and_contexts = [ ec for ec in events_and_contexts if ec[0].is_state() ] state_values = [] - for event, context in state_events_and_contexts: + for event, _ in state_events_and_contexts: vals = { "event_id": event.event_id, "room_id": event.room_id, @@ -1462,7 +1468,7 @@ def _update_metadata_tables_txn( # nothing to do here return - for event, context in events_and_contexts: + for event, _ in events_and_contexts: if event.type == EventTypes.Redaction and event.redacts is not None: # Remove the entries in the event_push_actions table for the # redacted event. @@ -1879,20 +1885,28 @@ def _set_push_actions_for_event_and_users_txn( ), ) - for event, _ in events_and_contexts: - user_ids = self.db_pool.simple_select_onecol_txn( - txn, - table="event_push_actions_staging", - keyvalues={"event_id": event.event_id}, - retcol="user_id", - ) + room_to_event_ids = {} # type: Dict[str, List[str]] + for e, _ in events_and_contexts: + room_to_event_ids.setdefault(e.room_id, []).append(e.event_id) - for uid in user_ids: - txn.call_after( - self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (event.room_id, uid), + for room_id, event_ids in room_to_event_ids.items(): + rows = self.db_pool.simple_select_many_txn( + txn, + table="event_push_actions_staging", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("user_id",), ) + user_ids = {row["user_id"] for row in rows} + + for user_id in user_ids: + txn.call_after( + self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (room_id, user_id), + ) + # Now we delete the staging area for *all* events that were being # persisted. txn.execute_batch( diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 78367ea58ded..cbe4be1437a0 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -838,7 +837,6 @@ def _calculate_chain_cover_txn( # We want to do a `(topological_ordering, stream_ordering) > (?,?)` # comparison, but that is not supported on older SQLite versions tuple_clause, tuple_args = make_tuple_comparison_clause( - self.database_engine, [ ("events.room_id", last_room_id), ("topological_ordering", last_depth), diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py index b3703ae161bd..6d2688d71189 100644 --- a/synapse/storage/databases/main/events_forward_extremities.py +++ b/synapse/storage/databases/main/events_forward_extremities.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 952d4969b220..2c823e09cfa6 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +15,16 @@ import logging import threading from collections import namedtuple -from typing import Dict, Iterable, List, Optional, Tuple, overload +from typing import ( + Collection, + Container, + Dict, + Iterable, + List, + Optional, + Tuple, + overload, +) from constantly import NamedConstant, Names from typing_extensions import Literal @@ -46,7 +54,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.storage.util.sequence import build_sequence_generator -from synapse.types import Collection, JsonDict, get_domain_from_id +from synapse.types import JsonDict, get_domain_from_id from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter @@ -544,7 +552,7 @@ def _get_events_from_cache(self, events, allow_rejected, update_metrics=True): async def get_stripped_room_state_from_event_context( self, context: EventContext, - state_types_to_include: List[EventTypes], + state_types_to_include: Container[str], membership_user_id: Optional[str] = None, ) -> List[JsonDict]: """ diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index d2f5b9a50220..bb244a03c0a8 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index ac07e0197b88..66ad363bfb29 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # @@ -1027,8 +1026,8 @@ async def add_user_to_group( user_id: str, is_admin: bool = False, is_public: bool = True, - local_attestation: dict = None, - remote_attestation: dict = None, + local_attestation: Optional[dict] = None, + remote_attestation: Optional[dict] = None, ) -> None: """Add a user to the group server. 
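# get_stripped_room_state_from_event_context (in the events_worker.py hunk
# above) is a small study in annotation accuracy: the parameter is only ever
# used for membership tests, so Container[str] is both more honest and more
# permissive than the old List[EventTypes]. Stdlib-only sketch:
from typing import Container

def should_include(event_type: str, wanted: Container[str]) -> bool:
    # Container only promises `in`; sets, frozensets, lists, tuples all work
    return event_type in wanted

assert should_include("m.room.name", {"m.room.name", "m.room.avatar"})
assert not should_include("m.room.topic", ["m.room.name"])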
@@ -1171,7 +1170,7 @@ async def register_user_group_membership( user_id: str, membership: str, is_admin: bool = False, - content: JsonDict = {}, + content: Optional[JsonDict] = None, local_attestation: Optional[dict] = None, remote_attestation: Optional[dict] = None, is_publicised: bool = False, @@ -1192,6 +1191,8 @@ async def register_user_group_membership( is_publicised: Whether this should be publicised. """ + content = content or {} + def _register_user_group_membership_txn(txn, next_id): # TODO: Upsert? self.db_pool.simple_delete_txn( diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index d504323b0330..0e8680783404 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd. # diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 4f3d19256233..c584868188e9 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # @@ -22,6 +21,9 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( "media_repository_drop_index_wo_method" ) +BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = ( + "media_repository_drop_index_wo_method_2" +) class MediaSortOrder(Enum): @@ -85,23 +87,35 @@ def __init__(self, database: DatabasePool, db_conn, hs): unique=True, ) + # the original impl of _drop_media_index_without_method was broken (see + # https://github.com/matrix-org/synapse/issues/8649), so we replace the original + # impl with a no-op and run the fixed migration as + # media_repository_drop_index_wo_method_2. + self.db_pool.updates.register_noop_background_update( + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD + ) self.db_pool.updates.register_background_update_handler( - BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD, + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2, self._drop_media_index_without_method, ) async def _drop_media_index_without_method(self, progress, batch_size): + """background update handler which removes the old constraints. + + Note that this is only run on postgres. + """ + def f(txn): txn.execute( "ALTER TABLE local_media_repository_thumbnails DROP CONSTRAINT IF EXISTS local_media_repository_thumbn_media_id_thumbnail_width_thum_key" ) txn.execute( - "ALTER TABLE remote_media_cache_thumbnails DROP CONSTRAINT IF EXISTS remote_media_repository_thumbn_media_id_thumbnail_width_thum_key" + "ALTER TABLE remote_media_cache_thumbnails DROP CONSTRAINT IF EXISTS remote_media_cache_thumbnails_media_origin_media_id_thumbna_key" ) await self.db_pool.runInteraction("drop_media_indices_without_method", f) await self.db_pool.updates._end_background_update( - BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 ) return 1 diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 614a418a158a..c3f551d37756 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
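# The media_repository change above retires a broken background update and
# re-runs the fixed logic under a new name, so servers that already
# "completed" the broken version still get the fix. A schematic of the
# registration shape (handler body elided; `updates` stands in for
# self.db_pool.updates):
OLD = "media_repository_drop_index_wo_method"
NEW = "media_repository_drop_index_wo_method_2"

def register_media_index_updates(updates, fixed_handler):
    # old name: completes instantly wherever it is still queued
    updates.register_noop_background_update(OLD)
    # new name: actually drops the misnamed constraints (postgres only)
    updates.register_background_update_handler(NEW, fixed_handler)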
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 757da3d55dd1..fe25638289cd 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 0ff693a3109f..db22fab23ea0 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,16 +12,69 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Tuple +from typing import TYPE_CHECKING, Dict, List, Tuple -from synapse.api.presence import UserPresenceState +from synapse.api.presence import PresenceState, UserPresenceState +from synapse.replication.tcp.streams import PresenceStream from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.database import DatabasePool +from synapse.storage.engines import PostgresEngine +from synapse.storage.types import Connection +from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.iterutils import batch_iter +if TYPE_CHECKING: + from synapse.server import HomeServer + class PresenceStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: Connection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self._can_persist_presence = ( + hs.get_instance_name() in hs.config.worker.writers.presence + ) + + if isinstance(database.engine, PostgresEngine): + self._presence_id_gen = MultiWriterIdGenerator( + db_conn=db_conn, + db=database, + stream_name="presence_stream", + instance_name=self._instance_name, + tables=[("presence_stream", "instance_name", "stream_id")], + sequence_name="presence_stream_sequence", + writers=hs.config.worker.writers.to_device, + ) + else: + self._presence_id_gen = StreamIdGenerator( + db_conn, "presence_stream", "stream_id" + ) + + self._presence_on_startup = self._get_active_presence(db_conn) + + presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict( + db_conn, + "presence_stream", + entity_column="user_id", + stream_column="stream_id", + max_value=self._presence_id_gen.get_current_token(), + ) + self.presence_stream_cache = StreamChangeCache( + "PresenceStreamChangeCache", + min_presence_val, + prefilled_cache=presence_cache_prefill, + ) + async def update_presence(self, presence_states): + assert self._can_persist_presence + stream_ordering_manager = self._presence_id_gen.get_next_mult( len(presence_states) ) @@ -58,6 +110,7 @@ def _update_presence_txn(self, txn, stream_orderings, presence_states): "last_user_sync_ts": state.last_user_sync_ts, "status_msg": state.status_msg, "currently_active": state.currently_active, + "instance_name": self._instance_name, } for stream_id, state in zip(stream_orderings, presence_states) ], @@ -217,3 +270,37 @@ async def get_presence_for_all_users( 
def get_current_presence_token(self): return self._presence_id_gen.get_current_token() + + def _get_active_presence(self, db_conn: Connection): + """Fetch non-offline presence from the database so that we can register + the appropriate time outs. + """ + + sql = ( + "SELECT user_id, state, last_active_ts, last_federation_update_ts," + " last_user_sync_ts, status_msg, currently_active FROM presence_stream" + " WHERE state != ?" + ) + + txn = db_conn.cursor() + txn.execute(sql, (PresenceState.OFFLINE,)) + rows = self.db_pool.cursor_to_dict(txn) + txn.close() + + for row in rows: + row["currently_active"] = bool(row["currently_active"]) + + return [UserPresenceState(**row) for row in rows] + + def take_presence_startup_info(self): + active_on_startup = self._presence_on_startup + self._presence_on_startup = None + return active_on_startup + + def process_replication_rows(self, stream_name, instance_name, token, rows): + if stream_name == PresenceStream.NAME: + self._presence_id_gen.advance(instance_name, token) + for row in rows: + self.presence_stream_cache.entity_has_changed(row.user_id, token) + self._get_presence_for_user.invalidate((row.user_id,)) + return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index ba01d3108a96..9b4e95e134e7 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 41f4fe7f95cd..8f83748b5edb 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9e58dc0e6ae4..db5217633796 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c65558c2800d..b48fe086d4cc 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 43c852c96c00..3647276acb95 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 90a8f664ef95..6e5ee557d2ef 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019,2020 The Matrix.org Foundation C.I.C. 
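# process_replication_rows above is the standard worker-side recipe once a
# stream feeds a cache: advance the stream's id generator, record the change
# in the StreamChangeCache, and invalidate the per-user @cached entry.
# Schematically (a sketch, not the full class):
def on_presence_rows(store, instance_name: str, token: int, rows) -> None:
    store._presence_id_gen.advance(instance_name, token)
    for row in rows:
        store.presence_stream_cache.entity_has_changed(row.user_id, token)
        store._get_presence_for_user.invalidate((row.user_id,))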
@@ -92,13 +91,25 @@ def __init__( id_column=None, ) - self._account_validity = hs.config.account_validity - if hs.config.run_background_tasks and self._account_validity.enabled: - self._clock.call_later( - 0.0, - self._set_expiration_date_when_missing, + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) + self._account_validity_period = None + self._account_validity_startup_job_max_delta = None + if self._account_validity_enabled: + self._account_validity_period = ( + hs.config.account_validity.account_validity_period + ) + self._account_validity_startup_job_max_delta = ( + hs.config.account_validity.account_validity_startup_job_max_delta ) + if hs.config.run_background_tasks: + self._clock.call_later( + 0.0, + self._set_expiration_date_when_missing, + ) + # Create a background job for culling expired 3PID validity tokens if hs.config.run_background_tasks: self._clock.looping_call( @@ -195,6 +206,7 @@ async def set_account_validity_for_user( expiration_ts: int, email_sent: bool, renewal_token: Optional[str] = None, + token_used_ts: Optional[int] = None, ) -> None: """Updates the account validity properties of the given account, with the given values. @@ -208,6 +220,8 @@ async def set_account_validity_for_user( period. renewal_token: Renewal token the user can use to extend the validity of their account. Defaults to no token. + token_used_ts: A timestamp of when the current token was used to renew + the account. """ def set_account_validity_for_user_txn(txn): @@ -219,6 +233,7 @@ def set_account_validity_for_user_txn(txn): "expiration_ts_ms": expiration_ts, "email_sent": email_sent, "renewal_token": renewal_token, + "token_used_ts_ms": token_used_ts, }, ) self._invalidate_cache_and_stream( @@ -232,7 +247,7 @@ def set_account_validity_for_user_txn(txn): async def set_renewal_token_for_user( self, user_id: str, renewal_token: str ) -> None: - """Defines a renewal token for a given user. + """Defines a renewal token for a given user, and clears the token_used timestamp. Args: user_id: ID of the user to set the renewal token for. @@ -245,26 +260,40 @@ async def set_renewal_token_for_user( await self.db_pool.simple_update_one( table="account_validity", keyvalues={"user_id": user_id}, - updatevalues={"renewal_token": renewal_token}, + updatevalues={"renewal_token": renewal_token, "token_used_ts_ms": None}, desc="set_renewal_token_for_user", ) - async def get_user_from_renewal_token(self, renewal_token: str) -> str: - """Get a user ID from a renewal token. + async def get_user_from_renewal_token( + self, renewal_token: str + ) -> Tuple[str, int, Optional[int]]: + """Get a user ID and renewal status from a renewal token. Args: renewal_token: The renewal token to perform the lookup with. Returns: - The ID of the user to which the token belongs. + A tuple of containing the following values: + * The ID of a user to which the token belongs. + * An int representing the user's expiry timestamp as milliseconds since the + epoch, or 0 if the token was invalid. + * An optional int representing the timestamp of when the user renewed their + account timestamp as milliseconds since the epoch. None if the account + has not been renewed using the current token yet. 
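# A hypothetical caller, sketching what the richer return value described
# above enables: with token_used_ts_ms recorded, a renewal endpoint can make
# token reuse idempotent instead of silently extending the account twice.
async def renew_account_sketch(store, renewal_token: str):
    user_id, expiration_ts, token_used_ts = await store.get_user_from_renewal_token(
        renewal_token
    )
    if token_used_ts is not None:
        # token already consumed: report the existing expiry, change nothing
        return user_id, expiration_ts, False
    # ... otherwise compute the new expiry and record this renewal ...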
""" - return await self.db_pool.simple_select_one_onecol( + ret_dict = await self.db_pool.simple_select_one( table="account_validity", keyvalues={"renewal_token": renewal_token}, - retcol="user_id", + retcols=["user_id", "expiration_ts_ms", "token_used_ts_ms"], desc="get_user_from_renewal_token", ) + return ( + ret_dict["user_id"], + ret_dict["expiration_ts_ms"], + ret_dict["token_used_ts_ms"], + ) + async def get_renewal_token_for_user(self, user_id: str) -> str: """Get the renewal token associated with a given user ID. @@ -303,7 +332,7 @@ def select_users_txn(txn, now_ms, renew_at): "get_users_expiring_soon", select_users_txn, self._clock.time_msec(), - self.config.account_validity.renew_at, + self.config.account_validity_renew_at, ) async def set_renewal_mail_status(self, user_id: str, email_sent: bool) -> None: @@ -965,11 +994,11 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): delta equal to 10% of the validity period. """ now_ms = self._clock.time_msec() - expiration_ts = now_ms + self._account_validity.period + expiration_ts = now_ms + self._account_validity_period if use_delta: expiration_ts = self.rand.randrange( - expiration_ts - self._account_validity.startup_job_max_delta, + expiration_ts - self._account_validity_startup_job_max_delta, expiration_ts, ) @@ -1413,7 +1442,7 @@ def _register_user( except self.database_engine.module.IntegrityError: raise StoreError(400, "User ID already taken.", errcode=Codes.USER_IN_USE) - if self._account_validity.enabled: + if self._account_validity_enabled: self.set_expiration_date_for_user_txn(txn, user_id) if create_profile_with_displayname: diff --git a/synapse/storage/databases/main/rejections.py b/synapse/storage/databases/main/rejections.py index 1e361aaa9a73..167318b314ae 100644 --- a/synapse/storage/databases/main/rejections.py +++ b/synapse/storage/databases/main/rejections.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 5cd61547f733..2bbf6d6a95ed 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 9cbcd53026d8..5f38634f48da 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # @@ -521,13 +520,11 @@ def _get_rooms_paginate_txn(txn): ) @cached(max_entries=10000) - async def get_ratelimit_for_user(self, user_id): - """Check if there are any overrides for ratelimiting for the given - user + async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverride]: + """Check if there are any overrides for ratelimiting for the given user Args: - user_id (str) - + user_id: user ID of the user Returns: RatelimitOverride if there is an override, else None. 
If the contents of RatelimitOverride are None or 0 then ratelimiting has been @@ -549,6 +546,62 @@ async def get_ratelimit_for_user(self, user_id): else: return None + async def set_ratelimit_for_user( + self, user_id: str, messages_per_second: int, burst_count: int + ) -> None: + """Sets an overridden ratelimit for a user. + Args: + user_id: user ID of the user + messages_per_second: The number of actions that can be performed in a second. + burst_count: How many actions can be performed before being limited. + """ + + def set_ratelimit_txn(txn): + self.db_pool.simple_upsert_txn( + txn, + table="ratelimit_override", + keyvalues={"user_id": user_id}, + values={ + "messages_per_second": messages_per_second, + "burst_count": burst_count, + }, + ) + + self._invalidate_cache_and_stream( + txn, self.get_ratelimit_for_user, (user_id,) + ) + + await self.db_pool.runInteraction("set_ratelimit", set_ratelimit_txn) + + async def delete_ratelimit_for_user(self, user_id: str) -> None: + """Delete an overridden ratelimit for a user. + Args: + user_id: user ID of the user + """ + + def delete_ratelimit_txn(txn): + row = self.db_pool.simple_select_one_txn( + txn, + table="ratelimit_override", + keyvalues={"user_id": user_id}, + retcols=["user_id"], + allow_none=True, + ) + + if not row: + return + + # The override exists, so delete it. + self.db_pool.simple_delete_one_txn( + txn, "ratelimit_override", keyvalues={"user_id": user_id} + ) + + self._invalidate_cache_and_stream( + txn, self.get_ratelimit_for_user, (user_id,) + ) + + await self.db_pool.runInteraction("delete_ratelimit", delete_ratelimit_txn) + @cached() async def get_retention_policy_for_room(self, room_id): """Get the retention policy for a given room. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index a9216ca9ae52..5fc3bb5a7d7b 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -14,7 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) + +import attr from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase @@ -34,7 +46,7 @@ ProfileInfo, RoomsForUser, ) -from synapse.types import Collection, PersistedEventPosition, get_domain_from_id +from synapse.types import PersistedEventPosition, StateMap, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -54,6 +66,10 @@ class RoomMemberWorkerStore(EventsWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) + # Used by `_get_joined_hosts` to ensure only one thing mutates the cache + # at a time. Keyed by room_id. + self._joined_host_linearizer = Linearizer("_JoinedHostsCache") + + # Is the current_state_events.membership up to date? Or is the # background update still running?
self._current_state_events_membership_up_to_date = False @@ -174,6 +190,37 @@ def get_users_in_room_txn(self, txn, room_id: str) -> List[str]: txn.execute(sql, (room_id, Membership.JOIN)) return [r[0] for r in txn] + @cached(max_entries=100000, iterable=True) + async def get_users_in_room_with_profiles( + self, room_id: str + ) -> Dict[str, ProfileInfo]: + """Get a mapping from user ID to profile information for all users in a given room. + + Args: + room_id: The ID of the room to retrieve the users of. + + Returns: + A mapping from user ID to ProfileInfo. + """ + + def _get_users_in_room_with_profiles(txn) -> Dict[str, ProfileInfo]: + sql = """ + SELECT state_key, display_name, avatar_url FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? + """ + txn.execute(sql, (room_id, Membership.JOIN)) + + return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn} + + return await self.db_pool.runInteraction( + "get_users_in_room_with_profiles", + _get_users_in_room_with_profiles, + ) + @cached(max_entries=100000) async def get_room_summary(self, room_id: str) -> Dict[str, MemberSummary]: """Get the details of a room roughly suitable for use by the room @@ -704,19 +751,82 @@ async def get_joined_hosts(self, room_id: str, state_entry): @cached(num_args=2, max_entries=10000, iterable=True) async def _get_joined_hosts( - self, room_id, state_group, current_state_ids, state_entry - ): - # We don't use `state_group`, its there so that we can cache based - # on it. However, its important that its never None, since two current_state's - # with a state_group of None are likely to be different. + self, + room_id: str, + state_group: int, + current_state_ids: StateMap[str], + state_entry: "_StateCacheEntry", + ) -> FrozenSet[str]: + # We don't use `state_group`, it's there so that we can cache based on + # it. However, it's important that it's never None, since two + # current_state's with a state_group of None are likely to be different. + # + # The `state_group` must match the `state_entry.state_group` (if not None). assert state_group is not None - + assert state_entry.state_group is None or state_entry.state_group == state_group + + # We use a secondary cache of previous work to allow us to build up the + # joined hosts for the given state group based on previous state groups. + # + # We cache one object per room containing the results of the last state + # group we got joined hosts for. The idea is that generally + # `get_joined_hosts` is called with the "current" state group for the + # room, and so consecutive calls will be for consecutive state groups + # which point to the previous state group. cache = await self._get_joined_hosts_cache(room_id) - return await cache.get_destinations(state_entry) + + # If the state group in the cache matches, we already have the data we need. + if state_entry.state_group == cache.state_group: + return frozenset(cache.hosts_to_joined_users) + + # Since we'll mutate the cache we need to lock. + with (await self._joined_host_linearizer.queue(room_id)): + if state_entry.state_group == cache.state_group: + # Same state group, so nothing to do. We've already checked for + # this above, but the cache may have changed while waiting on + # the lock. + pass + elif state_entry.prev_group == cache.state_group: + # The cached work is for the previous state group, so we work out + # the delta.
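+ # (`delta_ids` maps (event type, state key) pairs to the event IDs of + # state that changed between the two groups; for m.room.member events + # the state key is the user's Matrix ID, which is why the host can be + # derived from it below.)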
+ for (typ, state_key), event_id in state_entry.delta_ids.items(): + if typ != EventTypes.Member: + continue + + host = intern_string(get_domain_from_id(state_key)) + user_id = state_key + known_joins = cache.hosts_to_joined_users.setdefault(host, set()) + + event = await self.get_event(event_id) + if event.membership == Membership.JOIN: + known_joins.add(user_id) + else: + known_joins.discard(user_id) + + if not known_joins: + cache.hosts_to_joined_users.pop(host, None) + else: + # The cache doesn't match the state group or prev state group, + # so we calculate the result from first principles. + joined_users = await self.get_joined_users_from_state( + room_id, state_entry + ) + + cache.hosts_to_joined_users = {} + for user_id in joined_users: + host = intern_string(get_domain_from_id(user_id)) + cache.hosts_to_joined_users.setdefault(host, set()).add(user_id) + + if state_entry.state_group: + cache.state_group = state_entry.state_group + else: + cache.state_group = object() + + return frozenset(cache.hosts_to_joined_users) @cached(max_entries=10000) def _get_joined_hosts_cache(self, room_id: str) -> "_JoinedHostsCache": - return _JoinedHostsCache(self, room_id) + return _JoinedHostsCache() @cached(num_args=2) async def did_forget(self, user_id: str, room_id: str) -> bool: @@ -1026,71 +1136,18 @@ def f(txn): await self.db_pool.runInteraction("forget_membership", f) +@attr.s(slots=True) class _JoinedHostsCache: - """Cache for joined hosts in a room that is optimised to handle updates - via state deltas. - """ - - def __init__(self, store, room_id): - self.store = store - self.room_id = room_id - - self.hosts_to_joined_users = {} + """The cached data used by the `_get_joined_hosts_cache`.""" - self.state_group = object() + # Dict of host to the set of their users in the room at the state group. + hosts_to_joined_users = attr.ib(type=Dict[str, Set[str]], factory=dict) - self.linearizer = Linearizer("_JoinedHostsCache") - - self._len = 0 - - async def get_destinations(self, state_entry: "_StateCacheEntry") -> Set[str]: - """Get set of destinations for a state entry - - Args: - state_entry - - Returns: - The destinations as a set. - """ - if state_entry.state_group == self.state_group: - return frozenset(self.hosts_to_joined_users) - - with (await self.linearizer.queue(())): - if state_entry.state_group == self.state_group: - pass - elif state_entry.prev_group == self.state_group: - for (typ, state_key), event_id in state_entry.delta_ids.items(): - if typ != EventTypes.Member: - continue - - host = intern_string(get_domain_from_id(state_key)) - user_id = state_key - known_joins = self.hosts_to_joined_users.setdefault(host, set()) - - event = await self.store.get_event(event_id) - if event.membership == Membership.JOIN: - known_joins.add(user_id) - else: - known_joins.discard(user_id) - - if not known_joins: - self.hosts_to_joined_users.pop(host, None) - else: - joined_users = await self.store.get_joined_users_from_state( - self.room_id, state_entry - ) - - self.hosts_to_joined_users = {} - for user_id in joined_users: - host = intern_string(get_domain_from_id(user_id)) - self.hosts_to_joined_users.setdefault(host, set()).add(user_id) - - if state_entry.state_group: - self.state_group = state_entry.state_group - else: - self.state_group = object() - self._len = sum(len(v) for v in self.hosts_to_joined_users.values()) - return frozenset(self.hosts_to_joined_users) + # The state group `hosts_to_joined_users` is derived from. 
Will be an object + # if the instance is newly created or if the state is not based on a state + # group. (An object is used as a sentinel value to ensure that it never is + # equal to anything else). + state_group = attr.ib(type=Union[object, int], factory=object) def __len__(self): - return self._len + return sum(len(v) for v in self.hosts_to_joined_users.values()) diff --git a/synapse/storage/databases/main/schema/full_schemas/README.md b/synapse/storage/databases/main/schema/full_schemas/README.md deleted file mode 100644 index c00f28719091..000000000000 --- a/synapse/storage/databases/main/schema/full_schemas/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Synapse Database Schemas - -These schemas are used as a basis to create brand new Synapse databases, on both -SQLite3 and Postgres. - -## Building full schema dumps - -If you want to recreate these schemas, they need to be made from a database that -has had all background updates run. - -To do so, use `scripts-dev/make_full_schema.sh`. This will produce new -`full.sql.postgres ` and `full.sql.sqlite` files. - -Ensure postgres is installed and your user has the ability to run bash commands -such as `createdb`, then call - - ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ - -There are currently two folders with full-schema snapshots. `16` is a snapshot -from 2015, for historical reference. The other contains the most recent full -schema snapshot. diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index f5e7d9ef98fc..6480d5a9f5eb 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +15,7 @@ import logging import re from collections import namedtuple -from typing import List, Optional, Set +from typing import Collection, List, Optional, Set from synapse.api.errors import SynapseError from synapse.events import EventBase @@ -24,7 +23,6 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import Collection logger = logging.getLogger(__name__) diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index c8c67953e47b..ab2159c2d378 100644 --- a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index a7f371732fd7..1757064a686f 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -190,7 +189,7 @@ def _get_current_state_ids_txn(txn): # FIXME: how should this be cached? async def get_filtered_current_state_ids( - self, room_id: str, state_filter: StateFilter = StateFilter.all() + self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: """Get the current state event of a given type for a room based on the current_state_events table. 
This may not be as up-to-date as the result @@ -205,7 +204,9 @@ async def get_filtered_current_state_ids( Map from type/state_key to event ID. """ - where_clause, where_args = state_filter.make_sql_filter_clause() + where_clause, where_args = ( + state_filter or StateFilter.all() + ).make_sql_filter_clause() if not where_clause: # We delegate to the cached version diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 0dbb501f16dc..bff7d0404f1f 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 1c99393c657c..ae9f88096593 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018, 2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # @@ -66,18 +65,37 @@ class UserSortOrder(Enum): """ Enum to define the sorting method used when returning users - with get_users_media_usage_paginate + with get_users_paginate in __init__.py + and get_users_media_usage_paginate in stats.py - MEDIA_LENGTH = ordered by size of uploaded media. Smallest to largest. - MEDIA_COUNT = ordered by number of uploaded media. Smallest to largest. + Note: moving this to __init__.py raises a `builtins.ImportError`, + most likely due to a circular import. + + MEDIA_LENGTH = ordered by size of uploaded media. + MEDIA_COUNT = ordered by number of uploaded media. USER_ID = ordered alphabetically by `user_id`. + NAME = ordered alphabetically by `user_id`. This is for compatibility reasons, + as the user_id is returned in the `name` field of the List Users admin API response.
DISPLAYNAME = ordered alphabetically by `displayname` + GUEST = ordered by `is_guest` + ADMIN = ordered by `admin` + DEACTIVATED = ordered by `deactivated` + USER_TYPE = ordered alphabetically by `user_type` + AVATAR_URL = ordered alphabetically by `avatar_url` + SHADOW_BANNED = ordered by `shadow_banned` """ MEDIA_LENGTH = "media_length" MEDIA_COUNT = "media_count" USER_ID = "user_id" + NAME = "name" DISPLAYNAME = "displayname" + GUEST = "is_guest" + ADMIN = "admin" + DEACTIVATED = "deactivated" + USER_TYPE = "user_type" + AVATAR_URL = "avatar_url" + SHADOW_BANNED = "shadow_banned" class StatsStore(StateDeltasStore): diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 91f8abb67d59..7581c7d3ff92 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd @@ -38,7 +37,7 @@ import abc import logging from collections import namedtuple -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple from twisted.internet import defer @@ -54,7 +53,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import Collection, PersistedEventPosition, RoomStreamToken +from synapse.types import PersistedEventPosition, RoomStreamToken from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 50067eabfc5c..1d62c6140f00 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index b7072f1f5ef2..82335e7a9dad 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 5473ec1485d0..22c05cdde7df 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 1026f321e51e..a6bfb4902a1c 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -143,8 +142,6 @@ async def _populate_user_directory_process_rooms(self, progress, batch_size): batch_size (int): Maximum number of state events to process per cycle. 
""" - state = self.hs.get_state_handler() - # If we don't have progress filed, delete everything. if not progress: await self.delete_all_from_user_dir() @@ -198,7 +195,7 @@ def _get_next_batch(txn): room_id ) - users_with_profile = await state.get_current_users_in_room(room_id) + users_with_profile = await self.get_users_in_room_with_profiles(room_id) user_ids = set(users_with_profile) # Update each user in the user directory. diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index f9575b1f1fd8..acf6b2fb643d 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/state/__init__.py b/synapse/storage/databases/state/__init__.py index c90d0228993c..e5100d610818 100644 --- a/synapse/storage/databases/state/__init__.py +++ b/synapse/storage/databases/state/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 1fd333b707e1..c2891cb07f11 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +13,7 @@ # limitations under the License. import logging +from typing import Optional from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool @@ -73,8 +73,10 @@ def _count_state_group_hops_txn(self, txn, state_group): return count def _get_state_groups_from_groups_txn( - self, txn, groups, state_filter=StateFilter.all() + self, txn, groups, state_filter: Optional[StateFilter] = None ): + state_filter = state_filter or StateFilter.all() + results = {group: {} for group in groups} where_clause, where_args = state_filter.make_sql_filter_clause() diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 97ec65f757e3..e38461adbc72 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,7 @@ import logging from collections import namedtuple -from typing import Dict, Iterable, List, Set, Tuple +from typing import Dict, Iterable, List, Optional, Set, Tuple from synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore @@ -210,7 +209,7 @@ def _get_state_for_group_using_cache(self, cache, group, state_filter): return state_filter.filter_state(state_dict_ids), not missing_types async def _get_state_for_groups( - self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() + self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -223,6 +222,7 @@ async def _get_state_for_groups( Returns: Dict of state group to state map. 
""" + state_filter = state_filter or StateFilter.all() member_filter, non_member_filter = state_filter.get_member_split() diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index d15ccfacdeb7..9abc02046ee9 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index cca839c70f59..1882bfd9cf8d 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -42,14 +41,6 @@ def can_native_upsert(self) -> bool: """ ... - @property - @abc.abstractmethod - def supports_tuple_comparison(self) -> bool: - """ - Do we support comparing tuples, i.e. `(a, b) > (c, d)`? - """ - ... - @property @abc.abstractmethod def supports_using_any_list(self) -> bool: diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 80a3558aec3e..21411c5fea5c 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,8 +46,8 @@ def check_database(self, db_conn, allow_outdated_version: bool = False): self._version = db_conn.server_version # Are we on a supported PostgreSQL version? - if not allow_outdated_version and self._version < 90500: - raise RuntimeError("Synapse requires PostgreSQL 9.5+ or above.") + if not allow_outdated_version and self._version < 90600: + raise RuntimeError("Synapse requires PostgreSQL 9.6 or above.") with db_conn.cursor() as txn: txn.execute("SHOW SERVER_ENCODING") @@ -129,13 +128,6 @@ def can_native_upsert(self): """ return True - @property - def supports_tuple_comparison(self): - """ - Do we support comparing tuples, i.e. `(a, b) > (c, d)`? - """ - return True - @property def supports_using_any_list(self): """Do we support using `a = ANY(?)` and passing a list""" diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index b87e7798daab..5fe1b205e140 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,14 +55,6 @@ def can_native_upsert(self): """ return self.module.sqlite_version_info >= (3, 24, 0) - @property - def supports_tuple_comparison(self): - """ - Do we support comparing tuples, i.e. `(a, b) > (c, d)`? This requires - SQLite 3.15+. 
- """ - return self.module.sqlite_version_info >= (3, 15, 0) - @property def supports_using_any_list(self): """Do we support using `a = ANY(?)` and passing a list""" @@ -72,8 +63,11 @@ def supports_using_any_list(self): def check_database(self, db_conn, allow_outdated_version: bool = False): if not allow_outdated_version: version = self.module.sqlite_version_info - if version < (3, 11, 0): - raise RuntimeError("Synapse requires sqlite 3.11 or above.") + # Synapse is untested against older SQLite versions, and we don't want + # to let users upgrade to a version of Synapse with broken support for their + # sqlite version, because it risks leaving them with a half-upgraded db. + if version < (3, 22, 0): + raise RuntimeError("Synapse requires sqlite 3.22 or above.") def check_new_database(self, txn): """Gets called when setting up a brand new database. This allows us to diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index c03871f3933e..540adb878174 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd. # diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 3a0d6fb32e84..33dc752d8fd0 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -18,7 +17,7 @@ import itertools import logging from collections import deque, namedtuple -from typing import Dict, Iterable, List, Optional, Set, Tuple +from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple from prometheus_client import Counter, Histogram @@ -33,7 +32,6 @@ from synapse.storage.databases.main.events import DeltaState from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( - Collection, PersistedEventPosition, RoomStreamToken, StateMap, diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 6c3c2da5201f..3799d46734ae 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -13,12 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import imp +import importlib.util import logging import os import re from collections import Counter -from typing import Generator, Iterable, List, Optional, TextIO, Tuple +from typing import Collection, Generator, Iterable, List, Optional, TextIO, Tuple import attr from typing_extensions import Counter as CounterType @@ -27,17 +26,13 @@ from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.schema import SCHEMA_VERSION from synapse.storage.types import Cursor -from synapse.types import Collection logger = logging.getLogger(__name__) -# Remember to update this number every time a change is made to database -# schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 59 - -dir_path = os.path.abspath(os.path.dirname(__file__)) +schema_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "schema") class PrepareDatabaseException(Exception): @@ -169,7 +164,14 @@ def _setup_new_database( Example directory structure: - schema/ + schema/ + common/ + delta/ + ... + full_schemas/ + 11/ + foo.sql + main/ delta/ ... full_schemas/ @@ -177,15 +179,14 @@ def _setup_new_database( test.sql ... 11/ - foo.sql bar.sql ... In the example foo.sql and bar.sql would be run, and then any delta files for versions strictly greater than 11. - Note: we apply the full schemas and deltas from the top level `schema/` - folder as well those in the data stores specified. + Note: we apply the full schemas and deltas from the `schema/common` + folder as well as those in the databases specified. Args: cur: a database cursor @@ -197,12 +198,12 @@ def _setup_new_database( # configured to our liking. database_engine.check_new_database(cur) - current_dir = os.path.join(dir_path, "schema", "full_schemas") + full_schemas_dir = os.path.join(schema_path, "common", "full_schemas") # First we find the highest full schema version we have valid_versions = [] - for filename in os.listdir(current_dir): + for filename in os.listdir(full_schemas_dir): try: ver = int(filename) except ValueError: @@ -220,15 +221,13 @@ def _setup_new_database( logger.debug("Initialising schema v%d", max_current_ver) - # Now lets find all the full schema files, both in the global schema and - # in data store schemas. - directories = [os.path.join(current_dir, str(max_current_ver))] + # Now let's find all the full schema files, both in the common schema and + # in database schemas. + directories = [os.path.join(full_schemas_dir, str(max_current_ver))] directories.extend( os.path.join( - dir_path, - "databases", + schema_path, database, - "schema", "full_schemas", str(max_current_ver), ) @@ -359,6 +358,9 @@ def _upgrade_existing_database( check_database_before_upgrade(cur, database_engine, config) start_ver = current_version + + # if we got to this schema version by running a full_schema rather than a series + # of deltas, we should not run the deltas for this version. if not upgraded: start_ver += 1 @@ -387,12 +389,10 @@ def _upgrade_existing_database( # directories for schema updates.
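# (e.g. for schema version 59 with databases ("main", "state"), the list built # below would be schema/common/delta/59, schema/main/delta/59 and # schema/state/delta/59.)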
# First we find the directories to search in - delta_dir = os.path.join(dir_path, "schema", "delta", str(v)) + delta_dir = os.path.join(schema_path, "common", "delta", str(v)) directories = [delta_dir] for database in databases: - directories.append( - os.path.join(dir_path, "databases", database, "schema", "delta", str(v)) - ) + directories.append(os.path.join(schema_path, database, "delta", str(v))) # Used to check if we have any duplicate file names file_name_counter = Counter() # type: CounterType[str] @@ -454,8 +454,13 @@ def _upgrade_existing_database( ) module_name = "synapse.storage.v%d_%s" % (v, root_name) - with open(absolute_path) as python_file: - module = imp.load_source(module_name, absolute_path, python_file) # type: ignore + + spec = importlib.util.spec_from_file_location( + module_name, absolute_path + ) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) # type: ignore + logger.info("Running script %s", relative_path) module.run_create(cur, database_engine) # type: ignore if not is_empty: @@ -618,8 +623,8 @@ def _get_or_create_schema_state( txn: Cursor, database_engine: BaseDatabaseEngine ) -> Optional[Tuple[int, List[str], bool]]: # Bluntly try creating the schema_version tables. - schema_path = os.path.join(dir_path, "schema", "schema_version.sql") - executescript(txn, schema_path) + sql_path = os.path.join(schema_path, "common", "schema_version.sql") + executescript(txn, sql_path) txn.execute("SELECT version, upgraded FROM schema_version") row = txn.fetchone() diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py index ad954990a752..30669beb7c6a 100644 --- a/synapse/storage/purge_events.py +++ b/synapse/storage/purge_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index f47cec0d8647..2d5c21ef72c0 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 2564f34b4713..c552dbf04c8e 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index d2ff4da6b9fe..c34fbf21bc42 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/schema/README.md b/synapse/storage/schema/README.md new file mode 100644 index 000000000000..030153db64a8 --- /dev/null +++ b/synapse/storage/schema/README.md @@ -0,0 +1,37 @@ +# Synapse Database Schemas + +This directory contains the schema files used to build Synapse databases. + +Synapse supports splitting its datastore across multiple physical databases (which can +be useful for large installations), and the schema files are therefore split according +to the logical database they apply to. + +At the time of writing, the following "logical" databases are supported: + +* `state` - used to store Matrix room state (more specifically, `state_groups`, + their relationships and contents.)
+* `main` - stores everything else. + +Additionally, the `common` directory contains schema files for tables which must be +present on *all* physical databases. + +## Full schema dumps + +In the `full_schemas` directories, only the most recently-numbered snapshot is useful +(`54` at the time of writing). Older snapshots (eg, `16`) are present for historical +reference only. + +## Building full schema dumps + +If you want to recreate these schemas, they need to be made from a database that +has had all background updates run. + +To do so, use `scripts-dev/make_full_schema.sh`. This will produce new +`full.sql.postgres` and `full.sql.sqlite` files. + +Ensure postgres is installed, then run: + + ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ + +NB: at the time of writing, this script predates the split into separate `state`/`main` +databases, so it will require updates to handle that correctly. diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py new file mode 100644 index 000000000000..f0d9f2316762 --- /dev/null +++ b/synapse/storage/schema/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Remember to update this number every time a change is made to database +# schema files, so that users will be informed on server restarts.
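+# (On startup, synapse/storage/prepare_database.py compares this value with the +# `schema_version` table and applies any delta files from newer versions; see +# `_upgrade_existing_database` in that module.)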
+SCHEMA_VERSION = 59 diff --git a/synapse/storage/schema/delta/25/00background_updates.sql b/synapse/storage/schema/common/delta/25/00background_updates.sql similarity index 100% rename from synapse/storage/schema/delta/25/00background_updates.sql rename to synapse/storage/schema/common/delta/25/00background_updates.sql diff --git a/synapse/storage/schema/delta/35/00background_updates_add_col.sql b/synapse/storage/schema/common/delta/35/00background_updates_add_col.sql similarity index 100% rename from synapse/storage/schema/delta/35/00background_updates_add_col.sql rename to synapse/storage/schema/common/delta/35/00background_updates_add_col.sql diff --git a/synapse/storage/schema/delta/58/00background_update_ordering.sql b/synapse/storage/schema/common/delta/58/00background_update_ordering.sql similarity index 100% rename from synapse/storage/schema/delta/58/00background_update_ordering.sql rename to synapse/storage/schema/common/delta/58/00background_update_ordering.sql diff --git a/synapse/storage/schema/full_schemas/54/full.sql b/synapse/storage/schema/common/full_schemas/54/full.sql similarity index 100% rename from synapse/storage/schema/full_schemas/54/full.sql rename to synapse/storage/schema/common/full_schemas/54/full.sql diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/common/schema_version.sql similarity index 100% rename from synapse/storage/schema/schema_version.sql rename to synapse/storage/schema/common/schema_version.sql diff --git a/synapse/storage/databases/main/schema/delta/12/v12.sql b/synapse/storage/schema/main/delta/12/v12.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/12/v12.sql rename to synapse/storage/schema/main/delta/12/v12.sql diff --git a/synapse/storage/databases/main/schema/delta/13/v13.sql b/synapse/storage/schema/main/delta/13/v13.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/13/v13.sql rename to synapse/storage/schema/main/delta/13/v13.sql diff --git a/synapse/storage/databases/main/schema/delta/14/v14.sql b/synapse/storage/schema/main/delta/14/v14.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/14/v14.sql rename to synapse/storage/schema/main/delta/14/v14.sql diff --git a/synapse/storage/databases/main/schema/delta/15/appservice_txns.sql b/synapse/storage/schema/main/delta/15/appservice_txns.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/15/appservice_txns.sql rename to synapse/storage/schema/main/delta/15/appservice_txns.sql diff --git a/synapse/storage/databases/main/schema/delta/15/presence_indices.sql b/synapse/storage/schema/main/delta/15/presence_indices.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/15/presence_indices.sql rename to synapse/storage/schema/main/delta/15/presence_indices.sql diff --git a/synapse/storage/databases/main/schema/delta/15/v15.sql b/synapse/storage/schema/main/delta/15/v15.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/15/v15.sql rename to synapse/storage/schema/main/delta/15/v15.sql diff --git a/synapse/storage/databases/main/schema/delta/16/events_order_index.sql b/synapse/storage/schema/main/delta/16/events_order_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/events_order_index.sql rename to synapse/storage/schema/main/delta/16/events_order_index.sql diff --git 
a/synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql b/synapse/storage/schema/main/delta/16/remote_media_cache_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql rename to synapse/storage/schema/main/delta/16/remote_media_cache_index.sql diff --git a/synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql b/synapse/storage/schema/main/delta/16/remove_duplicates.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql rename to synapse/storage/schema/main/delta/16/remove_duplicates.sql diff --git a/synapse/storage/databases/main/schema/delta/16/room_alias_index.sql b/synapse/storage/schema/main/delta/16/room_alias_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/room_alias_index.sql rename to synapse/storage/schema/main/delta/16/room_alias_index.sql diff --git a/synapse/storage/databases/main/schema/delta/16/unique_constraints.sql b/synapse/storage/schema/main/delta/16/unique_constraints.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/unique_constraints.sql rename to synapse/storage/schema/main/delta/16/unique_constraints.sql diff --git a/synapse/storage/databases/main/schema/delta/16/users.sql b/synapse/storage/schema/main/delta/16/users.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/users.sql rename to synapse/storage/schema/main/delta/16/users.sql diff --git a/synapse/storage/databases/main/schema/delta/17/drop_indexes.sql b/synapse/storage/schema/main/delta/17/drop_indexes.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/17/drop_indexes.sql rename to synapse/storage/schema/main/delta/17/drop_indexes.sql diff --git a/synapse/storage/databases/main/schema/delta/17/server_keys.sql b/synapse/storage/schema/main/delta/17/server_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/17/server_keys.sql rename to synapse/storage/schema/main/delta/17/server_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/17/user_threepids.sql b/synapse/storage/schema/main/delta/17/user_threepids.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/17/user_threepids.sql rename to synapse/storage/schema/main/delta/17/user_threepids.sql diff --git a/synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql rename to synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql diff --git a/synapse/storage/databases/main/schema/delta/19/event_index.sql b/synapse/storage/schema/main/delta/19/event_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/19/event_index.sql rename to synapse/storage/schema/main/delta/19/event_index.sql diff --git a/synapse/storage/databases/main/schema/delta/20/dummy.sql b/synapse/storage/schema/main/delta/20/dummy.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/20/dummy.sql rename to synapse/storage/schema/main/delta/20/dummy.sql diff --git a/synapse/storage/databases/main/schema/delta/20/pushers.py b/synapse/storage/schema/main/delta/20/pushers.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/20/pushers.py rename to 
synapse/storage/schema/main/delta/20/pushers.py diff --git a/synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql b/synapse/storage/schema/main/delta/21/end_to_end_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql rename to synapse/storage/schema/main/delta/21/end_to_end_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/21/receipts.sql b/synapse/storage/schema/main/delta/21/receipts.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/21/receipts.sql rename to synapse/storage/schema/main/delta/21/receipts.sql diff --git a/synapse/storage/databases/main/schema/delta/22/receipts_index.sql b/synapse/storage/schema/main/delta/22/receipts_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/22/receipts_index.sql rename to synapse/storage/schema/main/delta/22/receipts_index.sql diff --git a/synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql b/synapse/storage/schema/main/delta/22/user_threepids_unique.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql rename to synapse/storage/schema/main/delta/22/user_threepids_unique.sql diff --git a/synapse/storage/databases/main/schema/delta/24/stats_reporting.sql b/synapse/storage/schema/main/delta/24/stats_reporting.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/24/stats_reporting.sql rename to synapse/storage/schema/main/delta/24/stats_reporting.sql diff --git a/synapse/storage/databases/main/schema/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/fts.py rename to synapse/storage/schema/main/delta/25/fts.py diff --git a/synapse/storage/databases/main/schema/delta/25/guest_access.sql b/synapse/storage/schema/main/delta/25/guest_access.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/guest_access.sql rename to synapse/storage/schema/main/delta/25/guest_access.sql diff --git a/synapse/storage/databases/main/schema/delta/25/history_visibility.sql b/synapse/storage/schema/main/delta/25/history_visibility.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/history_visibility.sql rename to synapse/storage/schema/main/delta/25/history_visibility.sql diff --git a/synapse/storage/databases/main/schema/delta/25/tags.sql b/synapse/storage/schema/main/delta/25/tags.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/tags.sql rename to synapse/storage/schema/main/delta/25/tags.sql diff --git a/synapse/storage/databases/main/schema/delta/26/account_data.sql b/synapse/storage/schema/main/delta/26/account_data.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/26/account_data.sql rename to synapse/storage/schema/main/delta/26/account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/27/account_data.sql b/synapse/storage/schema/main/delta/27/account_data.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/27/account_data.sql rename to synapse/storage/schema/main/delta/27/account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql b/synapse/storage/schema/main/delta/27/forgotten_memberships.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql 
rename to synapse/storage/schema/main/delta/27/forgotten_memberships.sql diff --git a/synapse/storage/databases/main/schema/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/27/ts.py rename to synapse/storage/schema/main/delta/27/ts.py diff --git a/synapse/storage/databases/main/schema/delta/28/event_push_actions.sql b/synapse/storage/schema/main/delta/28/event_push_actions.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/event_push_actions.sql rename to synapse/storage/schema/main/delta/28/event_push_actions.sql diff --git a/synapse/storage/databases/main/schema/delta/28/events_room_stream.sql b/synapse/storage/schema/main/delta/28/events_room_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/events_room_stream.sql rename to synapse/storage/schema/main/delta/28/events_room_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/28/public_roms_index.sql b/synapse/storage/schema/main/delta/28/public_roms_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/public_roms_index.sql rename to synapse/storage/schema/main/delta/28/public_roms_index.sql diff --git a/synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql b/synapse/storage/schema/main/delta/28/receipts_user_id_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql rename to synapse/storage/schema/main/delta/28/receipts_user_id_index.sql diff --git a/synapse/storage/databases/main/schema/delta/28/upgrade_times.sql b/synapse/storage/schema/main/delta/28/upgrade_times.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/upgrade_times.sql rename to synapse/storage/schema/main/delta/28/upgrade_times.sql diff --git a/synapse/storage/databases/main/schema/delta/28/users_is_guest.sql b/synapse/storage/schema/main/delta/28/users_is_guest.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/users_is_guest.sql rename to synapse/storage/schema/main/delta/28/users_is_guest.sql diff --git a/synapse/storage/databases/main/schema/delta/29/push_actions.sql b/synapse/storage/schema/main/delta/29/push_actions.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/29/push_actions.sql rename to synapse/storage/schema/main/delta/29/push_actions.sql diff --git a/synapse/storage/databases/main/schema/delta/30/alias_creator.sql b/synapse/storage/schema/main/delta/30/alias_creator.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/alias_creator.sql rename to synapse/storage/schema/main/delta/30/alias_creator.sql diff --git a/synapse/storage/databases/main/schema/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/as_users.py rename to synapse/storage/schema/main/delta/30/as_users.py diff --git a/synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql b/synapse/storage/schema/main/delta/30/deleted_pushers.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql rename to synapse/storage/schema/main/delta/30/deleted_pushers.sql diff --git a/synapse/storage/databases/main/schema/delta/30/presence_stream.sql b/synapse/storage/schema/main/delta/30/presence_stream.sql similarity index 100% 
rename from synapse/storage/databases/main/schema/delta/30/presence_stream.sql
rename to synapse/storage/schema/main/delta/30/presence_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/public_rooms.sql b/synapse/storage/schema/main/delta/30/public_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/public_rooms.sql
rename to synapse/storage/schema/main/delta/30/public_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql b/synapse/storage/schema/main/delta/30/push_rule_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql
rename to synapse/storage/schema/main/delta/30/push_rule_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql b/synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql
rename to synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/invites.sql b/synapse/storage/schema/main/delta/31/invites.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/invites.sql
rename to synapse/storage/schema/main/delta/31/invites.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql b/synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql
rename to synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/pushers.py b/synapse/storage/schema/main/delta/31/pushers.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/pushers.py
rename to synapse/storage/schema/main/delta/31/pushers.py
diff --git a/synapse/storage/databases/main/schema/delta/31/pushers_index.sql b/synapse/storage/schema/main/delta/31/pushers_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/pushers_index.sql
rename to synapse/storage/schema/main/delta/31/pushers_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/search_update.py
rename to synapse/storage/schema/main/delta/31/search_update.py
diff --git a/synapse/storage/databases/main/schema/delta/32/events.sql b/synapse/storage/schema/main/delta/32/events.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/events.sql
rename to synapse/storage/schema/main/delta/32/events.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/openid.sql b/synapse/storage/schema/main/delta/32/openid.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/openid.sql
rename to synapse/storage/schema/main/delta/32/openid.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql b/synapse/storage/schema/main/delta/32/pusher_throttle.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql
rename to synapse/storage/schema/main/delta/32/pusher_throttle.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/remove_indices.sql b/synapse/storage/schema/main/delta/32/remove_indices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/remove_indices.sql
rename to synapse/storage/schema/main/delta/32/remove_indices.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/reports.sql b/synapse/storage/schema/main/delta/32/reports.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/reports.sql
rename to synapse/storage/schema/main/delta/32/reports.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql b/synapse/storage/schema/main/delta/33/access_tokens_device_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql
rename to synapse/storage/schema/main/delta/33/access_tokens_device_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/devices.sql b/synapse/storage/schema/main/delta/33/devices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/devices.sql
rename to synapse/storage/schema/main/delta/33/devices.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql
rename to synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql b/synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
rename to synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/event_fields.py
rename to synapse/storage/schema/main/delta/33/event_fields.py
diff --git a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/main/delta/33/remote_media_ts.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/remote_media_ts.py
rename to synapse/storage/schema/main/delta/33/remote_media_ts.py
diff --git a/synapse/storage/databases/main/schema/delta/33/user_ips_index.sql b/synapse/storage/schema/main/delta/33/user_ips_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/user_ips_index.sql
rename to synapse/storage/schema/main/delta/33/user_ips_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/appservice_stream.sql b/synapse/storage/schema/main/delta/34/appservice_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/appservice_stream.sql
rename to synapse/storage/schema/main/delta/34/appservice_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/cache_stream.py b/synapse/storage/schema/main/delta/34/cache_stream.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/cache_stream.py
rename to synapse/storage/schema/main/delta/34/cache_stream.py
diff --git a/synapse/storage/databases/main/schema/delta/34/device_inbox.sql b/synapse/storage/schema/main/delta/34/device_inbox.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/device_inbox.sql
rename to synapse/storage/schema/main/delta/34/device_inbox.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql b/synapse/storage/schema/main/delta/34/push_display_name_rename.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql
rename to synapse/storage/schema/main/delta/34/push_display_name_rename.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/received_txn_purge.py b/synapse/storage/schema/main/delta/34/received_txn_purge.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/received_txn_purge.py
rename to synapse/storage/schema/main/delta/34/received_txn_purge.py
diff --git a/synapse/storage/databases/main/schema/delta/35/contains_url.sql b/synapse/storage/schema/main/delta/35/contains_url.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/contains_url.sql
rename to synapse/storage/schema/main/delta/35/contains_url.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/device_outbox.sql b/synapse/storage/schema/main/delta/35/device_outbox.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/device_outbox.sql
rename to synapse/storage/schema/main/delta/35/device_outbox.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/device_stream_id.sql b/synapse/storage/schema/main/delta/35/device_stream_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/device_stream_id.sql
rename to synapse/storage/schema/main/delta/35/device_stream_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql b/synapse/storage/schema/main/delta/35/event_push_actions_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql
rename to synapse/storage/schema/main/delta/35/event_push_actions_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql b/synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql
rename to synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql b/synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql
rename to synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql
diff --git a/synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql b/synapse/storage/schema/main/delta/36/readd_public_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql
rename to synapse/storage/schema/main/delta/36/readd_public_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py b/synapse/storage/schema/main/delta/37/remove_auth_idx.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py
rename to synapse/storage/schema/main/delta/37/remove_auth_idx.py
diff --git a/synapse/storage/databases/main/schema/delta/37/user_threepids.sql b/synapse/storage/schema/main/delta/37/user_threepids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/37/user_threepids.sql
rename to synapse/storage/schema/main/delta/37/user_threepids.sql
diff --git a/synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql b/synapse/storage/schema/main/delta/38/postgres_fts_gist.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql
rename to synapse/storage/schema/main/delta/38/postgres_fts_gist.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql b/synapse/storage/schema/main/delta/39/appservice_room_list.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql
rename to synapse/storage/schema/main/delta/39/appservice_room_list.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql b/synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql
rename to synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/event_push_index.sql b/synapse/storage/schema/main/delta/39/event_push_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/event_push_index.sql
rename to synapse/storage/schema/main/delta/39/event_push_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/federation_out_position.sql b/synapse/storage/schema/main/delta/39/federation_out_position.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/federation_out_position.sql
rename to synapse/storage/schema/main/delta/39/federation_out_position.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/membership_profile.sql b/synapse/storage/schema/main/delta/39/membership_profile.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/membership_profile.sql
rename to synapse/storage/schema/main/delta/39/membership_profile.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/current_state_idx.sql b/synapse/storage/schema/main/delta/40/current_state_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/current_state_idx.sql
rename to synapse/storage/schema/main/delta/40/current_state_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/device_inbox.sql b/synapse/storage/schema/main/delta/40/device_inbox.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/device_inbox.sql
rename to synapse/storage/schema/main/delta/40/device_inbox.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/main/delta/40/device_list_streams.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/device_list_streams.sql
rename to synapse/storage/schema/main/delta/40/device_list_streams.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/event_push_summary.sql b/synapse/storage/schema/main/delta/40/event_push_summary.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/event_push_summary.sql
rename to synapse/storage/schema/main/delta/40/event_push_summary.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/pushers.sql b/synapse/storage/schema/main/delta/40/pushers.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/pushers.sql
rename to synapse/storage/schema/main/delta/40/pushers.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql b/synapse/storage/schema/main/delta/41/device_list_stream_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql
rename to synapse/storage/schema/main/delta/41/device_list_stream_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql b/synapse/storage/schema/main/delta/41/device_outbound_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql
rename to synapse/storage/schema/main/delta/41/device_outbound_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql b/synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql
rename to synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/ratelimit.sql b/synapse/storage/schema/main/delta/41/ratelimit.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/ratelimit.sql
rename to synapse/storage/schema/main/delta/41/ratelimit.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/current_state_delta.sql b/synapse/storage/schema/main/delta/42/current_state_delta.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/current_state_delta.sql
rename to synapse/storage/schema/main/delta/42/current_state_delta.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql b/synapse/storage/schema/main/delta/42/device_list_last_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql
rename to synapse/storage/schema/main/delta/42/device_list_last_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql b/synapse/storage/schema/main/delta/42/event_auth_state_only.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql
rename to synapse/storage/schema/main/delta/42/event_auth_state_only.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/user_dir.py b/synapse/storage/schema/main/delta/42/user_dir.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/user_dir.py
rename to synapse/storage/schema/main/delta/42/user_dir.py
diff --git a/synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql b/synapse/storage/schema/main/delta/43/blocked_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql
rename to synapse/storage/schema/main/delta/43/blocked_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/43/quarantine_media.sql b/synapse/storage/schema/main/delta/43/quarantine_media.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/quarantine_media.sql
rename to synapse/storage/schema/main/delta/43/quarantine_media.sql
diff --git a/synapse/storage/databases/main/schema/delta/43/url_cache.sql b/synapse/storage/schema/main/delta/43/url_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/url_cache.sql
rename to synapse/storage/schema/main/delta/43/url_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/43/user_share.sql b/synapse/storage/schema/main/delta/43/user_share.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/user_share.sql
rename to synapse/storage/schema/main/delta/43/user_share.sql
diff --git a/synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/main/delta/44/expire_url_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql
rename to synapse/storage/schema/main/delta/44/expire_url_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/45/group_server.sql b/synapse/storage/schema/main/delta/45/group_server.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/45/group_server.sql
rename to synapse/storage/schema/main/delta/45/group_server.sql
diff --git a/synapse/storage/databases/main/schema/delta/45/profile_cache.sql b/synapse/storage/schema/main/delta/45/profile_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/45/profile_cache.sql
rename to synapse/storage/schema/main/delta/45/profile_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql b/synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql
rename to synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql b/synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql
rename to synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/group_server.sql b/synapse/storage/schema/main/delta/46/group_server.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/group_server.sql
rename to synapse/storage/schema/main/delta/46/group_server.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql b/synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql
rename to synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql b/synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql
rename to synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql b/synapse/storage/schema/main/delta/46/user_dir_typos.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql
rename to synapse/storage/schema/main/delta/46/user_dir_typos.sql
diff --git a/synapse/storage/databases/main/schema/delta/47/last_access_media.sql b/synapse/storage/schema/main/delta/47/last_access_media.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/47/last_access_media.sql
rename to synapse/storage/schema/main/delta/47/last_access_media.sql
diff --git a/synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql b/synapse/storage/schema/main/delta/47/postgres_fts_gin.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql
rename to synapse/storage/schema/main/delta/47/postgres_fts_gin.sql
diff --git a/synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql b/synapse/storage/schema/main/delta/47/push_actions_staging.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql
rename to synapse/storage/schema/main/delta/47/push_actions_staging.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/add_user_consent.sql b/synapse/storage/schema/main/delta/48/add_user_consent.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/add_user_consent.sql
rename to synapse/storage/schema/main/delta/48/add_user_consent.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql b/synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql
rename to synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/deactivated_users.sql b/synapse/storage/schema/main/delta/48/deactivated_users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/deactivated_users.sql
rename to synapse/storage/schema/main/delta/48/deactivated_users.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py
rename to synapse/storage/schema/main/delta/48/group_unique_indexes.py
diff --git a/synapse/storage/databases/main/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/main/delta/48/groups_joinable.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/groups_joinable.sql
rename to synapse/storage/schema/main/delta/48/groups_joinable.sql
diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql b/synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql
rename to synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql
diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql b/synapse/storage/schema/main/delta/49/add_user_daily_visits.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql
rename to synapse/storage/schema/main/delta/49/add_user_daily_visits.sql
diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql b/synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql
rename to synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql b/synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql
rename to synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/50/erasure_store.sql b/synapse/storage/schema/main/delta/50/erasure_store.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/50/erasure_store.sql
rename to synapse/storage/schema/main/delta/50/erasure_store.sql
diff --git a/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py
similarity index 99%
rename from synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py
rename to synapse/storage/schema/main/delta/50/make_event_content_nullable.py
index b1684a8441dc..acd6ad1e1fca 100644
--- a/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py
+++ b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/main/delta/51/e2e_room_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql
rename to synapse/storage/schema/main/delta/51/e2e_room_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql b/synapse/storage/schema/main/delta/51/monthly_active_users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql
rename to synapse/storage/schema/main/delta/51/monthly_active_users.sql
diff --git a/synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql
rename to synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql
rename to synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql b/synapse/storage/schema/main/delta/52/e2e_room_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql
rename to synapse/storage/schema/main/delta/52/e2e_room_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql b/synapse/storage/schema/main/delta/53/add_user_type_to_users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql
rename to synapse/storage/schema/main/delta/53/add_user_type_to_users.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql b/synapse/storage/schema/main/delta/53/drop_sent_transactions.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql
rename to synapse/storage/schema/main/delta/53/drop_sent_transactions.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/event_format_version.sql b/synapse/storage/schema/main/delta/53/event_format_version.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/event_format_version.sql
rename to synapse/storage/schema/main/delta/53/event_format_version.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql b/synapse/storage/schema/main/delta/53/user_dir_populate.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql
rename to synapse/storage/schema/main/delta/53/user_dir_populate.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_ips_index.sql b/synapse/storage/schema/main/delta/53/user_ips_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_ips_index.sql
rename to synapse/storage/schema/main/delta/53/user_ips_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_share.sql b/synapse/storage/schema/main/delta/53/user_share.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_share.sql
rename to synapse/storage/schema/main/delta/53/user_share.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql b/synapse/storage/schema/main/delta/53/user_threepid_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql
rename to synapse/storage/schema/main/delta/53/user_threepid_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql b/synapse/storage/schema/main/delta/53/users_in_public_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql
rename to synapse/storage/schema/main/delta/53/users_in_public_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql b/synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql
rename to synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql b/synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql
rename to synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql
rename to synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/schema/main/delta/54/drop_legacy_tables.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql
rename to synapse/storage/schema/main/delta/54/drop_legacy_tables.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql b/synapse/storage/schema/main/delta/54/drop_presence_list.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql
rename to synapse/storage/schema/main/delta/54/drop_presence_list.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/relations.sql b/synapse/storage/schema/main/delta/54/relations.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/relations.sql
rename to synapse/storage/schema/main/delta/54/relations.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/stats.sql b/synapse/storage/schema/main/delta/54/stats.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/stats.sql
rename to synapse/storage/schema/main/delta/54/stats.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/stats2.sql b/synapse/storage/schema/main/delta/54/stats2.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/stats2.sql
rename to synapse/storage/schema/main/delta/54/stats2.sql
diff --git a/synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql b/synapse/storage/schema/main/delta/55/access_token_expiry.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql
rename to synapse/storage/schema/main/delta/55/access_token_expiry.sql
diff --git a/synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql b/synapse/storage/schema/main/delta/55/track_threepid_validations.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql
rename to synapse/storage/schema/main/delta/55/track_threepid_validations.sql
diff --git a/synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql b/synapse/storage/schema/main/delta/55/users_alter_deactivated.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql
rename to synapse/storage/schema/main/delta/55/users_alter_deactivated.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql
rename to synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/main/delta/56/current_state_events_membership.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql
rename to synapse/storage/schema/main/delta/56/current_state_events_membership.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql
rename to synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql b/synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql
rename to synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql b/synapse/storage/schema/main/delta/56/destinations_failure_ts.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql
rename to synapse/storage/schema/main/delta/56/destinations_failure_ts.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres b/synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres
rename to synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql b/synapse/storage/schema/main/delta/56/device_stream_id_insert.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql
rename to synapse/storage/schema/main/delta/56/device_stream_id_insert.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql b/synapse/storage/schema/main/delta/56/devices_last_seen.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql
rename to synapse/storage/schema/main/delta/56/devices_last_seen.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql
rename to synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/event_expiry.sql b/synapse/storage/schema/main/delta/56/event_expiry.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/event_expiry.sql
rename to synapse/storage/schema/main/delta/56/event_expiry.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels.sql b/synapse/storage/schema/main/delta/56/event_labels.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/event_labels.sql
rename to synapse/storage/schema/main/delta/56/event_labels.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql b/synapse/storage/schema/main/delta/56/event_labels_background_update.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql
rename to synapse/storage/schema/main/delta/56/event_labels_background_update.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/schema/main/delta/56/fix_room_keys_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql
rename to synapse/storage/schema/main/delta/56/fix_room_keys_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/hidden_devices.sql b/synapse/storage/schema/main/delta/56/hidden_devices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/hidden_devices.sql
rename to synapse/storage/schema/main/delta/56/hidden_devices.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite b/synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite
rename to synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql b/synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql
rename to synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql b/synapse/storage/schema/main/delta/56/public_room_list_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql
rename to synapse/storage/schema/main/delta/56/public_room_list_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor.sql b/synapse/storage/schema/main/delta/56/redaction_censor.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor.sql
rename to synapse/storage/schema/main/delta/56/redaction_censor.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/schema/main/delta/56/redaction_censor2.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql
rename to synapse/storage/schema/main/delta/56/redaction_censor2.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres b/synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres
rename to synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql b/synapse/storage/schema/main/delta/56/redaction_censor4.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql
rename to synapse/storage/schema/main/delta/56/redaction_censor4.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
rename to synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/room_key_etag.sql b/synapse/storage/schema/main/delta/56/room_key_etag.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/room_key_etag.sql
rename to synapse/storage/schema/main/delta/56/room_key_etag.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/main/delta/56/room_membership_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql
rename to synapse/storage/schema/main/delta/56/room_membership_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/room_retention.sql b/synapse/storage/schema/main/delta/56/room_retention.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/room_retention.sql
rename to synapse/storage/schema/main/delta/56/room_retention.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/signing_keys.sql b/synapse/storage/schema/main/delta/56/signing_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/signing_keys.sql
rename to synapse/storage/schema/main/delta/56/signing_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql b/synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql
rename to synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/stats_separated.sql b/synapse/storage/schema/main/delta/56/stats_separated.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/stats_separated.sql
rename to synapse/storage/schema/main/delta/56/stats_separated.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py
rename to synapse/storage/schema/main/delta/56/unique_user_filter_index.py
diff --git a/synapse/storage/databases/main/schema/delta/56/user_external_ids.sql b/synapse/storage/schema/main/delta/56/user_external_ids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/user_external_ids.sql
rename to synapse/storage/schema/main/delta/56/user_external_ids.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql
rename to synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql
rename to synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql b/synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql
rename to synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/schema/main/delta/57/local_current_membership.py
similarity index 99%
rename from synapse/storage/databases/main/schema/delta/57/local_current_membership.py
rename to synapse/storage/schema/main/delta/57/local_current_membership.py
index 44917f0a2ef3..66989222e6d2 100644
--- a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py
+++ b/synapse/storage/schema/main/delta/57/local_current_membership.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql b/synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql
rename to synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql b/synapse/storage/schema/main/delta/57/rooms_version_column.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql
rename to synapse/storage/schema/main/delta/57/rooms_version_column.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres
rename to synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite
rename to synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres b/synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres
rename to synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite b/synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite
rename to synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql b/synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql
rename to synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql b/synapse/storage/schema/main/delta/58/03persist_ui_auth.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql
rename to synapse/storage/schema/main/delta/58/03persist_ui_auth.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres b/synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres
rename to synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py
rename to synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres b/synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
rename to synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite b/synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
rename to synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/58/07persist_ui_auth_ips.sql b/synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/07persist_ui_auth_ips.sql
rename to synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres b/synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
rename to synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite b/synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
rename to synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/58/09shadow_ban.sql b/synapse/storage/schema/main/delta/58/09shadow_ban.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/09shadow_ban.sql
rename to synapse/storage/schema/main/delta/58/09shadow_ban.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql b/synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql
rename to synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql b/synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql
rename to synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql b/synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql
rename to synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/11dehydration.sql b/synapse/storage/schema/main/delta/58/11dehydration.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/11dehydration.sql
rename to synapse/storage/schema/main/delta/58/11dehydration.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/11fallback.sql b/synapse/storage/schema/main/delta/58/11fallback.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/11fallback.sql
rename to synapse/storage/schema/main/delta/58/11fallback.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/11user_id_seq.py b/synapse/storage/schema/main/delta/58/11user_id_seq.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/11user_id_seq.py
rename to synapse/storage/schema/main/delta/58/11user_id_seq.py
diff --git a/synapse/storage/databases/main/schema/delta/58/12room_stats.sql b/synapse/storage/schema/main/delta/58/12room_stats.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/12room_stats.sql
rename to synapse/storage/schema/main/delta/58/12room_stats.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/13remove_presence_allow_inbound.sql b/synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/13remove_presence_allow_inbound.sql
rename to synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql b/synapse/storage/schema/main/delta/58/14events_instance_name.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql
rename to synapse/storage/schema/main/delta/58/14events_instance_name.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres b/synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres
rename to synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql b/synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql
rename to synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/15unread_count.sql b/synapse/storage/schema/main/delta/58/15unread_count.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/15unread_count.sql
rename to synapse/storage/schema/main/delta/58/15unread_count.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql b/synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql
rename to synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql b/synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql
rename to synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql b/synapse/storage/schema/main/delta/58/18stream_positions.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/18stream_positions.sql
rename to synapse/storage/schema/main/delta/58/18stream_positions.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres b/synapse/storage/schema/main/delta/58/19instance_map.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres
rename to synapse/storage/schema/main/delta/58/19instance_map.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/19txn_id.sql b/synapse/storage/schema/main/delta/58/19txn_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/19txn_id.sql
rename to synapse/storage/schema/main/delta/58/19txn_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql b/synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql
rename to synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql b/synapse/storage/schema/main/delta/58/20user_daily_visits.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql
rename to synapse/storage/schema/main/delta/58/20user_daily_visits.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql b/synapse/storage/schema/main/delta/58/21as_device_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql
rename to synapse/storage/schema/main/delta/58/21as_device_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql b/synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql
rename to synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/22puppet_token.sql b/synapse/storage/schema/main/delta/58/22puppet_token.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/22puppet_token.sql
rename to synapse/storage/schema/main/delta/58/22puppet_token.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql b/synapse/storage/schema/main/delta/58/22users_have_local_media.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql
rename to synapse/storage/schema/main/delta/58/22users_have_local_media.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/23e2e_cross_signing_keys_idx.sql b/synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/23e2e_cross_signing_keys_idx.sql
rename to synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/24drop_event_json_index.sql b/synapse/storage/schema/main/delta/58/24drop_event_json_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/24drop_event_json_index.sql
rename to synapse/storage/schema/main/delta/58/24drop_event_json_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/25user_external_ids_user_id_idx.sql b/synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/25user_external_ids_user_id_idx.sql
rename to synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/26access_token_last_validated.sql b/synapse/storage/schema/main/delta/58/26access_token_last_validated.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/26access_token_last_validated.sql
rename to synapse/storage/schema/main/delta/58/26access_token_last_validated.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/27local_invites.sql b/synapse/storage/schema/main/delta/58/27local_invites.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/27local_invites.sql
rename to synapse/storage/schema/main/delta/58/27local_invites.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.postgres b/synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.postgres
rename to synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.sqlite b/synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.sqlite
rename to synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/59/01ignored_user.py b/synapse/storage/schema/main/delta/59/01ignored_user.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/01ignored_user.py
rename to synapse/storage/schema/main/delta/59/01ignored_user.py
diff --git a/synapse/storage/databases/main/schema/delta/59/02shard_send_to_device.sql b/synapse/storage/schema/main/delta/59/02shard_send_to_device.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/02shard_send_to_device.sql
rename to synapse/storage/schema/main/delta/59/02shard_send_to_device.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/03shard_send_to_device_sequence.sql.postgres b/synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/03shard_send_to_device_sequence.sql.postgres
rename to synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql b/synapse/storage/schema/main/delta/59/04_event_auth_chains.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql
rename to synapse/storage/schema/main/delta/59/04_event_auth_chains.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql.postgres b/synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql.postgres
rename to synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/59/04drop_account_data.sql b/synapse/storage/schema/main/delta/59/04drop_account_data.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/04drop_account_data.sql
rename to synapse/storage/schema/main/delta/59/04drop_account_data.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/05cache_invalidation.sql b/synapse/storage/schema/main/delta/59/05cache_invalidation.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/05cache_invalidation.sql
rename to synapse/storage/schema/main/delta/59/05cache_invalidation.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/06chain_cover_index.sql b/synapse/storage/schema/main/delta/59/06chain_cover_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/06chain_cover_index.sql
rename to synapse/storage/schema/main/delta/59/06chain_cover_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql b/synapse/storage/schema/main/delta/59/06shard_account_data.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql
rename to synapse/storage/schema/main/delta/59/06shard_account_data.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql.postgres b/synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql.postgres
rename to synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/59/07shard_account_data_fix.sql b/synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/07shard_account_data_fix.sql
rename to synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql b/synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql
rename to synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql b/synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql
rename to synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/09rejected_events_metadata.sql b/synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/09rejected_events_metadata.sql
rename to synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/10delete_purged_chain_cover.sql b/synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/10delete_purged_chain_cover.sql
rename to synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql
diff --git a/synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres b/synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres
new file mode 100644
index 000000000000..54c1bca3b1ee
--- /dev/null
+++ b/synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres
@@ -0,0 +1,22 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- drop old constraints on remote_media_cache_thumbnails
+--
+-- This was originally part of 57.07, but it was done wrong, per
+-- https://github.com/matrix-org/synapse/issues/8649, so we do it again.
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+  (5911, 'media_repository_drop_index_wo_method_2', '{}', 'remote_media_repository_thumbnails_method_idx');
+
diff --git a/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql b/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql
new file mode 100644
index 000000000000..4836dac16ebf
--- /dev/null
+++ b/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql
@@ -0,0 +1,18 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Track when users renew their account using the value of the 'renewal_token' column.
+-- This field should be set to NULL after a fresh token is generated.
+ALTER TABLE account_validity ADD token_used_ts_ms BIGINT;
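[Aside, not part of the patch: the token lifecycle implied by the comments in 12account_validity_token_used_ts_ms.sql above might look like the following SQL sketch. The account_validity table and its renewal_token column already exist in the schema (see delta 54/account_validity_with_renewal.sql); the literal values, the WHERE clauses, and the user_id key column are illustrative assumptions, and Synapse performs these updates in its storage layer rather than via raw SQL like this.]

-- Illustrative sketch only. When a renewal token is used, record when,
-- in milliseconds since the epoch (the value here is an example):
UPDATE account_validity
   SET token_used_ts_ms = 1619000000000
 WHERE renewal_token = 'abc123';              -- hypothetical token value

-- When a fresh token is generated, reset the field to NULL, as the
-- comment in the delta requires:
UPDATE account_validity
   SET renewal_token = 'def456',              -- hypothetical new token
       token_used_ts_ms = NULL
 WHERE user_id = '@alice:example.com';        -- hypothetical user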
b/synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres @@ -0,0 +1,22 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- drop old constraints on remote_media_cache_thumbnails +-- +-- This was originally part of 57.07, but it was done wrong, per +-- https://github.com/matrix-org/synapse/issues/8649, so we do it again. +INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (5911, 'media_repository_drop_index_wo_method_2', '{}', 'remote_media_repository_thumbnails_method_idx'); + diff --git a/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql b/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql new file mode 100644 index 000000000000..4836dac16ebf --- /dev/null +++ b/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql @@ -0,0 +1,18 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Track when users renew their account using the value of the 'renewal_token' column. +-- This field should be set to NULL after a fresh token is generated. +ALTER TABLE account_validity ADD token_used_ts_ms BIGINT; diff --git a/synapse/storage/schema/main/delta/59/12presence_stream_instance.sql b/synapse/storage/schema/main/delta/59/12presence_stream_instance.sql new file mode 100644 index 000000000000..b6ba0bda1a81 --- /dev/null +++ b/synapse/storage/schema/main/delta/59/12presence_stream_instance.sql @@ -0,0 +1,18 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a column to specify which instance wrote the row. Historic rows have +-- `NULL`, which indicates that the master instance wrote them. 
+ALTER TABLE presence_stream ADD COLUMN instance_name TEXT; diff --git a/synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres b/synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres new file mode 100644 index 000000000000..02b182adf984 --- /dev/null +++ b/synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres @@ -0,0 +1,20 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE SEQUENCE IF NOT EXISTS presence_stream_sequence; + +SELECT setval('presence_stream_sequence', ( + SELECT COALESCE(MAX(stream_id), 1) FROM presence_stream +)); diff --git a/synapse/storage/databases/main/schema/full_schemas/16/application_services.sql b/synapse/storage/schema/main/full_schemas/16/application_services.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/application_services.sql rename to synapse/storage/schema/main/full_schemas/16/application_services.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/main/full_schemas/16/event_edges.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql rename to synapse/storage/schema/main/full_schemas/16/event_edges.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql b/synapse/storage/schema/main/full_schemas/16/event_signatures.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql rename to synapse/storage/schema/main/full_schemas/16/event_signatures.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/im.sql b/synapse/storage/schema/main/full_schemas/16/im.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/im.sql rename to synapse/storage/schema/main/full_schemas/16/im.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/keys.sql b/synapse/storage/schema/main/full_schemas/16/keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/keys.sql rename to synapse/storage/schema/main/full_schemas/16/keys.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql b/synapse/storage/schema/main/full_schemas/16/media_repository.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql rename to synapse/storage/schema/main/full_schemas/16/media_repository.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/presence.sql b/synapse/storage/schema/main/full_schemas/16/presence.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/presence.sql rename to synapse/storage/schema/main/full_schemas/16/presence.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/profiles.sql 
b/synapse/storage/schema/main/full_schemas/16/profiles.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/profiles.sql rename to synapse/storage/schema/main/full_schemas/16/profiles.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/push.sql b/synapse/storage/schema/main/full_schemas/16/push.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/push.sql rename to synapse/storage/schema/main/full_schemas/16/push.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/redactions.sql b/synapse/storage/schema/main/full_schemas/16/redactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/redactions.sql rename to synapse/storage/schema/main/full_schemas/16/redactions.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql b/synapse/storage/schema/main/full_schemas/16/room_aliases.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql rename to synapse/storage/schema/main/full_schemas/16/room_aliases.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/state.sql b/synapse/storage/schema/main/full_schemas/16/state.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/state.sql rename to synapse/storage/schema/main/full_schemas/16/state.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/transactions.sql b/synapse/storage/schema/main/full_schemas/16/transactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/transactions.sql rename to synapse/storage/schema/main/full_schemas/16/transactions.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/users.sql b/synapse/storage/schema/main/full_schemas/16/users.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/users.sql rename to synapse/storage/schema/main/full_schemas/16/users.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/main/full_schemas/54/full.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres rename to synapse/storage/schema/main/full_schemas/54/full.sql.postgres diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/main/full_schemas/54/full.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite rename to synapse/storage/schema/main/full_schemas/54/full.sql.sqlite diff --git a/synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/main/full_schemas/54/stream_positions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql rename to synapse/storage/schema/main/full_schemas/54/stream_positions.sql diff --git a/synapse/storage/databases/state/schema/delta/23/drop_state_index.sql b/synapse/storage/schema/state/delta/23/drop_state_index.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/23/drop_state_index.sql rename to synapse/storage/schema/state/delta/23/drop_state_index.sql diff --git a/synapse/storage/databases/state/schema/delta/30/state_stream.sql b/synapse/storage/schema/state/delta/30/state_stream.sql similarity index 100% rename from 
synapse/storage/databases/state/schema/delta/30/state_stream.sql rename to synapse/storage/schema/state/delta/30/state_stream.sql diff --git a/synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql b/synapse/storage/schema/state/delta/32/remove_state_indices.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql rename to synapse/storage/schema/state/delta/32/remove_state_indices.sql diff --git a/synapse/storage/databases/state/schema/delta/35/add_state_index.sql b/synapse/storage/schema/state/delta/35/add_state_index.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/add_state_index.sql rename to synapse/storage/schema/state/delta/35/add_state_index.sql diff --git a/synapse/storage/databases/state/schema/delta/35/state.sql b/synapse/storage/schema/state/delta/35/state.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/state.sql rename to synapse/storage/schema/state/delta/35/state.sql diff --git a/synapse/storage/databases/state/schema/delta/35/state_dedupe.sql b/synapse/storage/schema/state/delta/35/state_dedupe.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/state_dedupe.sql rename to synapse/storage/schema/state/delta/35/state_dedupe.sql diff --git a/synapse/storage/databases/state/schema/delta/47/state_group_seq.py b/synapse/storage/schema/state/delta/47/state_group_seq.py similarity index 100% rename from synapse/storage/databases/state/schema/delta/47/state_group_seq.py rename to synapse/storage/schema/state/delta/47/state_group_seq.py diff --git a/synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql b/synapse/storage/schema/state/delta/56/state_group_room_idx.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql rename to synapse/storage/schema/state/delta/56/state_group_room_idx.sql diff --git a/synapse/storage/databases/state/schema/full_schemas/54/full.sql b/synapse/storage/schema/state/full_schemas/54/full.sql similarity index 100% rename from synapse/storage/databases/state/schema/full_schemas/54/full.sql rename to synapse/storage/schema/state/full_schemas/54/full.sql diff --git a/synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres b/synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres similarity index 100% rename from synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres rename to synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 2e277a21c458..cfafba22c5e0 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -449,7 +448,7 @@ def _get_state_groups_from_groups( return self.stores.state._get_state_groups_from_groups(groups, state_filter) async def get_state_for_events( - self, event_ids: Iterable[str], state_filter: StateFilter = StateFilter.all() + self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: """Given a list of event_ids and type tuples, return a list of state dicts for each event. 
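The `state_filter` signature changes above and in the following hunks all swap a default of `StateFilter.all()` for `Optional[StateFilter] = None`, resolving it in the body with `state_filter or StateFilter.all()`. Python evaluates default-argument expressions once, at function definition time, so the old style handed every call the same shared `StateFilter` instance; defaulting to `None` avoids relying on that object never being mutated. A minimal, Synapse-independent sketch of the pitfall and the fix:

from typing import List, Optional

def broken(acc: List[int] = []) -> List[int]:
    # the default list is built once and shared by every call
    acc.append(1)
    return acc

def fixed(acc: Optional[List[int]] = None) -> List[int]:
    acc = acc if acc is not None else []  # fresh object on each call
    acc.append(1)
    return acc

a, b = broken(), broken()
assert a is b and a == [1, 1]  # state leaked across calls

c, d = fixed(), fixed()
assert c is not d and c == [1] and d == [1]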
@@ -465,7 +464,7 @@ async def get_state_for_events( groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter + groups, state_filter or StateFilter.all() ) state_event_map = await self.stores.main.get_events( @@ -485,7 +484,7 @@ async def get_state_for_events( return {event: event_to_state[event] for event in event_ids} async def get_state_ids_for_events( - self, event_ids: Iterable[str], state_filter: StateFilter = StateFilter.all() + self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[str]]: """ Get the state dicts corresponding to a list of events, containing the event_ids @@ -502,7 +501,7 @@ async def get_state_ids_for_events( groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter + groups, state_filter or StateFilter.all() ) event_to_state = { @@ -513,7 +512,7 @@ async def get_state_ids_for_events( return {event: event_to_state[event] for event in event_ids} async def get_state_for_event( - self, event_id: str, state_filter: StateFilter = StateFilter.all() + self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: """ Get the state dict corresponding to a particular event @@ -525,11 +524,13 @@ async def get_state_for_event( Returns: A dict from (type, state_key) -> state_event """ - state_map = await self.get_state_for_events([event_id], state_filter) + state_map = await self.get_state_for_events( + [event_id], state_filter or StateFilter.all() + ) return state_map[event_id] async def get_state_ids_for_event( - self, event_id: str, state_filter: StateFilter = StateFilter.all() + self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: """ Get the state dict corresponding to a particular event @@ -541,11 +542,13 @@ async def get_state_ids_for_event( Returns: A dict from (type, state_key) -> state_event """ - state_map = await self.get_state_ids_for_events([event_id], state_filter) + state_map = await self.get_state_ids_for_events( + [event_id], state_filter or StateFilter.all() + ) return state_map[event_id] def _get_state_for_groups( - self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() + self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Awaitable[Dict[int, MutableStateMap[str]]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -558,7 +561,9 @@ def _get_state_for_groups( Returns: Dict of state group to state map. """ - return self.stores.state._get_state_for_groups(groups, state_filter) + return self.stores.state._get_state_for_groups( + groups, state_filter or StateFilter.all() + ) async def store_state_group( self, diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 17291c9d5e22..57f4883bf47a 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/util/__init__.py b/synapse/storage/util/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/storage/util/__init__.py +++ b/synapse/storage/util/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index d4643c4fdf30..b1bd3a52d98f 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +16,7 @@ import threading from collections import OrderedDict from contextlib import contextmanager -from typing import Dict, List, Optional, Set, Tuple, Union +from typing import Dict, Iterable, List, Optional, Set, Tuple, Union import attr @@ -91,7 +90,14 @@ class StreamIdGenerator: # ... persist event ... """ - def __init__(self, db_conn, table, column, extra_tables=[], step=1): + def __init__( + self, + db_conn, + table, + column, + extra_tables: Iterable[Tuple[str, str]] = (), + step=1, + ): assert step != 0 self._lock = threading.Lock() self._step = step diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 36a67e701994..30b6b8e0caea 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/streams/config.py b/synapse/streams/config.py index fdda21d16584..13d300588bbc 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 92fd5d489f0a..20fceaa93528 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/types.py b/synapse/types.py index b08ce9014028..e52cd7ffd4b9 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. 
# @@ -16,13 +15,11 @@ import abc import re import string -import sys from collections import namedtuple from typing import ( TYPE_CHECKING, Any, Dict, - Iterable, Mapping, MutableMapping, Optional, @@ -51,18 +48,6 @@ from synapse.appservice.api import ApplicationService from synapse.storage.databases.main import DataStore -# define a version of typing.Collection that works on python 3.5 -if sys.version_info[:3] >= (3, 6, 0): - from typing import Collection -else: - from typing import Container, Sized - - T_co = TypeVar("T_co", covariant=True) - - class Collection(Iterable[T_co], Container[T_co], Sized): # type: ignore - __slots__ = () - - # Define a state map type from type/state_key to T (usually an event ID or # event) T = TypeVar("T") @@ -214,9 +199,8 @@ def get_localpart_from_id(string): DS = TypeVar("DS", bound="DomainSpecificString") -class DomainSpecificString( - namedtuple("DomainSpecificString", ("localpart", "domain")), metaclass=abc.ABCMeta -): +@attr.s(slots=True, frozen=True, repr=False) +class DomainSpecificString(metaclass=abc.ABCMeta): """Common base class among ID/name strings that have a local part and a domain name, prefixed with a sigil. @@ -228,11 +212,8 @@ class DomainSpecificString( SIGIL = abc.abstractproperty() # type: str # type: ignore - # Deny iteration because it will bite you if you try to create a singleton - # set by: - # users = set(user) - def __iter__(self): - raise ValueError("Attempted to iterate a %s" % (type(self).__name__,)) + localpart = attr.ib(type=str) + domain = attr.ib(type=str) # Because this class is a namedtuple of strings and booleans, it is deeply # immutable. @@ -287,30 +268,35 @@ def is_valid(cls: Type[DS], s: str) -> bool: __repr__ = to_string +@attr.s(slots=True, frozen=True, repr=False) class UserID(DomainSpecificString): """Structure representing a user ID.""" SIGIL = "@" +@attr.s(slots=True, frozen=True, repr=False) class RoomAlias(DomainSpecificString): """Structure representing a room name.""" SIGIL = "#" +@attr.s(slots=True, frozen=True, repr=False) class RoomID(DomainSpecificString): """Structure representing a room id. """ SIGIL = "!" +@attr.s(slots=True, frozen=True, repr=False) class EventID(DomainSpecificString): """Structure representing an event id. """ SIGIL = "$" +@attr.s(slots=True, frozen=True, repr=False) class GroupID(DomainSpecificString): """Structure representing a group ID.""" diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 517686f0a67d..b69f562ca586 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +15,7 @@ import json import logging import re +from typing import Pattern import attr from frozendict import frozendict @@ -27,6 +27,9 @@ logger = logging.getLogger(__name__) +_WILDCARD_RUN = re.compile(r"([\?\*]+)") + + def _reject_invalid_json(val): """Do not allow Infinity, -Infinity, or NaN values in JSON.""" raise ValueError("Invalid JSON value: '%s'" % val) @@ -159,25 +162,54 @@ def log_failure(failure, msg, consumeErrors=True): return failure -def glob_to_regex(glob): +def glob_to_regex(glob: str, word_boundary: bool = False) -> Pattern: """Converts a glob to a compiled regex object. - The regex is anchored at the beginning and end of the string. - Args: - glob (str) + glob: pattern to match + word_boundary: If True, the pattern will be allowed to match at word boundaries + anywhere in the string. 
Otherwise, the pattern is anchored at the start and + end of the string. Returns: - re.RegexObject + compiled regex pattern """ - res = "" - for c in glob: - if c == "*": - res = res + ".*" - elif c == "?": - res = res + "." + + # Patterns with wildcards must be simplified to avoid performance cliffs + # - The glob `?**?**?` is equivalent to the glob `???*` + # - The glob `???*` is equivalent to the regex `.{3,}` + chunks = [] + for chunk in _WILDCARD_RUN.split(glob): + # No wildcards? re.escape() + if not _WILDCARD_RUN.match(chunk): + chunks.append(re.escape(chunk)) + continue + + # Wildcards? Simplify. + qmarks = chunk.count("?") + if "*" in chunk: + chunks.append(".{%d,}" % qmarks) else: - res = res + re.escape(c) + chunks.append(".{%d}" % qmarks) + + res = "".join(chunks) - # \A anchors at start of string, \Z at end of string - return re.compile(r"\A" + res + r"\Z", re.IGNORECASE) + if word_boundary: + res = re_word_boundary(res) + else: + # \A anchors at start of string, \Z at end of string + res = r"\A" + res + r"\Z" + + return re.compile(res, re.IGNORECASE) + + +def re_word_boundary(r: str) -> str: + """ + Adds word boundary characters to the start and end of an + expression to require that the match occur as a whole word, + but does so respecting the fact that strings starting or ending + with non-word characters will change word boundaries. + """ + # we can't use \b as it chokes on unicode. however \W seems to be okay + # as shorthand for [^0-9A-Za-z_]. + return r"(^|\W)%s(\W|$)" % (r,) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index c3b2d981eadb..5c55bb0125d9 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 48f64eeb387b..ca36f07c205b 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019, 2020 The Matrix.org Foundation C.I.C. # @@ -25,6 +24,11 @@ logger = logging.getLogger(__name__) +
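# A quick sanity check of the wildcard simplification in glob_to_regex above
# (a sketch; assumes the function is importable from synapse.util):
from synapse.util import glob_to_regex

# A run of ?s together with at least one * collapses to .{N,}, where N is the
# number of ?s, so these two globs compile to the same pattern:
assert glob_to_regex("?**?**?").pattern == r"\A.{3,}\Z"
assert glob_to_regex("???*").pattern == r"\A.{3,}\Z"

# word_boundary=True replaces the \A/\Z anchors with \W-delimited boundaries:
assert glob_to_regex("foo*", word_boundary=True).pattern == r"(^|\W)foo.{0,}(\W|$)"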
+TRACK_MEMORY_USAGE = False + + caches_by_name = {} # type: Dict[str, Sized] collectors_by_name = {} # type: Dict[str, CacheMetric] @@ -33,6 +37,11 @@ cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"]) cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) +cache_memory_usage = Gauge( + "synapse_util_caches_cache_size_bytes", + "Estimated memory usage of the caches", + ["name"], +) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) @@ -53,6 +62,7 @@ class CacheMetric: hits = attr.ib(default=0) misses = attr.ib(default=0) evicted_size = attr.ib(default=0) + memory_usage = attr.ib(default=None) def inc_hits(self): self.hits += 1 @@ -63,6 +73,19 @@ def inc_misses(self): def inc_evictions(self, size=1): self.evicted_size += size + def inc_memory_usage(self, memory: int): + if self.memory_usage is None: + self.memory_usage = 0 + + self.memory_usage += memory + + def dec_memory_usage(self, memory: int): + self.memory_usage -= memory + + def clear_memory_usage(self): + if self.memory_usage is not None: + self.memory_usage = 0 + def describe(self): return [] @@ -82,6 +105,13 @@ def collect(self): cache_total.labels(self._cache_name).set(self.hits + self.misses) if getattr(self._cache, "max_size", None): cache_max_size.labels(self._cache_name).set(self._cache.max_size) + + if TRACK_MEMORY_USAGE: + # self.memory_usage can be None if nothing has been inserted + # into the cache yet. + cache_memory_usage.labels(self._cache_name).set( + self.memory_usage or 0 + ) if self._collect_callback: self._collect_callback() except Exception as e: diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py index 3ee0f2317a5e..a301c9e89bf9 100644 --- a/synapse/util/caches/cached_call.py +++ b/synapse/util/caches/cached_call.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 1adc92eb905f..484097a48a45 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. @@ -283,7 +282,9 @@ def eb(_fail): # we return a new Deferred which will be called before any subsequent observers. 
return observable.observe() - def prefill(self, key: KT, value: VT, callback: Callable[[], None] = None): + def prefill( + self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None + ): callbacks = [callback] if callback else [] self.cache.set(key, value, callbacks=callbacks) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 4e843799147d..ac4a078b260b 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index b3b413b02cd2..56d94d96ce0b 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index e15f7ee698e5..ac47a31cd7ef 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,40 +14,50 @@ import logging from collections import OrderedDict +from typing import Any, Generic, Optional, TypeVar, Union, overload + +import attr +from typing_extensions import Literal from synapse.config import cache as cache_config from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util import Clock from synapse.util.caches import register_cache logger = logging.getLogger(__name__) -SENTINEL = object() +SENTINEL = object() # type: Any + +T = TypeVar("T") +KT = TypeVar("KT") +VT = TypeVar("VT") -class ExpiringCache: + +class ExpiringCache(Generic[KT, VT]): def __init__( self, - cache_name, - clock, - max_len=0, - expiry_ms=0, - reset_expiry_on_get=False, - iterable=False, + cache_name: str, + clock: Clock, + max_len: int = 0, + expiry_ms: int = 0, + reset_expiry_on_get: bool = False, + iterable: bool = False, ): """ Args: - cache_name (str): Name of this cache, used for logging. - clock (Clock) - max_len (int): Max size of dict. If the dict grows larger than this + cache_name: Name of this cache, used for logging. + clock + max_len: Max size of dict. If the dict grows larger than this then the oldest items get automatically evicted. Default is 0, which indicates there is no max limit. - expiry_ms (int): How long before an item is evicted from the cache + expiry_ms: How long before an item is evicted from the cache in milliseconds. Default is 0, indicating items never get evicted based on time. - reset_expiry_on_get (bool): If true, will reset the expiry time for + reset_expiry_on_get: If true, will reset the expiry time for an item on access. Defaults to False. - iterable (bool): If true, the size is calculated by summing the + iterable: If true, the size is calculated by summing the sizes of all entries, rather than the number of entries. 
""" self._cache_name = cache_name @@ -62,7 +71,7 @@ def __init__( self._expiry_ms = expiry_ms self._reset_expiry_on_get = reset_expiry_on_get - self._cache = OrderedDict() + self._cache = OrderedDict() # type: OrderedDict[KT, _CacheEntry] self.iterable = iterable @@ -79,12 +88,12 @@ def f(): self._clock.looping_call(f, self._expiry_ms / 2) - def __setitem__(self, key, value): + def __setitem__(self, key: KT, value: VT) -> None: now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) self.evict() - def evict(self): + def evict(self) -> None: # Evict if there are now too many items while self._max_size and len(self) > self._max_size: _key, value = self._cache.popitem(last=False) @@ -93,7 +102,7 @@ def evict(self): else: self.metrics.inc_evictions() - def __getitem__(self, key): + def __getitem__(self, key: KT) -> VT: try: entry = self._cache[key] self.metrics.inc_hits() @@ -106,7 +115,7 @@ def __getitem__(self, key): return entry.value - def pop(self, key, default=SENTINEL): + def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: """Removes and returns the value with the given key from the cache. If the key isn't in the cache then `default` will be returned if @@ -115,29 +124,40 @@ def pop(self, key, default=SENTINEL): Identical functionality to `dict.pop(..)`. """ - value = self._cache.pop(key, default) + value = self._cache.pop(key, SENTINEL) + # The key was not found. if value is SENTINEL: - raise KeyError(key) + if default is SENTINEL: + raise KeyError(key) + return default - return value + return value.value - def __contains__(self, key): + def __contains__(self, key: KT) -> bool: return key in self._cache - def get(self, key, default=None): + @overload + def get(self, key: KT, default: Literal[None] = None) -> Optional[VT]: + ... + + @overload + def get(self, key: KT, default: T) -> Union[VT, T]: + ... + + def get(self, key: KT, default: Optional[T] = None) -> Union[VT, Optional[T]]: try: return self[key] except KeyError: return default - def setdefault(self, key, value): + def setdefault(self, key: KT, value: VT) -> VT: try: return self[key] except KeyError: self[key] = value return value - def _prune_cache(self): + def _prune_cache(self) -> None: if not self._expiry_ms: # zero expiry time means don't expire. This should never get called # since we have this check in start too. 
@@ -166,7 +186,7 @@ def _prune_cache(self): len(self), ) - def __len__(self): + def __len__(self) -> int: if self.iterable: return sum(len(entry.value) for entry in self._cache.values()) else: @@ -190,9 +210,7 @@ def set_cache_factor(self, factor: float) -> bool: return False +@attr.s(slots=True) class _CacheEntry: - __slots__ = ["time", "value"] - - def __init__(self, time, value): - self.time = time - self.value = value + time = attr.ib(type=int) + value = attr.ib() diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 60bb6ff642f2..1be675e014af 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +17,10 @@ from typing import ( Any, Callable, + Collection, Generic, Iterable, + List, Optional, Type, TypeVar, @@ -31,9 +32,36 @@ from typing_extensions import Literal from synapse.config import cache as cache_config +from synapse.util import caches from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache +try: + from pympler.asizeof import Asizer + + def _get_size_of(val: Any, *, recurse=True) -> int: + """Get an estimate of the size in bytes of the object. + + Args: + val: The object to size. + recurse: If true will include referenced values in the size, + otherwise only sizes the given object. + """ + # Ignore singleton values when calculating memory usage. + if val in ((), None, ""): + return 0 + + sizer = Asizer() + sizer.exclude_refs((), None, "") + return sizer.asizeof(val, limit=100 if recurse else 0) + + +except ImportError: + + def _get_size_of(val: Any, *, recurse=True) -> int: + return 0 + + # Function type: the type used for invalidation callbacks FT = TypeVar("FT", bound=Callable[..., Any]) @@ -55,14 +83,69 @@ def enumerate_leaves(node, depth): class _Node: - __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] + __slots__ = ["prev_node", "next_node", "key", "value", "callbacks", "memory"] - def __init__(self, prev_node, next_node, key, value, callbacks=set()): + def __init__( + self, + prev_node, + next_node, + key, + value, + callbacks: Collection[Callable[[], None]] = (), + ): self.prev_node = prev_node self.next_node = next_node self.key = key self.value = value - self.callbacks = callbacks + + # Set of callbacks to run when the node gets deleted. We store as a list + # rather than a set to keep memory usage down (and since we expect few + # entries per node, the performance of checking for duplication in a + # list vs using a set is negligible). + # + # Note that we store this as an optional list to keep the memory + # footprint down. Storing `None` is free as it's a singleton, while empty + # lists are 56 bytes (and empty sets are 216 bytes, if we did the naive + # thing and used sets).
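# (For scale, assuming a recent 64-bit CPython: sys.getsizeof([]) reports 56
# and sys.getsizeof(set()) reports 216, which is where the figures above come
# from; None is a shared singleton, so storing it costs nothing beyond the
# attribute slot itself.)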
+ self.callbacks = None # type: Optional[List[Callable[[], None]]] + + self.add_callbacks(callbacks) + + self.memory = 0 + if caches.TRACK_MEMORY_USAGE: + self.memory = ( + _get_size_of(key) + + _get_size_of(value) + + _get_size_of(self.callbacks, recurse=False) + + _get_size_of(self, recurse=False) + ) + self.memory += _get_size_of(self.memory, recurse=False) + + def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None: + """Add to stored list of callbacks, removing duplicates.""" + + if not callbacks: + return + + if not self.callbacks: + self.callbacks = [] + + for callback in callbacks: + if callback not in self.callbacks: + self.callbacks.append(callback) + + def run_and_clear_callbacks(self) -> None: + """Run all callbacks and clear the stored list of callbacks. Used when + the node is being deleted. + """ + + if not self.callbacks: + return + + for callback in self.callbacks: + callback() + + self.callbacks = None class LruCache(Generic[KT, VT]): @@ -176,7 +259,7 @@ def cache_len(): self.len = synchronized(cache_len) - def add_node(key, value, callbacks=set()): + def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): prev_node = list_root next_node = prev_node.next_node node = _Node(prev_node, next_node, key, value, callbacks) @@ -187,6 +270,9 @@ def add_node(key, value, callbacks=set()): if size_callback: cached_cache_len[0] += size_callback(node.value) + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.inc_memory_usage(node.memory) + def move_node_to_front(node): prev_node = node.prev_node next_node = node.next_node @@ -210,16 +296,18 @@ def delete_node(node): deleted_len = size_callback(node.value) cached_cache_len[0] -= deleted_len - for cb in node.callbacks: - cb() - node.callbacks.clear() + node.run_and_clear_callbacks() + + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.dec_memory_usage(node.memory) + return deleted_len @overload def cache_get( key: KT, default: Literal[None] = None, - callbacks: Iterable[Callable[[], None]] = ..., + callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., ) -> Optional[VT]: ... @@ -228,7 +316,7 @@ def cache_get( def cache_get( key: KT, default: T, - callbacks: Iterable[Callable[[], None]] = ..., + callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., ) -> Union[T, VT]: ... @@ -237,13 +325,13 @@ def cache_get( def cache_get( key: KT, default: Optional[T] = None, - callbacks: Iterable[Callable[[], None]] = [], + callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, ): node = cache.get(key, None) if node is not None: move_node_to_front(node) - node.callbacks.update(callbacks) + node.add_callbacks(callbacks) if update_metrics and metrics: metrics.inc_hits() return node.value @@ -253,16 +341,14 @@ def cache_get( return default @synchronized - def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = []): + def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): node = cache.get(key, None) if node is not None: # We sometimes store large objects, e.g. dicts, which cause # the inequality check to take a long time. So let's only do # the check if we have some callbacks to call. 
- if node.callbacks and value != node.value: - for cb in node.callbacks: - cb() - node.callbacks.clear() + if value != node.value: + node.run_and_clear_callbacks() # We don't bother to protect this by value != node.value as # generally size_callback will be cheap compared with equality @@ -272,7 +358,7 @@ def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = []): cached_cache_len[0] -= size_callback(node.value) cached_cache_len[0] += size_callback(value) - node.callbacks.update(callbacks) + node.add_callbacks(callbacks) move_node_to_front(node) node.value = value @@ -325,12 +411,14 @@ def cache_clear() -> None: list_root.next_node = list_root list_root.prev_node = list_root for node in cache.values(): - for cb in node.callbacks: - cb() + node.run_and_clear_callbacks() cache.clear() if size_callback: cached_cache_len[0] = 0 + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.clear_memory_usage() + @synchronized def cache_contains(key: KT) -> bool: return key in cache diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 46ea8e09644c..25ea1bcc915e 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -111,7 +110,7 @@ def remove(r): return result.observe() def wrap( - self, key: T, callback: "Callable[..., Any]", *args: Any, **kwargs: Any + self, key: T, callback: Callable[..., Any], *args: Any, **kwargs: Any ) -> defer.Deferred: """Wrap together a *get* and *set* call, taking care of logcontexts diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 644e9e778a29..e81e468899ea 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,11 +14,10 @@ import logging import math -from typing import Dict, FrozenSet, List, Mapping, Optional, Set, Union +from typing import Collection, Dict, FrozenSet, List, Mapping, Optional, Set, Union from sortedcontainers import SortedDict -from synapse.types import Collection from synapse.util import caches logger = logging.getLogger(__name__) diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 96a82749408e..c276107d5690 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index 23393cf49bb7..31b24dd18882 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, 2013, 2014 Ilya Otyutskiy # Copyright 2020 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 3c47285d05ca..1f803aef6d1b 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 68dc632491dc..e946189f9a72 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 5ca2e71e604e..2ac7c2913cdc 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/hash.py b/synapse/util/hash.py index 359168704e1f..ba676e176240 100644 --- a/synapse/util/hash.py +++ b/synapse/util/hash.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 98707c119deb..abfdc2983261 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -16,6 +15,7 @@ import heapq from itertools import islice from typing import ( + Collection, Dict, Generator, Iterable, @@ -27,8 +27,6 @@ TypeVar, ) -from synapse.types import Collection - T = TypeVar("T") diff --git a/synapse/util/jsonobject.py b/synapse/util/jsonobject.py index e3a8ed5b2f27..abc12f08374d 100644 --- a/synapse/util/jsonobject.py +++ b/synapse/util/jsonobject.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py index 12cdd53327cd..f6ebfd7e7d41 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # Copyright 2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 1023c856d143..6d14351bd262 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -105,7 +104,13 @@ class Measure: "start", ] - def __init__(self, clock, name): + def __init__(self, clock, name: str): + """ + Args: + clock: An object with a "time()" method, which returns the current + time in seconds. + name: The name of the metric to report.
+ """ self.clock = clock self.name = name curr_context = current_context() @@ -118,10 +123,8 @@ def __init__(self, clock, name): else: assert isinstance(curr_context, LoggingContext) parent_context = curr_context - self._logging_context = LoggingContext( - "Measure[%s]" % (self.name,), parent_context - ) - self.start = None + self._logging_context = LoggingContext(str(curr_context), parent_context) + self.start = None # type: Optional[int] def __enter__(self) -> "Measure": if self.start is not None: diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index d184e2a90cb6..8acbe276e4bc 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py index c8bcbe297ab4..bbbdebf2648e 100644 --- a/synapse/util/msisdn.py +++ b/synapse/util/msisdn.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index d9f9ae99d639..eed0291cae88 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 70d11e1ec378..a654c6968492 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 4ab379e42998..f9c370a814cc 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py index 207cd17c2a50..bf812ab5166a 100644 --- a/synapse/util/rlimit.py +++ b/synapse/util/rlimit.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 9ce7873ab573..4f25cd1d26ae 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # @@ -133,6 +132,38 @@ def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int] return host, port +def valid_id_server_location(id_server: str) -> bool: + """Check whether an identity server location, such as the one passed as the + `id_server` parameter to `/_matrix/client/r0/account/3pid/bind`, is valid. + + A valid identity server location consists of a valid hostname and optional + port number, optionally followed by any number of `/` delimited path + components, without any fragment or query string parts. + + Args: + id_server: identity server location string to validate + + Returns: + True if valid, False otherwise. 
+ """ + + components = id_server.split("/", 1) + + host = components[0] + + try: + parse_and_validate_server_name(host) + except ValueError: + return False + + if len(components) < 2: + # no path + return True + + path = components[1] + return "#" not in path and "?" not in path + + def parse_and_validate_mxc_uri(mxc: str) -> Tuple[str, Optional[int], str]: """Parse the given string as an MXC URI @@ -189,3 +220,23 @@ def strtobool(val: str) -> bool: return False else: raise ValueError("invalid truth value %r" % (val,)) + + +_BASE62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + + +def base62_encode(num: int, minwidth: int = 1) -> str: + """Encode a number using base62 + + Args: + num: number to be encoded + minwidth: width to pad to, if the number is small + """ + res = "" + while num: + num, rem = divmod(num, 62) + res = _BASE62[rem] + res + + # pad to minimum width + pad = "0" * (minwidth - len(res)) + return pad + res diff --git a/synapse/util/templates.py b/synapse/util/templates.py index 392dae4a40f5..38543dd1ea19 100644 --- a/synapse/util/templates.py +++ b/synapse/util/templates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py index 43c2e0ac230c..a1cf1960b08f 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +18,16 @@ logger = logging.getLogger(__name__) +# it's unclear what the maximum length of an email address is. RFC3696 (as corrected +# by errata) says: +# the upper limit on address lengths should normally be considered to be 254. +# +# In practice, mail servers appear to be more tolerant and allow 400 characters +# or so. Let's allow 500, which should be plenty for everyone. +# +MAX_EMAIL_ADDRESS_LENGTH = 500 + + def check_3pid_allowed(hs, medium, address): """Checks whether a given format of 3PID is allowed to be used on this HS @@ -71,3 +80,23 @@ def canonicalise_email(address: str) -> str: raise ValueError("Unable to parse email address") return parts[0].casefold() + "@" + parts[1].lower() + + +def validate_email(address: str) -> str: + """Does some basic validation on an email address. + + Returns the canonicalised email, as returned by `canonicalise_email`. + + Raises a ValueError if the email is invalid. + """ + # First we try canonicalising in case that fails + address = canonicalise_email(address) + + # Email addresses have to be at least 3 characters. 
+ if len(address) < 3: + raise ValueError("Unable to parse email address") + + if len(address) > MAX_EMAIL_ADDRESS_LENGTH: + raise ValueError("Unable to parse email address") + + return address diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index ab7d03af3a87..dfa30a62296a 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index be3b22469db1..61814aff241f 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/visibility.py b/synapse/visibility.py index ff53a49b3a70..490fb26e8114 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synctl b/synctl index 56c0e3940fc8..ccf404accb45 100755 --- a/synctl +++ b/synctl @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synmark/__init__.py b/synmark/__init__.py index 3d4ec3e1846b..2cc00b0f03d3 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/__main__.py b/synmark/__main__.py index f55968a5a420..35a59e347a46 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index c306891b27f4..9419892e957c 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +15,7 @@ import logging import warnings from io import StringIO - -from mock import Mock +from unittest.mock import Mock from pyperf import perf_counter diff --git a/synmark/suites/lrucache.py b/synmark/suites/lrucache.py index 69ab042ccc48..9b4a4241493f 100644 --- a/synmark/suites/lrucache.py +++ b/synmark/suites/lrucache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/suites/lrucache_evict.py b/synmark/suites/lrucache_evict.py index 532b1cc70220..0ee202ed3626 100644 --- a/synmark/suites/lrucache_evict.py +++ b/synmark/suites/lrucache_evict.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/__init__.py b/tests/__init__.py index ed805db1c2d0..5fced5cc4c3e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 34f72ae795c7..1b0a81575739 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock import pymacaroons @@ -22,13 +21,11 @@ from synapse.api.errors import ( AuthError, Codes, - InvalidClientCredentialsError, InvalidClientTokenError, MissingClientTokenError, ResourceLimitError, ) from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import UserID from tests import unittest from tests.test_utils import simple_async_mock @@ -254,67 +251,6 @@ def test_get_guest_user_from_macaroon(self): self.assertTrue(user_info.is_guest) self.store.get_user_by_id.assert_called_with(user_id) - def test_cannot_use_regular_token_as_guest(self): - USER_ID = "@percy:matrix.org" - self.store.add_access_token_to_user = simple_async_mock(None) - self.store.get_device = simple_async_mock(None) - - token = self.get_success( - self.hs.get_auth_handler().get_access_token_for_user_id( - USER_ID, "DEVICE", valid_until_ms=None - ) - ) - self.store.add_access_token_to_user.assert_called_with( - user_id=USER_ID, - token=token, - device_id="DEVICE", - valid_until_ms=None, - puppets_user_id=None, - ) - - async def get_user(tok): - if token != tok: - return None - return TokenLookupResult( - user_id=USER_ID, - is_guest=False, - token_id=1234, - device_id="DEVICE", - ) - - self.store.get_user_by_access_token = get_user - self.store.get_user_by_id = simple_async_mock({"is_guest": False}) - - # check the token works - request = Mock(args={}) - request.args[b"access_token"] = [token.encode("ascii")] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - requester = self.get_success( - self.auth.get_user_by_req(request, allow_guest=True) - ) - self.assertEqual(UserID.from_string(USER_ID), requester.user) - self.assertFalse(requester.is_guest) - - # add an is_guest caveat - mac = pymacaroons.Macaroon.deserialize(token) - mac.add_first_party_caveat("guest = true") - guest_tok = mac.serialize() - - # the token should *not* work now - request = Mock(args={}) - request.args[b"access_token"] = [guest_tok.encode("ascii")] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - - cm = self.get_failure( - self.auth.get_user_by_req(request, allow_guest=True), - InvalidClientCredentialsError, - ) - - self.assertEqual(401, cm.value.code) - self.assertEqual("Guest access token used for regular user", cm.value.msg) - - self.store.get_user_by_id.assert_called_with(USER_ID) - def test_blocking_mau(self): self.auth_blocking._limit_usage_by_mau = False self.auth_blocking._max_mau_value = 50 diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index ab7d2907247e..f44c91a373f9 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 
2018-2019 New Vector Ltd diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 483418192c4b..dcf0110c16e0 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -5,38 +5,25 @@ from tests import unittest -class TestRatelimiter(unittest.TestCase): +class TestRatelimiter(unittest.HomeserverTestCase): def test_allowed_via_can_do_action(self): - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) - allowed, time_allowed = limiter.can_do_action(key="test_id", _time_now_s=0) - self.assertTrue(allowed) - self.assertEquals(10.0, time_allowed) - - allowed, time_allowed = limiter.can_do_action(key="test_id", _time_now_s=5) - self.assertFalse(allowed) - self.assertEquals(10.0, time_allowed) - - allowed, time_allowed = limiter.can_do_action(key="test_id", _time_now_s=10) - self.assertTrue(allowed) - self.assertEquals(20.0, time_allowed) - - def test_allowed_user_via_can_requester_do_action(self): - user_requester = create_requester("@user:example.com") - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) - allowed, time_allowed = limiter.can_requester_do_action( - user_requester, _time_now_s=0 + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", _time_now_s=0) ) self.assertTrue(allowed) self.assertEquals(10.0, time_allowed) - allowed, time_allowed = limiter.can_requester_do_action( - user_requester, _time_now_s=5 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", _time_now_s=5) ) self.assertFalse(allowed) self.assertEquals(10.0, time_allowed) - allowed, time_allowed = limiter.can_requester_do_action( - user_requester, _time_now_s=10 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", _time_now_s=10) ) self.assertTrue(allowed) self.assertEquals(20.0, time_allowed) @@ -51,21 +38,23 @@ def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): ) as_requester = create_requester("@user:example.com", app_service=appservice) - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) - allowed, time_allowed = limiter.can_requester_do_action( - as_requester, _time_now_s=0 + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(as_requester, _time_now_s=0) ) self.assertTrue(allowed) self.assertEquals(10.0, time_allowed) - allowed, time_allowed = limiter.can_requester_do_action( - as_requester, _time_now_s=5 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(as_requester, _time_now_s=5) ) self.assertFalse(allowed) self.assertEquals(10.0, time_allowed) - allowed, time_allowed = limiter.can_requester_do_action( - as_requester, _time_now_s=10 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(as_requester, _time_now_s=10) ) self.assertTrue(allowed) self.assertEquals(20.0, time_allowed) @@ -80,73 +69,89 @@ def test_allowed_appservice_via_can_requester_do_action(self): ) as_requester = create_requester("@user:example.com", app_service=appservice) - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) - allowed, time_allowed = limiter.can_requester_do_action( - as_requester, _time_now_s=0 + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) + allowed, 
time_allowed = self.get_success_or_raise( + limiter.can_do_action(as_requester, _time_now_s=0) ) self.assertTrue(allowed) self.assertEquals(-1, time_allowed) - allowed, time_allowed = limiter.can_requester_do_action( - as_requester, _time_now_s=5 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(as_requester, _time_now_s=5) ) self.assertTrue(allowed) self.assertEquals(-1, time_allowed) - allowed, time_allowed = limiter.can_requester_do_action( - as_requester, _time_now_s=10 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(as_requester, _time_now_s=10) ) self.assertTrue(allowed) self.assertEquals(-1, time_allowed) def test_allowed_via_ratelimit(self): - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) # Shouldn't raise - limiter.ratelimit(key="test_id", _time_now_s=0) + self.get_success_or_raise(limiter.ratelimit(None, key="test_id", _time_now_s=0)) # Should raise with self.assertRaises(LimitExceededError) as context: - limiter.ratelimit(key="test_id", _time_now_s=5) + self.get_success_or_raise( + limiter.ratelimit(None, key="test_id", _time_now_s=5) + ) self.assertEqual(context.exception.retry_after_ms, 5000) # Shouldn't raise - limiter.ratelimit(key="test_id", _time_now_s=10) + self.get_success_or_raise( + limiter.ratelimit(None, key="test_id", _time_now_s=10) + ) def test_allowed_via_can_do_action_and_overriding_parameters(self): """Test that we can override options of can_do_action that would otherwise fail an action """ # Create a Ratelimiter with a very low allowed rate_hz and burst_count - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) # First attempt should be allowed - allowed, time_allowed = limiter.can_do_action( - ("test_id",), - _time_now_s=0, + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action( + None, + ("test_id",), + _time_now_s=0, + ) ) self.assertTrue(allowed) self.assertEqual(10.0, time_allowed) # Second attempt, 1s later, will fail - allowed, time_allowed = limiter.can_do_action( - ("test_id",), - _time_now_s=1, + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action( + None, + ("test_id",), + _time_now_s=1, + ) ) self.assertFalse(allowed) self.assertEqual(10.0, time_allowed) # But, if we allow 10 actions/sec for this request, we should be allowed # to continue. 
- allowed, time_allowed = limiter.can_do_action( - ("test_id",), _time_now_s=1, rate_hz=10.0 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, ("test_id",), _time_now_s=1, rate_hz=10.0) ) self.assertTrue(allowed) self.assertEqual(1.1, time_allowed) # Similarly if we allow a burst of 10 actions - allowed, time_allowed = limiter.can_do_action( - ("test_id",), _time_now_s=1, burst_count=10 + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, ("test_id",), _time_now_s=1, burst_count=10) ) self.assertTrue(allowed) self.assertEqual(1.0, time_allowed) @@ -156,29 +161,129 @@ def test_allowed_via_ratelimit_and_overriding_parameters(self): fail an action """ # Create a Ratelimiter with a very low allowed rate_hz and burst_count - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) # First attempt should be allowed - limiter.ratelimit(key=("test_id",), _time_now_s=0) + self.get_success_or_raise( + limiter.ratelimit(None, key=("test_id",), _time_now_s=0) + ) # Second attempt, 1s later, will fail with self.assertRaises(LimitExceededError) as context: - limiter.ratelimit(key=("test_id",), _time_now_s=1) + self.get_success_or_raise( + limiter.ratelimit(None, key=("test_id",), _time_now_s=1) + ) self.assertEqual(context.exception.retry_after_ms, 9000) # But, if we allow 10 actions/sec for this request, we should be allowed # to continue. - limiter.ratelimit(key=("test_id",), _time_now_s=1, rate_hz=10.0) + self.get_success_or_raise( + limiter.ratelimit(None, key=("test_id",), _time_now_s=1, rate_hz=10.0) + ) # Similarly if we allow a burst of 10 actions - limiter.ratelimit(key=("test_id",), _time_now_s=1, burst_count=10) + self.get_success_or_raise( + limiter.ratelimit(None, key=("test_id",), _time_now_s=1, burst_count=10) + ) def test_pruning(self): - limiter = Ratelimiter(clock=None, rate_hz=0.1, burst_count=1) - limiter.can_do_action(key="test_id_1", _time_now_s=0) + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1 + ) + self.get_success_or_raise( + limiter.can_do_action(None, key="test_id_1", _time_now_s=0) + ) self.assertIn("test_id_1", limiter.actions) - limiter.can_do_action(key="test_id_2", _time_now_s=10) + self.get_success_or_raise( + limiter.can_do_action(None, key="test_id_2", _time_now_s=10) + ) self.assertNotIn("test_id_1", limiter.actions) + + def test_db_user_override(self): + """Test that users that have ratelimiting disabled in the DB aren't + ratelimited. + """ + store = self.hs.get_datastore() + + user_id = "@user:test" + requester = create_requester(user_id) + + self.get_success( + store.db_pool.simple_insert( + table="ratelimit_override", + values={ + "user_id": user_id, + "messages_per_second": None, + "burst_count": None, + }, + desc="test_db_user_override", + ) + ) + + limiter = Ratelimiter(store=store, clock=None, rate_hz=0.1, burst_count=1) + + # Shouldn't raise + for _ in range(20): + self.get_success_or_raise(limiter.ratelimit(requester, _time_now_s=0)) + + def test_multiple_actions(self): + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=3 + ) + # Test that 4 actions aren't allowed with a maximum burst of 3. 
+ allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=4, _time_now_s=0) + ) + self.assertFalse(allowed) + + # Test that 3 actions are allowed with a maximum burst of 3. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=3, _time_now_s=0) + ) + self.assertTrue(allowed) + self.assertEquals(10.0, time_allowed) + + # Test that, after doing these 3 actions, we can't do any more actions without + # waiting. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=1, _time_now_s=0) + ) + self.assertFalse(allowed) + self.assertEquals(10.0, time_allowed) + + # Test that after waiting we can do only 1 action. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action( + None, + key="test_id", + update=False, + n_actions=1, + _time_now_s=10, + ) + ) + self.assertTrue(allowed) + # The time allowed is the current time because we could still repeat the action + # once. + self.assertEquals(10.0, time_allowed) + + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=10) + ) + self.assertFalse(allowed) + # The time allowed doesn't change despite allowed being False because, while we + # don't allow 2 actions, we could still do 1. + self.assertEquals(10.0, time_allowed) + + # Test that after waiting a bit more we can do 2 actions. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=20) + ) + self.assertTrue(allowed) + # The time allowed is the current time because we could still repeat the action + # once. + self.assertEquals(20.0, time_allowed) diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py deleted file mode 100644 index e0ca28882981..000000000000 --- a/tests/app/test_frontend_proxy.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.app.generic_worker import GenericWorkerServer - -from tests.server import make_request -from tests.unittest import HomeserverTestCase - - -class FrontendProxyTests(HomeserverTestCase): - def make_homeserver(self, reactor, clock): - - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=GenericWorkerServer - ) - - return hs - - def default_config(self): - c = super().default_config() - c["worker_app"] = "synapse.app.frontend_proxy" - - c["worker_listeners"] = [ - { - "type": "http", - "port": 8080, - "bind_addresses": ["0.0.0.0"], - "resources": [{"names": ["client"]}], - } - ] - - return c - - def test_listen_http_with_presence_enabled(self): - """ - When presence is on, the stub servlet will not register. 
- """ - # Presence is on - self.hs.config.use_presence = True - - # Listen with the config - self.hs._listen_http(self.hs.config.worker.worker_listeners[0]) - - # Grab the resource from the site that was told to listen - self.assertEqual(len(self.reactor.tcpServers), 1) - site = self.reactor.tcpServers[0][1] - - channel = make_request(self.reactor, site, "PUT", "presence/a/status") - - # 400 + unrecognised, because nothing is registered - self.assertEqual(channel.code, 400) - self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED") - - def test_listen_http_with_presence_disabled(self): - """ - When presence is off, the stub servlet will register. - """ - # Presence is off - self.hs.config.use_presence = False - - # Listen with the config - self.hs._listen_http(self.hs.config.worker.worker_listeners[0]) - - # Grab the resource from the site that was told to listen - self.assertEqual(len(self.reactor.tcpServers), 1) - site = self.reactor.tcpServers[0][1] - - channel = make_request(self.reactor, site, "PUT", "presence/a/status") - - # 401, because the stub servlet still checks authentication - self.assertEqual(channel.code, 401) - self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN") diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 467033e201be..264e10108242 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock, patch +from unittest.mock import Mock, patch from parameterized import parameterized @@ -110,7 +109,7 @@ def test_openid_listener(self, names, expectation): } # Listen with the config - self.hs._listener_http(self.hs.get_config(), parse_listener_def(config)) + self.hs._listener_http(self.hs.config, parse_listener_def(config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] diff --git a/tests/appservice/__init__.py b/tests/appservice/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/tests/appservice/__init__.py +++ b/tests/appservice/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 0bffeb115081..f386b5e128bf 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import re - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 97f8cad0ddd4..a2b5ed2030d0 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/config/__init__.py b/tests/config/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/tests/config/__init__.py +++ b/tests/config/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_base.py b/tests/config/test_base.py index 42ee5f56d93e..84ae3b88ae9b 100644 --- a/tests/config/test_base.py +++ b/tests/config/test_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py index 2b7f09c14b27..857d9cd0969b 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_database.py b/tests/config/test_database.py index f675bde68e9e..9eca10bbe9b6 100644 --- a/tests/config/test_database.py +++ b/tests/config/test_database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index 463855ecc8db..fdfbb0e38e9c 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_load.py b/tests/config/test_load.py index 734a9983e832..ebe2c0516570 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +19,7 @@ import yaml +from synapse.config import ConfigError from synapse.config.homeserver import HomeServerConfig from tests import unittest @@ -35,9 +35,9 @@ def tearDown(self): def test_load_fails_if_server_name_missing(self): self.generate_config_and_remove_lines_containing("server_name") - with self.assertRaises(Exception): + with self.assertRaises(ConfigError): HomeServerConfig.load_config("", ["-c", self.file]) - with self.assertRaises(Exception): + with self.assertRaises(ConfigError): HomeServerConfig.load_or_generate_config("", ["-c", self.file]) def test_generates_and_loads_macaroon_secret_key(self): diff --git a/tests/config/test_ratelimiting.py b/tests/config/test_ratelimiting.py index 13ab282384e3..3c7bb32e079d 100644 --- a/tests/config/test_ratelimiting.py +++ b/tests/config/test_ratelimiting.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # 
Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py index 0ec10019b3aa..db745815eff2 100644 --- a/tests/config/test_room_directory.py +++ b/tests/config/test_room_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_server.py b/tests/config/test_server.py index 98af7aa67577..6f2b9e997d19 100644 --- a/tests/config/test_server.py +++ b/tests/config/test_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index ec32d4b1ca50..183034f7d4aa 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/tests/config/test_util.py b/tests/config/test_util.py index 10363e3765d9..3d4929daacf0 100644 --- a/tests/config/test_util.py +++ b/tests/config/test_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/crypto/__init__.py b/tests/crypto/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/tests/crypto/__init__.py +++ b/tests/crypto/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index 62f639a18d0b..1c920157f506 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 30fcc4c1bfcc..2775dfd8807d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
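The test_load change above tightens the expected exception from a bare Exception to synapse.config.ConfigError, so callers can catch configuration problems specifically. A minimal sketch of the resulting pattern (the config file name here is hypothetical, assumed to exist but be missing mandatory keys such as server_name):

```python
from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig

try:
    # An incomplete config now fails with the specific ConfigError
    # rather than a generic Exception.
    HomeServerConfig.load_config("", ["-c", "incomplete-homeserver.yaml"])
except ConfigError as e:
    print("Refusing to start:", e)
```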
import time +from unittest.mock import Mock -from mock import Mock - +import attr import canonicaljson import signedjson.key import signedjson.sign @@ -68,6 +67,11 @@ def sign_response(self, res): signedjson.sign.sign_json(res, self.server_name, self.key) +@attr.s(slots=True) +class FakeRequest: + id = attr.ib() + + @logcontext_clean class KeyringTestCase(unittest.HomeserverTestCase): def check_context(self, val, expected): @@ -89,7 +93,7 @@ def test_verify_json_objects_for_server_awaits_previous_requests(self): first_lookup_deferred = Deferred() async def first_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request, "context_11") + self.assertEquals(current_context().request.id, "context_11") self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}}) await make_deferred_yieldable(first_lookup_deferred) @@ -102,9 +106,7 @@ async def first_lookup_fetch(keys_to_fetch): mock_fetcher.get_keys.side_effect = first_lookup_fetch async def first_lookup(): - with LoggingContext("context_11") as context_11: - context_11.request = "context_11" - + with LoggingContext("context_11", request=FakeRequest("context_11")): res_deferreds = kr.verify_json_objects_for_server( [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] ) @@ -130,7 +132,7 @@ async def first_lookup(): # should block rather than start a second call async def second_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request, "context_12") + self.assertEquals(current_context().request.id, "context_12") return { "server10": { get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) @@ -142,9 +144,7 @@ async def second_lookup_fetch(keys_to_fetch): second_lookup_state = [0] async def second_lookup(): - with LoggingContext("context_12") as context_12: - context_12.request = "context_12" - + with LoggingContext("context_12", request=FakeRequest("context_12")): res_deferreds_2 = kr.verify_json_objects_for_server( [("server10", json1, 0, "test")] ) @@ -589,10 +589,7 @@ def get_key_id(key): @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): - with LoggingContext("testctx") as ctx: - # we set the "request" prop to make it easier to follow what's going on in the - # logs. - ctx.request = "testctx" + with LoggingContext("testctx"): rv = yield f(*args, **kwargs) return rv diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py new file mode 100644 index 000000000000..01d257307c6b --- /dev/null +++ b/tests/events/test_presence_router.py @@ -0,0 +1,385 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
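One note before the new presence-router tests below: the keyring tests above now attach a structured request object to the LoggingContext instead of a bare string, which is why the assertions read current_context().request.id. The pattern in isolation, using the FakeRequest class from the diff:

```python
import attr

from synapse.logging.context import LoggingContext, current_context


@attr.s(slots=True)
class FakeRequest:
    id = attr.ib()


with LoggingContext("context_11", request=FakeRequest("context_11")):
    # Code running inside the context can recover a structured request
    # object, not just a string, for log lines and assertions alike.
    assert current_context().request.id == "context_11"
```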
+from typing import Dict, Iterable, List, Optional, Set, Tuple, Union +from unittest.mock import Mock + +import attr + +from synapse.api.constants import EduTypes +from synapse.events.presence_router import PresenceRouter +from synapse.federation.units import Transaction +from synapse.handlers.presence import UserPresenceState +from synapse.module_api import ModuleApi +from synapse.rest import admin +from synapse.rest.client.v1 import login, presence, room +from synapse.types import JsonDict, StreamToken, create_requester + +from tests.handlers.test_sync import generate_sync_config +from tests.unittest import FederatingHomeserverTestCase, TestCase, override_config + + +@attr.s +class PresenceRouterTestConfig: + users_who_should_receive_all_presence = attr.ib(type=List[str], default=[]) + + +class PresenceRouterTestModule: + def __init__(self, config: PresenceRouterTestConfig, module_api: ModuleApi): + self._config = config + self._module_api = module_api + + async def get_users_for_states( + self, state_updates: Iterable[UserPresenceState] + ) -> Dict[str, Set[UserPresenceState]]: + users_to_state = { + user_id: set(state_updates) + for user_id in self._config.users_who_should_receive_all_presence + } + return users_to_state + + async def get_interested_users( + self, user_id: str + ) -> Union[Set[str], PresenceRouter.ALL_USERS]: + if user_id in self._config.users_who_should_receive_all_presence: + return PresenceRouter.ALL_USERS + + return set() + + @staticmethod + def parse_config(config_dict: dict) -> PresenceRouterTestConfig: + """Parse a configuration dictionary from the homeserver config, do + some validation and return a typed PresenceRouterConfig. + + Args: + config_dict: The configuration dictionary. + + Returns: + A validated config object. + """ + # Initialise a typed config object + config = PresenceRouterTestConfig() + + config.users_who_should_receive_all_presence = config_dict.get( + "users_who_should_receive_all_presence" + ) + + return config + + +class PresenceRouterTestCase(FederatingHomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + presence.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + federation_transport_client=Mock(spec=["send_transaction"]), + ) + + def prepare(self, reactor, clock, homeserver): + self.sync_handler = self.hs.get_sync_handler() + self.module_api = homeserver.get_module_api() + + @override_config( + { + "presence": { + "presence_router": { + "module": __name__ + ".PresenceRouterTestModule", + "config": { + "users_who_should_receive_all_presence": [ + "@presence_gobbler:test", + ] + }, + } + }, + "send_federation": True, + } + ) + def test_receiving_all_presence(self): + """Test that a user that does not share a room with another user can receive + presence for them, due to presence routing. 
+ """ + # Create a user who should receive all presence of others + self.presence_receiving_user_id = self.register_user( + "presence_gobbler", "monkey" + ) + self.presence_receiving_user_tok = self.login("presence_gobbler", "monkey") + + # And two users who should not have any special routing + self.other_user_one_id = self.register_user("other_user_one", "monkey") + self.other_user_one_tok = self.login("other_user_one", "monkey") + self.other_user_two_id = self.register_user("other_user_two", "monkey") + self.other_user_two_tok = self.login("other_user_two", "monkey") + + # Put the other two users in a room with each other + room_id = self.helper.create_room_as( + self.other_user_one_id, tok=self.other_user_one_tok + ) + + self.helper.invite( + room_id, + self.other_user_one_id, + self.other_user_two_id, + tok=self.other_user_one_tok, + ) + self.helper.join(room_id, self.other_user_two_id, tok=self.other_user_two_tok) + # User one sends some presence + send_presence_update( + self, + self.other_user_one_id, + self.other_user_one_tok, + "online", + "boop", + ) + + # Check that the presence receiving user gets user one's presence when syncing + presence_updates, sync_token = sync_presence( + self, self.presence_receiving_user_id + ) + self.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + self.assertEqual(presence_update.user_id, self.other_user_one_id) + self.assertEqual(presence_update.state, "online") + self.assertEqual(presence_update.status_msg, "boop") + + # Have all three users send presence + send_presence_update( + self, + self.other_user_one_id, + self.other_user_one_tok, + "online", + "user_one", + ) + send_presence_update( + self, + self.other_user_two_id, + self.other_user_two_tok, + "online", + "user_two", + ) + send_presence_update( + self, + self.presence_receiving_user_id, + self.presence_receiving_user_tok, + "online", + "presence_gobbler", + ) + + # Check that the presence receiving user gets everyone's presence + presence_updates, _ = sync_presence( + self, self.presence_receiving_user_id, sync_token + ) + self.assertEqual(len(presence_updates), 3) + + # But that User One only get itself and User Two's presence + presence_updates, _ = sync_presence(self, self.other_user_one_id) + self.assertEqual(len(presence_updates), 2) + + found = False + for update in presence_updates: + if update.user_id == self.other_user_two_id: + self.assertEqual(update.state, "online") + self.assertEqual(update.status_msg, "user_two") + found = True + + self.assertTrue(found) + + @override_config( + { + "presence": { + "presence_router": { + "module": __name__ + ".PresenceRouterTestModule", + "config": { + "users_who_should_receive_all_presence": [ + "@presence_gobbler1:test", + "@presence_gobbler2:test", + "@far_away_person:island", + ] + }, + } + }, + "send_federation": True, + } + ) + def test_send_local_online_presence_to_with_module(self): + """Tests that send_local_presence_to_users sends local online presence to a set + of specified local and remote users, with a custom PresenceRouter module enabled. 
+ """ + # Create a user who will send presence updates + self.other_user_id = self.register_user("other_user", "monkey") + self.other_user_tok = self.login("other_user", "monkey") + + # And another two users that will also send out presence updates, as well as receive + # theirs and everyone else's + self.presence_receiving_user_one_id = self.register_user( + "presence_gobbler1", "monkey" + ) + self.presence_receiving_user_one_tok = self.login("presence_gobbler1", "monkey") + self.presence_receiving_user_two_id = self.register_user( + "presence_gobbler2", "monkey" + ) + self.presence_receiving_user_two_tok = self.login("presence_gobbler2", "monkey") + + # Have all three users send some presence updates + send_presence_update( + self, + self.other_user_id, + self.other_user_tok, + "online", + "I'm online!", + ) + send_presence_update( + self, + self.presence_receiving_user_one_id, + self.presence_receiving_user_one_tok, + "online", + "I'm also online!", + ) + send_presence_update( + self, + self.presence_receiving_user_two_id, + self.presence_receiving_user_two_tok, + "unavailable", + "I'm in a meeting!", + ) + + # Mark each presence-receiving user for receiving all user presence + self.get_success( + self.module_api.send_local_online_presence_to( + [ + self.presence_receiving_user_one_id, + self.presence_receiving_user_two_id, + ] + ) + ) + + # Perform a sync for each user + + # The other user should only receive their own presence + presence_updates, _ = sync_presence(self, self.other_user_id) + self.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + self.assertEqual(presence_update.user_id, self.other_user_id) + self.assertEqual(presence_update.state, "online") + self.assertEqual(presence_update.status_msg, "I'm online!") + + # Whereas both presence receiving users should receive everyone's presence updates + presence_updates, _ = sync_presence(self, self.presence_receiving_user_one_id) + self.assertEqual(len(presence_updates), 3) + presence_updates, _ = sync_presence(self, self.presence_receiving_user_two_id) + self.assertEqual(len(presence_updates), 3) + + # Test that sending to a remote user works + remote_user_id = "@far_away_person:island" + + # Note that due to the remote user being in our module's + # users_who_should_receive_all_presence config, they would have + # received user presence updates already. 
+ # + # Thus we reset the mock, and try sending all online local user + # presence again + self.hs.get_federation_transport_client().send_transaction.reset_mock() + + # Broadcast local user online presence + self.get_success( + self.module_api.send_local_online_presence_to([remote_user_id]) + ) + + # Check that the expected presence updates were sent + expected_users = [ + self.other_user_id, + self.presence_receiving_user_one_id, + self.presence_receiving_user_two_id, + ] + + calls = ( + self.hs.get_federation_transport_client().send_transaction.call_args_list + ) + for call in calls: + call_args = call[0] + federation_transaction = call_args[0] # type: Transaction + + # Get the sent EDUs in this transaction + edus = federation_transaction.get_dict()["edus"] + + for edu in edus: + # Make sure we're only checking presence-type EDUs + if edu["edu_type"] != EduTypes.Presence: + continue + + # EDUs can contain multiple presence updates + for presence_update in edu["content"]["push"]: + # Check for presence updates that contain the user IDs we're after + expected_users.remove(presence_update["user_id"]) + + # Ensure that no offline states are being sent out + self.assertNotEqual(presence_update["presence"], "offline") + + self.assertEqual(len(expected_users), 0) + + +def send_presence_update( + testcase: TestCase, + user_id: str, + access_token: str, + presence_state: str, + status_message: Optional[str] = None, +) -> JsonDict: + # Build the presence body + body = {"presence": presence_state} + if status_message: + body["status_msg"] = status_message + + # Update the user's presence state + channel = testcase.make_request( + "PUT", "/presence/%s/status" % (user_id,), body, access_token=access_token + ) + testcase.assertEqual(channel.code, 200) + + return channel.json_body + + +def sync_presence( + testcase: TestCase, + user_id: str, + since_token: Optional[StreamToken] = None, +) -> Tuple[List[UserPresenceState], StreamToken]: + """Perform a sync request for the given user and return the user presence updates + they've received, as well as the next_batch token. + + This method assumes testcase.sync_handler points to the homeserver's sync handler. + + Args: + testcase: The testcase that is currently being run. + user_id: The ID of the user to generate a sync response for. + since_token: An optional token indicating the point from which to sync. + + Returns: + A tuple containing a list of presence updates, and the sync response's + next_batch token. + """ + requester = create_requester(user_id) + sync_config = generate_sync_config(requester.user.to_string()) + sync_result = testcase.get_success( + testcase.sync_handler.wait_for_sync_for_user( + requester, sync_config, since_token + ) + ) + + return sync_result.presence, sync_result.next_batch diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py index ec85324c0c62..48e98aac797d 100644 --- a/tests/events/test_snapshot.py +++ b/tests/events/test_snapshot.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
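For reference when reading the EDU-walking loop above, a presence EDU as pulled out of a captured Transaction has roughly this shape (hand-written example; only the fields the test reads are shown, and EduTypes.Presence is the constant "m.presence"):

```python
presence_edu = {
    "edu_type": "m.presence",
    "content": {
        "push": [
            {
                "user_id": "@other_user:test",
                "presence": "online",
                # status_msg, last_active_ago, ... omitted here
            },
        ]
    },
}
```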
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 8ba36c60748b..9274ce4c396d 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the 'License'); diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 8186b8ca013c..1a809b2a6ae0 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 Matrix.org Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError from synapse.rest import admin diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 95eac6a5a34c..802c5ad299d3 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,6 +1,5 @@ from typing import List, Tuple - -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EventTypes from synapse.events import EventBase diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index ecc3faa57218..b00dd143d677 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional - -from mock import Mock +from unittest.mock import Mock from signedjson import key, sign from signedjson.types import BaseKey, SigningKey diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index cfeccc05779e..173789156459 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # Copyright 2019 Matrix.org Federation C.I.C # @@ -75,6 +74,25 @@ def test_block_ip_literals(self): self.assertFalse(server_matches_acl_event("[1:2::]", e)) self.assertTrue(server_matches_acl_event("1:2:3:4", e)) + def test_wildcard_matching(self): + e = _create_acl_event({"allow": ["good*.com"]}) + self.assertTrue( + server_matches_acl_event("good.com", e), + "* matches 0 characters", + ) + self.assertTrue( + server_matches_acl_event("GOOD.COM", e), + "pattern is case-insensitive", + ) + self.assertTrue( + server_matches_acl_event("good.aa.com", e), + "* matches several characters, including '.'", + ) + self.assertFalse( + server_matches_acl_event("ishgood.com", e), + "pattern does not allow prefixes", + ) + class StateQueryTests(unittest.FederatingHomeserverTestCase): diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py index 85500e169c88..84fa72b9ff14 100644 --- a/tests/federation/transport/test_server.py +++ b/tests/federation/transport/test_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
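The new test_wildcard_matching cases above pin down the server-ACL glob semantics: '*' may match any run of characters, including '.', matching is case-insensitive, and the pattern must cover the whole server name rather than a prefix. A self-contained sketch with those properties (not Synapse's actual implementation):

```python
import re


def matches_acl_pattern(server_name: str, pattern: str) -> bool:
    # Translate the glob into an anchored, case-insensitive regex in
    # which '*' becomes '.*' (so it can also cross '.' boundaries).
    regex = re.escape(pattern).replace(r"\*", ".*")
    return re.fullmatch(regex, server_name, re.IGNORECASE) is not None


assert matches_acl_pattern("good.com", "good*.com")
assert matches_acl_pattern("GOOD.COM", "good*.com")
assert matches_acl_pattern("good.aa.com", "good*.com")
assert not matches_acl_pattern("ishgood.com", "good*.com")
```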
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index a01fdd083981..18a734daf461 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,8 +13,7 @@ # limitations under the License. from collections import Counter - -from mock import Mock +from unittest.mock import Mock import synapse.api.errors import synapse.handlers.admin diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index d5d3fdd99a9e..b037b12a0f3d 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index c9f889b5117d..5f3350e490da 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,17 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock import pymacaroons from synapse.api.errors import AuthError, ResourceLimitError +from synapse.rest import admin from tests import unittest from tests.test_utils import make_awaitable class AuthTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + ] + def prepare(self, reactor, clock, hs): self.auth_handler = hs.get_auth_handler() self.macaroon_generator = hs.get_macaroon_generator() @@ -36,16 +40,10 @@ def prepare(self, reactor, clock, hs): self.small_number_of_users = 1 self.large_number_of_users = 100 - def test_token_is_a_macaroon(self): - token = self.macaroon_generator.generate_access_token("some_user") - # Check that we can parse the thing with pymacaroons - macaroon = pymacaroons.Macaroon.deserialize(token) - # The most basic of sanity checks - if "some_user" not in macaroon.inspect(): - self.fail("some_user was not in %s" % macaroon.inspect()) + self.user1 = self.register_user("a_user", "pass") def test_macaroon_caveats(self): - token = self.macaroon_generator.generate_access_token("a_user") + token = self.macaroon_generator.generate_guest_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) def verify_gen(caveat): @@ -60,19 +58,23 @@ def verify_type(caveat): def verify_nonce(caveat): return caveat.startswith("nonce =") + def verify_guest(caveat): + return caveat == "guest = true" + v = pymacaroons.Verifier() v.satisfy_general(verify_gen) v.satisfy_general(verify_user) v.satisfy_general(verify_type) v.satisfy_general(verify_nonce) + v.satisfy_general(verify_guest) v.verify(macaroon, self.hs.config.macaroon_secret_key) def test_short_term_login_token_gives_user_id(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", "", 5000 + self.user1, "", 5000 ) res = 
self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) self.assertEqual("", res.auth_provider_id) # when we advance the clock, the token should be rejected @@ -84,22 +86,22 @@ def test_short_term_login_token_gives_user_id(self): def test_short_term_login_token_gives_auth_provider(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", auth_provider_id="my_idp" + self.user1, auth_provider_id="my_idp" ) res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) self.assertEqual("my_idp", res.auth_provider_id) def test_short_term_login_token_cannot_replace_user_id(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", "", 5000 + self.user1, "", 5000 ) macaroon = pymacaroons.Macaroon.deserialize(token) res = self.get_success( self.auth_handler.validate_short_term_login_token(macaroon.serialize()) ) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) # add another "user_id" caveat, which might allow us to override the # user_id. @@ -115,7 +117,7 @@ def test_mau_limits_disabled(self): # Ensure does not throw exception self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) @@ -133,7 +135,7 @@ def test_mau_limits_exceeded_large(self): self.get_failure( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ), ResourceLimitError, ) @@ -161,7 +163,7 @@ def test_mau_limits_parity(self): # If not in monthly active cohort self.get_failure( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ), ResourceLimitError, ) @@ -178,7 +180,7 @@ def test_mau_limits_parity(self): ) self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) self.get_success( @@ -196,7 +198,7 @@ def test_mau_limits_not_exceeded(self): # Ensure does not raise exception self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) @@ -211,6 +213,6 @@ def test_mau_limits_not_exceeded(self): def _get_macaroon(self): token = self.macaroon_generator.generate_short_term_login_token( - "user_a", "", 5000 + self.user1, "", 5000 ) return pymacaroons.Macaroon.deserialize(token) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 7975af243c7c..b625995d1253 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -11,9 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
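test_macaroon_caveats above now mints a guest token and requires the verifier to satisfy the extra "guest = true" caveat. A standalone pymacaroons sketch of the same shape (the location, identifier, and secret here are made-up values, not what MacaroonGenerator actually uses):

```python
import pymacaroons

# Mint a token carrying the caveats the test verifies.
mac = pymacaroons.Macaroon(location="test", identifier="key", key="secret")
mac.add_first_party_caveat("gen = 1")
mac.add_first_party_caveat("user_id = @a_user:test")
mac.add_first_party_caveat("type = access")
mac.add_first_party_caveat("guest = true")
token = mac.serialize()

# Every caveat must be satisfied by some condition, or verify() raises.
v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_general(lambda caveat: caveat.startswith("user_id = "))
v.satisfy_exact("type = access")
v.satisfy_exact("guest = true")
assert v.verify(pymacaroons.Macaroon.deserialize(token), "secret")
```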
-from mock import Mock +from unittest.mock import Mock -from synapse.handlers.cas_handler import CasResponse +from synapse.handlers.cas import CasResponse from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 821629bc38a3..84c38b295db1 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 863d8737b2f8..1908d3c2c6fb 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,7 @@ # limitations under the License. -from mock import Mock +from unittest.mock import Mock import synapse import synapse.api.errors diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 5e86c5e56bf4..61a00130b814 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -14,7 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from signedjson import key as key, sign as sign diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index d7498aa51a80..9b7e7a8e9aff 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. @@ -16,8 +15,7 @@ # limitations under the License. import copy - -import mock +from unittest import mock from synapse.api.errors import SynapseError diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 3af361195b57..8796af45edb4 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -223,7 +222,7 @@ def create_invite(): room_version, ) - for i in range(3): + for _ in range(3): event = create_invite() self.get_success( self.handler.on_invite_request( diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index a0d1ebdbe3c1..a8a9fc5b628e 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index c7796fb837bc..a25c89bd5bd3 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,10 +13,9 @@ # limitations under the License. 
import json import os +from unittest.mock import ANY, Mock, patch from urllib.parse import parse_qs, urlparse -from mock import ANY, Mock, patch - import pymacaroons from synapse.handlers.sso import MappingException @@ -501,7 +499,7 @@ def test_callback(self): self.assertRenderedError("fetch_error") # Handle code exchange failure - from synapse.handlers.oidc_handler import OidcError + from synapse.handlers.oidc import OidcError self.provider._exchange_code = simple_async_mock( raises=OidcError("invalid_request") @@ -585,7 +583,7 @@ def test_exchange_code(self): body=b'{"error": "foo", "error_description": "bar"}', ) ) - from synapse.handlers.oidc_handler import OidcError + from synapse.handlers.oidc import OidcError exc = self.get_failure(self.provider._exchange_code(code), OidcError) self.assertEqual(exc.value.error, "foo") @@ -1128,7 +1126,7 @@ def _generate_oidc_session_token( client_redirect_url: str, ui_auth_session_id: str = "", ) -> str: - from synapse.handlers.oidc_handler import OidcSessionData + from synapse.handlers.oidc import OidcSessionData return self.handler._token_generator.generate_oidc_session_token( state=state, @@ -1154,7 +1152,7 @@ async def _make_callback_with_userinfo( userinfo: the OIDC userinfo dict client_redirect_url: the URL to redirect to on success. """ - from synapse.handlers.oidc_handler import OidcSessionData + from synapse.handlers.oidc import OidcSessionData handler = hs.get_oidc_handler() provider = handler._providers["oidc"] diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index a98a65ae67e4..32651db09669 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +15,7 @@ """Tests for the password_auth_provider interface""" from typing import Any, Type, Union - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 77330f59a987..1ffab709fcb8 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,7 @@ # limitations under the License. 
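The new PresenceFederationQueue tests below (test_send_and_get and friends) encode the queue's contract: each send takes the next stream position, replication rows fan out as one (position, (destination, user_id)) pair per combination, old entries are pruned after a retention window, and a from_token that reaches back into pruned data yields no rows, since the queue is best-effort. A toy, single-process model of that contract (not the real implementation; the retention window is an assumption, and pruning here happens at read time for simplicity):

```python
from collections import deque
from typing import Deque, List, Tuple

KEEP_MS = 5 * 60 * 1000  # assumed retention window


class ToyPresenceQueue:
    def __init__(self) -> None:
        self._next_pos = 1
        # (position, timestamp_ms, destinations, user_ids)
        self._queue: Deque[Tuple[int, int, Tuple[str, ...], Tuple[str, ...]]] = deque()

    def send_presence_to_destinations(self, now_ms, user_ids, destinations) -> None:
        self._queue.append(
            (self._next_pos, now_ms, tuple(destinations), tuple(user_ids))
        )
        self._next_pos += 1

    def get_current_token(self) -> int:
        return self._next_pos - 1

    def get_replication_rows(
        self, now_ms: int, from_token: int, upto_token: int
    ) -> List[Tuple[int, Tuple[str, str]]]:
        # Prune entries that have aged out of the retention window.
        while self._queue and self._queue[0][1] <= now_ms - KEEP_MS:
            self._queue.popleft()

        if self._queue and from_token < self._queue[0][0] - 1:
            # The range reaches into pruned data: hand back nothing rather
            # than a misleading partial set (callers resync elsewhere).
            return []

        return [
            (pos, (dest, user_id))
            for pos, _, dests, user_ids in self._queue
            if from_token < pos <= upto_token
            for user_id in user_ids
            for dest in dests
        ]


q = ToyPresenceQueue()
prev = q.get_current_token()
q.send_presence_to_destinations(0, ("@user1:test", "@user2:test"), ("dest1", "dest2"))
q.send_presence_to_destinations(0, ("@user3:test",), ("dest3",))
rows = q.get_replication_rows(0, prev, q.get_current_token())
assert len(rows) == 5 and (2, ("dest3", "@user3:test")) in rows
```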
-from mock import Mock, call +from unittest.mock import Mock, call from signedjson.key import generate_signing_key @@ -22,6 +21,7 @@ from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events.builder import EventBuilder +from synapse.federation.sender import FederationSender from synapse.handlers.presence import ( EXTERNAL_PROCESS_EXPIRY, FEDERATION_PING_INTERVAL, @@ -472,6 +472,190 @@ def test_external_process_timeout(self): self.assertEqual(state.state, PresenceState.OFFLINE) +class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.presence_handler = hs.get_presence_handler() + self.clock = hs.get_clock() + self.instance_name = hs.get_instance_name() + + self.queue = self.presence_handler.get_federation_queue() + + def test_send_and_get(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + now_token = self.queue.get_current_token(self.instance_name) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (1, ("dest1", "@user1:test")), + (1, ("dest2", "@user1:test")), + (1, ("dest1", "@user2:test")), + (1, ("dest2", "@user2:test")), + (2, ("dest3", "@user3:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + now_token = self.queue.get_current_token(self.instance_name) + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", upto_token, now_token, 10) + ) + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + self.assertCountEqual(rows, []) + + def test_send_and_get_split(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + + now_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (1, ("dest1", "@user1:test")), + (1, ("dest2", "@user1:test")), + (1, ("dest1", "@user2:test")), + (1, ("dest2", "@user2:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + now_token = self.queue.get_current_token(self.instance_name) + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", upto_token, now_token, 10) + ) + + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (2, ("dest3", "@user3:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + def test_clear_queue_all(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + prev_token = self.queue.get_current_token(self.instance_name) + 
+ self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + self.reactor.advance(10 * 60 * 1000) + + now_token = self.queue.get_current_token(self.instance_name) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + self.assertCountEqual(rows, []) + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + now_token = self.queue.get_current_token(self.instance_name) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (3, ("dest1", "@user1:test")), + (3, ("dest2", "@user1:test")), + (3, ("dest1", "@user2:test")), + (3, ("dest2", "@user2:test")), + (4, ("dest3", "@user3:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + def test_partially_clear_queue(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + + self.reactor.advance(2 * 60 * 1000) + + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + self.reactor.advance(4 * 60 * 1000) + + now_token = self.queue.get_current_token(self.instance_name) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + # The first batch has been cleared from the queue, so the requested + # range can no longer be serviced and no rows come back. + self.assertCountEqual(rows, []) + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + now_token = self.queue.get_current_token(self.instance_name) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (3, ("dest1", "@user1:test")), + (3, ("dest2", "@user1:test")), + (3, ("dest1", "@user2:test")), + (3, ("dest2", "@user2:test")), + (4, ("dest3", "@user3:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + class PresenceJoinTestCase(unittest.HomeserverTestCase): """Tests remote servers get told about presence of users in the room when they join and when new local users join. 
@@ -483,10 +667,17 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - "server", federation_http_client=None, federation_sender=Mock() + "server", + federation_http_client=None, + federation_sender=Mock(spec=FederationSender), ) return hs + def default_config(self): + config = super().default_config() + config["send_federation"] = True + return config + def prepare(self, reactor, clock, hs): self.federation_sender = hs.get_federation_sender() self.event_builder_factory = hs.get_event_builder_factory() @@ -530,9 +721,6 @@ def test_remote_joins(self): # Add a new remote server to the room self._add_new_user(room_id, "@alice:server2") - # We shouldn't have sent out any local presence *updates* - self.federation_sender.send_presence.assert_not_called() - # When new server is joined we send it the local users presence states. # We expect to only see user @test2:server, as @test:server is offline # and has a zero last_active_ts @@ -541,7 +729,7 @@ def test_remote_joins(self): ) self.assertEqual(expected_state.state, PresenceState.ONLINE) self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=["server2"], states={expected_state} + destinations={"server2"}, states=[expected_state] ) # @@ -551,9 +739,8 @@ def test_remote_joins(self): self.federation_sender.reset_mock() self._add_new_user(room_id, "@bob:server3") - self.federation_sender.send_presence.assert_not_called() self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=["server3"], states={expected_state} + destinations={"server3"}, states=[expected_state] ) def test_remote_gets_presence_when_local_user_joins(self): @@ -596,22 +783,13 @@ def test_remote_gets_presence_when_local_user_joins(self): self.reactor.pump([0]) # Wait for presence updates to be handled - # We shouldn't have sent out any local presence *updates* - self.federation_sender.send_presence.assert_not_called() - # We expect to only send test2 presence to server2 and server3 expected_state = self.get_success( self.presence_handler.current_state_for_user("@test2:server") ) self.assertEqual(expected_state.state, PresenceState.ONLINE) - self.assertEqual( - self.federation_sender.send_presence_to_destinations.call_count, 2 - ) - self.federation_sender.send_presence_to_destinations.assert_any_call( - destinations=["server3"], states={expected_state} - ) - self.federation_sender.send_presence_to_destinations.assert_any_call( - destinations=["server2"], states={expected_state} + self.federation_sender.send_presence_to_destinations.assert_called_once_with( + destinations={"server2", "server3"}, states=[expected_state] ) def _add_new_user(self, room_id, user_id): diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 75c6a4e21cb5..5330a9b34e22 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock import synapse.types from synapse.api.errors import AuthError, SynapseError diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 94b69035945b..bd4319052338 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.auth import Auth from synapse.api.constants import UserTypes @@ -49,10 +48,6 @@ def prepare(self, reactor, clock, hs): self.mock_distributor = Mock() self.mock_distributor.declare("registered_user") self.mock_captcha_client = Mock() - self.macaroon_generator = Mock( - generate_access_token=Mock(return_value="secret") - ) - self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) self.handler = self.hs.get_registration_handler() self.store = self.hs.get_datastore() self.lots_of_users = 100 @@ -68,8 +63,8 @@ def test_user_is_created_and_logged_in_if_doesnt_exist(self): self.get_or_create_user(requester, frank.localpart, "Frankie") ) self.assertEquals(result_user_id, user_id) - self.assertTrue(result_token is not None) - self.assertEquals(result_token, "secret") + self.assertIsInstance(result_token, str) + self.assertGreater(len(result_token), 20) def test_if_user_exists(self): store = self.hs.get_datastore() @@ -501,7 +496,7 @@ def check_registration_for_spam( user_id = self.get_success(self.handler.register_user(localpart="user")) # Get an access token. - token = self.macaroon_generator.generate_access_token(user_id) + token = "testtok" self.get_success( self.store.add_access_token_to_user( user_id=user_id, token=token, device_id=None, valid_until_ms=None @@ -578,7 +573,7 @@ async def get_or_create_user( user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - token = self.macaroon_generator.generate_access_token(user_id) + token = self.hs.get_auth_handler().generate_access_token(user) if need_register: await self.handler.register_with_store( diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 30efd43b4060..8cfc184fefc9 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -13,8 +13,7 @@ # limitations under the License. from typing import Optional - -from mock import Mock +from unittest.mock import Mock import attr diff --git a/tests/handlers/test_space_summary.py b/tests/handlers/test_space_summary.py new file mode 100644 index 000000000000..2c5e81531b9e --- /dev/null +++ b/tests/handlers/test_space_summary.py @@ -0,0 +1,81 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
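+
+# Unit tests for the ordering which the space summary handler applies to a
+# space's child events, via `_child_events_comparison_key`.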
+from typing import Any, Optional +from unittest import mock + +from synapse.handlers.space_summary import _child_events_comparison_key + +from tests import unittest + + +def _create_event(room_id: str, order: Optional[Any] = None): + result = mock.Mock() + result.room_id = room_id + result.content = {} + if order is not None: + result.content["order"] = order + return result + + +def _order(*events): + return sorted(events, key=_child_events_comparison_key) + + +class TestSpaceSummarySort(unittest.TestCase): + def test_no_order_last(self): + """An event with no ordering is placed behind those with an ordering.""" + ev1 = _create_event("!abc:test") + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_order(self): + """The ordering should be used.""" + ev1 = _create_event("!abc:test", "xyz") + ev2 = _create_event("!xyz:test", "abc") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_order_room_id(self): + """Room ID is a tie-breaker for ordering.""" + ev1 = _create_event("!abc:test", "abc") + ev2 = _create_event("!xyz:test", "abc") + + self.assertEqual([ev1, ev2], _order(ev1, ev2)) + + def test_invalid_ordering_type(self): + """Invalid orderings are considered the same as missing.""" + ev1 = _create_event("!abc:test", 1) + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", {}) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", []) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", True) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_invalid_ordering_value(self): + """Invalid orderings are considered the same as missing.""" + ev1 = _create_event("!abc:test", "foo\n") + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", "a" * 51) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index 312c0a0d4172..c9d4fd93368f 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index e62586142eb3..c8b43305f433 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +36,7 @@ def prepare(self, reactor, clock, hs): def test_wait_for_sync_for_user_auth_blocking(self): user_id1 = "@user1:test" user_id2 = "@user2:test" - sync_config = self._generate_sync_config(user_id1) + sync_config = generate_sync_config(user_id1) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time @@ -60,7 +59,7 @@ def test_wait_for_sync_for_user_auth_blocking(self): self.auth_blocking._hs_disabled = False - sync_config = self._generate_sync_config(user_id2) + sync_config = generate_sync_config(user_id2) requester = create_requester(user_id2) e = self.get_failure( @@ -69,11 +68,12 @@ def test_wait_for_sync_for_user_auth_blocking(self): ) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def _generate_sync_config(self, user_id): - return SyncConfig( - user=UserID(user_id.split(":")[0][1:], user_id.split(":")[1]), - 
filter_collection=DEFAULT_FILTER_COLLECTION, - is_guest=False, - request_key="request_key", - device_id="device_id", - ) + +def generate_sync_config(user_id: str) -> SyncConfig: + return SyncConfig( + user=UserID(user_id.split(":")[0][1:], user_id.split(":")[1]), + filter_collection=DEFAULT_FILTER_COLLECTION, + is_guest=False, + request_key="request_key", + device_id="device_id", + ) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 24e71381965a..0c89487eaf34 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +15,7 @@ import json from typing import Dict - -from mock import ANY, Mock, call +from unittest.mock import ANY, Mock, call from twisted.internet import defer from twisted.web.resource import Resource diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 98b2f5b38377..daac37abd876 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 3e5a856584bd..e74f7f5b48f1 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/http/federation/__init__.py b/tests/http/federation/__init__.py index 1453d045718f..743fb9904a8f 100644 --- a/tests/http/federation/__init__.py +++ b/tests/http/federation/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 4c56253da549..e45980316b6d 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging - -from mock import Mock +from typing import Optional +from unittest.mock import Mock import treq from netaddr import IPSet @@ -180,7 +179,11 @@ def _make_get_request(self, uri): _check_logcontext(context) def _handle_well_known_connection( - self, client_factory, expected_sni, content, response_headers={} + self, + client_factory, + expected_sni, + content, + response_headers: Optional[dict] = None, ): """Handle an outgoing HTTPs connection: wire it up to a server, check that the request is for a .well-known, and send the response. 
@@ -202,10 +205,12 @@ def _handle_well_known_connection( self.assertEqual( request.requestHeaders.getRawHeaders(b"user-agent"), [b"test-agent"] ) - self._send_well_known_response(request, content, headers=response_headers) + self._send_well_known_response(request, content, headers=response_headers or {}) return well_known_server - def _send_well_known_response(self, request, content, headers={}): + def _send_well_known_response( + self, request, content, headers: Optional[dict] = None + ): """Check that an incoming request looks like a valid .well-known request, and send back the response. """ @@ -213,7 +218,7 @@ def _send_well_known_response(self, request, content, headers={}): self.assertEqual(request.path, b"/.well-known/matrix/server") self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"]) # send back a response - for k, v in headers.items(): + for k, v in (headers or {}).items(): request.setHeader(k, v) request.write(content) request.finish() diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index fee2985d350e..c49be33b9f7c 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # @@ -14,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer from twisted.internet.defer import Deferred diff --git a/tests/http/test_additional_resource.py b/tests/http/test_additional_resource.py index 453391a5a5f0..768c2ba4ead3 100644 --- a/tests/http/test_additional_resource.py +++ b/tests/http/test_additional_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 0ce181a51e94..7e2f2a01cc07 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -13,8 +13,7 @@ # limitations under the License. from io import BytesIO - -from mock import Mock +from unittest.mock import Mock from netaddr import IPSet diff --git a/tests/http/test_endpoint.py b/tests/http/test_endpoint.py index d06ea518cee6..1f9a2f9b1d32 100644 --- a/tests/http/test_endpoint.py +++ b/tests/http/test_endpoint.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 9c52c8fdca14..ed9a884d761b 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock from netaddr import IPSet from parameterized import parameterized @@ -27,6 +26,7 @@ from synapse.api.errors import RequestSendFailed from synapse.http.matrixfederationclient import ( + MAX_RESPONSE_SIZE, MatrixFederationHttpClient, MatrixFederationRequest, ) @@ -561,3 +561,61 @@ def test_json_error(self, return_value): f = self.failureResultOf(test_d) self.assertIsInstance(f.value, RequestSendFailed) + + def test_too_big(self): + """ + Test what happens if a huge response is returned from the remote endpoint. + """ + + test_d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(test_d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + + # complete the connection and wire it up to a fake transport + protocol = factory.buildProtocol(None) + transport = StringTransport() + protocol.makeConnection(transport) + + # that should have made it send the request to the transport + self.assertRegex(transport.value(), b"^GET /foo/bar") + self.assertRegex(transport.value(), b"Host: testserv:8008") + + # Deferred is still without a result + self.assertNoResult(test_d) + + # Send it a huge HTTP response + protocol.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + ) + + self.pump() + + # should still be waiting + self.assertNoResult(test_d) + + sent = 0 + chunk_size = 1024 * 512 + while not test_d.called: + protocol.dataReceived(b"a" * chunk_size) + sent += chunk_size + self.assertLessEqual(sent, MAX_RESPONSE_SIZE) + + self.assertEqual(sent, MAX_RESPONSE_SIZE) + + f = self.failureResultOf(test_d) + self.assertIsInstance(f.value, RequestSendFailed) + + self.assertTrue(transport.disconnecting) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 3ea8b5bec7ec..fefc8099c9d2 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 45089158ceb6..a80bfb9f4eb5 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,8 +13,7 @@ # limitations under the License. import json from io import BytesIO - -from mock import Mock +from unittest.mock import Mock from synapse.api.errors import SynapseError from synapse.http.servlet import ( diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py index a1cf0862d4fe..c85a3665c127 100644 --- a/tests/http/test_simple_client.py +++ b/tests/http/test_simple_client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock from netaddr import IPSet diff --git a/tests/http/test_site.py b/tests/http/test_site.py new file mode 100644 index 000000000000..8c13b4f6931e --- /dev/null +++ b/tests/http/test_site.py @@ -0,0 +1,83 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet.address import IPv6Address +from twisted.test.proto_helpers import StringTransport + +from synapse.app.homeserver import SynapseHomeServer + +from tests.unittest import HomeserverTestCase + + +class SynapseRequestTestCase(HomeserverTestCase): + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer) + + def test_large_request(self): + """overlarge HTTP requests should be rejected""" + self.hs.start_listening() + + # find the HTTP server which is configured to listen on port 0 + (port, factory, _backlog, interface) = self.reactor.tcpServers[0] + self.assertEqual(interface, "::") + self.assertEqual(port, 0) + + # as a control case, first send a regular request. + + # complete the connection and wire it up to a fake transport + client_address = IPv6Address("TCP", "::1", "2345") + protocol = factory.buildProtocol(client_address) + transport = StringTransport() + protocol.makeConnection(transport) + + protocol.dataReceived( + b"POST / HTTP/1.1\r\n" + b"Connection: close\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + b"0\r\n" + b"\r\n" + ) + + while not transport.disconnecting: + self.reactor.advance(1) + + # we should get a 404 + self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 404 ") + + # now send an oversized request + protocol = factory.buildProtocol(client_address) + transport = StringTransport() + protocol.makeConnection(transport) + + protocol.dataReceived( + b"POST / HTTP/1.1\r\n" + b"Connection: close\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + ) + + # we deliberately send all the data in one big chunk, to ensure that + # twisted isn't buffering the data in the chunked transfer decoder. + # we start with the chunk size, in hex. (We won't actually send this much) + protocol.dataReceived(b"10000000\r\n") + sent = 0 + while not transport.disconnected: + self.assertLess(sent, 0x10000000, "connection did not drop") + protocol.dataReceived(b"\0" * 1024) + sent += 1024 + + # default max upload size is 50M, so it should drop on the next buffer after + # that. + self.assertEqual(sent, 50 * 1024 * 1024 + 1024) diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py index a58d51441c6e..1acf5666a856 100644 --- a/tests/logging/__init__.py +++ b/tests/logging/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index 4bc27a1d7d1c..b0d046fe0079 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index 48a74e2eee75..116071692976 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,15 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import json import logging -from io import StringIO +from io import BytesIO, StringIO +from unittest.mock import Mock, patch + +from twisted.web.server import Request +from synapse.http.site import SynapseRequest from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter from synapse.logging.context import LoggingContext, LoggingContextFilter from tests.logging import LoggerCleanupMixin +from tests.server import FakeChannel from tests.unittest import TestCase @@ -120,7 +123,7 @@ def test_with_context(self): handler.addFilter(LoggingContextFilter()) logger = self.get_logger(handler) - with LoggingContext(request="test"): + with LoggingContext("name"): logger.info("Hello there, %s!", "wally") log = self.get_log_line() @@ -134,4 +137,63 @@ def test_with_context(self): ] self.assertCountEqual(log.keys(), expected_log_keys) self.assertEqual(log["log"], "Hello there, wally!") - self.assertEqual(log["request"], "test") + self.assertEqual(log["request"], "name") + + def test_with_request_context(self): + """ + Information from the logging context request should be added to the JSON response. + """ + handler = logging.StreamHandler(self.output) + handler.setFormatter(JsonFormatter()) + handler.addFilter(LoggingContextFilter()) + logger = self.get_logger(handler) + + # A full request isn't needed here. + site = Mock(spec=["site_tag", "server_version_string", "getResourceFor"]) + site.site_tag = "test-site" + site.server_version_string = "Server v1" + request = SynapseRequest(FakeChannel(site, None)) + # Call requestReceived to finish instantiating the object. + request.content = BytesIO() + # Partially skip some of the internal processing of SynapseRequest. + request._started_processing = Mock() + request.request_metrics = Mock(spec=["name"]) + with patch.object(Request, "render"): + request.requestReceived(b"POST", b"/_matrix/client/versions", b"1.1") + + # Also set the requester to ensure the processing works. + request.requester = "@foo:test" + + with LoggingContext( + request.get_request_id(), parent_context=request.logcontext + ): + logger.info("Hello there, %s!", "wally") + + log = self.get_log_line() + + # The terse logger includes additional request information, if possible. 
+ expected_log_keys = [ + "log", + "level", + "namespace", + "request", + "ip_address", + "site_tag", + "requester", + "authenticated_entity", + "method", + "url", + "protocol", + "user_agent", + ] + self.assertCountEqual(log.keys(), expected_log_keys) + self.assertEqual(log["log"], "Hello there, wally!") + self.assertTrue(log["request"].startswith("POST-")) + self.assertEqual(log["ip_address"], "127.0.0.1") + self.assertEqual(log["site_tag"], "test-site") + self.assertEqual(log["requester"], "@foo:test") + self.assertEqual(log["authenticated_entity"], "@foo:test") + self.assertEqual(log["method"], "POST") + self.assertEqual(log["url"], "/_matrix/client/versions") + self.assertEqual(log["protocol"], "1.1") + self.assertEqual(log["user_agent"], "") diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index edacd1b566ba..742ad14b8c3a 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,27 +11,39 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock +from synapse.api.constants import EduTypes from synapse.events import EventBase +from synapse.federation.units import Transaction +from synapse.handlers.presence import UserPresenceState from synapse.rest import admin -from synapse.rest.client.v1 import login, room +from synapse.rest.client.v1 import login, presence, room from synapse.types import create_requester -from tests.unittest import HomeserverTestCase +from tests.events.test_presence_router import send_presence_update, sync_presence +from tests.test_utils.event_injection import inject_member_event +from tests.unittest import FederatingHomeserverTestCase, override_config -class ModuleApiTestCase(HomeserverTestCase): +class ModuleApiTestCase(FederatingHomeserverTestCase): servlets = [ admin.register_servlets, login.register_servlets, room.register_servlets, + presence.register_servlets, ] def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = homeserver.get_module_api() self.event_creation_handler = homeserver.get_event_creation_handler() + self.sync_handler = homeserver.get_sync_handler() + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + federation_transport_client=Mock(spec=["send_transaction"]), + ) def test_can_register_user(self): """Tests that an external module can register a user""" @@ -205,3 +216,161 @@ def test_public_rooms(self): ) ) self.assertFalse(is_in_public_rooms) + + # The ability to send federation is required by send_local_online_presence_to. 
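+    # (The test homeserver's config disables federation sending by default,
+    # so it has to be re-enabled for these tests.)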
+    @override_config({"send_federation": True})
+    def test_send_local_online_presence_to(self):
+        """Tests that send_local_online_presence_to sends local online presence to local users."""
+        # Create a user who will receive presence updates
+        self.presence_receiver_id = self.register_user("presence_receiver", "monkey")
+        self.presence_receiver_tok = self.login("presence_receiver", "monkey")
+
+        # And another user that will send presence updates out
+        self.presence_sender_id = self.register_user("presence_sender", "monkey")
+        self.presence_sender_tok = self.login("presence_sender", "monkey")
+
+        # Put them in a room together so they will receive each other's presence updates
+        room_id = self.helper.create_room_as(
+            self.presence_receiver_id,
+            tok=self.presence_receiver_tok,
+        )
+        self.helper.join(room_id, self.presence_sender_id, tok=self.presence_sender_tok)
+
+        # Presence sender comes online
+        send_presence_update(
+            self,
+            self.presence_sender_id,
+            self.presence_sender_tok,
+            "online",
+            "I'm online!",
+        )
+
+        # Presence receiver should have received it
+        presence_updates, sync_token = sync_presence(self, self.presence_receiver_id)
+        self.assertEqual(len(presence_updates), 1)
+
+        presence_update = presence_updates[0]  # type: UserPresenceState
+        self.assertEqual(presence_update.user_id, self.presence_sender_id)
+        self.assertEqual(presence_update.state, "online")
+
+        # Syncing again should result in no presence updates
+        presence_updates, sync_token = sync_presence(
+            self, self.presence_receiver_id, sync_token
+        )
+        self.assertEqual(len(presence_updates), 0)
+
+        # Trigger sending local online presence
+        self.get_success(
+            self.module_api.send_local_online_presence_to(
+                [
+                    self.presence_receiver_id,
+                ]
+            )
+        )
+
+        # Presence receiver should have received online presence again
+        presence_updates, sync_token = sync_presence(
+            self, self.presence_receiver_id, sync_token
+        )
+        self.assertEqual(len(presence_updates), 1)
+
+        presence_update = presence_updates[0]  # type: UserPresenceState
+        self.assertEqual(presence_update.user_id, self.presence_sender_id)
+        self.assertEqual(presence_update.state, "online")
+
+        # Presence sender goes offline
+        send_presence_update(
+            self,
+            self.presence_sender_id,
+            self.presence_sender_tok,
+            "offline",
+            "I slink back into the darkness.",
+        )
+
+        # Trigger sending local online presence
+        self.get_success(
+            self.module_api.send_local_online_presence_to(
+                [
+                    self.presence_receiver_id,
+                ]
+            )
+        )
+
+        # Presence receiver should *not* have received offline state
+        presence_updates, sync_token = sync_presence(
+            self, self.presence_receiver_id, sync_token
+        )
+        self.assertEqual(len(presence_updates), 0)
+
+    @override_config({"send_federation": True})
+    def test_send_local_online_presence_to_federation(self):
+        """Tests that send_local_online_presence_to sends local online presence to remote users."""
+        # Create a user who will send presence updates
+        self.presence_sender_id = self.register_user("presence_sender", "monkey")
+        self.presence_sender_tok = self.login("presence_sender", "monkey")
+
+        # And a room they're a part of
+        room_id = self.helper.create_room_as(
+            self.presence_sender_id,
+            tok=self.presence_sender_tok,
+        )
+
+        # Mark them as online
+        send_presence_update(
+            self,
+            self.presence_sender_id,
+            self.presence_sender_tok,
+            "online",
+            "I'm online!",
+        )
+
+        # Make up a remote user to send presence to
+        remote_user_id = "@far_away_person:island"
+
+        # Create a join membership event for the remote user into the room.
+ # This allows presence information to flow from one user to the other. + self.get_success( + inject_member_event( + self.hs, + room_id, + sender=remote_user_id, + target=remote_user_id, + membership="join", + ) + ) + + # The remote user would have received the existing room members' presence + # when they joined the room. + # + # Thus we reset the mock, and try sending online local user + # presence again + self.hs.get_federation_transport_client().send_transaction.reset_mock() + + # Broadcast local user online presence + self.get_success( + self.module_api.send_local_online_presence_to([remote_user_id]) + ) + + # Check that a presence update was sent as part of a federation transaction + found_update = False + calls = ( + self.hs.get_federation_transport_client().send_transaction.call_args_list + ) + for call in calls: + call_args = call[0] + federation_transaction = call_args[0] # type: Transaction + + # Get the sent EDUs in this transaction + edus = federation_transaction.get_dict()["edus"] + + for edu in edus: + # Make sure we're only checking presence-type EDUs + if edu["edu_type"] != EduTypes.Presence: + continue + + # EDUs can contain multiple presence updates + for presence_update in edu["content"]["push"]: + if presence_update["user_id"] == self.presence_sender_id: + found_update = True + + self.assertTrue(found_update) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 941cf4242954..e04bc5c9a661 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 60f0820cffbd..ffd75b14914f 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import Deferred diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 4a841f5bb844..a52e89e4074b 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, Dict + from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator @@ -67,6 +68,170 @@ def test_display_name(self): # A display name with spaces should work fine. 
self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) + def _assert_matches( + self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + ) -> None: + evaluator = self._get_evaluator(content) + self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg) + + def _assert_not_matches( + self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + ) -> None: + evaluator = self._get_evaluator(content) + self.assertFalse( + evaluator.matches(condition, "@user:test", "display_name"), msg + ) + + def test_event_match_body(self): + """Check that event_match conditions on content.body work as expected""" + + # if the key is `content.body`, the pattern matches substrings. + + # non-wildcards should match + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": "foobaz", + } + self._assert_matches( + condition, + {"body": "aaa FoobaZ zzz"}, + "patterns should match and be case-insensitive", + ) + self._assert_not_matches( + condition, + {"body": "aa xFoobaZ yy"}, + "pattern should only match at word boundaries", + ) + self._assert_not_matches( + condition, + {"body": "aa foobazx yy"}, + "pattern should only match at word boundaries", + ) + + # wildcards should match + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": "f?o*baz", + } + + self._assert_matches( + condition, + {"body": "aaa FoobarbaZ zzz"}, + "* should match string and pattern should be case-insensitive", + ) + self._assert_matches( + condition, {"body": "aa foobaz yy"}, "* should match 0 characters" + ) + self._assert_not_matches( + condition, {"body": "aa fobbaz yy"}, "? should not match 0 characters" + ) + self._assert_not_matches( + condition, {"body": "aa fiiobaz yy"}, "? should not match 2 characters" + ) + self._assert_not_matches( + condition, + {"body": "aa xfooxbaz yy"}, + "pattern should only match at word boundaries", + ) + self._assert_not_matches( + condition, + {"body": "aa fooxbazx yy"}, + "pattern should only match at word boundaries", + ) + + # test backslashes + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": r"f\oobaz", + } + self._assert_matches( + condition, + {"body": r"F\oobaz"}, + "backslash should match itself", + ) + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": r"f\?obaz", + } + self._assert_matches( + condition, + {"body": r"F\oobaz"}, + r"? after \ should match any character", + ) + + def test_event_match_non_body(self): + """Check that event_match conditions on other keys work as expected""" + + # if the key is anything other than 'content.body', the pattern must match the + # whole value. 
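+        # (this is in contrast to content.body, above, where the pattern
+        # matches at word boundaries within the body.)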
+ + # non-wildcards should match + condition = { + "kind": "event_match", + "key": "content.value", + "pattern": "foobaz", + } + self._assert_matches( + condition, + {"value": "FoobaZ"}, + "patterns should match and be case-insensitive", + ) + self._assert_not_matches( + condition, + {"value": "xFoobaZ"}, + "pattern should only match at the start/end of the value", + ) + self._assert_not_matches( + condition, + {"value": "FoobaZz"}, + "pattern should only match at the start/end of the value", + ) + + # wildcards should match + condition = { + "kind": "event_match", + "key": "content.value", + "pattern": "f?o*baz", + } + self._assert_matches( + condition, + {"value": "FoobarbaZ"}, + "* should match string and pattern should be case-insensitive", + ) + self._assert_matches( + condition, {"value": "foobaz"}, "* should match 0 characters" + ) + self._assert_not_matches( + condition, {"value": "fobbaz"}, "? should not match 0 characters" + ) + self._assert_not_matches( + condition, {"value": "fiiobaz"}, "? should not match 2 characters" + ) + self._assert_not_matches( + condition, + {"value": "xfooxbaz"}, + "pattern should only match at the start/end of the value", + ) + self._assert_not_matches( + condition, + {"value": "fooxbazx"}, + "pattern should only match at the start/end of the value", + ) + self._assert_not_matches( + condition, + {"value": "x\nfooxbaz"}, + "pattern should not match after a newline", + ) + self._assert_not_matches( + condition, + {"value": "fooxbaz\nx"}, + "pattern should not match before a newline", + ) + def test_no_body(self): """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) diff --git a/tests/replication/__init__.py b/tests/replication/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/tests/replication/__init__.py +++ b/tests/replication/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 1d4a59286241..624bd1b92722 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,22 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging
-from typing import Any, Callable, Dict, List, Optional, Tuple, Type
+from typing import Any, Callable, Dict, List, Optional, Tuple

-from twisted.internet.interfaces import IConsumer, IPullProducer, IReactorTime
 from twisted.internet.protocol import Protocol
-from twisted.internet.task import LoopingCall
-from twisted.web.http import HTTPChannel
 from twisted.web.resource import Resource
-from twisted.web.server import Request, Site

-from synapse.app.generic_worker import (
-    GenericWorkerReplicationHandler,
-    GenericWorkerServer,
-)
+from synapse.app.generic_worker import GenericWorkerServer
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseRequest, SynapseSite
 from synapse.replication.http import ReplicationRestResource
+from synapse.replication.tcp.client import ReplicationDataHandler
 from synapse.replication.tcp.handler import ReplicationCommandHandler
 from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
 from synapse.replication.tcp.resource import (
@@ -36,7 +29,6 @@
     ServerReplicationStreamProtocol,
 )
 from synapse.server import HomeServer
-from synapse.util import Clock

 from tests import unittest
 from tests.server import FakeTransport
@@ -157,7 +149,19 @@ def handle_http_replication_attempt(self) -> SynapseRequest:
         client_protocol = client_factory.buildProtocol(None)

         # Set up the server side protocol
-        channel = _PushHTTPChannel(self.reactor, SynapseRequest, self.site)
+        channel = self.site.buildProtocol(None)
+
+        # hook into the channel's request factory so that we can keep a record
+        # of the requests
+        requests: List[SynapseRequest] = []
+        real_request_factory = channel.requestFactory
+
+        def request_factory(*args, **kwargs):
+            request = real_request_factory(*args, **kwargs)
+            requests.append(request)
+            return request
+
+        channel.requestFactory = request_factory

         # Connect client to server and vice versa.
         client_to_server_transport = FakeTransport(
@@ -179,7 +183,10 @@ def handle_http_replication_attempt(self) -> SynapseRequest:
         server_to_client_transport.loseConnection()
         client_to_server_transport.loseConnection()

-        return channel.request
+        # there should have been exactly one request
+        self.assertEqual(len(requests), 1)
+
+        return requests[0]

     def assert_request_is_get_repl_stream_updates(
         self, request: SynapseRequest, stream_name: str
@@ -266,7 +273,7 @@ def create_test_resource(self):
         return resource

     def make_worker_hs(
-        self, worker_app: str, extra_config: dict = {}, **kwargs
+        self, worker_app: str, extra_config: Optional[dict] = None, **kwargs
     ) -> HomeServer:
         """Make a new worker HS instance, correctly connecting replication
         stream to the master HS.
@@ -283,7 +290,7 @@ def make_worker_hs(
         config = self._get_worker_hs_config()
         config["worker_app"] = worker_app
-        config.update(extra_config)
+        config.update(extra_config or {})

         worker_hs = self.setup_test_homeserver(
             homeserver_to_use=GenericWorkerServer,
@@ -352,6 +359,8 @@ def make_worker_hs(
             config=worker_hs.config.server.listeners[0],
             resource=resource,
             server_version_string="1",
+            max_request_body_size=4096,
+            reactor=self.reactor,
         )

         if worker_hs.config.redis.redis_enabled:
@@ -389,7 +398,7 @@ def _handle_http_replication_attempt(self, hs, repl_port):
         client_protocol = client_factory.buildProtocol(None)

         # Set up the server side protocol
-        channel = _PushHTTPChannel(self.reactor, SynapseRequest, self._hs_to_site[hs])
+        channel = self._hs_to_site[hs].buildProtocol(None)

         # Connect client to server and vice versa.
client_to_server_transport = FakeTransport( @@ -432,7 +441,7 @@ def connect_any_redis_attempts(self): server_protocol.makeConnection(server_to_client_transport) -class TestReplicationDataHandler(GenericWorkerReplicationHandler): +class TestReplicationDataHandler(ReplicationDataHandler): """Drop-in for ReplicationDataHandler which just collects RDATA rows""" def __init__(self, hs: HomeServer): @@ -447,112 +456,6 @@ async def on_rdata(self, stream_name, instance_name, token, rows): self.received_rdata_rows.append((stream_name, token, r)) -class _PushHTTPChannel(HTTPChannel): - """A HTTPChannel that wraps pull producers to push producers. - - This is a hack to get around the fact that HTTPChannel transparently wraps a - pull producer (which is what Synapse uses to reply to requests) with - `_PullToPush` to convert it to a push producer. Unfortunately `_PullToPush` - uses the standard reactor rather than letting us use our test reactor, which - makes it very hard to test. - """ - - def __init__( - self, reactor: IReactorTime, request_factory: Type[Request], site: Site - ): - super().__init__() - self.reactor = reactor - self.requestFactory = request_factory - self.site = site - - self._pull_to_push_producer = None # type: Optional[_PullToPushProducer] - - def registerProducer(self, producer, streaming): - # Convert pull producers to push producer. - if not streaming: - self._pull_to_push_producer = _PullToPushProducer( - self.reactor, producer, self - ) - producer = self._pull_to_push_producer - - super().registerProducer(producer, True) - - def unregisterProducer(self): - if self._pull_to_push_producer: - # We need to manually stop the _PullToPushProducer. - self._pull_to_push_producer.stop() - - def checkPersistence(self, request, version): - """Check whether the connection can be re-used""" - # We hijack this to always say no for ease of wiring stuff up in - # `handle_http_replication_attempt`. - request.responseHeaders.setRawHeaders(b"connection", [b"close"]) - return False - - def requestDone(self, request): - # Store the request for inspection. - self.request = request - super().requestDone(request) - - -class _PullToPushProducer: - """A push producer that wraps a pull producer.""" - - def __init__( - self, reactor: IReactorTime, producer: IPullProducer, consumer: IConsumer - ): - self._clock = Clock(reactor) - self._producer = producer - self._consumer = consumer - - # While running we use a looping call with a zero delay to call - # resumeProducing on given producer. - self._looping_call = None # type: Optional[LoopingCall] - - # We start writing next reactor tick. - self._start_loop() - - def _start_loop(self): - """Start the looping call to""" - - if not self._looping_call: - # Start a looping call which runs every tick. 
- self._looping_call = self._clock.looping_call(self._run_once, 0) - - def stop(self): - """Stops calling resumeProducing.""" - if self._looping_call: - self._looping_call.stop() - self._looping_call = None - - def pauseProducing(self): - """Implements IPushProducer""" - self.stop() - - def resumeProducing(self): - """Implements IPushProducer""" - self._start_loop() - - def stopProducing(self): - """Implements IPushProducer""" - self.stop() - self._producer.stopProducing() - - def _run_once(self): - """Calls resumeProducing on producer once.""" - - try: - self._producer.resumeProducing() - except Exception: - logger.exception("Failed to call resumeProducing") - try: - self._consumer.unregisterProducer() - except Exception: - pass - - self.stopProducing() - - class FakeRedisPubSubServer: """A fake Redis server for pub/sub.""" diff --git a/tests/replication/slave/__init__.py b/tests/replication/slave/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/tests/replication/slave/__init__.py +++ b/tests/replication/slave/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/slave/storage/__init__.py b/tests/replication/slave/storage/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/tests/replication/slave/storage/__init__.py +++ b/tests/replication/slave/storage/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 56497b8476ee..83e89383f64f 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from tests.replication._base import BaseStreamTestCase diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 0ceb0f935cd4..db80a0bdbdaf 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from typing import Iterable, Optional from canonicaljson import encode_canonical_json @@ -332,15 +333,18 @@ def build_event( room_id=ROOM_ID, type="m.room.message", key=None, - internal={}, + internal: Optional[dict] = None, depth=None, - prev_events=[], - auth_events=[], - prev_state=[], + prev_events: Optional[list] = None, + auth_events: Optional[list] = None, + prev_state: Optional[list] = None, redacts=None, - push_actions=[], - **content + push_actions: Iterable = frozenset(), + **content, ): + prev_events = prev_events or [] + auth_events = auth_events or [] + prev_state = prev_state or [] if depth is None: depth = self.event_id @@ -369,7 +373,7 @@ def build_event( if redacts is not None: event_dict["redacts"] = redacts - event = make_event_from_dict(event_dict, internal_metadata_dict=internal) + event = make_event_from_dict(event_dict, internal_metadata_dict=internal or {}) self.event_id += 1 state_handler = self.hs.get_state_handler() diff --git a/tests/replication/tcp/__init__.py b/tests/replication/tcp/__init__.py index 1453d045718f..743fb9904a8f 100644 --- a/tests/replication/tcp/__init__.py +++ b/tests/replication/tcp/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/tcp/streams/__init__.py b/tests/replication/tcp/streams/__init__.py index 1453d045718f..743fb9904a8f 100644 --- a/tests/replication/tcp/streams/__init__.py +++ b/tests/replication/tcp/streams/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py index 153634d4eeaf..cdd052001b6d 100644 --- a/tests/replication/tcp/streams/test_account_data.py +++ b/tests/replication/tcp/streams/test_account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 77856fc30445..f51fa0a79e9e 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -240,7 +239,7 @@ def test_update_function_huge_state_change(self): # the state rows are unsorted state_rows = [] # type: List[EventsStreamCurrentStateRow] - for stream_name, token, row in received_rows: + for stream_name, _, row in received_rows: self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") @@ -357,7 +356,7 @@ def test_update_function_state_row_limit(self): # the state rows are unsorted state_rows = [] # type: List[EventsStreamCurrentStateRow] - for j in range(STATES_PER_USER + 1): + for _ in range(STATES_PER_USER + 1): stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) diff --git a/tests/replication/tcp/streams/test_federation.py b/tests/replication/tcp/streams/test_federation.py index aa4bf1c7e3b0..ffec06a0d653 100644 --- a/tests/replication/tcp/streams/test_federation.py +++ b/tests/replication/tcp/streams/test_federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py index 56b062ecc1d6..7f5d932f0bb2 100644 --- a/tests/replication/tcp/streams/test_receipts.py +++ b/tests/replication/tcp/streams/test_receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,7 @@ # type: ignore -from mock import Mock +from unittest.mock import Mock from synapse.replication.tcp.streams._base import ReceiptsStream diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index ca49d4dd3af2..ecd360c2d068 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.handlers.typing import RoomMember from synapse.replication.tcp.streams import TypingStream diff --git a/tests/replication/tcp/test_commands.py b/tests/replication/tcp/test_commands.py index 60c10a441a87..cca7ebb7195d 100644 --- a/tests/replication/tcp/test_commands.py +++ b/tests/replication/tcp/test_commands.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/tcp/test_remote_server_up.py b/tests/replication/tcp/test_remote_server_up.py index 1fe9d5b4d076..262c35cef3c6 100644 --- a/tests/replication/tcp/test_remote_server_up.py +++ b/tests/replication/tcp/test_remote_server_up.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py index f8fd8a843c82..1346e0e160a4 100644 --- a/tests/replication/test_auth.py +++ b/tests/replication/test_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py index 5da1d5dc4d8b..b9751efdc53b 100644 --- a/tests/replication/test_client_reader_shard.py +++ b/tests/replication/test_client_reader_shard.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 0d9e3bb11dba..04a869e29549 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from synapse.app.generic_worker import GenericWorkerServer from synapse.replication.tcp.commands import FederationAckCommand diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 2f2d117858f0..48ab3aa4e36b 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging - -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EventTypes, Membership from synapse.events.builder import EventBuilderFactory diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index b0800f98408f..76e6644353d0 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index ab2988a6ba47..1e4e3821b9df 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index c9b773fbd215..d739eb6b17f0 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging - -from mock import patch +from unittest.mock import patch from synapse.api.room_versions import RoomVersion from synapse.rest import admin diff --git a/tests/rest/__init__.py b/tests/rest/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/tests/rest/__init__.py +++ b/tests/rest/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/admin/__init__.py b/tests/rest/admin/__init__.py index 1453d045718f..743fb9904a8f 100644 --- a/tests/rest/admin/__init__.py +++ b/tests/rest/admin/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 057e27372e1e..2f7090e5543b 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +16,7 @@ import os import urllib.parse from binascii import unhexlify - -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import Deferred diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index 2a1bcf1760ed..120730b76417 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -431,7 +430,7 @@ def test_get_devices(self): """ # Create devices number_devices = 5 - for n in range(number_devices): + for _ in range(number_devices): self.login("user", "pass") # Get devices @@ -548,7 +547,7 @@ def test_delete_devices(self): # Create devices number_devices = 5 - for n in range(number_devices): + for _ in range(number_devices): self.login("user", "pass") # Get devices diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index e30ffe4fa0c1..29341bc6e98f 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,22 +48,22 @@ def prepare(self, reactor, clock, hs): self.helper.join(self.room_id2, user=self.admin_user, tok=self.admin_user_tok) # Two rooms and two users. 
Every user sends and reports every room event - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id1, user_tok=self.other_user_tok, ) - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id2, user_tok=self.other_user_tok, ) - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id1, user_tok=self.admin_user_tok, ) - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id2, user_tok=self.admin_user_tok, diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 31db472cd32b..ac7b21970033 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index b55160b70afa..ee071c2477b4 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +15,9 @@ import json import urllib.parse from typing import List, Optional +from unittest.mock import Mock -from mock import Mock +from parameterized import parameterized_class import synapse.rest.admin from synapse.api.constants import EventTypes, Membership @@ -146,6 +146,13 @@ def _assert_peek(self, room_id, expect_code): ) +@parameterized_class( + ("method", "url_template"), + [ + ("POST", "/_synapse/admin/v1/rooms/%s/delete"), + ("DELETE", "/_synapse/admin/v1/rooms/%s"), + ], +) class DeleteRoomTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -177,7 +184,7 @@ def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as( self.other_user, tok=self.other_user_tok ) - self.url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id + self.url = self.url_template % self.room_id def test_requester_is_no_admin(self): """ @@ -185,7 +192,7 @@ def test_requester_is_no_admin(self): """ channel = self.make_request( - "POST", + self.method, self.url, json.dumps({}), access_token=self.other_user_tok, @@ -198,10 +205,10 @@ def test_room_does_not_exist(self): """ Check that unknown rooms/server return error 404. """ - url = "/_synapse/admin/v1/rooms/!unknown:test/delete" + url = self.url_template % "!unknown:test" channel = self.make_request( - "POST", + self.method, url, json.dumps({}), access_token=self.admin_user_tok, @@ -214,10 +221,10 @@ def test_room_is_not_valid(self): """ Check that invalid room names, return an error 400. 
""" - url = "/_synapse/admin/v1/rooms/invalidroom/delete" + url = self.url_template % "invalidroom" channel = self.make_request( - "POST", + self.method, url, json.dumps({}), access_token=self.admin_user_tok, @@ -236,7 +243,7 @@ def test_new_room_user_does_not_exist(self): body = json.dumps({"new_room_user_id": "@unknown:test"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -255,7 +262,7 @@ def test_new_room_user_is_not_local(self): body = json.dumps({"new_room_user_id": "@not:exist.bla"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -274,7 +281,7 @@ def test_block_is_not_bool(self): body = json.dumps({"block": "NotBool"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -290,7 +297,7 @@ def test_purge_is_not_bool(self): body = json.dumps({"purge": "NotBool"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -316,7 +323,7 @@ def test_purge_room_and_block(self): body = json.dumps({"block": True, "purge": True}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -349,7 +356,7 @@ def test_purge_room_and_not_block(self): body = json.dumps({"block": False, "purge": True}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -383,7 +390,7 @@ def test_block_room_and_not_purge(self): body = json.dumps({"block": False, "purge": False}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -428,10 +435,9 @@ def test_shutdown_room_consent(self): self._is_member(room_id=self.room_id, user_id=self.other_user) # Test that the admin can still send shutdown - url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id channel = self.make_request( - "POST", - url.encode("ascii"), + self.method, + self.url, json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) @@ -475,10 +481,9 @@ def test_shutdown_room_block_peek(self): self._is_member(room_id=self.room_id, user_id=self.other_user) # Test that the admin can still send shutdown - url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id channel = self.make_request( - "POST", - url.encode("ascii"), + self.method, + self.url, json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) @@ -617,7 +622,7 @@ def test_list_rooms(self): # Create 3 test rooms total_rooms = 3 room_ids = [] - for x in range(total_rooms): + for _ in range(total_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) @@ -681,7 +686,7 @@ def test_list_rooms_pagination(self): # Create 5 test rooms total_rooms = 5 room_ids = [] - for x in range(total_rooms): + for _ in range(total_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) @@ -1579,7 +1584,7 @@ def test_context_as_admin(self): channel.json_body["event"]["event_id"], events[midway]["event_id"] ) - for i, found_event in enumerate(channel.json_body["events_before"]): + for found_event in channel.json_body["events_before"]: for j, 
posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j < midway) @@ -1587,7 +1592,7 @@ def test_context_as_admin(self): else: self.fail("Event %s from events_before not found" % j) - for i, found_event in enumerate(channel.json_body["events_after"]): + for found_event in channel.json_body["events_after"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j > midway) diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index 1f1d11f527d5..79cac4266bf1 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -468,7 +467,7 @@ def _create_media(self, user_token: str, number_media: int): number_media: Number of media to be created for the user """ upload_resource = self.media_repo.children[b"upload"] - for i in range(number_media): + for _ in range(number_media): # file size is 67 Byte image_data = unhexlify( b"89504e470d0a1a0a0000000d4948445200000001000000010806" diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index cf61f284cb98..d599a4c984d9 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,8 +18,7 @@ import urllib.parse from binascii import unhexlify from typing import List, Optional - -from mock import Mock +from unittest.mock import Mock, patch import synapse.rest.admin from synapse.api.constants import UserTypes @@ -28,7 +26,7 @@ from synapse.api.room_versions import RoomVersions from synapse.rest.client.v1 import login, logout, profile, room from synapse.rest.client.v2_alpha import devices, sync -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from tests import unittest from tests.server import FakeSite, make_request @@ -56,8 +54,6 @@ def make_homeserver(self, reactor, clock): self.datastore = Mock(return_value=Mock()) self.datastore.get_current_state_deltas = Mock(return_value=(0, [])) - self.secrets = Mock() - self.hs = self.setup_test_homeserver() self.hs.config.registration_shared_secret = "shared" @@ -86,14 +82,13 @@ def test_get_nonce(self): Calling GET on the endpoint will return a randomised nonce, using the homeserver's secrets provider. 
""" - secrets = Mock() - secrets.token_hex = Mock(return_value="abcd") - - self.hs.get_secrets = Mock(return_value=secrets) + with patch("secrets.token_hex") as token_hex: + # Patch secrets.token_hex for the duration of this context + token_hex.return_value = "abcd" - channel = self.make_request("GET", self.url) + channel = self.make_request("GET", self.url) - self.assertEqual(channel.json_body, {"nonce": "abcd"}) + self.assertEqual(channel.json_body, {"nonce": "abcd"}) def test_expired_nonce(self): """ @@ -467,6 +462,8 @@ class UsersListTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v2/users" def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -634,6 +631,26 @@ def test_invalid_parameter(self): self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + # unkown order_by + channel = self.make_request( + "GET", + self.url + "?order_by=bar", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + + # invalid search order + channel = self.make_request( + "GET", + self.url + "?dir=bar", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + def test_limit(self): """ Testing list of users with limit @@ -759,6 +776,103 @@ def test_next_token(self): self.assertEqual(len(channel.json_body["users"]), 1) self.assertNotIn("next_token", channel.json_body) + def test_order_by(self): + """ + Testing order list with parameter `order_by` + """ + + user1 = self.register_user("user1", "pass1", admin=False, displayname="Name Z") + user2 = self.register_user("user2", "pass2", admin=False, displayname="Name Y") + + # Modify user + self.get_success(self.store.set_user_deactivated_status(user1, True)) + self.get_success(self.store.set_shadow_banned(UserID.from_string(user1), True)) + + # Set avatar URL to all users, that no user has a NULL value to avoid + # different sort order between SQlite and PostreSQL + self.get_success(self.store.set_profile_avatar_url("user1", "mxc://url3")) + self.get_success(self.store.set_profile_avatar_url("user2", "mxc://url2")) + self.get_success(self.store.set_profile_avatar_url("admin", "mxc://url1")) + + # order by default (name) + self._order_test([self.admin_user, user1, user2], None) + self._order_test([self.admin_user, user1, user2], None, "f") + self._order_test([user2, user1, self.admin_user], None, "b") + + # order by name + self._order_test([self.admin_user, user1, user2], "name") + self._order_test([self.admin_user, user1, user2], "name", "f") + self._order_test([user2, user1, self.admin_user], "name", "b") + + # order by displayname + self._order_test([user2, user1, self.admin_user], "displayname") + self._order_test([user2, user1, self.admin_user], "displayname", "f") + self._order_test([self.admin_user, user1, user2], "displayname", "b") + + # order by is_guest + # like sort by ascending name, as no guest user here + self._order_test([self.admin_user, user1, user2], "is_guest") + self._order_test([self.admin_user, user1, user2], "is_guest", "f") + self._order_test([self.admin_user, user1, user2], "is_guest", "b") + + # order by admin + self._order_test([user1, user2, 
self.admin_user], "admin") + self._order_test([user1, user2, self.admin_user], "admin", "f") + self._order_test([self.admin_user, user1, user2], "admin", "b") + + # order by deactivated + self._order_test([self.admin_user, user2, user1], "deactivated") + self._order_test([self.admin_user, user2, user1], "deactivated", "f") + self._order_test([user1, self.admin_user, user2], "deactivated", "b") + + # order by user_type + # like sort by ascending name, as no special user type here + self._order_test([self.admin_user, user1, user2], "user_type") + self._order_test([self.admin_user, user1, user2], "user_type", "f") + self._order_test([self.admin_user, user1, user2], "is_guest", "b") + + # order by shadow_banned + self._order_test([self.admin_user, user2, user1], "shadow_banned") + self._order_test([self.admin_user, user2, user1], "shadow_banned", "f") + self._order_test([user1, self.admin_user, user2], "shadow_banned", "b") + + # order by avatar_url + self._order_test([self.admin_user, user2, user1], "avatar_url") + self._order_test([self.admin_user, user2, user1], "avatar_url", "f") + self._order_test([user1, user2, self.admin_user], "avatar_url", "b") + + def _order_test( + self, + expected_user_list: List[str], + order_by: Optional[str], + dir: Optional[str] = None, + ): + """Request the list of users in a certain order. Assert that order is what + we expect + Args: + expected_user_list: The list of user_id in the order we expect to get + back from the server + order_by: The type of ordering to give the server + dir: The direction of ordering to give the server + """ + + url = self.url + "?deactivated=true&" + if order_by is not None: + url += "order_by=%s&" % (order_by,) + if dir is not None and dir in ("b", "f"): + url += "dir=%s" % (dir,) + channel = self.make_request( + "GET", + url.encode("ascii"), + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], len(expected_user_list)) + + returned_order = [row["name"] for row in channel.json_body["users"]] + self.assertEqual(expected_user_list, returned_order) + self._check_fields(channel.json_body["users"]) + def _check_fields(self, content: JsonDict): """Checks that the expected user attributes are present in content Args: @@ -1820,7 +1934,7 @@ def test_get_rooms(self): # Create rooms and join other_user_tok = self.login("user", "pass") number_rooms = 5 - for n in range(number_rooms): + for _ in range(number_rooms): self.helper.create_room_as(self.other_user, tok=other_user_tok) # Get rooms @@ -2400,7 +2514,7 @@ def _create_media_for_user(self, user_token: str, number_media: int): user_token: Access token of the user number_media: Number of media to be created for the user """ - for i in range(number_media): + for _ in range(number_media): # file size is 67 Byte image_data = unhexlify( b"89504e470d0a1a0a0000000d4948445200000001000000010806" @@ -2893,3 +3007,287 @@ def test_success(self): # Ensure the user is shadow-banned (and the cache was cleared). 
result = self.get_success(self.store.get_user_by_access_token(other_user_token)) self.assertTrue(result.shadow_banned) + + +class RateLimitTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.url = ( + "/_synapse/admin/v1/users/%s/override_ratelimit" + % urllib.parse.quote(self.other_user) + ) + + def test_no_auth(self): + """ + Try to use the ratelimit override endpoint without authentication. + """ + channel = self.make_request("GET", self.url, b"{}") + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + channel = self.make_request("POST", self.url, b"{}") + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + channel = self.make_request("DELETE", self.url, b"{}") + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error is returned. + """ + other_user_token = self.login("user", "pass") + + channel = self.make_request( + "GET", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + channel = self.make_request( + "POST", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + channel = self.make_request( + "DELETE", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + url = "/_synapse/admin/v1/users/@unknown_person:test/override_ratelimit" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + channel = self.make_request( + "POST", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + channel = self.make_request( + "DELETE", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_user_is_not_local(self): + """ + Tests that a lookup for a user that is not local returns a 400 + """ + url = ( + "/_synapse/admin/v1/users/@unknown_person:unknown_domain/override_ratelimit" + ) + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only lookup local users", channel.json_body["error"]) + + channel =
self.make_request( + "POST", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual( + "Only local users can be ratelimited", channel.json_body["error"] + ) + + channel = self.make_request( + "DELETE", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual( + "Only local users can be ratelimited", channel.json_body["error"] + ) + + def test_invalid_parameter(self): + """ + If parameters are invalid, an error is returned. + """ + # messages_per_second is a string + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": "string"}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # messages_per_second is negative + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": -1}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # burst_count is a string + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"burst_count": "string"}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # burst_count is negative + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"burst_count": -1}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_return_zero_when_null(self): + """ + If the values in the database are `null`, the API should return an int `0` + """ + + self.get_success( + self.store.db_pool.simple_upsert( + table="ratelimit_override", + keyvalues={"user_id": self.other_user}, + values={ + "messages_per_second": None, + "burst_count": None, + }, + ) + ) + + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(0, channel.json_body["messages_per_second"]) + self.assertEqual(0, channel.json_body["burst_count"]) + + def test_success(self): + """ + Rate-limiting (set/update/delete) should succeed for an admin.
+ """ + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertNotIn("messages_per_second", channel.json_body) + self.assertNotIn("burst_count", channel.json_body) + + # set ratelimit + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": 10, "burst_count": 11}, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(10, channel.json_body["messages_per_second"]) + self.assertEqual(11, channel.json_body["burst_count"]) + + # update ratelimit + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": 20, "burst_count": 21}, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(20, channel.json_body["messages_per_second"]) + self.assertEqual(21, channel.json_body["burst_count"]) + + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(20, channel.json_body["messages_per_second"]) + self.assertEqual(21, channel.json_body["burst_count"]) + + # delete ratelimit + channel = self.make_request( + "DELETE", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertNotIn("messages_per_second", channel.json_body) + self.assertNotIn("burst_count", channel.json_body) + + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertNotIn("messages_per_second", channel.json_body) + self.assertNotIn("burst_count", channel.json_body) diff --git a/tests/rest/client/__init__.py b/tests/rest/client/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/tests/rest/client/__init__.py +++ b/tests/rest/client/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index c74693e9b288..5cc62a910a43 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py index 56937dcd2ee4..eec0fc01f938 100644 --- a/tests/rest/client/test_ephemeral_message.py +++ b/tests/rest/client/test_ephemeral_message.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index c0a9fc6925d9..478296ba0efa 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/test_power_levels.py b/tests/rest/client/test_power_levels.py index 5256c11fe672..ba5ad47df5a0 100644 --- 
a/tests/rest/client/test_power_levels.py +++ b/tests/rest/client/test_power_levels.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index e0c74591b643..dfd85221d01c 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index aee99bb6a0aa..e1a6e73e17be 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EventTypes from synapse.rest import admin diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index d2cce44032fa..288ee128886b 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock, patch +from unittest.mock import Mock, patch import synapse.rest.admin from synapse.api.constants import EventTypes diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index bf3901427707..e1fe72fc5d4c 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the 'License'); @@ -14,8 +13,7 @@ # limitations under the License. 
import threading from typing import Dict - -from mock import Mock +from unittest.mock import Mock from synapse.events import EventBase from synapse.module_api import ModuleApi diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 171632e195e2..3b5747cb12b8 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -1,4 +1,4 @@ -from mock import Mock, call +from unittest.mock import Mock, call from twisted.internet import defer, reactor diff --git a/tests/rest/client/v1/__init__.py b/tests/rest/client/v1/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/tests/rest/client/v1/__init__.py +++ b/tests/rest/client/v1/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index edd1d184f871..8ed470490b4a 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index 2ae896db1ec9..852bda408c7f 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,7 @@ """ Tests REST events for /events paths.""" -from mock import Mock +from unittest.mock import Mock import synapse.rest.admin from synapse.rest.client.v1 import events, login, room diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 988821b16f4d..605b9523162e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,10 +15,9 @@ import time import urllib.parse from typing import Any, Dict, List, Optional, Union +from unittest.mock import Mock from urllib.parse import urlencode -from mock import Mock - import pymacaroons from twisted.web.resource import Resource diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index 94a5154834ec..409f3949dc09 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock from twisted.internet import defer +from synapse.handlers.presence import PresenceHandler from synapse.rest.client.v1 import presence from synapse.types import UserID @@ -33,7 +33,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): - presence_handler = Mock() + presence_handler = Mock(spec=PresenceHandler) presence_handler.set_state.return_value = defer.succeed(None) hs = self.setup_test_homeserver( @@ -60,12 +60,12 @@ def test_put_presence(self): self.assertEqual(channel.code, 200) self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1) + @unittest.override_config({"use_presence": False}) def test_put_presence_disabled(self): """ PUT to the status endpoint with use_presence disabled will NOT call set_state on the presence handler. """ - self.hs.config.use_presence = False body = {"presence": "here", "status_msg": "beep boop"} channel = self.make_request( diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index f3448c94dd9d..165ad33fb740 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v1/test_push_rule_attrs.py b/tests/rest/client/v1/test_push_rule_attrs.py index 2bc512d75e7b..d0776160824c 100644 --- a/tests/rest/client/v1/test_push_rule_attrs.py +++ b/tests/rest/client/v1/test_push_rule_attrs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index ed65f645fc2c..7c4bdcdfdd45 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd @@ -19,10 +18,10 @@ """Tests REST events for /rooms paths.""" import json +from typing import Iterable +from unittest.mock import Mock from urllib import parse as urlparse -from mock import Mock - import synapse.rest.admin from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.handlers.pagination import PurgeStatus @@ -207,7 +206,9 @@ def test_topic_perms(self): ) self.assertEquals(403, channel.code, msg=channel.result["body"]) - def _test_get_membership(self, room=None, members=[], expect_code=None): + def _test_get_membership( + self, room=None, members: Iterable = frozenset(), expect_code=None + ): for member in members: path = "/rooms/%s/state/m.room.member/%s" % (room, member) channel = self.make_request("GET", path) @@ -462,6 +463,43 @@ def test_post_room_invitees_invalid_mxid(self): ) self.assertEquals(400, channel.code) + @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) + def test_post_room_invitees_ratelimit(self): + """Test that invites sent when creating a room are ratelimited by a RateLimiter, + and that the ratelimit is correctly bypassed when the requester is + exempt from ratelimiting. + """ + + # Build the request's content. We use local MXIDs because invites over federation + # are more difficult to mock.
+ content = json.dumps( + { + "invite": [ + "@alice1:red", + "@alice2:red", + "@alice3:red", + "@alice4:red", + ] + } + ).encode("utf8") + + # Test that the invites are correctly ratelimited. + channel = self.make_request("POST", "/createRoom", content) + self.assertEqual(400, channel.code) + self.assertEqual( + "Cannot invite so many users at once", + channel.json_body["error"], + ) + + # Add the current user to the ratelimit overrides, exempting them from ratelimiting. + self.get_success( + self.hs.get_datastore().set_ratelimit_for_user(self.user_id, 0, 0) + ) + + # Test that the invites aren't ratelimited anymore. + channel = self.make_request("POST", "/createRoom", content) + self.assertEqual(200, channel.code) + class RoomTopicTestCase(RoomBase): """ Tests /rooms/$room_id/topic REST events. """ @@ -645,7 +683,7 @@ def test_invites_by_rooms_ratelimit(self): def test_invites_by_users_ratelimit(self): """Tests that invites to a specific user are actually rate-limited.""" - for i in range(3): + for _ in range(3): room_id = self.helper.create_room_as(self.user_id) self.helper.invite(room_id, self.user_id, "@other-users:red") @@ -667,7 +705,7 @@ class RoomJoinRatelimitTestCase(RoomBase): ) def test_join_local_ratelimit(self): """Tests that local joins are actually rate-limited.""" - for i in range(3): + for _ in range(3): self.helper.create_room_as(self.user_id) self.helper.create_room_as(self.user_id, expect_code=429) @@ -732,7 +770,7 @@ def test_join_local_ratelimit_idempotent(self): for path in paths_to_test: # Make sure we send more requests than the rate-limiting config would allow # if all of these requests ended up joining the user to a room. - for i in range(4): + for _ in range(4): channel = self.make_request("POST", path % room_id, {}) self.assertEquals(channel.code, 200) diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 329dbd06def2..0aad48a162b4 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector # @@ -16,7 +15,7 @@ """Tests REST events for /rooms paths.""" -from mock import Mock +from unittest.mock import Mock from synapse.rest.client.v1 import room from synapse.types import UserID diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 946740aa5d51..ed55a640afd2 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd @@ -21,8 +20,7 @@ import time import urllib.parse from typing import Any, Dict, Mapping, MutableMapping, Optional - -from mock import patch +from unittest.mock import patch import attr @@ -132,7 +130,7 @@ def change_membership( src: str, targ: str, membership: str, - extra_data: dict = {}, + extra_data: Optional[dict] = None, tok: Optional[str] = None, expect_code: int = 200, ) -> None: @@ -156,7 +154,7 @@ def change_membership( path = path + "?access_token=%s" % tok data = {"membership": membership} - data.update(extra_data) + data.update(extra_data or {}) channel = make_request( self.hs.get_reactor(), @@ -187,7 +185,13 @@ def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200): ) def send_event( - self, room_id, type, content={}, txn_id=None, tok=None, expect_code=200 + self, + room_id, + type, + content: Optional[dict] = None, + txn_id=None, + tok=None, +
expect_code=200, ): if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -201,7 +205,7 @@ def send_event( self.site, "PUT", path, - json.dumps(content).encode("utf8"), + json.dumps(content or {}).encode("utf8"), ) assert ( diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index e72b61963d11..4ef19145d1b9 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 9734a2159a1a..485e3650c3f2 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # Copyright 2020-2021 The Matrix.org Foundation C.I.C # @@ -13,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union +from typing import Optional, Union from twisted.internet.defer import succeed @@ -74,7 +73,10 @@ def register(self, expected_response: int, body: JsonDict) -> FakeChannel: return channel def recaptcha( - self, session: str, expected_post_response: int, post_session: str = None + self, + session: str, + expected_post_response: int, + post_session: Optional[str] = None, ) -> None: """Get and respond to a fallback recaptcha. Returns the second request.""" if post_session is None: diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index 287a1a485c93..874052c61ca2 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py index f761c4493616..c7e47725b789 100644 --- a/tests/rest/client/v2_alpha/test_filter.py +++ b/tests/rest/client/v2_alpha/test_filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py index 5ebc5707a5f1..6f07ff6cbbca 100644 --- a/tests/rest/client/v2_alpha/test_password_policy.py +++ b/tests/rest/client/v2_alpha/test_password_policy.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 27db4f551e2e..1cad5f00eb20 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. @@ -14,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import datetime import json import os @@ -22,7 +20,7 @@ import pkg_resources import synapse.rest.admin -from synapse.api.constants import LoginType +from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login, logout @@ -59,7 +57,9 @@ def test_POST_appservice_registration_valid(self): ) self.hs.get_datastore().services_cache.append(appservice) - request_data = json.dumps({"username": "as_user_kermit"}) + request_data = json.dumps( + {"username": "as_user_kermit", "type": APP_SERVICE_REGISTRATION_TYPE} + ) channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data @@ -69,9 +69,31 @@ def test_POST_appservice_registration_valid(self): det_data = {"user_id": user_id, "home_server": self.hs.hostname} self.assertDictContainsSubset(det_data, channel.json_body) + def test_POST_appservice_registration_no_type(self): + as_token = "i_am_an_app_service" + + appservice = ApplicationService( + as_token, + self.hs.config.server_name, + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + ) + + self.hs.get_datastore().services_cache.append(appservice) + request_data = json.dumps({"username": "as_user_kermit"}) + + channel = self.make_request( + b"POST", self.url + b"?access_token=i_am_an_app_service", request_data + ) + + self.assertEquals(channel.result["code"], b"400", channel.result) + def test_POST_appservice_registration_invalid(self): self.appservice = None # no application service exists - request_data = json.dumps({"username": "kermit"}) + request_data = json.dumps( + {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} + ) channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) @@ -288,6 +310,57 @@ def test_request_token_existing_email_inhibit_error(self): self.assertIsNotNone(channel.json_body.get("sid")) + @unittest.override_config( + { + "public_baseurl": "https://test_server", + "email": { + "smtp_host": "mail_server", + "smtp_port": 2525, + "notif_from": "sender@host", + }, + } + ) + def test_reject_invalid_email(self): + """Check that bad emails are rejected""" + + # Test for email with multiple @ + channel = self.make_request( + "POST", + b"register/email/requestToken", + {"client_secret": "foobar", "email": "email@@email", "send_attempt": 1}, + ) + self.assertEquals(400, channel.code, channel.result) + # Check error to ensure that we're not erroring due to a bug in the test. 
+ self.assertEquals( + channel.json_body, + {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, + ) + + # Test for email with no @ + channel = self.make_request( + "POST", + b"register/email/requestToken", + {"client_secret": "foobar", "email": "email", "send_attempt": 1}, + ) + self.assertEquals(400, channel.code, channel.result) + self.assertEquals( + channel.json_body, + {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, + ) + + # Test for super long email + email = "a@" + "a" * 1000 + channel = self.make_request( + "POST", + b"register/email/requestToken", + {"client_secret": "foobar", "email": email, "send_attempt": 1}, + ) + self.assertEquals(400, channel.code, channel.result) + self.assertEquals( + channel.json_body, + {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, + ) + class AccountValidityTestCase(unittest.HomeserverTestCase): @@ -470,8 +543,8 @@ def test_renewal_email(self): (user_id, tok) = self.create_user() - # Move 6 days forward. This should trigger a renewal email to be sent. - self.reactor.advance(datetime.timedelta(days=6).total_seconds()) + # Move 5 days forward. This should trigger a renewal email to be sent. + self.reactor.advance(datetime.timedelta(days=5).total_seconds()) self.assertEqual(len(self.email_attempts), 1) # Retrieving the URL from the email is too much pain for now, so we @@ -482,14 +555,32 @@ def test_renewal_email(self): self.assertEquals(channel.result["code"], b"200", channel.result) # Check that we're getting HTML back. - content_type = None - for header in channel.result.get("headers", []): - if header[0] == b"Content-Type": - content_type = header[1] - self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + content_type = channel.headers.getRawHeaders(b"Content-Type") + self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) # Check that the HTML we're getting is the one we expect on a successful renewal. - expected_html = self.hs.config.account_validity.account_renewed_html_content + expiration_ts = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + expected_html = self.hs.config.account_validity.account_validity_account_renewed_template.render( + expiration_ts=expiration_ts + ) + self.assertEqual( + channel.result["body"], expected_html.encode("utf8"), channel.result + ) + + # Move 1 day forward. Try to renew with the same token again. + url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token + channel = self.make_request(b"GET", url) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Check that we're getting HTML back. + content_type = channel.headers.getRawHeaders(b"Content-Type") + self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) + + # Check that the HTML we're getting is the one we expect when reusing a + # token. The account expiration date should not have changed. + expected_html = self.hs.config.account_validity.account_validity_account_previously_renewed_template.render( + expiration_ts=expiration_ts + ) self.assertEqual( channel.result["body"], expected_html.encode("utf8"), channel.result ) @@ -509,15 +600,14 @@ def test_renewal_invalid_token(self): self.assertEquals(channel.result["code"], b"404", channel.result) # Check that we're getting HTML back. 
- content_type = None - for header in channel.result.get("headers", []): - if header[0] == b"Content-Type": - content_type = header[1] - self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + content_type = channel.headers.getRawHeaders(b"Content-Type") + self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) # Check that the HTML we're getting is the one we expect when using an # invalid/unknown token. - expected_html = self.hs.config.account_validity.invalid_token_html_content + expected_html = ( + self.hs.config.account_validity.account_validity_invalid_token_template.render() + ) self.assertEqual( channel.result["body"], expected_html.encode("utf8"), channel.result ) @@ -625,7 +715,12 @@ def make_homeserver(self, reactor, clock): config["account_validity"] = {"enabled": False} self.hs = self.setup_test_homeserver(config=config) - self.hs.config.account_validity.period = self.validity_period + + # We need to set these directly, instead of in the homeserver config dict above. + # This is due to account validity-related config options not being read by + # Synapse when account_validity.enabled is False. + self.hs.get_datastore()._account_validity_period = self.validity_period + self.hs.get_datastore()._account_validity_startup_job_max_delta = self.max_delta self.store = self.hs.get_datastore() diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index e7bb5583fc48..856aa8682f7e 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +15,7 @@ import itertools import json import urllib +from typing import Optional from synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin @@ -681,7 +681,7 @@ def _send_relation( relation_type, event_type, key=None, - content={}, + content: Optional[dict] = None, access_token=None, parent_id=None, ): @@ -713,7 +713,7 @@ def _send_relation( "POST", "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s" % (self.room, original_id, relation_type, event_type, query), - json.dumps(content).encode("utf-8"), + json.dumps(content or {}).encode("utf-8"), access_token=access_token, ) return channel diff --git a/tests/rest/client/v2_alpha/test_sendtodevice.py b/tests/rest/client/v2_alpha/test_sendtodevice.py new file mode 100644 index 000000000000..c9c99cc5d7a8 --- /dev/null +++ b/tests/rest/client/v2_alpha/test_sendtodevice.py @@ -0,0 +1,201 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import sendtodevice, sync + +from tests.unittest import HomeserverTestCase, override_config + + +class SendToDeviceTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + sendtodevice.register_servlets, + sync.register_servlets, + ] + + def test_user_to_user(self): + """A to-device message from one user to another should get delivered""" + + user1 = self.register_user("u1", "pass") + user1_tok = self.login("u1", "pass", "d1") + + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + # send the message + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user2: {"d2": test_msg}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # check it appears + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + expected_result = { + "events": [ + { + "sender": user1, + "type": "m.test", + "content": test_msg, + } + ] + } + self.assertEqual(channel.json_body["to_device"], expected_result) + + # it should re-appear if we do another sync + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["to_device"], expected_result) + + # it should *not* appear if we do an incremental sync + sync_token = channel.json_body["next_batch"] + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body.get("to_device", {}).get("events", []), []) + + @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) + def test_local_room_key_request(self): + """m.room_key_request has special-casing; test from local user""" + user1 = self.register_user("u1", "pass") + user1_tok = self.login("u1", "pass", "d1") + + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + # send three messages + for i in range(3): + chan = self.make_request( + "PUT", + f"/_matrix/client/r0/sendToDevice/m.room_key_request/{i}", + content={"messages": {user2: {"d2": {"idx": i}}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # now sync: we should get two of the three + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 2) + for i in range(2): + self.assertEqual( + msgs[i], + {"sender": user1, "type": "m.room_key_request", "content": {"idx": i}}, + ) + sync_token = channel.json_body["next_batch"] + + # ... time passes + self.reactor.advance(1) + + # and we can send more messages + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/3", + content={"messages": {user2: {"d2": {"idx": 3}}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # ... 
which should arrive + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 1) + self.assertEqual( + msgs[0], + {"sender": user1, "type": "m.room_key_request", "content": {"idx": 3}}, + ) + + @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) + def test_remote_room_key_request(self): + """m.room_key_request has special-casing; test from remote user""" + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + federation_registry = self.hs.get_federation_registry() + + # send three messages + for i in range(3): + self.get_success( + federation_registry.on_edu( + "m.direct_to_device", + "remote_server", + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "messages": {user2: {"d2": {"idx": i}}}, + "message_id": f"{i}", + }, + ) + ) + + # now sync: we should get two of the three + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 2) + for i in range(2): + self.assertEqual( + msgs[i], + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "content": {"idx": i}, + }, + ) + sync_token = channel.json_body["next_batch"] + + # ... time passes + self.reactor.advance(1) + + # and we can send more messages + self.get_success( + federation_registry.on_edu( + "m.direct_to_device", + "remote_server", + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "messages": {user2: {"d2": {"idx": 3}}}, + "message_id": "3", + }, + ) + ) + + # ... which should arrive + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 1) + self.assertEqual( + msgs[0], + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "content": {"idx": 3}, + }, + ) diff --git a/tests/rest/client/v2_alpha/test_shared_rooms.py b/tests/rest/client/v2_alpha/test_shared_rooms.py index dd83a1f8ff77..cedb9614a8ae 100644 --- a/tests/rest/client/v2_alpha/test_shared_rooms.py +++ b/tests/rest/client/v2_alpha/test_shared_rooms.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Half-Shot # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 2dbf42397a6b..dbcbdf159a1a 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/tests/rest/client/v2_alpha/test_upgrade_room.py b/tests/rest/client/v2_alpha/test_upgrade_room.py index d890d11863a5..5f3f15fc57cd 100644 --- a/tests/rest/client/v2_alpha/test_upgrade_room.py +++ b/tests/rest/client/v2_alpha/test_upgrade_room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 9d0d0ef41466..3b275bc23b47 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,8 +13,7 @@
 # limitations under the License.
 import urllib.parse
 from io import BytesIO, StringIO
-
-from mock import Mock
+from unittest.mock import Mock
 
 import signedjson.key
 from canonicaljson import encode_canonical_json
diff --git a/tests/rest/media/__init__.py b/tests/rest/media/__init__.py
index a354d38ca859..b1ee10cfcc44 100644
--- a/tests/rest/media/__init__.py
+++ b/tests/rest/media/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/__init__.py b/tests/rest/media/v1/__init__.py
index a354d38ca859..b1ee10cfcc44 100644
--- a/tests/rest/media/v1/__init__.py
+++ b/tests/rest/media/v1/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py
index ebd78692082f..f761e23f1bf0 100644
--- a/tests/rest/media/v1/test_base.py
+++ b/tests/rest/media/v1/test_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 9f77125fd445..4a213d13ddf9 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,10 +17,9 @@
 from binascii import unhexlify
 from io import BytesIO
 from typing import Optional
+from unittest.mock import Mock
 from urllib import parse
 
-from mock import Mock
-
 import attr
 from parameterized import parameterized_class
 from PIL import Image as Image
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 696850243391..d3ef7bb4c637 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,7 @@
 import json
 import os
 import re
-
-from mock import patch
+from unittest.mock import patch
 
 from twisted.internet._resolver import HostResolution
 from twisted.internet.address import IPv4Address, IPv6Address
diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py
index 32acd93dc11a..01d48c3860d4 100644
--- a/tests/rest/test_health.py
+++ b/tests/rest/test_health.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index 14de0921be7b..ac0e42775239 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py
index 6f56893f5e91..6f3c365c9aad 100644
--- a/tests/scripts/test_new_matrix_user.py
+++ b/tests/scripts/test_new_matrix_user.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from mock import Mock
+from unittest.mock import Mock
 
 from synapse._scripts.register_new_matrix_user import request_registration
 
diff --git a/tests/server.py b/tests/server.py
index b535a5d886ca..9df8cda24fb8 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -603,12 +603,6 @@ def flush(self, maxbytes=None):
         if self.disconnected:
             return
 
-        if not hasattr(self.other, "transport"):
-            # the other has no transport yet; reschedule
-            if self.autoflush:
-                self._reactor.callLater(0.0, self.flush)
-            return
-
         if maxbytes is not None:
             to_write = self.buffer[:maxbytes]
         else:
diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py
index 4dd5a361784e..ac98259b7ee6 100644
--- a/tests/server_notices/test_consent.py
+++ b/tests/server_notices/test_consent.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index d40d65b06a8b..d46521ccdc0f 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018, 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from mock import Mock
+from unittest.mock import Mock
 
 from twisted.internet import defer
 
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 66e3cafe8e9f..43fc79ca746d 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 1ac4ebc61d4c..200b9198f910 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 #
@@ -14,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import secrets
 
 from tests import unittest
 
@@ -22,7 +22,7 @@ class UpsertManyTests(unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, hs):
         self.storage = hs.get_datastore()
 
-        self.table_name = "table_" + hs.get_secrets().token_hex(6)
+        self.table_name = "table_" + secrets.token_hex(6)
 
         self.get_success(
             self.storage.db_pool.runInteraction(
                 "create",
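Note: the `tests/storage/test__base.py` hunk above swaps the homeserver's `hs.get_secrets()` helper for the stdlib `secrets` module (Python 3.6+). `secrets.token_hex(n)` returns `2*n` hex characters from a CSPRNG, so the table name gains a 12-character random suffix:

import secrets

# 6 bytes of randomness -> 12 hex characters, e.g. "table_9f8c2a1b4d3e"
table_name = "table_" + secrets.token_hex(6)
print(table_name)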
diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 38444e48e295..01af49a16b0f 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 1ce29af5fd9d..666bffe2574f 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,7 @@
 import json
 import os
 import tempfile
-
-from mock import Mock
+from unittest.mock import Mock
 
 import yaml
 
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 1b4fae0bb555..069db0edc43a 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -1,4 +1,4 @@
-from mock import Mock
+from unittest.mock import Mock
 
 from synapse.storage.background_updates import BackgroundUpdater
 
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index eac7e4dcd2fa..3b45a7efd899 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,7 @@
 
 from collections import OrderedDict
-
-from mock import Mock
+from unittest.mock import Mock
 
 from twisted.internet import defer
 
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index 779113868880..77c4fe721c1d 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,9 +13,7 @@
 # limitations under the License.
 import os.path
 
-from unittest.mock import patch
-
-from mock import Mock
+from unittest.mock import Mock, patch
 
 import synapse.rest.admin
 from synapse.api.constants import EventTypes
@@ -50,10 +47,8 @@ def run_background_update(self):
         )
 
         schema_path = os.path.join(
-            prepare_database.dir_path,
-            "databases",
+            prepare_database.schema_path,
             "main",
-            "schema",
             "delta",
             "54",
             "delete_forward_extremities.sql",
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 34e65260970e..e57fce9694bf 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
@@ -14,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from mock import Mock
+from unittest.mock import Mock
 
 import synapse.rest.admin
 from synapse.http.site import XForwardedForRequest
 
@@ -390,7 +389,7 @@ def test_old_user_ips_pruned(self):
 class ClientIpAuthTestCase(unittest.HomeserverTestCase):
 
     servlets = [
-        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        synapse.rest.admin.register_servlets,
         login.register_servlets,
     ]
 
@@ -434,7 +433,7 @@ def _runtest(self, headers, expected_ip, make_request_args):
             self.reactor,
             self.site,
             "GET",
-            "/_synapse/admin/v1/users/" + self.user_id,
+            "/_synapse/admin/v2/users/" + self.user_id,
             access_token=access_token,
             custom_headers=headers1.items(),
             **make_request_args,
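Note: the recurring `-from mock import Mock` / `+from unittest.mock import Mock` churn throughout this patch drops the third-party `mock` backport in favour of the stdlib `unittest.mock` (part of Python since 3.3), which is a drop-in replacement. A tiny self-contained illustration (`frobnicate` is a made-up method name; any attribute access on a Mock works the same way):

from unittest.mock import Mock

m = Mock()
m.frobnicate.return_value = 42          # configure the stubbed return value
assert m.frobnicate("anything") == 42   # calls are recorded, not executed
m.frobnicate.assert_called_once_with("anything")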
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index 5a77c84962d9..6fbac0ab1466 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,17 +35,6 @@ def _stub_db_engine(**kwargs) -> BaseDatabaseEngine:
 
 class TupleComparisonClauseTestCase(unittest.TestCase):
     def test_native_tuple_comparison(self):
-        db_engine = _stub_db_engine(supports_tuple_comparison=True)
-        clause, args = make_tuple_comparison_clause(db_engine, [("a", 1), ("b", 2)])
+        clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)])
         self.assertEqual(clause, "(a,b) > (?,?)")
         self.assertEqual(args, [1, 2])
-
-    def test_emulated_tuple_comparison(self):
-        db_engine = _stub_db_engine(supports_tuple_comparison=False)
-        clause, args = make_tuple_comparison_clause(
-            db_engine, [("a", 1), ("b", 2), ("c", 3)]
-        )
-        self.assertEqual(
-            clause, "(a >= ? AND (a > ? OR (b >= ? AND (b > ? OR c > ?))))"
-        )
-        self.assertEqual(args, [1, 1, 2, 2, 3])
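Note: with the emulated code path deleted above, `make_tuple_comparison_clause` no longer takes a `db_engine` and always emits a native SQL row-value comparison (supported by PostgreSQL, and by SQLite since 3.15, which presumably makes the fallback unnecessary). A sketch consistent with the surviving test, not Synapse's actual implementation:

from typing import List, Tuple

def make_tuple_comparison_clause(keys: List[Tuple[str, int]]) -> Tuple[str, List[int]]:
    """Build a native row-value comparison, e.g. "(a,b) > (?,?)" with args [1, 2]."""
    clause = "(%s) > (%s)" % (
        ",".join(name for name, _ in keys),
        ",".join("?" for _ in keys),
    )
    return clause, [value for _, value in keys]

assert make_tuple_comparison_clause([("a", 1), ("b", 2)]) == ("(a,b) > (?,?)", [1, 2])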
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index dabc1c5f0912..6790aa524291 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2016 OpenMarket Ltd
+# Copyright 2016-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,32 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
-
 import synapse.api.errors
-import tests.unittest
-import tests.utils
-
-
-class DeviceStoreTestCase(tests.unittest.TestCase):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.store = None  # type: synapse.storage.DataStore
+from tests.unittest import HomeserverTestCase
 
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
 
+class DeviceStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
-    @defer.inlineCallbacks
     def test_store_new_device(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id", "device_id", "display_name")
         )
 
-        res = yield defer.ensureDeferred(self.store.get_device("user_id", "device_id"))
+        res = self.get_success(self.store.get_device("user_id", "device_id"))
         self.assertDictContainsSubset(
             {
                 "user_id": "user_id",
@@ -48,19 +36,18 @@ def test_store_new_device(self):
             res,
         )
 
-    @defer.inlineCallbacks
     def test_get_devices_by_user(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id", "device1", "display_name 1")
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id", "device2", "display_name 2")
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id2", "device3", "display_name 3")
         )
 
-        res = yield defer.ensureDeferred(self.store.get_devices_by_user("user_id"))
+        res = self.get_success(self.store.get_devices_by_user("user_id"))
         self.assertEqual(2, len(res.keys()))
         self.assertDictContainsSubset(
             {
@@ -79,43 +66,41 @@ def test_get_devices_by_user(self):
             res["device2"],
         )
 
-    @defer.inlineCallbacks
     def test_count_devices_by_users(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id", "device1", "display_name 1")
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id", "device2", "display_name 2")
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id2", "device3", "display_name 3")
         )
 
-        res = yield defer.ensureDeferred(self.store.count_devices_by_users())
+        res = self.get_success(self.store.count_devices_by_users())
         self.assertEqual(0, res)
 
-        res = yield defer.ensureDeferred(self.store.count_devices_by_users(["unknown"]))
+        res = self.get_success(self.store.count_devices_by_users(["unknown"]))
         self.assertEqual(0, res)
 
-        res = yield defer.ensureDeferred(self.store.count_devices_by_users(["user_id"]))
+        res = self.get_success(self.store.count_devices_by_users(["user_id"]))
         self.assertEqual(2, res)
 
-        res = yield defer.ensureDeferred(
+        res = self.get_success(
             self.store.count_devices_by_users(["user_id", "user_id2"])
         )
         self.assertEqual(3, res)
 
-    @defer.inlineCallbacks
     def test_get_device_updates_by_remote(self):
         device_ids = ["device_id1", "device_id2"]
 
         # Add two device updates with a single stream_id
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
         )
 
         # Get all device updates ever meant for this remote
-        now_stream_id, device_updates = yield defer.ensureDeferred(
+        now_stream_id, device_updates = self.get_success(
             self.store.get_device_updates_by_remote("somehost", -1, limit=100)
         )
 
@@ -131,37 +116,35 @@ def _check_devices_in_updates(self, expected_device_ids, device_updates):
         }
         self.assertEqual(received_device_ids, set(expected_device_ids))
 
-    @defer.inlineCallbacks
     def test_update_device(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_device("user_id", "device_id", "display_name 1")
         )
 
-        res = yield defer.ensureDeferred(self.store.get_device("user_id", "device_id"))
+        res = self.get_success(self.store.get_device("user_id", "device_id"))
         self.assertEqual("display_name 1", res["display_name"])
 
         # do a no-op first
-        yield defer.ensureDeferred(self.store.update_device("user_id", "device_id"))
-        res = yield defer.ensureDeferred(self.store.get_device("user_id", "device_id"))
+        self.get_success(self.store.update_device("user_id", "device_id"))
+        res = self.get_success(self.store.get_device("user_id", "device_id"))
         self.assertEqual("display_name 1", res["display_name"])
 
         # do the update
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.update_device(
                 "user_id", "device_id", new_display_name="display_name 2"
             )
         )
 
         # check it worked
-        res = yield defer.ensureDeferred(self.store.get_device("user_id", "device_id"))
+        res = self.get_success(self.store.get_device("user_id", "device_id"))
         self.assertEqual("display_name 2", res["display_name"])
 
-    @defer.inlineCallbacks
     def test_update_unknown_device(self):
-        with self.assertRaises(synapse.api.errors.StoreError) as cm:
-            yield defer.ensureDeferred(
-                self.store.update_device(
-                    "user_id", "unknown_device_id", new_display_name="display_name 2"
-                )
-            )
-        self.assertEqual(404, cm.exception.code)
+        exc = self.get_failure(
+            self.store.update_device(
+                "user_id", "unknown_device_id", new_display_name="display_name 2"
+            ),
+            synapse.api.errors.StoreError,
+        )
+        self.assertEqual(404, exc.value.code)
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
index da93ca398039..41bef62ca8fc 100644
--- a/tests/storage/test_directory.py
+++ b/tests/storage/test_directory.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,28 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from twisted.internet import defer
-
 from synapse.types import RoomAlias, RoomID
 
-from tests import unittest
-from tests.utils import setup_test_homeserver
+from tests.unittest import HomeserverTestCase
 
 
-class DirectoryStoreTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield setup_test_homeserver(self.addCleanup)
-
+class DirectoryStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
         self.room = RoomID.from_string("!abcde:test")
         self.alias = RoomAlias.from_string("#my-room:test")
 
-    @defer.inlineCallbacks
     def test_room_to_alias(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.create_room_alias_association(
                 room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
             )
@@ -42,16 +33,11 @@ def test_room_to_alias(self):
 
         self.assertEquals(
             ["#my-room:test"],
-            (
-                yield defer.ensureDeferred(
-                    self.store.get_aliases_for_room(self.room.to_string())
-                )
-            ),
+            (self.get_success(self.store.get_aliases_for_room(self.room.to_string()))),
         )
 
-    @defer.inlineCallbacks
     def test_alias_to_room(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.create_room_alias_association(
                 room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
             )
@@ -59,28 +45,19 @@ def test_alias_to_room(self):
 
         self.assertObjectHasAttributes(
             {"room_id": self.room.to_string(), "servers": ["test"]},
-            (
-                yield defer.ensureDeferred(
-                    self.store.get_association_from_room_alias(self.alias)
-                )
-            ),
+            (self.get_success(self.store.get_association_from_room_alias(self.alias))),
        )
 
-    @defer.inlineCallbacks
     def test_delete_alias(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.create_room_alias_association(
                 room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
             )
         )
 
-        room_id = yield defer.ensureDeferred(self.store.delete_room_alias(self.alias))
+        room_id = self.get_success(self.store.delete_room_alias(self.alias))
         self.assertEqual(self.room.to_string(), room_id)
 
         self.assertIsNone(
-            (
-                yield defer.ensureDeferred(
-                    self.store.get_association_from_room_alias(self.alias)
-                )
-            )
+            (self.get_success(self.store.get_association_from_room_alias(self.alias)))
        )
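Note: the `test_devices.py` and `test_directory.py` conversions above follow the same recipe applied to every storage test in this patch. Distilled into a skeleton (`FooStore` and `get_thing` are hypothetical stand-ins, and this only runs inside Synapse's test harness):

from tests.unittest import HomeserverTestCase

# Before, each test was a Trial TestCase: an @defer.inlineCallbacks setUp built the
# homeserver and every store call was `yield defer.ensureDeferred(...)`. After:
class FooStoreTestCase(HomeserverTestCase):
    def prepare(self, reactor, clock, hs):
        # prepare() replaces the inlineCallbacks setUp; it runs once the
        # homeserver (with a fake reactor and clock) has been built
        self.store = hs.get_datastore()

    def test_something(self):
        # get_success pumps the fake reactor until the awaitable resolves,
        # so test bodies become straight-line synchronous code
        res = self.get_success(self.store.get_thing("x"))
        self.assertIsNotNone(res)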
diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py
index 3d7760d5d9cf..9b6b42542532 100644
--- a/tests/storage/test_e2e_room_keys.py
+++ b/tests/storage/test_e2e_room_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 3fc4bb13b64c..3bf6e337f4e1 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2016 OpenMarket Ltd
+# Copyright 2016-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,30 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from twisted.internet import defer
+from tests.unittest import HomeserverTestCase
 
-import tests.unittest
-import tests.utils
-
-
-class EndToEndKeyStoreTestCase(tests.unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
 
+class EndToEndKeyStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
-    @defer.inlineCallbacks
     def test_key_without_device_name(self):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield defer.ensureDeferred(self.store.store_device("user", "device", None))
+        self.get_success(self.store.store_device("user", "device", None))
 
-        yield defer.ensureDeferred(
-            self.store.set_e2e_device_keys("user", "device", now, json)
-        )
+        self.get_success(self.store.set_e2e_device_keys("user", "device", now, json))
 
-        res = yield defer.ensureDeferred(
+        res = self.get_success(
             self.store.get_e2e_device_keys_for_cs_api((("user", "device"),))
         )
         self.assertIn("user", res)
@@ -44,38 +35,32 @@ def test_key_without_device_name(self):
         dev = res["user"]["device"]
         self.assertDictContainsSubset(json, dev)
 
-    @defer.inlineCallbacks
     def test_reupload_key(self):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield defer.ensureDeferred(self.store.store_device("user", "device", None))
+        self.get_success(self.store.store_device("user", "device", None))
 
-        changed = yield defer.ensureDeferred(
+        changed = self.get_success(
             self.store.set_e2e_device_keys("user", "device", now, json)
         )
         self.assertTrue(changed)
 
         # If we try to upload the same key then we should be told nothing
         # changed
-        changed = yield defer.ensureDeferred(
+        changed = self.get_success(
             self.store.set_e2e_device_keys("user", "device", now, json)
         )
         self.assertFalse(changed)
 
-    @defer.inlineCallbacks
     def test_get_key_with_device_name(self):
         now = 1470174257070
         json = {"key": "value"}
 
-        yield defer.ensureDeferred(
-            self.store.set_e2e_device_keys("user", "device", now, json)
-        )
-        yield defer.ensureDeferred(
-            self.store.store_device("user", "device", "display_name")
-        )
+        self.get_success(self.store.set_e2e_device_keys("user", "device", now, json))
+        self.get_success(self.store.store_device("user", "device", "display_name"))
 
-        res = yield defer.ensureDeferred(
+        res = self.get_success(
             self.store.get_e2e_device_keys_for_cs_api((("user", "device"),))
         )
         self.assertIn("user", res)
@@ -85,29 +70,28 @@ def test_get_key_with_device_name(self):
             {"key": "value", "unsigned": {"device_display_name": "display_name"}}, dev
         )
 
-    @defer.inlineCallbacks
     def test_multiple_devices(self):
         now = 1470174257070
 
-        yield defer.ensureDeferred(self.store.store_device("user1", "device1", None))
-        yield defer.ensureDeferred(self.store.store_device("user1", "device2", None))
-        yield defer.ensureDeferred(self.store.store_device("user2", "device1", None))
-        yield defer.ensureDeferred(self.store.store_device("user2", "device2", None))
+        self.get_success(self.store.store_device("user1", "device1", None))
+        self.get_success(self.store.store_device("user1", "device2", None))
+        self.get_success(self.store.store_device("user2", "device1", None))
+        self.get_success(self.store.store_device("user2", "device2", None))
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_e2e_device_keys("user1", "device1", now, {"key": "json11"})
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_e2e_device_keys("user1", "device2", now, {"key": "json12"})
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_e2e_device_keys("user2", "device1", now, {"key": "json21"})
         )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_e2e_device_keys("user2", "device2", now, {"key": "json22"})
         )
 
-        res = yield defer.ensureDeferred(
+        res = self.get_success(
             self.store.get_e2e_device_keys_for_cs_api(
                 (("user1", "device1"), ("user2", "device2"))
             )
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 16daa66cc919..d87f124c2638 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index d597d712d675..a0e22594785f 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index 7691f2d790fb..088fbb247b50 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
@@ -39,12 +38,12 @@ def test_exposed_to_prometheus(self):
         last_event = None
 
         # Make a real event chain
-        for i in range(event_count):
+        for _ in range(event_count):
             ev = self.create_and_send_event(room_id, user, False, last_event)
             last_event = [ev]
 
         # Sprinkle in some extremities
-        for i in range(extrems):
+        for _ in range(extrems):
             ev = self.create_and_send_event(room_id, user, False, last_event)
 
         # Let it run for a while, then pull out the statistics from the
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 485f1ee033c4..1930b37eda1e 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2016 OpenMarket Ltd
+# Copyright 2016-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,12 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from mock import Mock
+from unittest.mock import Mock
 
-from twisted.internet import defer
-
-import tests.unittest
-import tests.utils
+from tests.unittest import HomeserverTestCase
 
 USER_ID = "@user:example.com"
 
@@ -30,37 +26,31 @@
 ]
 
 
-class EventPushActionsStoreTestCase(tests.unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
+class EventPushActionsStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
         self.persist_events_store = hs.get_datastores().persist_events
 
-    @defer.inlineCallbacks
     def test_get_unread_push_actions_for_user_in_range_for_http(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.get_unread_push_actions_for_user_in_range_for_http(
                 USER_ID, 0, 1000, 20
             )
         )
 
-    @defer.inlineCallbacks
     def test_get_unread_push_actions_for_user_in_range_for_email(self):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.get_unread_push_actions_for_user_in_range_for_email(
                 USER_ID, 0, 1000, 20
             )
        )
 
-    @defer.inlineCallbacks
     def test_count_aggregation(self):
         room_id = "!foo:example.com"
         user_id = "@user1235:example.com"
 
-        @defer.inlineCallbacks
         def _assert_counts(noitf_count, highlight_count):
-            counts = yield defer.ensureDeferred(
+            counts = self.get_success(
                 self.store.db_pool.runInteraction(
                     "", self.store._get_unread_counts_by_pos_txn, room_id, user_id, 0
                 )
@@ -74,7 +64,6 @@ def _assert_counts(noitf_count, highlight_count):
                 },
             )
 
-        @defer.inlineCallbacks
         def _inject_actions(stream, action):
             event = Mock()
             event.room_id = room_id
@@ -82,14 +71,14 @@ def _inject_actions(stream, action):
             event.internal_metadata.stream_ordering = stream
             event.depth = stream
 
-            yield defer.ensureDeferred(
+            self.get_success(
                 self.store.add_push_actions_to_staging(
                     event.event_id,
                     {user_id: action},
                     False,
                 )
            )
-            yield defer.ensureDeferred(
+            self.get_success(
                 self.store.db_pool.runInteraction(
                     "",
                     self.persist_events_store._set_push_actions_for_event_and_users_txn,
@@ -99,14 +88,14 @@ def _inject_actions(stream, action):
             )
 
         def _rotate(stream):
-            return defer.ensureDeferred(
+            self.get_success(
                 self.store.db_pool.runInteraction(
                     "", self.store._rotate_notifs_before_txn, stream
                 )
             )
 
         def _mark_read(stream, depth):
-            return defer.ensureDeferred(
+            self.get_success(
                 self.store.db_pool.runInteraction(
                     "",
                     self.store._remove_old_push_actions_before_txn,
@@ -116,49 +105,48 @@ def _mark_read(stream, depth):
                 )
             )
 
-        yield _assert_counts(0, 0)
-        yield _inject_actions(1, PlAIN_NOTIF)
-        yield _assert_counts(1, 0)
-        yield _rotate(2)
-        yield _assert_counts(1, 0)
+        _assert_counts(0, 0)
+        _inject_actions(1, PlAIN_NOTIF)
+        _assert_counts(1, 0)
+        _rotate(2)
+        _assert_counts(1, 0)
 
-        yield _inject_actions(3, PlAIN_NOTIF)
-        yield _assert_counts(2, 0)
-        yield _rotate(4)
-        yield _assert_counts(2, 0)
+        _inject_actions(3, PlAIN_NOTIF)
+        _assert_counts(2, 0)
+        _rotate(4)
+        _assert_counts(2, 0)
 
-        yield _inject_actions(5, PlAIN_NOTIF)
-        yield _mark_read(3, 3)
-        yield _assert_counts(1, 0)
+        _inject_actions(5, PlAIN_NOTIF)
+        _mark_read(3, 3)
+        _assert_counts(1, 0)
 
-        yield _mark_read(5, 5)
-        yield _assert_counts(0, 0)
+        _mark_read(5, 5)
+        _assert_counts(0, 0)
 
-        yield _inject_actions(6, PlAIN_NOTIF)
-        yield _rotate(7)
+        _inject_actions(6, PlAIN_NOTIF)
+        _rotate(7)
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.db_pool.simple_delete(
                 table="event_push_actions", keyvalues={"1": 1}, desc=""
             )
         )
 
-        yield _assert_counts(1, 0)
+        _assert_counts(1, 0)
 
-        yield _mark_read(7, 7)
-        yield _assert_counts(0, 0)
+        _mark_read(7, 7)
+        _assert_counts(0, 0)
 
-        yield _inject_actions(8, HIGHLIGHT)
-        yield _assert_counts(1, 1)
-        yield _rotate(9)
-        yield _assert_counts(1, 1)
-        yield _rotate(10)
-        yield _assert_counts(1, 1)
+        _inject_actions(8, HIGHLIGHT)
+        _assert_counts(1, 1)
+        _rotate(9)
+        _assert_counts(1, 1)
+        _rotate(10)
+        _assert_counts(1, 1)
 
-    @defer.inlineCallbacks
     def test_find_first_stream_ordering_after_ts(self):
         def add_event(so, ts):
-            return defer.ensureDeferred(
+            self.get_success(
                 self.store.db_pool.simple_insert(
                     "events",
                     {
@@ -177,24 +165,16 @@ def add_event(so, ts):
             )
 
         # start with the base case where there are no events in the table
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(11)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(11))
         self.assertEqual(r, 0)
 
         # now with one event
-        yield add_event(2, 10)
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(9)
-        )
+        add_event(2, 10)
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(9))
         self.assertEqual(r, 2)
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(10)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(10))
         self.assertEqual(r, 2)
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(11)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(11))
         self.assertEqual(r, 3)
 
         # add a bunch of dummy events to the events table
@@ -205,39 +185,27 @@ def add_event(so, ts):
             (10, 130),
             (20, 140),
         ):
-            yield add_event(stream_ordering, ts)
+            add_event(stream_ordering, ts)
 
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(110)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(110))
         self.assertEqual(r, 3, "First event after 110ms should be 3, was %i" % r)
 
         # 4 and 5 are both after 120: we want 4 rather than 5
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(120)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(120))
        self.assertEqual(r, 4, "First event after 120ms should be 4, was %i" % r)
 
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(129)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(129))
         self.assertEqual(r, 10, "First event after 129ms should be 10, was %i" % r)
 
         # check we can get the last event
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(140)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(140))
         self.assertEqual(r, 20, "First event after 14ms should be 20, was %i" % r)
 
         # off the end
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(160)
-        )
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(160))
         self.assertEqual(r, 21)
 
         # check we can find an event at ordering zero
-        yield add_event(0, 5)
-        r = yield defer.ensureDeferred(
-            self.store.find_first_stream_ordering_after_ts(1)
-        )
+        add_event(0, 5)
+        r = self.get_success(self.store.find_first_stream_ordering_after_ts(1))
         self.assertEqual(r, 0)
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index ed898b8dbb70..617bc8091fa8 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index aad6bc907e43..792b1c44c198 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import List, Optional
+
 from synapse.storage.database import DatabasePool
 from synapse.storage.engines import IncorrectDatabaseSetup
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
@@ -43,7 +44,7 @@ def _setup_db(self, txn):
         )
 
     def _create_id_generator(
-        self, instance_name="master", writers=["master"]
+        self, instance_name="master", writers: Optional[List[str]] = None
     ) -> MultiWriterIdGenerator:
         def _create(conn):
             return MultiWriterIdGenerator(
@@ -53,7 +54,7 @@ def _create(conn):
                 instance_name=instance_name,
                 tables=[("foobar", "instance_name", "stream_id")],
                 sequence_name="foobar_seq",
-                writers=writers,
+                writers=writers or ["master"],
             )
 
         return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
@@ -476,7 +477,7 @@ def _setup_db(self, txn):
         )
 
     def _create_id_generator(
-        self, instance_name="master", writers=["master"]
+        self, instance_name="master", writers: Optional[List[str]] = None
     ) -> MultiWriterIdGenerator:
         def _create(conn):
             return MultiWriterIdGenerator(
@@ -486,7 +487,7 @@ def _create(conn):
                 instance_name=instance_name,
                 tables=[("foobar", "instance_name", "stream_id")],
                 sequence_name="foobar_seq",
-                writers=writers,
+                writers=writers or ["master"],
                 positive=False,
             )
 
@@ -612,7 +613,7 @@ def _setup_db(self, txn):
         )
 
     def _create_id_generator(
-        self, instance_name="master", writers=["master"]
+        self, instance_name="master", writers: Optional[List[str]] = None
    ) -> MultiWriterIdGenerator:
         def _create(conn):
             return MultiWriterIdGenerator(
@@ -625,7 +626,7 @@ def _create(conn):
                     ("foobar2", "instance_name", "stream_id"),
                 ],
                 sequence_name="foobar_seq",
-                writers=writers,
+                writers=writers or ["master"],
             )
 
         return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
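Note: the `writers=["master"]` to `writers: Optional[List[str]] = None` change in `test_id_generators.py` above fixes Python's shared-mutable-default pitfall, and the same idiom is applied to `extra_content` in `test_redaction.py` below. A self-contained illustration of why the old signature is risky:

from typing import List, Optional

# Default values are evaluated once, at definition time, so a mutable default
# is the *same* list object on every call that doesn't override it:
def bad(writers=["master"]):
    writers.append("worker")  # mutates the shared default list!
    return writers

print(bad())  # ['master', 'worker']
print(bad())  # ['master', 'worker', 'worker']  <- state leaks between calls

# The idiom adopted in the diff: default to None, materialise a fresh value inside.
def good(writers: Optional[List[str]] = None) -> List[str]:
    writers = list(writers) if writers is not None else ["master"]
    writers.append("worker")
    return writers

print(good())  # ['master', 'worker'] every time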
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index 95f309fbbc41..a94b5fd721f2 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py
index e9e3bca3bf59..d2b7b8995200 100644
--- a/tests/storage/test_main.py
+++ b/tests/storage/test_main.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Awesome Technologies Innovationslabor GmbH
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 5858c7fcc4d2..944dbc34a26b 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from mock import Mock
+from unittest.mock import Mock
 
 from twisted.internet import defer
 
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index ea63bd56b408..8a446da848d7 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,59 +12,50 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from twisted.internet import defer
-
 from synapse.types import UserID
 
 from tests import unittest
-from tests.utils import setup_test_homeserver
 
 
-class ProfileStoreTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield setup_test_homeserver(self.addCleanup)
-
+class ProfileStoreTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
         self.u_frank = UserID.from_string("@frank:test")
 
-    @defer.inlineCallbacks
     def test_displayname(self):
-        yield defer.ensureDeferred(self.store.create_profile(self.u_frank.localpart))
+        self.get_success(self.store.create_profile(self.u_frank.localpart))
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_profile_displayname(self.u_frank.localpart, "Frank")
         )
 
         self.assertEquals(
             "Frank",
             (
-                yield defer.ensureDeferred(
+                self.get_success(
                     self.store.get_profile_displayname(self.u_frank.localpart)
                 )
             ),
         )
 
         # test set to None
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_profile_displayname(self.u_frank.localpart, None)
        )
 
         self.assertIsNone(
             (
-                yield defer.ensureDeferred(
+                self.get_success(
                     self.store.get_profile_displayname(self.u_frank.localpart)
                 )
             )
        )
 
-    @defer.inlineCallbacks
     def test_avatar_url(self):
-        yield defer.ensureDeferred(self.store.create_profile(self.u_frank.localpart))
+        self.get_success(self.store.create_profile(self.u_frank.localpart))
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_profile_avatar_url(
                 self.u_frank.localpart, "http://my.site/here"
             )
@@ -74,20 +64,20 @@ def test_avatar_url(self):
         self.assertEquals(
             "http://my.site/here",
             (
-                yield defer.ensureDeferred(
+                self.get_success(
                     self.store.get_profile_avatar_url(self.u_frank.localpart)
                 )
             ),
        )
 
         # test set to None
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.set_profile_avatar_url(self.u_frank.localpart, None)
        )
 
         self.assertIsNone(
             (
-                yield defer.ensureDeferred(
+                self.get_success(
                     self.store.get_profile_avatar_url(self.u_frank.localpart)
                 )
             )
        )
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index 41af8c484784..54c5b470c789 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index b2a0e6085678..bb31ab756d0d 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2019 The Matrix.org Foundation C.I.C.
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,11 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Optional
 
 from canonicaljson import json
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.types import RoomID, UserID
@@ -50,10 +47,15 @@ def prepare(self, reactor, clock, hs):
         self.depth = 1
 
     def inject_room_member(
-        self, room, user, membership, replaces_state=None, extra_content={}
+        self,
+        room,
+        user,
+        membership,
+        replaces_state=None,
+        extra_content: Optional[dict] = None,
     ):
         content = {"membership": membership}
-        content.update(extra_content)
+        content.update(extra_content or {})
 
         builder = self.event_builder_factory.for_room_version(
             RoomVersions.V1,
             {
@@ -230,10 +232,9 @@ def __init__(self, base_builder, event_id):
             self._base_builder = base_builder
             self._event_id = event_id
 
-        @defer.inlineCallbacks
-        def build(self, prev_event_ids, auth_event_ids):
-            built_event = yield defer.ensureDeferred(
-                self._base_builder.build(prev_event_ids, auth_event_ids)
+        async def build(self, prev_event_ids, auth_event_ids):
+            built_event = await self._base_builder.build(
+                prev_event_ids, auth_event_ids
             )
 
             built_event._event_id = self._event_id
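Note: at the end of the `test_redaction.py` diff, the inner `build()` moves from `@defer.inlineCallbacks`/`yield` to a native coroutine. In a coroutine, Twisted Deferreds can be awaited directly; `defer.ensureDeferred` is only needed at the boundary, to turn a coroutine back into a Deferred for callers that expect one. A runnable miniature of the same conversion:

from twisted.internet import defer

async def double(d):
    # native coroutines can await a Deferred directly -- no need for
    # @defer.inlineCallbacks / yield defer.ensureDeferred(...)
    value = await d
    return value * 2

d = defer.succeed(21)
result = defer.ensureDeferred(double(d))  # wrap the coroutine back into a Deferred
result.addCallback(print)                 # prints 42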
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 4eb41c46e8cd..97480652824d 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,21 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from twisted.internet import defer
-
 from synapse.api.constants import UserTypes
 from synapse.api.errors import ThreepidValidationError
 
-from tests import unittest
-from tests.utils import setup_test_homeserver
-
+from tests.unittest import HomeserverTestCase
 
-class RegistrationStoreTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield setup_test_homeserver(self.addCleanup)
 
+class RegistrationStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
 
         self.user_id = "@my-user:test"
@@ -35,9 +27,8 @@ def setUp(self):
         self.pwhash = "{xx1}123456789"
         self.device_id = "akgjhdjklgshg"
 
-    @defer.inlineCallbacks
     def test_register(self):
-        yield defer.ensureDeferred(self.store.register_user(self.user_id, self.pwhash))
+        self.get_success(self.store.register_user(self.user_id, self.pwhash))
 
         self.assertEquals(
             {
@@ -49,93 +40,81 @@ def test_register(self):
                 "consent_version": None,
                 "consent_server_notice_sent": None,
                 "appservice_id": None,
-                "creation_ts": 1000,
+                "creation_ts": 0,
                 "user_type": None,
                 "deactivated": 0,
                 "shadow_banned": 0,
             },
-            (yield defer.ensureDeferred(self.store.get_user_by_id(self.user_id))),
+            (self.get_success(self.store.get_user_by_id(self.user_id))),
        )
 
-    @defer.inlineCallbacks
     def test_add_tokens(self):
-        yield defer.ensureDeferred(self.store.register_user(self.user_id, self.pwhash))
-        yield defer.ensureDeferred(
+        self.get_success(self.store.register_user(self.user_id, self.pwhash))
+        self.get_success(
             self.store.add_access_token_to_user(
                 self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
             )
        )
 
-        result = yield defer.ensureDeferred(
-            self.store.get_user_by_access_token(self.tokens[1])
-        )
+        result = self.get_success(self.store.get_user_by_access_token(self.tokens[1]))
 
         self.assertEqual(result.user_id, self.user_id)
         self.assertEqual(result.device_id, self.device_id)
         self.assertIsNotNone(result.token_id)
 
-    @defer.inlineCallbacks
     def test_user_delete_access_tokens(self):
         # add some tokens
-        yield defer.ensureDeferred(self.store.register_user(self.user_id, self.pwhash))
-        yield defer.ensureDeferred(
+        self.get_success(self.store.register_user(self.user_id, self.pwhash))
+        self.get_success(
             self.store.add_access_token_to_user(
                 self.user_id, self.tokens[0], device_id=None, valid_until_ms=None
             )
        )
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.add_access_token_to_user(
                 self.user_id, self.tokens[1], self.device_id, valid_until_ms=None
             )
        )
 
         # now delete some
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.user_delete_access_tokens(self.user_id, device_id=self.device_id)
        )
 
         # check they were deleted
-        user = yield defer.ensureDeferred(
-            self.store.get_user_by_access_token(self.tokens[1])
-        )
+        user = self.get_success(self.store.get_user_by_access_token(self.tokens[1]))
         self.assertIsNone(user, "access token was not deleted by device_id")
 
         # check the one not associated with the device was not deleted
-        user = yield defer.ensureDeferred(
-            self.store.get_user_by_access_token(self.tokens[0])
-        )
+        user = self.get_success(self.store.get_user_by_access_token(self.tokens[0]))
         self.assertEqual(self.user_id, user.user_id)
 
         # now delete the rest
-        yield defer.ensureDeferred(self.store.user_delete_access_tokens(self.user_id))
+        self.get_success(self.store.user_delete_access_tokens(self.user_id))
 
-        user = yield defer.ensureDeferred(
-            self.store.get_user_by_access_token(self.tokens[0])
-        )
+        user = self.get_success(self.store.get_user_by_access_token(self.tokens[0]))
         self.assertIsNone(user, "access token was not deleted without device_id")
 
-    @defer.inlineCallbacks
     def test_is_support_user(self):
         TEST_USER = "@test:test"
         SUPPORT_USER = "@support:test"
 
-        res = yield defer.ensureDeferred(self.store.is_support_user(None))
+        res = self.get_success(self.store.is_support_user(None))
         self.assertFalse(res)
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.register_user(user_id=TEST_USER, password_hash=None)
        )
-        res = yield defer.ensureDeferred(self.store.is_support_user(TEST_USER))
+        res = self.get_success(self.store.is_support_user(TEST_USER))
         self.assertFalse(res)
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.register_user(
                 user_id=SUPPORT_USER, password_hash=None, user_type=UserTypes.SUPPORT
             )
        )
-        res = yield defer.ensureDeferred(self.store.is_support_user(SUPPORT_USER))
+        res = self.get_success(self.store.is_support_user(SUPPORT_USER))
         self.assertTrue(res)
 
-    @defer.inlineCallbacks
     def test_3pid_inhibit_invalid_validation_session_error(self):
         """Tests that enabling the configuration option to inhibit 3PID errors on
         /requestToken also inhibits validation errors caused by an unknown session ID.
@@ -143,30 +122,28 @@ def test_3pid_inhibit_invalid_validation_session_error(self):
 
         # Check that, with the config setting set to false (the default value), a
         # validation error is caused by the unknown session ID.
-        try:
-            yield defer.ensureDeferred(
-                self.store.validate_threepid_session(
-                    "fake_sid",
-                    "fake_client_secret",
-                    "fake_token",
-                    0,
-                )
-            )
-        except ThreepidValidationError as e:
-            self.assertEquals(e.msg, "Unknown session_id", e)
+        e = self.get_failure(
+            self.store.validate_threepid_session(
+                "fake_sid",
+                "fake_client_secret",
+                "fake_token",
+                0,
+            ),
+            ThreepidValidationError,
+        )
+        self.assertEquals(e.value.msg, "Unknown session_id", e)
 
         # Set the config setting to true.
         self.store._ignore_unknown_session_error = True
 
         # Check that now the validation error is caused by the token not matching.
-        try:
-            yield defer.ensureDeferred(
-                self.store.validate_threepid_session(
-                    "fake_sid",
-                    "fake_client_secret",
-                    "fake_token",
-                    0,
-                )
-            )
-        except ThreepidValidationError as e:
-            self.assertEquals(e.msg, "Validation token not found or has expired", e)
+        e = self.get_failure(
+            self.store.validate_threepid_session(
+                "fake_sid",
+                "fake_client_secret",
+                "fake_token",
+                0,
+            ),
+            ThreepidValidationError,
+        )
+        self.assertEquals(e.value.msg, "Validation token not found or has expired", e)
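Note: `test_registration.py` above (like `test_devices.py` earlier) replaces try/except around a yielded Deferred with `HomeserverTestCase.get_failure`, which returns a `twisted.python.failure.Failure`, so the exception instance moves from `cm.exception` to `.value`. A simplified stand-in showing the shape (it assumes the Deferred has already failed with the expected type; this is not Synapse's actual helper):

from twisted.internet import defer
from twisted.python import failure

def get_failure(d: defer.Deferred, exc_type) -> failure.Failure:
    results = []
    d.addBoth(results.append)     # capture whatever the Deferred fired with
    f = results[0]
    assert isinstance(f, failure.Failure) and f.check(exc_type)
    return f

err = get_failure(defer.fail(ValueError("boom")), ValueError)
print(err.value)  # boom -- the exception itself lives on `.value`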
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index bc8400f24072..70257bf21027 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,22 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes
 from synapse.api.room_versions import RoomVersions
 from synapse.types import RoomAlias, RoomID, UserID
 
-from tests import unittest
-from tests.utils import setup_test_homeserver
-
+from tests.unittest import HomeserverTestCase
 
-class RoomStoreTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield setup_test_homeserver(self.addCleanup)
 
+class RoomStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         # We can't test RoomStore on its own without the DirectoryStore, for
         # management of the 'room_aliases' table
         self.store = hs.get_datastore()
@@ -37,7 +29,7 @@ def setUp(self):
         self.alias = RoomAlias.from_string("#a-room-name:test")
         self.u_creator = UserID.from_string("@creator:test")
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_room(
                 self.room.to_string(),
                 room_creator_user_id=self.u_creator.to_string(),
@@ -46,7 +38,6 @@ def setUp(self):
             )
        )
 
-    @defer.inlineCallbacks
     def test_get_room(self):
         self.assertDictContainsSubset(
             {
@@ -54,16 +45,12 @@ def test_get_room(self):
                 "creator": self.u_creator.to_string(),
                 "is_public": True,
             },
-            (yield defer.ensureDeferred(self.store.get_room(self.room.to_string()))),
+            (self.get_success(self.store.get_room(self.room.to_string()))),
        )
 
-    @defer.inlineCallbacks
     def test_get_room_unknown_room(self):
-        self.assertIsNone(
-            (yield defer.ensureDeferred(self.store.get_room("!uknown:test")))
-        )
+        self.assertIsNone((self.get_success(self.store.get_room("!uknown:test"))))
 
-    @defer.inlineCallbacks
     def test_get_room_with_stats(self):
         self.assertDictContainsSubset(
             {
@@ -71,29 +58,17 @@ def test_get_room_with_stats(self):
                 "creator": self.u_creator.to_string(),
                 "public": True,
             },
-            (
-                yield defer.ensureDeferred(
-                    self.store.get_room_with_stats(self.room.to_string())
-                )
-            ),
+            (self.get_success(self.store.get_room_with_stats(self.room.to_string()))),
        )
 
-    @defer.inlineCallbacks
     def test_get_room_with_stats_unknown_room(self):
         self.assertIsNone(
-            (
-                yield defer.ensureDeferred(
-                    self.store.get_room_with_stats("!uknown:test")
-                )
-            ),
+            (self.get_success(self.store.get_room_with_stats("!uknown:test"))),
        )
 
 
-class RoomEventsStoreTestCase(unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = setup_test_homeserver(self.addCleanup)
-
+class RoomEventsStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         # Room events need the full datastore, for persist_event() and
         # get_room_state()
         self.store = hs.get_datastore()
@@ -102,7 +77,7 @@ def setUp(self):
 
         self.room = RoomID.from_string("!abcde:test")
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_room(
                 self.room.to_string(),
                 room_creator_user_id="@creator:text",
@@ -111,23 +86,21 @@ def setUp(self):
             )
        )
 
-    @defer.inlineCallbacks
     def inject_room_event(self, **kwargs):
-        yield defer.ensureDeferred(
+        self.get_success(
             self.storage.persistence.persist_event(
                 self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
             )
        )
 
-    @defer.inlineCallbacks
     def STALE_test_room_name(self):
         name = "A-Room-Name"
 
-        yield self.inject_room_event(
+        self.inject_room_event(
             etype=EventTypes.Name, name=name, content={"name": name}, depth=1
        )
 
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.store.get_current_state(room_id=self.room.to_string())
        )
 
@@ -137,15 +110,14 @@ def STALE_test_room_name(self):
             state[0],
        )
 
-    @defer.inlineCallbacks
     def STALE_test_room_topic(self):
         topic = "A place for things"
 
-        yield self.inject_room_event(
+        self.inject_room_event(
             etype=EventTypes.Topic, topic=topic, content={"topic": topic}, depth=1
        )
 
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.store.get_current_state(room_id=self.room.to_string())
        )
 
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index d2aed66f6d40..9fa968f6bb30 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index 2471f1267d61..86952645955b 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright 2018 New Vector Ltd
+# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,24 +14,18 @@
 
 import logging
 
-from twisted.internet import defer
-
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.storage.state import StateFilter
 from synapse.types import RoomID, UserID
 
-import tests.unittest
-import tests.utils
+from tests.unittest import HomeserverTestCase
 
 logger = logging.getLogger(__name__)
 
 
-class StateStoreTestCase(tests.unittest.TestCase):
-    @defer.inlineCallbacks
-    def setUp(self):
-        hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
-
+class StateStoreTestCase(HomeserverTestCase):
+    def prepare(self, reactor, clock, hs):
         self.store = hs.get_datastore()
         self.storage = hs.get_storage()
         self.state_datastore = self.storage.state.stores.state
@@ -44,7 +37,7 @@ def setUp(self):
 
         self.room = RoomID.from_string("!abc123:test")
 
-        yield defer.ensureDeferred(
+        self.get_success(
             self.store.store_room(
                 self.room.to_string(),
                 room_creator_user_id="@creator:text",
@@ -53,7 +46,6 @@ def setUp(self):
             )
        )
 
-    @defer.inlineCallbacks
     def inject_state_event(self, room, sender, typ, state_key, content):
         builder = self.event_builder_factory.for_room_version(
             RoomVersions.V1,
@@ -66,13 +58,11 @@ def inject_state_event(self, room, sender, typ, state_key, content):
             },
        )
 
-        event, context = yield defer.ensureDeferred(
+        event, context = self.get_success(
             self.event_creation_handler.create_new_client_event(builder)
        )
 
-        yield defer.ensureDeferred(
-            self.storage.persistence.persist_event(event, context)
-        )
+        self.get_success(self.storage.persistence.persist_event(event, context))
 
         return event
 
@@ -82,16 +72,13 @@ def assertStateMapEqual(self, s1, s2):
             self.assertEqual(s1[t].event_id, s2[t].event_id)
         self.assertEqual(len(s1), len(s2))
 
-    @defer.inlineCallbacks
     def test_get_state_groups_ids(self):
-        e1 = yield self.inject_state_event(
-            self.room, self.u_alice, EventTypes.Create, "", {}
-        )
-        e2 = yield self.inject_state_event(
+        e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
+        e2 = self.inject_state_event(
             self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
        )
 
-        state_group_map = yield defer.ensureDeferred(
+        state_group_map = self.get_success(
             self.storage.state.get_state_groups_ids(self.room, [e2.event_id])
        )
         self.assertEqual(len(state_group_map), 1)
@@ -101,16 +88,13 @@ def test_get_state_groups_ids(self):
             {(EventTypes.Create, ""): e1.event_id, (EventTypes.Name, ""): e2.event_id},
        )
 
-    @defer.inlineCallbacks
     def test_get_state_groups(self):
-        e1 = yield self.inject_state_event(
-            self.room, self.u_alice, EventTypes.Create, "", {}
-        )
-        e2 = yield self.inject_state_event(
+        e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
+        e2 = self.inject_state_event(
             self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
        )
 
-        state_group_map = yield defer.ensureDeferred(
+        state_group_map = self.get_success(
             self.storage.state.get_state_groups(self.room, [e2.event_id])
        )
         self.assertEqual(len(state_group_map), 1)
@@ -118,32 +102,29 @@ def test_get_state_groups(self):
 
         self.assertEqual({ev.event_id for ev in state_list}, {e1.event_id, e2.event_id})
 
-    @defer.inlineCallbacks
     def test_get_state_for_event(self):
         # this defaults to a linear DAG as each new injection defaults to whatever
         # forward extremities are currently in the DB for this room.
-        e1 = yield self.inject_state_event(
-            self.room, self.u_alice, EventTypes.Create, "", {}
-        )
-        e2 = yield self.inject_state_event(
+        e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
+        e2 = self.inject_state_event(
             self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
        )
-        e3 = yield self.inject_state_event(
+        e3 = self.inject_state_event(
             self.room,
             self.u_alice,
             EventTypes.Member,
             self.u_alice.to_string(),
             {"membership": Membership.JOIN},
        )
-        e4 = yield self.inject_state_event(
+        e4 = self.inject_state_event(
             self.room,
             self.u_bob,
             EventTypes.Member,
             self.u_bob.to_string(),
             {"membership": Membership.JOIN},
        )
-        e5 = yield self.inject_state_event(
+        e5 = self.inject_state_event(
             self.room,
             self.u_bob,
             EventTypes.Member,
@@ -152,9 +133,7 @@ def test_get_state_for_event(self):
        )
 
         # check we get the full state as of the final event
-        state = yield defer.ensureDeferred(
-            self.storage.state.get_state_for_event(e5.event_id)
-        )
+        state = self.get_success(self.storage.state.get_state_for_event(e5.event_id))
 
         self.assertIsNotNone(e4)
 
@@ -170,7 +149,7 @@ def test_get_state_for_event(self):
        )
 
         # check we can filter to the m.room.name event (with a '' state key)
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.storage.state.get_state_for_event(
                 e5.event_id, StateFilter.from_types([(EventTypes.Name, "")])
             )
@@ -179,7 +158,7 @@ def test_get_state_for_event(self):
         self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
 
         # check we can filter to the m.room.name event (with a wildcard None state key)
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.storage.state.get_state_for_event(
                 e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
             )
@@ -188,7 +167,7 @@ def test_get_state_for_event(self):
         self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
 
         # check we can grab the m.room.member events (with a wildcard None state key)
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.storage.state.get_state_for_event(
                 e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
             )
@@ -200,7 +179,7 @@ def test_get_state_for_event(self):
 
         # check we can grab a specific room member without filtering out the
         # other event types
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.storage.state.get_state_for_event(
                 e5.event_id,
                 state_filter=StateFilter(
@@ -220,7 +199,7 @@ def test_get_state_for_event(self):
        )
 
         # check that we can grab everything except members
-        state = yield defer.ensureDeferred(
+        state = self.get_success(
             self.storage.state.get_state_for_event(
                 e5.event_id,
                 state_filter=StateFilter(
@@ -238,17 +217,14 @@ def test_get_state_for_event(self):
####################################################### room_id = self.room.to_string() - group_ids = yield defer.ensureDeferred( + group_ids = self.get_success( self.storage.state.get_state_groups_ids(room_id, [e5.event_id]) ) group = list(group_ids.keys())[0] # test _get_state_for_group_using_cache correctly filters out members # with types=[] - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -265,10 +241,7 @@ def test_get_state_for_event(self): state_dict, ) - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -281,10 +254,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters in members # with wildcard types - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -301,10 +271,7 @@ def test_get_state_for_event(self): state_dict, ) - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -324,10 +291,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters in members # with specific types - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -344,10 +308,7 @@ def test_get_state_for_event(self): state_dict, ) - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -360,10 +321,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters in members # with specific types - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -413,10 +371,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters out members # with types=[] room_id = self.room.to_string() - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -428,10 +383,7 @@ def test_get_state_for_event(self): self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) room_id = self.room.to_string() - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( 
self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -444,10 +396,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters in members # wildcard types - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -458,10 +407,7 @@ def test_get_state_for_event(self): self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -480,10 +426,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters in members # with specific types - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -494,10 +437,7 @@ def test_get_state_for_event(self): self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -510,10 +450,7 @@ def test_get_state_for_event(self): # test _get_state_for_group_using_cache correctly filters in members # with specific types - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -524,10 +461,7 @@ def test_get_state_for_event(self): self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - ( - state_dict, - is_all, - ) = yield self.state_datastore._get_state_for_group_using_cache( + (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py index 8e817e2c7f8c..b7f7eae8d096 100644 --- a/tests/storage/test_transactions.py +++ b/tests/storage/test_transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index a6f63f4aafef..222e5d129d73 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
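The test_state.py hunks above all apply one conversion: `defer.inlineCallbacks` tests become `HomeserverTestCase` tests, with `prepare()` replacing the yielding `setUp()` and `get_success()` replacing `yield defer.ensureDeferred(...)` (the `_get_state_for_group_using_cache` calls simply lose their `yield`, since that method is synchronous). A minimal sketch of the target style, for orientation only — the class and test names are illustrative; the `store_room` call is taken from the diff:

# Sketch only -- not part of the diff.
from synapse.api.room_versions import RoomVersions

from tests.unittest import HomeserverTestCase


class ExampleStoreTestCase(HomeserverTestCase):
    def prepare(self, reactor, clock, hs):
        # prepare() runs after the homeserver has been built for us, so no
        # yield/await plumbing is needed for setup.
        self.store = hs.get_datastore()

    def test_store_room(self):
        # get_success() drives the test reactor until the awaitable
        # completes, replacing `yield defer.ensureDeferred(...)`.
        self.get_success(
            self.store.store_room(
                "!abc123:test",
                room_creator_user_id="@creator:test",
                is_public=True,
                room_version=RoomVersions.V1,
            )
        )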
-from twisted.internet import defer - -from tests import unittest -from tests.utils import setup_test_homeserver +from tests.unittest import HomeserverTestCase, override_config ALICE = "@alice:a" BOB = "@bob:b" @@ -25,73 +21,52 @@ BELA = "@somenickname:a" -class UserDirectoryStoreTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup) - self.store = self.hs.get_datastore() +class UserDirectoryStoreTestCase(HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() # alice and bob are both in !room_id. bobby is not but shares # a homeserver with alice. - yield defer.ensureDeferred( - self.store.update_profile_in_user_dir(ALICE, "alice", None) - ) - yield defer.ensureDeferred( - self.store.update_profile_in_user_dir(BOB, "bob", None) - ) - yield defer.ensureDeferred( - self.store.update_profile_in_user_dir(BOBBY, "bobby", None) - ) - yield defer.ensureDeferred( - self.store.update_profile_in_user_dir(BELA, "Bela", None) - ) - yield defer.ensureDeferred( - self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB)) - ) + self.get_success(self.store.update_profile_in_user_dir(ALICE, "alice", None)) + self.get_success(self.store.update_profile_in_user_dir(BOB, "bob", None)) + self.get_success(self.store.update_profile_in_user_dir(BOBBY, "bobby", None)) + self.get_success(self.store.update_profile_in_user_dir(BELA, "Bela", None)) + self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB))) - @defer.inlineCallbacks def test_search_user_dir(self): # normally when alice searches the directory she should just find # bob because bobby doesn't share a room with her. - r = yield defer.ensureDeferred(self.store.search_user_dir(ALICE, "bob", 10)) + r = self.get_success(self.store.search_user_dir(ALICE, "bob", 10)) self.assertFalse(r["limited"]) self.assertEqual(1, len(r["results"])) self.assertDictEqual( r["results"][0], {"user_id": BOB, "display_name": "bob", "avatar_url": None} ) - @defer.inlineCallbacks + @override_config({"user_directory": {"search_all_users": True}}) def test_search_user_dir_all_users(self): - self.hs.config.user_directory_search_all_users = True - try: - r = yield defer.ensureDeferred(self.store.search_user_dir(ALICE, "bob", 10)) - self.assertFalse(r["limited"]) - self.assertEqual(2, len(r["results"])) - self.assertDictEqual( - r["results"][0], - {"user_id": BOB, "display_name": "bob", "avatar_url": None}, - ) - self.assertDictEqual( - r["results"][1], - {"user_id": BOBBY, "display_name": "bobby", "avatar_url": None}, - ) - finally: - self.hs.config.user_directory_search_all_users = False + r = self.get_success(self.store.search_user_dir(ALICE, "bob", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(2, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": BOB, "display_name": "bob", "avatar_url": None}, + ) + self.assertDictEqual( + r["results"][1], + {"user_id": BOBBY, "display_name": "bobby", "avatar_url": None}, + ) - @defer.inlineCallbacks + @override_config({"user_directory": {"search_all_users": True}}) def test_search_user_dir_stop_words(self): """Tests that a user can look up another user by searching for the start of its display name even if that name happens to be a common English word that would usually be ignored in full text searches.
""" - self.hs.config.user_directory_search_all_users = True - try: - r = yield defer.ensureDeferred(self.store.search_user_dir(ALICE, "be", 10)) - self.assertFalse(r["limited"]) - self.assertEqual(1, len(r["results"])) - self.assertDictEqual( - r["results"][0], - {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, - ) - finally: - self.hs.config.user_directory_search_all_users = False + r = self.get_success(self.store.search_user_dir(ALICE, "be", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, + ) diff --git a/tests/test_distributor.py b/tests/test_distributor.py index b57f36e6ac26..f8341041ee6e 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -14,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock, patch +from unittest.mock import Mock, patch from synapse.util.distributor import Distributor diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 3f2691ee6bec..88888319ccb7 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -207,6 +206,226 @@ def test_msc2209(self): do_sig_check=False, ) + def test_join_rules_public(self): + """ + Test joining a public room. + """ + creator = "@creator:example.com" + pleb = "@joiner:example.com" + + auth_events = { + ("m.room.create", ""): _create_event(creator), + ("m.room.member", creator): _join_event(creator), + ("m.room.join_rules", ""): _join_rules_event(creator, "public"), + } + + # Check join. + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user cannot be force-joined to a room. + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _member_event(pleb, "join", sender=creator), + auth_events, + do_sig_check=False, + ) + + # Banned should be rejected. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user who left can re-join. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user can send a join if they're in the room. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user can accept an invite. + auth_events[("m.room.member", pleb)] = _member_event( + pleb, "invite", sender=creator + ) + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + def test_join_rules_invite(self): + """ + Test joining an invite only room. + """ + creator = "@creator:example.com" + pleb = "@joiner:example.com" + + auth_events = { + ("m.room.create", ""): _create_event(creator), + ("m.room.member", creator): _join_event(creator), + ("m.room.join_rules", ""): _join_rules_event(creator, "invite"), + } + + # A join without an invite is rejected. 
+ with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user cannot be force-joined to a room. + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _member_event(pleb, "join", sender=creator), + auth_events, + do_sig_check=False, + ) + + # Banned should be rejected. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user who left cannot re-join. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user can send a join if they're in the room. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user can accept an invite. + auth_events[("m.room.member", pleb)] = _member_event( + pleb, "invite", sender=creator + ) + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + def test_join_rules_msc3083_restricted(self): + """ + Test joining a restricted room from MSC3083. + + This is pretty much the same test as public. + """ + creator = "@creator:example.com" + pleb = "@joiner:example.com" + + auth_events = { + ("m.room.create", ""): _create_event(creator), + ("m.room.member", creator): _join_event(creator), + ("m.room.join_rules", ""): _join_rules_event(creator, "restricted"), + } + + # Older room versions don't understand this join rule + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V6, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # Check join. + event_auth.check( + RoomVersions.MSC3083, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user cannot be force-joined to a room. + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.MSC3083, + _member_event(pleb, "join", sender=creator), + auth_events, + do_sig_check=False, + ) + + # Banned should be rejected. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.MSC3083, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user who left can re-join. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + event_auth.check( + RoomVersions.MSC3083, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user can send a join if they're in the room. + auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + event_auth.check( + RoomVersions.MSC3083, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # A user can accept an invite. 
+ auth_events[("m.room.member", pleb)] = _member_event( + pleb, "invite", sender=creator + ) + event_auth.check( + RoomVersions.MSC3083, + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + # helpers for making events @@ -225,19 +444,24 @@ def _create_event(user_id): ) -def _join_event(user_id): +def _member_event(user_id, membership, sender=None): return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), "type": "m.room.member", - "sender": user_id, + "sender": sender or user_id, "state_key": user_id, - "content": {"membership": "join"}, + "content": {"membership": membership}, + "prev_events": [], } ) +def _join_event(user_id): + return _member_event(user_id, "join") + + def _power_levels_event(sender, content): return make_event_from_dict( { @@ -277,6 +501,21 @@ def _random_state_event(sender): ) +def _join_rules_event(sender, join_rule): + return make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + "event_id": _get_event_id(), + "type": "m.room.join_rules", + "sender": sender, + "state_key": "", + "content": { + "join_rule": join_rule, + }, + } + ) + + event_count = 0 diff --git a/tests/test_federation.py b/tests/test_federation.py index fc9aab32d063..0ed8326f55b8 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import succeed @@ -76,8 +75,10 @@ def setUp(self): ) self.handler = self.homeserver.get_federation_handler() - self.handler.do_auth = lambda origin, event, context, auth_events: succeed( - context + self.handler._check_event_auth = ( + lambda origin, event, context, state, auth_events, backfilled: succeed( + context + ) ) self.client = self.homeserver.get_federation_client() self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed( @@ -134,7 +135,7 @@ async def post_json(destination, path, data, headers=None, timeout=0): } ) - with LoggingContext(): + with LoggingContext("test-context"): failure = self.get_failure( self.handler.on_receive_pdu( "test.serv", lying_event, sent_to_us_directly=True diff --git a/tests/test_mau.py b/tests/test_mau.py index 75d28a42dfe5..fa6ef92b3bd8 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +14,7 @@ """Tests REST events for /rooms paths.""" -import json - -from synapse.api.constants import LoginType +from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.appservice import ApplicationService from synapse.rest.client.v2_alpha import register, sync @@ -113,7 +110,7 @@ def test_as_ignores_mau(self): ) ) - self.create_user("as_kermit4", token=as_token) + self.create_user("as_kermit4", token=as_token, appservice=True) def test_allowed_after_a_month_mau(self): # Create and sync so that the MAU counts get updated @@ -232,14 +229,15 @@ def test_tracked_but_not_limited(self): self.reactor.advance(100) self.assertEqual(2, self.successResultOf(count)) - def create_user(self, localpart, token=None): - request_data = json.dumps( - { - "username": localpart, - 
"password": "monkey", - "auth": {"type": LoginType.DUMMY}, - } - ) + def create_user(self, localpart, token=None, appservice=False): + request_data = { + "username": localpart, + "password": "monkey", + "auth": {"type": LoginType.DUMMY}, + } + + if appservice: + request_data["type"] = APP_SERVICE_REGISTRATION_TYPE channel = self.make_request( "POST", diff --git a/tests/test_metrics.py b/tests/test_metrics.py index f696fcf89ef9..b4574b2ffed2 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index e7aed092c275..09707a74d731 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,8 +13,7 @@ # limitations under the License. import resource - -import mock +from unittest import mock from synapse.app.phone_stats_home import phone_stats_home diff --git a/tests/test_preview.py b/tests/test_preview.py index ea8329991816..cac3d81ac14d 100644 --- a/tests/test_preview.py +++ b/tests/test_preview.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/test_server.py b/tests/test_server.py index 55cde7f62f48..407e172e41de 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -202,6 +202,8 @@ def _make_request(self, method, path): parse_listener_def({"type": "http", "port": 0}), self.resource, "1.0", + max_request_body_size=1234, + reactor=self.reactor, ) # render the request and return the channel diff --git a/tests/test_state.py b/tests/test_state.py index 6227a3ba9555..62f70958732c 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from mock import Mock +from typing import List, Optional +from unittest.mock import Mock from twisted.internet import defer @@ -37,8 +36,8 @@ def create_event( state_key=None, depth=2, event_id=None, - prev_events=[], - **kwargs + prev_events: Optional[List[str]] = None, + **kwargs, ): global _next_event_id @@ -58,7 +57,7 @@ def create_event( "sender": "@user_id:example.com", "room_id": "!room_id:example.com", "depth": depth, - "prev_events": prev_events, + "prev_events": prev_events or [], } if state_key is not None: diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index a743cdc3a937..0df480db9f17 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -13,8 +13,7 @@ # limitations under the License. 
import json - -from mock import Mock +from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactorClock diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py index b921ac52c07a..f2ef1c6051eb 100644 --- a/tests/test_test_utils.py +++ b/tests/test_test_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/test_types.py b/tests/test_types.py index acdeea7a099b..d7881021d3f5 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 43898d8142d9..be6302d170e8 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C # @@ -21,8 +20,7 @@ import warnings from asyncio import Future from typing import Any, Awaitable, Callable, TypeVar - -from mock import Mock +from unittest.mock import Mock import attr diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index c3c4a93e1f9b..e9ec9e085b53 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C # @@ -33,7 +32,7 @@ async def inject_member_event( membership: str, target: Optional[str] = None, extra_content: Optional[dict] = None, - **kwargs + **kwargs, ) -> EventBase: """Inject a membership event into a room.""" if target is None: @@ -58,7 +57,7 @@ async def inject_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs + **kwargs, ) -> EventBase: """Inject a generic event into a room @@ -83,7 +82,7 @@ async def create_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs + **kwargs, ) -> Tuple[EventBase, EventContext]: if room_version is None: room_version = await hs.get_datastore().get_room_version_id(kwargs["room_id"]) diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py index ad563eb3f0b8..1fbb38f4be03 100644 --- a/tests/test_utils/html_parsers.py +++ b/tests/test_utils/html_parsers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 74568b34f8c8..51a197a8c621 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 510b63011470..94b19788d737 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
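Two mechanical changes repeat through the hunks above. First, `from mock import ...` becomes `from unittest.mock import ...`: mock has shipped in the standard library as `unittest.mock` since Python 3.3, so the third-party backport is unnecessary once Python 3.5 support is gone. Second, a trailing comma after `**kwargs` in a function definition only became valid syntax in Python 3.6, which is why it can be added now. A compressed sketch of both:

# Sketch only -- illustrative function, not part of the diff.
from unittest.mock import Mock  # stdlib since py3.3; no external `mock` needed

def example(arg1, *, named=None, **kwargs,):  # trailing comma: py36+ only
    # Mock() turns arbitrary keyword arguments into attributes.
    return Mock(arg1=arg1, named=named, **kwargs)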
import logging - -from mock import Mock +from typing import Optional +from unittest.mock import Mock from twisted.internet import defer from twisted.internet.defer import succeed @@ -147,9 +146,11 @@ def inject_visibility(self, user_id, visibility): return event @defer.inlineCallbacks - def inject_room_member(self, user_id, membership="join", extra_content={}): + def inject_room_member( + self, user_id, membership="join", extra_content: Optional[dict] = None + ): content = {"membership": membership} - content.update(extra_content) + content.update(extra_content or {}) builder = self.event_builder_factory.for_room_version( RoomVersions.V1, { diff --git a/tests/unittest.py b/tests/unittest.py index 58a4daa1ec86..74db7c08f1ee 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector # Copyright 2019 Matrix.org Federation C.I.C @@ -19,10 +18,10 @@ import hmac import inspect import logging +import secrets import time from typing import Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union - -from mock import Mock, patch +from unittest.mock import Mock, patch from canonicaljson import json @@ -135,7 +134,7 @@ def tearDown(orig): def assertObjectHasAttributes(self, attrs, obj): """Asserts that the given object has each of the attributes given, and that the value of each matches according to assertEquals.""" - for (key, value) in attrs.items(): + for key in attrs.keys(): if not hasattr(obj, key): raise AssertionError("Expected obj to have a '.%s'" % key) try: @@ -249,6 +248,8 @@ def setUp(self): config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", + max_request_body_size=1234, + reactor=self.reactor, ) from tests.rest.client.v1.utils import RestHelper @@ -471,7 +472,7 @@ def setup_test_homeserver(self, *args, **kwargs): kwargs["config"] = config_obj async def run_bg_updates(): - with LoggingContext("run_bg_updates", request="run_bg_updates-1"): + with LoggingContext("run_bg_updates"): while not await stor.db_pool.updates.has_completed_background_updates(): await stor.db_pool.updates.do_next_background_update(1) @@ -626,7 +627,6 @@ def create_and_send_event( str: The new event's ID. """ event_creator = self.hs.get_event_creation_handler() - secrets = self.hs.get_secrets() requester = create_requester(user) event, context = self.get_success( diff --git a/tests/util/__init__.py b/tests/util/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/tests/util/__init__.py +++ b/tests/util/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/caches/__init__.py b/tests/util/caches/__init__.py index 451dae3b6c8e..830e2dfe916c 100644 --- a/tests/util/caches/__init__.py +++ b/tests/util/caches/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/caches/test_cached_call.py b/tests/util/caches/test_cached_call.py index f349b5ced071..80b97167bac0 100644 --- a/tests/util/caches/test_cached_call.py +++ b/tests/util/caches/test_cached_call.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index c24c33ee9132..54a88a83255b 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index afb11b9caf2d..178ac8a68cc7 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # @@ -15,8 +14,7 @@ # limitations under the License. import logging from typing import Set - -import mock +from unittest import mock from twisted.internet import defer, reactor @@ -232,8 +230,7 @@ def inner_fn(): @defer.inlineCallbacks def do_lookup(): - with LoggingContext() as c1: - c1.name = "c1" + with LoggingContext("c1") as c1: r = yield obj.fn(1) self.assertEqual(current_context(), c1) return r @@ -275,8 +272,7 @@ def inner_fn(): @defer.inlineCallbacks def do_lookup(): - with LoggingContext() as c1: - c1.name = "c1" + with LoggingContext("c1") as c1: try: d = obj.fn(1) self.assertEqual( @@ -661,14 +657,13 @@ def fn(self, arg1, arg2): @descriptors.cachedList("fn", "args1") async def list_fn(self, args1, arg2): - assert current_context().request == "c1" + assert current_context().name == "c1" # we want this to behave like an asynchronous function await run_on_reactor() - assert current_context().request == "c1" + assert current_context().name == "c1" return self.mock(args1, arg2) - with LoggingContext() as c1: - c1.request = "c1" + with LoggingContext("c1") as c1: obj = Cls() obj.mock.return_value = {10: "fish", 20: "chips"} d1 = obj.list_fn([10, 20], 2) diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py index 816795c13659..fe8314057da1 100644 --- a/tests/util/caches/test_ttlcache.py +++ b/tests/util/caches/test_ttlcache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
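The test_descriptors hunks above track an API change in `LoggingContext`: the context's name is now passed to the constructor rather than assigned to the object afterwards, and it is read back via `.name` instead of `.request`. Sketch of the new usage (assumes a synapse checkout on the path):

# Sketch only -- not part of the diff.
from synapse.logging.context import LoggingContext, current_context

# Previously: ctx = LoggingContext(); ctx.request = "c1"
with LoggingContext("c1"):
    assert current_context().name == "c1"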
-from mock import Mock +from unittest.mock import Mock from synapse.util.caches.ttlcache import TTLCache diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py index 17fd86d02de7..069f875962f5 100644 --- a/tests/util/test_async_utils.py +++ b/tests/util/test_async_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index 2f41333f4c11..bee66dee4328 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index 49ffeebd0ed5..e6e13ba06cc8 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index 2012263184f6..3bb469540527 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +15,7 @@ import threading from io import StringIO - -from mock import NonCallableMock +from unittest.mock import NonCallableMock from twisted.internet import defer, reactor diff --git a/tests/util/test_glob_to_regex.py b/tests/util/test_glob_to_regex.py new file mode 100644 index 000000000000..220accb92b65 --- /dev/null +++ b/tests/util/test_glob_to_regex.py @@ -0,0 +1,59 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.util import glob_to_regex + +from tests.unittest import TestCase + + +class GlobToRegexTestCase(TestCase): + def test_literal_match(self): + """patterns without wildcards should match""" + pat = glob_to_regex("foobaz") + self.assertTrue( + pat.match("FoobaZ"), "patterns should match and be case-insensitive" + ) + self.assertFalse( + pat.match("x foobaz"), "pattern should not match at word boundaries" + ) + + def test_wildcard_match(self): + pat = glob_to_regex("f?o*baz") + + self.assertTrue( + pat.match("FoobarbaZ"), + "* should match string and pattern should be case-insensitive", + ) + self.assertTrue(pat.match("foobaz"), "* should match 0 characters") + self.assertFalse(pat.match("fooxaz"), "the character after * must match") + self.assertFalse(pat.match("fobbaz"), "? should not match 0 characters") + self.assertFalse(pat.match("fiiobaz"), "? 
should not match 2 characters") + + def test_multi_wildcard(self): + """patterns with multiple wildcards in a row should match""" + pat = glob_to_regex("**baz") + self.assertTrue(pat.match("agsgsbaz"), "** should match any string") + self.assertTrue(pat.match("baz"), "** should match the empty string") + self.assertEqual(pat.pattern, r"\A.{0,}baz\Z") + + pat = glob_to_regex("*?baz") + self.assertTrue(pat.match("agsgsbaz"), "*? should match any string") + self.assertTrue(pat.match("abaz"), "*? should match a single char") + self.assertFalse(pat.match("baz"), "*? should not match the empty string") + self.assertEqual(pat.pattern, r"\A.{1,}baz\Z") + + pat = glob_to_regex("a?*?*?baz") + self.assertTrue(pat.match("a g baz"), "?*?*? should match 3 chars") + self.assertFalse(pat.match("a..baz"), "?*?*? should not match 2 chars") + self.assertTrue(pat.match("a.gg.baz"), "?*?*? should match 4 chars") + self.assertEqual(pat.pattern, r"\Aa.{3,}baz\Z") diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py index e931a7ec1852..1bd0b45d940a 100644 --- a/tests/util/test_itertools.py +++ b/tests/util/test_itertools.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index 0e52811948b2..c4a3917b2301 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 58ee918f6533..5d9c4665aa58 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -17,11 +17,10 @@ class LoggingContextTestCase(unittest.TestCase): def _check_test_key(self, value): - self.assertEquals(current_context().request, value) + self.assertEquals(current_context().name, value) def test_with_context(self): - with LoggingContext() as context_one: - context_one.request = "test" + with LoggingContext("test"): self._check_test_key("test") @defer.inlineCallbacks @@ -30,15 +29,13 @@ def test_sleep(self): @defer.inlineCallbacks def competing_callback(): - with LoggingContext() as competing_context: - competing_context.request = "competing" + with LoggingContext("competing"): yield clock.sleep(0) self._check_test_key("competing") reactor.callLater(0, competing_callback) - with LoggingContext() as context_one: - context_one.request = "one" + with LoggingContext("one"): yield clock.sleep(0) self._check_test_key("one") @@ -47,9 +44,7 @@ def _test_run_in_background(self, function): callback_completed = [False] - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): # fire off function, but don't wait on it. 
d2 = run_in_background(function) @@ -133,9 +128,7 @@ def blocking_function(): sentinel_context = current_context() - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(current_context(), sentinel_context) @@ -149,9 +142,7 @@ def blocking_function(): def test_make_deferred_yieldable_with_chained_deferreds(self): sentinel_context = current_context() - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable(_chained_deferred_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(current_context(), sentinel_context) @@ -166,9 +157,7 @@ def test_make_deferred_yieldable_on_non_deferred(self): """Check that make_deferred_yieldable does the right thing when its argument isn't actually a deferred""" - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable("bum") self._check_test_key("one") @@ -177,9 +166,9 @@ def test_make_deferred_yieldable_on_non_deferred(self): self._check_test_key("one") def test_nested_logging_context(self): - with LoggingContext(request="foo"): + with LoggingContext("foo"): nested_context = nested_logging_context(suffix="bar") - self.assertEqual(nested_context.request, "foo-bar") + self.assertEqual(nested_context.name, "foo-bar") @defer.inlineCallbacks def test_make_deferred_yieldable_with_await(self): @@ -193,9 +182,7 @@ async def blocking_function(): sentinel_context = current_context() - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(current_context(), sentinel_context) diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py index 0fb60caacb1a..a2e08281e6c5 100644 --- a/tests/util/test_logformatter.py +++ b/tests/util/test_logformatter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index a739a6aaaf8c..df3e27779fd2 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,7 @@ # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py index 4d1aee91d537..34aaffe85954 100644 --- a/tests/util/test_ratelimitutils.py +++ b/tests/util/test_ratelimitutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
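The test_logcontext hunks above assert a subtlety of `make_deferred_yieldable`: handed an incomplete Deferred, it resets the calling logcontext to the sentinel and restores the saved context when the Deferred fires. A self-contained sketch of that flow (assumes a synapse checkout; the Deferred is fired inline purely for illustration):

# Sketch only -- not part of the diff.
from twisted.internet import defer

from synapse.logging.context import (
    LoggingContext,
    current_context,
    make_deferred_yieldable,
)

@defer.inlineCallbacks
def sketch():
    d = defer.Deferred()
    with LoggingContext("one"):
        d2 = make_deferred_yieldable(d)
        # d is still pending here, so we have been reset to the sentinel
        # context; the saved "one" context is restored when d fires.
        d.callback(None)
        yield d2
        assert current_context().name == "one"

sketch()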
+from typing import Optional + from synapse.config.homeserver import HomeServerConfig from synapse.util.ratelimitutils import FederationRateLimiter @@ -89,9 +90,9 @@ def _await_resolution(reactor, d): return (reactor.seconds() - start_time) * 1000 -def build_rc_config(settings={}): +def build_rc_config(settings: Optional[dict] = None): config_dict = default_config("test") - config_dict.update(settings) + config_dict.update(settings or {}) config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") return config.rc_federation diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 5f46ed0cefd9..9b2be83a43a1 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index d3dea3b52a8a..a10071c70fc9 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py index 8491f7cc8346..ad4dd7f0078f 100644 --- a/tests/util/test_stringutils.py +++ b/tests/util/test_stringutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,7 @@ # limitations under the License. from synapse.api.errors import SynapseError -from synapse.util.stringutils import assert_valid_client_secret +from synapse.util.stringutils import assert_valid_client_secret, base62_encode from .. 
import unittest @@ -46,3 +45,9 @@ def test_client_secret_regex(self): for client_secret in bad: with self.assertRaises(SynapseError): assert_valid_client_secret(client_secret) + + def test_base62_encode(self): + self.assertEqual("0", base62_encode(0)) + self.assertEqual("10", base62_encode(62)) + self.assertEqual("1c", base62_encode(100)) + self.assertEqual("001c", base62_encode(100, minwidth=4)) diff --git a/tests/util/test_threepids.py b/tests/util/test_threepids.py index 5513724d87cf..d957b953bb09 100644 --- a/tests/util/test_threepids.py +++ b/tests/util/test_threepids.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index a5f226120835..3b077af27e90 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py index 03201a4d9b90..0d5039de0406 100644 --- a/tests/util/test_wheel_timer.py +++ b/tests/util/test_wheel_timer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/utils.py b/tests/utils.py index be80b1376089..6bd008dcfe21 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # @@ -21,10 +20,9 @@ import uuid import warnings from typing import Type +from unittest.mock import Mock, patch from urllib import parse as urlparse -from mock import Mock, patch - from twisted.internet import defer from synapse.api.constants import EventTypes @@ -122,7 +120,6 @@ def default_config(name, parse=False): "enable_registration_captcha": False, "macaroon_secret_key": "not even a little secret", "trusted_third_party_id_servers": [], - "room_invite_state_types": [], "password_providers": [], "worker_replication_url": "", "worker_app": None, @@ -156,6 +153,10 @@ def default_config(name, parse=False): "local": {"per_second": 10000, "burst_count": 10000}, "remote": {"per_second": 10000, "burst_count": 10000}, }, + "rc_invites": { + "per_room": {"per_second": 10000, "burst_count": 10000}, + "per_user": {"per_second": 10000, "burst_count": 10000}, + }, "rc_3pid_validation": {"per_second": 10000, "burst_count": 10000}, "saml2_enabled": False, "public_baseurl": None, @@ -192,7 +193,7 @@ def setup_test_homeserver( config=None, reactor=None, homeserver_to_use: Type[HomeServer] = TestHomeServer, - **kwargs + **kwargs, ): """ Setup a homeserver suitable for running tests against. Keyword arguments @@ -306,7 +307,7 @@ def cleanup(): # database for a few more seconds due to flakiness, preventing # us from dropping it when the test is over. If we can't drop # it, warn and move on. 
- for x in range(5): + for _ in range(5): try: cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) db_conn.commit() diff --git a/tox.ini b/tox.ini index 9ff70fe312c9..ecd609271d1d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,8 @@ [tox] -envlist = packaging, py35, py36, py37, py38, py39, check_codestyle, check_isort +envlist = packaging, py36, py37, py38, py39, check_codestyle, check_isort + +# we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 +minversion = 2.3.2 [base] deps = @@ -18,13 +21,11 @@ deps = # installed on that). # # anyway, make sure that we have a recent enough setuptools. - setuptools>=18.5 ; python_version >= '3.6' - setuptools>=18.5,<51.0.0 ; python_version < '3.6' + setuptools>=18.5 # we also need a semi-recent version of pip, because old ones fail to # install the "enum34" dependency of cryptography. - pip>=10 ; python_version >= '3.6' - pip>=10,<21.0 ; python_version < '3.6' + pip>=10 # directories/files we run the linters on. # if you update this list, make sure to do the same in scripts-dev/lint.sh @@ -48,6 +49,7 @@ deps = extras = # install the optional dependendencies for tox environments without # '-noextras' in their name + # (this requires tox 3) !noextras: all test @@ -74,8 +76,6 @@ commands = # we use "env" rather than putting a value in `setenv` so that it is not # inherited by other tox environments. # - # keep this in sync with the copy in `testenv:py35-old`. - # /usr/bin/env COVERAGE_PROCESS_START={toxinidir}/.coveragerc "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} # As of twisted 16.4, trial tries to import the tests as a package (previously @@ -103,8 +103,9 @@ usedevelop=true # A test suite for the oldest supported versions of Python libraries, to catch # any uses of APIs not available in them. -[testenv:py35-old] -skip_install=True +[testenv:py3-old] +skip_install = true +usedevelop = false deps = # Old automat version for Twisted Automat == 0.3.0 @@ -120,11 +121,7 @@ commands = # Install Synapse itself. This won't update any libraries. pip install -e ".[test]" - # we have to duplicate the command from `testenv` rather than refer to it - # as `{[testenv]commands}`, because we run on ubuntu xenial, which has - # tox 2.3.1, and https://github.com/tox-dev/tox/issues/208. - # - /usr/bin/env COVERAGE_PROCESS_START={toxinidir}/.coveragerc "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} + {[testenv]commands} [testenv:benchmark] deps = @@ -136,7 +133,8 @@ commands = python -m synmark {posargs:} [testenv:packaging] -skip_install=True +skip_install = true +usedevelop = false deps = check-manifest commands = @@ -154,7 +152,8 @@ extras = lint commands = isort -c --df --sp setup.cfg {[base]lint_targets} [testenv:check-newsfragment] -skip_install = True +skip_install = true +usedevelop = false deps = towncrier>=18.6.0rc1 commands = python -m towncrier.check --compare-with=origin/develop @@ -163,24 +162,26 @@ commands = commands = {toxinidir}/scripts-dev/generate_sample_config --check [testenv:combine] -skip_install = True +skip_install = true +usedevelop = false deps = coverage - pip>=10 ; python_version >= '3.6' - pip>=10,<21.0 ; python_version < '3.6' + pip>=10 commands= coverage combine coverage report [testenv:cov-erase] -skip_install = True +skip_install = true +usedevelop = false deps = coverage commands= coverage erase [testenv:cov-html] -skip_install = True +skip_install = true +usedevelop = false deps = coverage commands=
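A closing note on the tox.ini hunk: the duplicated trial invocation in the old-deps environment is folded into a reference to the shared `commands`, which is exactly the tox feature that was broken before tox 2.3.2 (tox-dev/tox#208) — hence the new `minversion` pin. In trimmed form (all lines below are taken from the diff itself):

# Sketch only -- trimmed from the tox.ini changes in this diff.
[tox]
minversion = 2.3.2

[testenv]
commands =
    /usr/bin/env COVERAGE_PROCESS_START={toxinidir}/.coveragerc "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}

[testenv:py3-old]
skip_install = true
usedevelop = false
commands =
    pip install -e ".[test]"
    {[testenv]commands}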