From ea01d4c2de65f29cf23e2d28786bfc10bd5fd881 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 24 Sep 2021 15:27:09 +0100
Subject: [PATCH 001/111] Update postgresql testing script (#10906)

- Use sytest:bionic. Sytest:latest is two years old (do we want CI to push out
  latest at all?) and comes with Python 3.5, which we explicitly no longer
  support. The script now runs under PostgreSQL 10 as a result.
- Advertise script in the docs
- Move pg testing script to scripts-dev directory
- Write to host as the script's executor, not root

A few changes to make it speedier to re-run the tests:

- Create blank DB in the container, not the script, so we don't have to
  `initdb` each time
- Use a named volume to persist the tox environment, so we don't have to fetch
  and install a bunch of packages from PyPI each time

Co-authored-by: reivilibre
---
 .gitignore                             |  1 +
 changelog.d/10906.misc                 |  1 +
 docker/Dockerfile-pgtests              | 24 +++++++++++--
 docker/run_pg_tests.sh                 |  7 ++--
 docs/development/contributing_guide.md | 47 ++++++++++++++++++++++++++
 scripts-dev/test_postgresql.sh         | 19 +++++++++++
 test_postgresql.sh                     | 12 -------
 7 files changed, 92 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/10906.misc
 create mode 100755 scripts-dev/test_postgresql.sh
 delete mode 100755 test_postgresql.sh

diff --git a/.gitignore b/.gitignore
index 6b9257b5c95b..fe137f337019 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@ __pycache__/
 /.coverage*
 /.mypy_cache/
 /.tox
+/.tox-pg-container
 /build/
 /coverage.*
 /dist/
diff --git a/changelog.d/10906.misc b/changelog.d/10906.misc
new file mode 100644
index 000000000000..20a1cbfbd0b7
--- /dev/null
+++ b/changelog.d/10906.misc
@@ -0,0 +1 @@
+Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker.
\ No newline at end of file
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
index 3bfee845c658..92b804d1937f 100644
--- a/docker/Dockerfile-pgtests
+++ b/docker/Dockerfile-pgtests
@@ -1,6 +1,6 @@
 # Use the Sytest image that comes with a lot of the build dependencies
 # pre-installed
-FROM matrixdotorg/sytest:latest
+FROM matrixdotorg/sytest:bionic
 
 # The Sytest image doesn't come with python, so install that
 RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
@@ -8,5 +8,23 @@ RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
 # We need tox to run the tests in run_pg_tests.sh
 RUN python3 -m pip install tox
 
-ADD run_pg_tests.sh /pg_tests.sh
-ENTRYPOINT /pg_tests.sh
+# Initialise the db
+RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
+
+# Add a user with our UID and GID so that files get created on the host owned
+# by us, not root.
+ARG UID
+ARG GID
+RUN groupadd --gid $GID user
+RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
+
+# Ensure we can start postgres by sudo-ing as the postgres user.
+RUN apt-get update && apt-get -qq install -y sudo +RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +ADD run_pg_tests.sh /run_pg_tests.sh +# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint) +# so that we can `docker run` this container and pass arguments to pg_tests.sh +ENTRYPOINT ["/run_pg_tests.sh"] + +USER user diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh index 1fd08cb62bc6..58e2177d34c2 100755 --- a/docker/run_pg_tests.sh +++ b/docker/run_pg_tests.sh @@ -10,11 +10,10 @@ set -e # Set PGUSER so Synapse's tests know what user to connect to the database with export PGUSER=postgres -# Initialise & start the database -su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres -su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres +# Start the database +sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start # Run the tests cd /src export TRIAL_FLAGS="-j 4" -tox --workdir=/tmp -e py35-postgres +tox --workdir=./.tox-pg-container -e py36-postgres "$@" diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 97352b0f267c..713366368cbe 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -170,6 +170,53 @@ To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`: SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests ``` +### Running tests under PostgreSQL + +Invoking `trial` as above will use an in-memory SQLite database. This is great for +quick development and testing. However, we recommend using a PostgreSQL database +in production (and indeed, we have some code paths specific to each database). +This means that we need to run our unit tests against PostgreSQL too. Our CI does +this automatically for pull requests and release candidates, but it's sometimes +useful to reproduce this locally. + +To do so, [configure Postgres](../postgres.md) and run `trial` with the +following environment variables matching your configuration: + +- `SYNAPSE_POSTGRES` to anything nonempty +- `SYNAPSE_POSTGRES_HOST` +- `SYNAPSE_POSTGRES_USER` +- `SYNAPSE_POSTGRES_PASSWORD` + +For example: + +```shell +export SYNAPSE_POSTGRES=1 +export SYNAPSE_POSTGRES_HOST=localhost +export SYNAPSE_POSTGRES_USER=postgres +export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword +trial +``` + +#### Prebuilt container + +Since configuring PostgreSQL can be fiddly, we can make use of a pre-made +Docker container to set up PostgreSQL and run our tests for us. To do so, run + +```shell +scripts-dev/test_postgresql.sh +``` + +Any extra arguments to the script will be passed to `tox` and then to `trial`, +so we can run a specific test in this container with e.g. + +```shell +scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase +``` + +The container creates a folder in your Synapse checkout called +`.tox-pg-container` and uses this as a tox environment. The output of any +`trial` runs goes into `_trial_temp` in your synapse source directory — the same +as running `trial` directly on your host machine. ## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)). 
diff --git a/scripts-dev/test_postgresql.sh b/scripts-dev/test_postgresql.sh new file mode 100755 index 000000000000..43cfa256e4da --- /dev/null +++ b/scripts-dev/test_postgresql.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# This script builds the Docker image to run the PostgreSQL tests, and then runs +# the tests. It uses a dedicated tox environment so that we don't have to +# rebuild it each time. + +# Command line arguments to this script are forwarded to "tox" and then to "trial". + +set -e + +# Build, and tag +docker build docker/ \ + --build-arg "UID=$(id -u)" \ + --build-arg "GID=$(id -g)" \ + -f docker/Dockerfile-pgtests \ + -t synapsepgtests + +# Run, mounting the current directory into /src +docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@" diff --git a/test_postgresql.sh b/test_postgresql.sh deleted file mode 100755 index c10828fbbcf8..000000000000 --- a/test_postgresql.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -# This script builds the Docker image to run the PostgreSQL tests, and then runs -# the tests. - -set -e - -# Build, and tag -docker build docker/ -f docker/Dockerfile-pgtests -t synapsepgtests - -# Run, mounting the current directory into /src -docker run --rm -it -v $(pwd)\:/src synapsepgtests From b10257e87972d158f4b6a0c7d1fe7239014ea10a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 24 Sep 2021 16:38:23 +0200 Subject: [PATCH 002/111] Add a spamchecker callback to allow or deny room creation based on invites (#10898) This is in the context of creating new module callbacks that modules in https://github.com/matrix-org/synapse-dinsic can use, in an effort to reconcile the spam checker API in synapse-dinsic with the one in mainline. This adds a callback that's fairly similar to user_may_create_room except it also allows processing based on the invites sent at room creation. --- changelog.d/10898.feature | 1 + docs/modules/spam_checker_callbacks.md | 29 ++++++ synapse/events/spamcheck.py | 42 +++++++++ synapse/handlers/room.py | 14 ++- tests/rest/client/test_rooms.py | 119 ++++++++++++++++++++++++- 5 files changed, 199 insertions(+), 6 deletions(-) create mode 100644 changelog.d/10898.feature diff --git a/changelog.d/10898.feature b/changelog.d/10898.feature new file mode 100644 index 000000000000..97fa39fd0c2b --- /dev/null +++ b/changelog.d/10898.feature @@ -0,0 +1 @@ +Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 81574a015cf4..7920ac5f8fc3 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -38,6 +38,35 @@ async def user_may_create_room(user: str) -> bool Called when processing a room creation request. The module must return a `bool` indicating whether the given user (represented by their Matrix user ID) is allowed to create a room. +### `user_may_create_room_with_invites` + +```python +async def user_may_create_room_with_invites( + user: str, + invites: List[str], + threepid_invites: List[Dict[str, str]], +) -> bool +``` + +Called when processing a room creation request (right after `user_may_create_room`). +The module is given the Matrix user ID of the user trying to create a room, as well as a +list of Matrix users to invite and a list of third-party identifiers (3PID, e.g. email +addresses) to invite. 
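+
+For example, a module could use this callback to cap how many invites a room
+creation request may include. Below is a minimal sketch: the module class, the
+cap of 10 and the registration boilerplate are illustrative assumptions rather
+than part of this change, and the exact format of the two lists is described
+after the example.
+
+```python
+from typing import Dict, List
+
+
+class ExampleInviteCapper:
+    def __init__(self, config, api):
+        # Register our callback with Synapse's spam checker.
+        api.register_spam_checker_callbacks(
+            user_may_create_room_with_invites=self.user_may_create_room_with_invites,
+        )
+
+    async def user_may_create_room_with_invites(
+        self,
+        user: str,
+        invites: List[str],
+        threepid_invites: List[Dict[str, str]],
+    ) -> bool:
+        # Deny the room creation if it would send more than 10 invites
+        # (Matrix and 3PID combined).
+        return len(invites) + len(threepid_invites) <= 10
+```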
+
+A Matrix user to invite is represented by their Matrix user ID, and a 3PID to
+invite is represented by a dict that includes the 3PID medium (e.g. "email")
+through its `medium` key and its address (e.g. "alice@example.com") through its
+`address` key.
+
+See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types) for more
+information regarding third-party identifiers.
+
+If no invites and/or 3PID invites were specified in the room creation request, the
+corresponding list(s) will be empty.
+
+**Note**: This callback is not called when a room is cloned (e.g. during a room upgrade)
+since no invites are sent when cloning a room. To cover this case, modules also need to
+implement `user_may_create_room`.
+
 ### `user_may_create_room_alias`
 
 ```python
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 19ee246f9643..c389f70b8d70 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -46,6 +46,9 @@
 ]
 USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
 USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
+USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
+    [str, List[str], List[Dict[str, str]]], Awaitable[bool]
+]
 USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]]
 USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
 CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]]
@@ -164,6 +167,9 @@ def __init__(self):
         self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
         self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
         self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
+        self._user_may_create_room_with_invites_callbacks: List[
+            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
+        ] = []
         self._user_may_create_room_alias_callbacks: List[
             USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
         ] = []
@@ -183,6 +189,9 @@ def register_callbacks(
         check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
         user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
         user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
+        user_may_create_room_with_invites: Optional[
+            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
+        ] = None,
         user_may_create_room_alias: Optional[
             USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
         ] = None,
@@ -203,6 +212,11 @@ def register_callbacks(
         if user_may_create_room is not None:
             self._user_may_create_room_callbacks.append(user_may_create_room)
 
+        if user_may_create_room_with_invites is not None:
+            self._user_may_create_room_with_invites_callbacks.append(
+                user_may_create_room_with_invites,
+            )
+
         if user_may_create_room_alias is not None:
             self._user_may_create_room_alias_callbacks.append(
                 user_may_create_room_alias,
@@ -283,6 +297,34 @@ async def user_may_create_room(self, userid: str) -> bool:
 
         return True
 
+    async def user_may_create_room_with_invites(
+        self,
+        userid: str,
+        invites: List[str],
+        threepid_invites: List[Dict[str, str]],
+    ) -> bool:
+        """Checks if a given user may create a room with invites
+
+        If this method returns false, the creation request will be rejected.
+
+        Args:
+            userid: The ID of the user attempting to create a room
+            invites: The IDs of the Matrix users to be invited if the room creation is
+                allowed.
+            threepid_invites: The threepids to be invited if the room creation is allowed,
+                as dicts, each including a "medium" key indicating the threepid's medium (e.g.
+ "email") and an "address" key indicating the threepid's address (e.g. + "alice@example.com") + + Returns: + True if the user may create the room, otherwise False + """ + for callback in self._user_may_create_room_with_invites_callbacks: + if await callback(userid, invites, threepid_invites) is False: + return False + + return True + async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias ) -> bool: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 408b7d7b7472..8fede5e935d0 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -649,8 +649,16 @@ async def create_room( requester, config, is_requester_admin=is_requester_admin ) - if not is_requester_admin and not await self.spam_checker.user_may_create_room( - user_id + invite_3pid_list = config.get("invite_3pid", []) + invite_list = config.get("invite", []) + + if not is_requester_admin and not ( + await self.spam_checker.user_may_create_room(user_id) + and await self.spam_checker.user_may_create_room_with_invites( + user_id, + invite_list, + invite_3pid_list, + ) ): raise SynapseError(403, "You are not permitted to create rooms") @@ -684,8 +692,6 @@ async def create_room( if mapping: raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE) - invite_3pid_list = config.get("invite_3pid", []) - invite_list = config.get("invite", []) for i in invite_list: try: uid = UserID.from_string(i) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index ef847f0f5ff9..30bdaa9c2712 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,7 +18,7 @@ """Tests REST events for /rooms paths.""" import json -from typing import Iterable +from typing import Dict, Iterable, List, Optional from unittest.mock import Mock, call from urllib import parse as urlparse @@ -30,7 +30,7 @@ from synapse.handlers.pagination import PurgeStatus from synapse.rest import admin from synapse.rest.client import account, directory, login, profile, room, sync -from synapse.types import JsonDict, RoomAlias, UserID, create_requester +from synapse.types import JsonDict, Requester, RoomAlias, UserID, create_requester from synapse.util.stringutils import random_string from tests import unittest @@ -669,6 +669,121 @@ def test_post_room_invitees_ratelimit(self): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) + def test_spamchecker_invites(self): + """Tests the user_may_create_room_with_invites spam checker callback.""" + + # Mock do_3pid_invite, so we don't fail from failing to send a 3PID invite to an + # IS. + async def do_3pid_invite( + room_id: str, + inviter: UserID, + medium: str, + address: str, + id_server: str, + requester: Requester, + txn_id: Optional[str], + id_access_token: Optional[str] = None, + ) -> int: + return 0 + + do_3pid_invite_mock = Mock(side_effect=do_3pid_invite) + self.hs.get_room_member_handler().do_3pid_invite = do_3pid_invite_mock + + # Add a mock callback for user_may_create_room_with_invites. Make it allow any + # room creation request for now. + return_value = True + + async def user_may_create_room_with_invites( + user: str, + invites: List[str], + threepid_invites: List[Dict[str, str]], + ) -> bool: + return return_value + + callback_mock = Mock(side_effect=user_may_create_room_with_invites) + self.hs.get_spam_checker()._user_may_create_room_with_invites_callbacks.append( + callback_mock, + ) + + # The MXIDs we'll try to invite. 
+ invited_mxids = [ + "@alice1:red", + "@alice2:red", + "@alice3:red", + "@alice4:red", + ] + + # The 3PIDs we'll try to invite. + invited_3pids = [ + { + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": "alice1@example.com", + }, + { + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": "alice2@example.com", + }, + { + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": "alice3@example.com", + }, + ] + + # Create a room and invite the Matrix users, and check that it succeeded. + channel = self.make_request( + "POST", + "/createRoom", + json.dumps({"invite": invited_mxids}).encode("utf8"), + ) + self.assertEqual(200, channel.code) + + # Check that the callback was called with the right arguments. + expected_call_args = ((self.user_id, invited_mxids, []),) + self.assertEquals( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Create a room and invite the 3PIDs, and check that it succeeded. + channel = self.make_request( + "POST", + "/createRoom", + json.dumps({"invite_3pid": invited_3pids}).encode("utf8"), + ) + self.assertEqual(200, channel.code) + + # Check that do_3pid_invite was called the right amount of time + self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids)) + + # Check that the callback was called with the right arguments. + expected_call_args = ((self.user_id, [], invited_3pids),) + self.assertEquals( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Now deny any room creation. + return_value = False + + # Create a room and invite the 3PIDs, and check that it failed. + channel = self.make_request( + "POST", + "/createRoom", + json.dumps({"invite_3pid": invited_3pids}).encode("utf8"), + ) + self.assertEqual(403, channel.code) + + # Check that do_3pid_invite wasn't called this time. + self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids)) + class RoomTopicTestCase(RoomBase): """Tests /rooms/$room_id/topic REST events.""" From d138187045dd3c51689c19124d65ee62e37db755 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 24 Sep 2021 17:09:12 -0500 Subject: [PATCH 003/111] Document changes to schema version 61 - 64 (#10917) As pointed out by @richvdh, https://github.com/matrix-org/synapse/pull/10838#discussion_r715424244 Retroactively summarize `61` - `64` --- changelog.d/10917.misc | 1 + synapse/storage/schema/__init__.py | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/10917.misc diff --git a/changelog.d/10917.misc b/changelog.d/10917.misc new file mode 100644 index 000000000000..9ce6eef94bf1 --- /dev/null +++ b/changelog.d/10917.misc @@ -0,0 +1 @@ +Document and summarize changes in schema version `61` - `64`. diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index aa2ce44c6c90..573e05a482c0 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -27,11 +27,22 @@ Changes in SCHEMA_VERSION = 61: - The `user_stats_historical` and `room_stats_historical` tables are not written and are not read (previously, they were written but not read). + - MSC2716: Add `insertion_events` and `insertion_event_edges` tables to keep track + of insertion events in order to navigate historical chunks of messages. + - MSC2716: Add `chunk_events` table to track how the chunk is labeled and + determines which insertion event it points to. 
+ +Changes in SCHEMA_VERSION = 62: + - MSC2716: Add `insertion_event_extremities` table that keeps track of which + insertion events need to be backfilled. Changes in SCHEMA_VERSION = 63: - The `public_room_list_stream` table is not written nor read to (previously, it was written and read to, but not for any significant purpose). https://github.com/matrix-org/synapse/pull/10565 + +Changes in SCHEMA_VERSION = 64: + - MSC2716: Rename related tables and columns from "chunks" to "batches". """ From 6c83c2710760a4f551d1a925fc9b1a19ae8797c1 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 27 Sep 2021 11:29:23 +0100 Subject: [PATCH 004/111] Fix race conditions when creating media store and config directories (#10913) --- changelog.d/10913.bugfix | 1 + synapse/config/_base.py | 9 ++------- synapse/rest/media/v1/media_storage.py | 6 ++---- synapse/rest/media/v1/storage_provider.py | 3 +-- 4 files changed, 6 insertions(+), 13 deletions(-) create mode 100644 changelog.d/10913.bugfix diff --git a/changelog.d/10913.bugfix b/changelog.d/10913.bugfix new file mode 100644 index 000000000000..a0015c82413c --- /dev/null +++ b/changelog.d/10913.bugfix @@ -0,0 +1 @@ +Fix race conditions when creating media store and config directories. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 2cc242782add..d974a1a2a814 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -200,11 +200,7 @@ def check_file(cls, file_path, config_name): @classmethod def ensure_directory(cls, dir_path): dir_path = cls.abspath(dir_path) - try: - os.makedirs(dir_path) - except OSError as e: - if e.errno != errno.EEXIST: - raise + os.makedirs(dir_path, exist_ok=True) if not os.path.isdir(dir_path): raise ConfigError("%s is not a directory" % (dir_path,)) return dir_path @@ -693,8 +689,7 @@ def load_or_generate_config(cls, description, argv): open_private_ports=config_args.open_private_ports, ) - if not path_exists(config_dir_path): - os.makedirs(config_dir_path) + os.makedirs(config_dir_path, exist_ok=True) with open(config_path, "w") as config_file: config_file.write(config_str) config_file.write("\n\n# vim:ft=yaml") diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 01fada8fb5a3..fca239d8c7ec 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -132,8 +132,7 @@ def store_into_file( fname = os.path.join(self.local_media_directory, path) dirname = os.path.dirname(fname) - if not os.path.exists(dirname): - os.makedirs(dirname) + os.makedirs(dirname, exist_ok=True) finished_called = [False] @@ -244,8 +243,7 @@ async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: return legacy_local_path dirname = os.path.dirname(local_path) - if not os.path.exists(dirname): - os.makedirs(dirname) + os.makedirs(dirname, exist_ok=True) for provider in self.storage_providers: res: Any = await provider.fetch(path, file_info) diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 289e4297f2e6..da78fcee5e0c 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -138,8 +138,7 @@ async def store_file(self, path: str, file_info: FileInfo) -> None: backup_fname = os.path.join(self.base_directory, path) dirname = os.path.dirname(backup_fname) - if not os.path.exists(dirname): - os.makedirs(dirname) + os.makedirs(dirname, exist_ok=True) await defer_to_thread( 
self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname From f7768f62cbf7579a1a91e694f83d47d275373369 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 27 Sep 2021 12:55:27 +0100 Subject: [PATCH 005/111] Avoid storing URL cache files in storage providers (#10911) URL cache files are short-lived and it does not make sense to offload them (eg. to the cloud) or back them up. --- changelog.d/10911.bugfix | 1 + docs/upgrade.md | 7 + synapse/rest/media/v1/filepath.py | 11 +- synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/rest/media/v1/storage_provider.py | 10 ++ tests/rest/media/v1/test_url_preview.py | 130 ++++++++++++++++++ 6 files changed, 154 insertions(+), 6 deletions(-) create mode 100644 changelog.d/10911.bugfix diff --git a/changelog.d/10911.bugfix b/changelog.d/10911.bugfix new file mode 100644 index 000000000000..96e36bb15a5a --- /dev/null +++ b/changelog.d/10911.bugfix @@ -0,0 +1 @@ +Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. diff --git a/docs/upgrade.md b/docs/upgrade.md index f9b832cb3fce..a8221372df50 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,13 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.44.0 + +## The URL preview cache is no longer mirrored to storage providers +The `url_cache/` and `url_cache_thumbnails/` directories in the media store are +no longer mirrored to storage providers. These two directories can be safely +deleted from any configured storage providers to reclaim space. + # Upgrading to v1.43.0 ## The spaces summary APIs can now be handled by workers diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 39bbe4e8742c..08bd85f66445 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -195,23 +195,24 @@ def url_cache_thumbnail_rel( url_cache_thumbnail = _wrap_in_base_path(url_cache_thumbnail_rel) - def url_cache_thumbnail_directory(self, media_id: str) -> str: + def url_cache_thumbnail_directory_rel(self, media_id: str) -> str: # Media id is of the form # E.g.: 2017-09-28-fsdRDt24DS234dsf if NEW_FORMAT_ID_RE.match(media_id): - return os.path.join( - self.base_path, "url_cache_thumbnails", media_id[:10], media_id[11:] - ) + return os.path.join("url_cache_thumbnails", media_id[:10], media_id[11:]) else: return os.path.join( - self.base_path, "url_cache_thumbnails", media_id[0:2], media_id[2:4], media_id[4:], ) + url_cache_thumbnail_directory = _wrap_in_base_path( + url_cache_thumbnail_directory_rel + ) + def url_cache_thumbnail_dirs_to_delete(self, media_id: str) -> List[str]: "The dirs to try and remove if we delete the media_id thumbnails" # Media id is of the form diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 0b0c4d646978..79a42b24556e 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -485,7 +485,6 @@ def _start_expire_url_cache_data(self) -> Deferred: async def _expire_url_cache_data(self) -> None: """Clean up expired url cache content, media and thumbnails.""" - # TODO: Delete from backup media store assert self._worker_run_media_background_jobs diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 
da78fcee5e0c..18bf977d3d9f 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -93,6 +93,11 @@ async def store_file(self, path: str, file_info: FileInfo) -> None: if file_info.server_name and not self.store_remote: return None + if file_info.url_cache: + # The URL preview cache is short lived and not worth offloading or + # backing up. + return None + if self.store_synchronous: # store_file is supposed to return an Awaitable, but guard # against improper implementations. @@ -110,6 +115,11 @@ async def store() -> None: run_in_background(store) async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + if file_info.url_cache: + # Files in the URL preview cache definitely aren't stored here, + # so avoid any potentially slow I/O or network access. + return None + # store_file is supposed to return an Awaitable, but guard # against improper implementations. return await maybe_awaitable(self.backend.fetch(path, file_info)) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index d83dfacfedc0..4d09b5d07ef7 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -21,6 +21,7 @@ from twisted.test.proto_helpers import AccumulatingProtocol from synapse.config.oembed import OEmbedEndpointConfig +from synapse.util.stringutils import parse_and_validate_mxc_uri from tests import unittest from tests.server import FakeTransport @@ -721,3 +722,132 @@ def test_oembed_format(self): "og:description": "Content Preview", }, ) + + def _download_image(self): + """Downloads an image into the URL cache. + + Returns: + A (host, media_id) tuple representing the MXC URI of the image. + """ + self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")] + + channel = self.make_request( + "GET", + "preview_url?url=http://cdn.twitter.com/matrixdotorg", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\nContent-Type: image/png\r\n\r\n" + % (len(SMALL_PNG),) + + SMALL_PNG + ) + + self.pump() + self.assertEqual(channel.code, 200) + body = channel.json_body + mxc_uri = body["og:image"] + host, _port, media_id = parse_and_validate_mxc_uri(mxc_uri) + self.assertIsNone(_port) + return host, media_id + + def test_storage_providers_exclude_files(self): + """Test that files are not stored in or fetched from storage providers.""" + host, media_id = self._download_image() + + rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id) + media_store_path = os.path.join(self.media_store_path, rel_file_path) + storage_provider_path = os.path.join(self.storage_path, rel_file_path) + + # Check storage + self.assertTrue(os.path.isfile(media_store_path)) + self.assertFalse( + os.path.isfile(storage_provider_path), + "URL cache file was unexpectedly stored in a storage provider", + ) + + # Check fetching + channel = self.make_request( + "GET", + f"download/{host}/{media_id}", + shorthand=False, + await_result=False, + ) + self.pump() + self.assertEqual(channel.code, 200) + + # Move cached file into the storage provider + os.makedirs(os.path.dirname(storage_provider_path), exist_ok=True) + os.rename(media_store_path, storage_provider_path) + + channel = self.make_request( + 
"GET", + f"download/{host}/{media_id}", + shorthand=False, + await_result=False, + ) + self.pump() + self.assertEqual( + channel.code, + 404, + "URL cache file was unexpectedly retrieved from a storage provider", + ) + + def test_storage_providers_exclude_thumbnails(self): + """Test that thumbnails are not stored in or fetched from storage providers.""" + host, media_id = self._download_image() + + rel_thumbnail_path = ( + self.preview_url.filepaths.url_cache_thumbnail_directory_rel(media_id) + ) + media_store_thumbnail_path = os.path.join( + self.media_store_path, rel_thumbnail_path + ) + storage_provider_thumbnail_path = os.path.join( + self.storage_path, rel_thumbnail_path + ) + + # Check storage + self.assertTrue(os.path.isdir(media_store_thumbnail_path)) + self.assertFalse( + os.path.isdir(storage_provider_thumbnail_path), + "URL cache thumbnails were unexpectedly stored in a storage provider", + ) + + # Check fetching + channel = self.make_request( + "GET", + f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + shorthand=False, + await_result=False, + ) + self.pump() + self.assertEqual(channel.code, 200) + + # Remove the original, otherwise thumbnails will regenerate + rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id) + media_store_path = os.path.join(self.media_store_path, rel_file_path) + os.remove(media_store_path) + + # Move cached thumbnails into the storage provider + os.makedirs(os.path.dirname(storage_provider_thumbnail_path), exist_ok=True) + os.rename(media_store_thumbnail_path, storage_provider_thumbnail_path) + + channel = self.make_request( + "GET", + f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + shorthand=False, + await_result=False, + ) + self.pump() + self.assertEqual( + channel.code, + 404, + "URL cache thumbnail was unexpectedly retrieved from a storage provider", + ) From d37841787a9e152938ddb39af5bc1d93d04bc640 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Sep 2021 15:39:49 +0100 Subject: [PATCH 006/111] Sign the git tag in release script (#10925) --- changelog.d/10925.misc | 1 + scripts-dev/release.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10925.misc diff --git a/changelog.d/10925.misc b/changelog.d/10925.misc new file mode 100644 index 000000000000..0c8027ecc29f --- /dev/null +++ b/changelog.d/10925.misc @@ -0,0 +1 @@ +Update release script to sign the newly created git tags. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index a339260c4371..ab2d860ab8bd 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -276,7 +276,7 @@ def tag(gh_token: Optional[str]): if click.confirm("Edit text?", default=False): changes = click.edit(changes, require_save=False) - repo.create_tag(tag_name, message=changes) + repo.create_tag(tag_name, message=changes, sign=True) if not click.confirm("Push tag to GitHub?", default=True): print("") From 707d5e4e48e839dabd34e4b67426fe8382a2c978 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Sep 2021 10:37:58 +0100 Subject: [PATCH 007/111] Encode JSON responses on a thread in C, mk2 (#10905) Currently we use `JsonEncoder.iterencode` to write JSON responses, which ensures that we don't block the main reactor thread when encoding huge objects. The downside to this is that `iterencode` falls back to using a pure Python encoder that is *much* less efficient and can easily burn a lot of CPU for huge responses. 
To fix this, while still ensuring we don't block the reactor loop, we encode
the JSON on a threadpool using the standard `JsonEncoder.encode` function,
which is backed by a C library.

Doing so, however, requires `respond_with_json` to have access to the reactor,
which it previously didn't. There are two ways of doing this:

1. threading through the reactor object, which is a bit fiddly as e.g.
   `DirectServeJsonResource` doesn't currently take a reactor, but is exposed
   to modules and so is a PITA to change; or
2. expose the reactor in `SynapseRequest`, which requires updating a bunch of
   servlet types.

I went with the latter as that is just a mechanical change, and I think it
makes sense as a request already has a reactor associated with it (via its
http channel).
---
 changelog.d/10905.feature   |  1 +
 synapse/http/server.py      | 72 +++++++++++++++++++++++++++++--------
 synapse/push/emailpusher.py |  2 +-
 synapse/util/iterutils.py   | 19 ++++++++--
 4 files changed, 76 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/10905.feature

diff --git a/changelog.d/10905.feature b/changelog.d/10905.feature
new file mode 100644
index 000000000000..07e7b2c6a75e
--- /dev/null
+++ b/changelog.d/10905.feature
@@ -0,0 +1 @@
+Speed up responding with large JSON objects to requests.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index e28b56abb945..1a50305dcfdc 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -21,7 +21,6 @@
 import urllib
 from http import HTTPStatus
 from inspect import isawaitable
-from io import BytesIO
 from typing import (
     Any,
     Awaitable,
@@ -37,7 +36,7 @@
 )
 
 import jinja2
-from canonicaljson import iterencode_canonical_json
+from canonicaljson import encode_canonical_json
 from typing_extensions import Protocol
 from zope.interface import implementer
 
@@ -45,7 +44,7 @@
 from twisted.python import failure
 from twisted.web import resource
 from twisted.web.server import NOT_DONE_YET, Request
-from twisted.web.static import File, NoRangeStaticProducer
+from twisted.web.static import File
 from twisted.web.util import redirectTo
 
 from synapse.api.errors import (
@@ -56,10 +55,11 @@
     UnrecognizedRequestError,
 )
 from synapse.http.site import SynapseRequest
-from synapse.logging.context import preserve_fn
+from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
 from synapse.logging.opentracing import trace_servlet
 from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
+from synapse.util.iterutils import chunk_seq
 
 logger = logging.getLogger(__name__)
 
@@ -620,12 +620,11 @@ def stopProducing(self) -> None:
         self._request = None
 
 
-def _encode_json_bytes(json_object: Any) -> Iterator[bytes]:
+def _encode_json_bytes(json_object: Any) -> bytes:
     """
-    Encode an object into JSON. Returns an iterator of bytes.
+    Encode an object into JSON. Returns bytes.
""" - for chunk in json_encoder.iterencode(json_object): - yield chunk.encode("utf-8") + return json_encoder.encode(json_object).encode("utf-8") def respond_with_json( @@ -659,7 +658,7 @@ def respond_with_json( return None if canonical_json: - encoder = iterencode_canonical_json + encoder = encode_canonical_json else: encoder = _encode_json_bytes @@ -670,7 +669,9 @@ def respond_with_json( if send_cors: set_cors_headers(request) - _ByteProducer(request, encoder(json_object)) + run_in_background( + _async_write_json_to_request_in_thread, request, encoder, json_object + ) return NOT_DONE_YET @@ -706,15 +707,56 @@ def respond_with_json_bytes( if send_cors: set_cors_headers(request) - # note that this is zero-copy (the bytesio shares a copy-on-write buffer with - # the original `bytes`). - bytes_io = BytesIO(json_bytes) - - producer = NoRangeStaticProducer(request, bytes_io) - producer.start() + _write_bytes_to_request(request, json_bytes) return NOT_DONE_YET +async def _async_write_json_to_request_in_thread( + request: SynapseRequest, + json_encoder: Callable[[Any], bytes], + json_object: Any, +): + """Encodes the given JSON object on a thread and then writes it to the + request. + + This is done so that encoding large JSON objects doesn't block the reactor + thread. + + Note: We don't use JsonEncoder.iterencode here as that falls back to the + Python implementation (rather than the C backend), which is *much* more + expensive. + """ + + json_str = await defer_to_thread(request.reactor, json_encoder, json_object) + + _write_bytes_to_request(request, json_str) + + +def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None: + """Writes the bytes to the request using an appropriate producer. + + Note: This should be used instead of `Request.write` to correctly handle + large response bodies. + """ + + # The problem with dumping all of the response into the `Request` object at + # once (via `Request.write`) is that doing so starts the timeout for the + # next request to be received: so if it takes longer than 60s to stream back + # the response to the client, the client never gets it. + # + # The correct solution is to use a Producer; then the timeout is only + # started once all of the content is sent over the TCP connection. + + # To make sure we don't write all of the bytes at once we split it up into + # chunks. + chunk_size = 4096 + bytes_generator = chunk_seq(bytes_to_write, chunk_size) + + # We use a `_ByteProducer` here rather than `NoRangeStaticProducer` as the + # unit tests can't cope with being given a pull producer. 
+    _ByteProducer(request, bytes_generator)
+
+
 def set_cors_headers(request: Request):
     """Set the CORS headers so that javascript running in a web browsers can
     use this API
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index e08e125cb8a5..cf5abdfbda49 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -184,7 +184,7 @@ async def _unsafe_process(self) -> None:
 
         should_notify_at = max(notif_ready_at, room_ready_at)
 
-        if should_notify_at < self.clock.time_msec():
+        if should_notify_at <= self.clock.time_msec():
             # one of our notifications is ready for sending, so we send
             # *one* email updating the user on their notifications,
             # we then consider all previously outstanding notifications
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index 8ac3eab2f54f..4938ddf70321 100644
--- a/synapse/util/iterutils.py
+++ b/synapse/util/iterutils.py
@@ -21,13 +21,28 @@
     Iterable,
     Iterator,
     Mapping,
-    Sequence,
     Set,
+    Sized,
     Tuple,
     TypeVar,
 )
 
+from typing_extensions import Protocol
+
 T = TypeVar("T")
+S = TypeVar("S", bound="_SelfSlice")
+
+
+class _SelfSlice(Sized, Protocol):
+    """A helper protocol that matches types where taking a slice results in the
+    same type being returned.
+
+    This is more specific than `Sequence`, which allows another `Sequence` to be
+    returned.
+    """
+
+    def __getitem__(self: S, i: slice) -> S:
+        ...
 
 
 def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
@@ -46,7 +61,7 @@ def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
     return iter(lambda: tuple(islice(sourceiter, size)), ())
 
 
-def chunk_seq(iseq: Sequence[T], maxlen: int) -> Iterable[Sequence[T]]:
+def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]:
     """Split the given sequence into chunks of the given size
 
     The last chunk may be shorter than the given size.

From a8bbf085761095c49b04af1a08fc67b1a781617d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Sep 2021 12:13:51 +0100
Subject: [PATCH 008/111] Fix debian package builds. (#10931)

This was due to dh-virtualenv builds being broken due to Sphinx removing
deprecated APIs.
---
 changelog.d/10931.bugfix       | 1 +
 docker/Dockerfile-dhvirtualenv | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/10931.bugfix

diff --git a/changelog.d/10931.bugfix b/changelog.d/10931.bugfix
new file mode 100644
index 000000000000..3f30c9ccf158
--- /dev/null
+++ b/changelog.d/10931.bugfix
@@ -0,0 +1 @@
+Fix debian builds due to dh-virtualenv no longer being able to build their docs.
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 017be8555ea9..1dd88140c7a4 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -47,8 +47,9 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     && cd /dh-virtualenv \
     && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -y --no-install-recommends"
 
-# build it
-RUN cd /dh-virtualenv && dpkg-buildpackage -us -uc -b
+# Build it. Note that building the docs doesn't work due to differences in
+# Sphinx APIs across versions/distros.
+RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b ### ### Stage 1 From 3c50192d3f564ecc2e70441157f309610bbee1cd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Sep 2021 13:42:21 +0100 Subject: [PATCH 009/111] 1.44.0rc1 --- CHANGES.md | 72 +++++++++++++++++++++++++++++++++++++++ changelog.d/10659.misc | 1 - changelog.d/10690.bugfix | 1 - changelog.d/10776.feature | 1 - changelog.d/10777.misc | 1 - changelog.d/10782.bugfix | 1 - changelog.d/10785.misc | 1 - changelog.d/10796.misc | 1 - changelog.d/10807.bugfix | 1 - changelog.d/10810.bugfix | 1 - changelog.d/10812.misc | 1 - changelog.d/10814.feature | 1 - changelog.d/10815.misc | 1 - changelog.d/10816.misc | 1 - changelog.d/10817.misc | 1 - changelog.d/10819.feature | 1 - changelog.d/10820.misc | 1 - changelog.d/10823.misc | 1 - changelog.d/10826.misc | 2 -- changelog.d/10827.bugfix | 1 - changelog.d/10829.misc | 1 - changelog.d/10831.misc | 1 - changelog.d/10833.misc | 1 - changelog.d/10834.misc | 1 - changelog.d/10835.misc | 1 - changelog.d/10838.misc | 1 - changelog.d/10839.misc | 1 - changelog.d/10843.bugfix | 1 - changelog.d/10845.doc | 1 - changelog.d/10856.misc | 1 - changelog.d/10859.bugfix | 1 - changelog.d/10865.doc | 1 - changelog.d/10867.misc | 1 - changelog.d/10868.feature | 1 - changelog.d/10869.doc | 1 - changelog.d/10873.bugfix | 1 - changelog.d/10875.bugfix | 1 - changelog.d/10879.misc | 1 - changelog.d/10880.misc | 1 - changelog.d/10881.bugfix | 1 - changelog.d/10883.misc | 1 - changelog.d/10884.misc | 1 - changelog.d/10885.misc | 1 - changelog.d/10887.bugfix | 1 - changelog.d/10889.misc | 1 - changelog.d/10891.misc | 1 - changelog.d/10893.misc | 1 - changelog.d/10896.misc | 1 - changelog.d/10897.misc | 1 - changelog.d/10898.feature | 1 - changelog.d/10901.misc | 1 - changelog.d/10905.feature | 1 - changelog.d/10906.misc | 1 - changelog.d/10907.bugfix | 1 - changelog.d/10911.bugfix | 1 - changelog.d/10913.bugfix | 1 - changelog.d/10917.misc | 1 - changelog.d/10925.misc | 1 - changelog.d/10931.bugfix | 1 - debian/changelog | 6 ++++ synapse/__init__.py | 2 +- 61 files changed, 79 insertions(+), 60 deletions(-) delete mode 100644 changelog.d/10659.misc delete mode 100644 changelog.d/10690.bugfix delete mode 100644 changelog.d/10776.feature delete mode 100644 changelog.d/10777.misc delete mode 100644 changelog.d/10782.bugfix delete mode 100644 changelog.d/10785.misc delete mode 100644 changelog.d/10796.misc delete mode 100644 changelog.d/10807.bugfix delete mode 100644 changelog.d/10810.bugfix delete mode 100644 changelog.d/10812.misc delete mode 100644 changelog.d/10814.feature delete mode 100644 changelog.d/10815.misc delete mode 100644 changelog.d/10816.misc delete mode 100644 changelog.d/10817.misc delete mode 100644 changelog.d/10819.feature delete mode 100644 changelog.d/10820.misc delete mode 100644 changelog.d/10823.misc delete mode 100644 changelog.d/10826.misc delete mode 100644 changelog.d/10827.bugfix delete mode 100644 changelog.d/10829.misc delete mode 100644 changelog.d/10831.misc delete mode 100644 changelog.d/10833.misc delete mode 100644 changelog.d/10834.misc delete mode 100644 changelog.d/10835.misc delete mode 100644 changelog.d/10838.misc delete mode 100644 changelog.d/10839.misc delete mode 100644 changelog.d/10843.bugfix delete mode 100644 changelog.d/10845.doc delete mode 100644 changelog.d/10856.misc delete mode 100644 changelog.d/10859.bugfix delete mode 100644 changelog.d/10865.doc delete mode 100644 changelog.d/10867.misc delete mode 100644 
changelog.d/10868.feature delete mode 100644 changelog.d/10869.doc delete mode 100644 changelog.d/10873.bugfix delete mode 100644 changelog.d/10875.bugfix delete mode 100644 changelog.d/10879.misc delete mode 100644 changelog.d/10880.misc delete mode 100644 changelog.d/10881.bugfix delete mode 100644 changelog.d/10883.misc delete mode 100644 changelog.d/10884.misc delete mode 100644 changelog.d/10885.misc delete mode 100644 changelog.d/10887.bugfix delete mode 100644 changelog.d/10889.misc delete mode 100644 changelog.d/10891.misc delete mode 100644 changelog.d/10893.misc delete mode 100644 changelog.d/10896.misc delete mode 100644 changelog.d/10897.misc delete mode 100644 changelog.d/10898.feature delete mode 100644 changelog.d/10901.misc delete mode 100644 changelog.d/10905.feature delete mode 100644 changelog.d/10906.misc delete mode 100644 changelog.d/10907.bugfix delete mode 100644 changelog.d/10911.bugfix delete mode 100644 changelog.d/10913.bugfix delete mode 100644 changelog.d/10917.misc delete mode 100644 changelog.d/10925.misc delete mode 100644 changelog.d/10931.bugfix diff --git a/CHANGES.md b/CHANGES.md index 652f4b7955c1..da4d98ac2b25 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,75 @@ +Synapse 1.44.0rc1 (2021-09-28) +============================== + +Features +-------- + +- Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event. ([\#10776](https://github.com/matrix-org/synapse/issues/10776)) +- Improve oEmbed previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819)) +- Speed up responding with large JSON objects to requests. ([\#10868](https://github.com/matrix-org/synapse/issues/10868), [\#10905](https://github.com/matrix-org/synapse/issues/10905)) +- Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. ([\#10898](https://github.com/matrix-org/synapse/issues/10898)) + + +Bugfixes +-------- + +- Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka. ([\#10690](https://github.com/matrix-org/synapse/issues/10690)) +- Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory. ([\#10782](https://github.com/matrix-org/synapse/issues/10782)) +- Allow sending a membership event to unban a user. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807)) +- Fix a case where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810)) +- Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827)) +- Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843)) +- Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters. 
([\#10859](https://github.com/matrix-org/synapse/issues/10859)) +- Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873)) +- Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875)) +- Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881)) +- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887)) +- Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected. ([\#10907](https://github.com/matrix-org/synapse/issues/10907)) +- Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911)) +- Fix race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913)) +- Fix debian builds due to dh-virtualenv no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931)) + + +Improved Documentation +---------------------- + +- Fix some crashes in the Module API example code, by adding JSON encoding/decoding. ([\#10845](https://github.com/matrix-org/synapse/issues/10845)) +- Add developer documentation about experimental configuration flags. ([\#10865](https://github.com/matrix-org/synapse/issues/10865)) +- Properly remove deleted files from GitHub pages when generating the documentation. ([\#10869](https://github.com/matrix-org/synapse/issues/10869)) + + +Internal Changes +---------------- + +- Fix GitHub Actions config so we can run sytest on synapse from parallel branches. ([\#10659](https://github.com/matrix-org/synapse/issues/10659)) +- Split out [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) meta events to their own fields in the `/batch_send` response. ([\#10777](https://github.com/matrix-org/synapse/issues/10777)) +- Add missing type hints to REST servlets. ([\#10785](https://github.com/matrix-org/synapse/issues/10785), [\#10817](https://github.com/matrix-org/synapse/issues/10817)) +- Simplify the internal logic which maintains the user directory database tables. ([\#10796](https://github.com/matrix-org/synapse/issues/10796)) +- Use direct references to config flags. ([\#10812](https://github.com/matrix-org/synapse/issues/10812), [\#10885](https://github.com/matrix-org/synapse/issues/10885), [\#10893](https://github.com/matrix-org/synapse/issues/10893), [\#10897](https://github.com/matrix-org/synapse/issues/10897)) +- Specify the type of token in generic "Invalid token" error messages. ([\#10815](https://github.com/matrix-org/synapse/issues/10815)) +- Make `StateFilter` frozen so it is hashable. ([\#10816](https://github.com/matrix-org/synapse/issues/10816)) +- Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error. 
([\#10820](https://github.com/matrix-org/synapse/issues/10820)) +- Add type hints to the state database. ([\#10823](https://github.com/matrix-org/synapse/issues/10823)) +- Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you + haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826)) +- Track cache eviction rates more finely in Prometheus' monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829)) +- Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856)) +- Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833)) +- Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834)) +- Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835)) +- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint. ([\#10838](https://github.com/matrix-org/synapse/issues/10838)) +- Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839)) +- Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867)) +- Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879)) +- Break down Grafana's cache expiry time series based on reason for eviction---see #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880)) +- Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901)) +- Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889)) +- Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891)) +- Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker. ([\#10906](https://github.com/matrix-org/synapse/issues/10906)) +- Document and summarize changes in schema version `61` - `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917)) +- Update release script to sign the newly created git tags. ([\#10925](https://github.com/matrix-org/synapse/issues/10925)) + + Synapse 1.43.0 (2021-09-21) =========================== diff --git a/changelog.d/10659.misc b/changelog.d/10659.misc deleted file mode 100644 index d677a521c3f5..000000000000 --- a/changelog.d/10659.misc +++ /dev/null @@ -1 +0,0 @@ -Fix GitHub Actions config so we can run sytest on synapse from parallel branches. 
\ No newline at end of file diff --git a/changelog.d/10690.bugfix b/changelog.d/10690.bugfix deleted file mode 100644 index 059eea7464eb..000000000000 --- a/changelog.d/10690.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka. diff --git a/changelog.d/10776.feature b/changelog.d/10776.feature deleted file mode 100644 index aec0685a3d96..000000000000 --- a/changelog.d/10776.feature +++ /dev/null @@ -1 +0,0 @@ -Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event. diff --git a/changelog.d/10777.misc b/changelog.d/10777.misc deleted file mode 100644 index aed78a16f550..000000000000 --- a/changelog.d/10777.misc +++ /dev/null @@ -1 +0,0 @@ -Split out [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) meta events to their own fields in the `/batch_send` response. diff --git a/changelog.d/10782.bugfix b/changelog.d/10782.bugfix deleted file mode 100644 index 3e410447cc15..000000000000 --- a/changelog.d/10782.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory. \ No newline at end of file diff --git a/changelog.d/10785.misc b/changelog.d/10785.misc deleted file mode 100644 index 39a37b90b1b3..000000000000 --- a/changelog.d/10785.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to REST servlets. diff --git a/changelog.d/10796.misc b/changelog.d/10796.misc deleted file mode 100644 index 1873b2386aac..000000000000 --- a/changelog.d/10796.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the internal logic which maintains the user directory database tables. \ No newline at end of file diff --git a/changelog.d/10807.bugfix b/changelog.d/10807.bugfix deleted file mode 100644 index be03f5c738a0..000000000000 --- a/changelog.d/10807.bugfix +++ /dev/null @@ -1 +0,0 @@ -Allow sending a membership event to unban a user. Contributed by @aaronraimist. \ No newline at end of file diff --git a/changelog.d/10810.bugfix b/changelog.d/10810.bugfix deleted file mode 100644 index 43e91f1f51f8..000000000000 --- a/changelog.d/10810.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a case where logging contexts would go missing when federation requests time out. diff --git a/changelog.d/10812.misc b/changelog.d/10812.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10812.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10814.feature b/changelog.d/10814.feature deleted file mode 100644 index 4fa95a6cc968..000000000000 --- a/changelog.d/10814.feature +++ /dev/null @@ -1 +0,0 @@ -Improve oEmbed previews by processing the author name, photo, and video information. diff --git a/changelog.d/10815.misc b/changelog.d/10815.misc deleted file mode 100644 index fc2534dc14be..000000000000 --- a/changelog.d/10815.misc +++ /dev/null @@ -1 +0,0 @@ -Specify the type of token in generic "Invalid token" error messages. \ No newline at end of file diff --git a/changelog.d/10816.misc b/changelog.d/10816.misc deleted file mode 100644 index 2ca55b334a3c..000000000000 --- a/changelog.d/10816.misc +++ /dev/null @@ -1 +0,0 @@ -Make `StateFilter` frozen so it is hashable. 
diff --git a/changelog.d/10817.misc b/changelog.d/10817.misc deleted file mode 100644 index 39a37b90b1b3..000000000000 --- a/changelog.d/10817.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to REST servlets. diff --git a/changelog.d/10819.feature b/changelog.d/10819.feature deleted file mode 100644 index 4fa95a6cc968..000000000000 --- a/changelog.d/10819.feature +++ /dev/null @@ -1 +0,0 @@ -Improve oEmbed previews by processing the author name, photo, and video information. diff --git a/changelog.d/10820.misc b/changelog.d/10820.misc deleted file mode 100644 index 4373bf6f6b6c..000000000000 --- a/changelog.d/10820.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error. \ No newline at end of file diff --git a/changelog.d/10823.misc b/changelog.d/10823.misc deleted file mode 100644 index 0532969900f8..000000000000 --- a/changelog.d/10823.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to the state database. diff --git a/changelog.d/10826.misc b/changelog.d/10826.misc deleted file mode 100644 index 53e56fc362f3..000000000000 --- a/changelog.d/10826.misc +++ /dev/null @@ -1,2 +0,0 @@ -Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you -haven't synced recently. diff --git a/changelog.d/10827.bugfix b/changelog.d/10827.bugfix deleted file mode 100644 index 11a618bf8293..000000000000 --- a/changelog.d/10827.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. diff --git a/changelog.d/10829.misc b/changelog.d/10829.misc deleted file mode 100644 index ac5fd6b047e5..000000000000 --- a/changelog.d/10829.misc +++ /dev/null @@ -1 +0,0 @@ -Track cache eviction rates more finely in Prometheus' monitoring. \ No newline at end of file diff --git a/changelog.d/10831.misc b/changelog.d/10831.misc deleted file mode 100644 index f09af2e00a3b..000000000000 --- a/changelog.d/10831.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to handlers. diff --git a/changelog.d/10833.misc b/changelog.d/10833.misc deleted file mode 100644 index f23c0a1a023a..000000000000 --- a/changelog.d/10833.misc +++ /dev/null @@ -1 +0,0 @@ -Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. diff --git a/changelog.d/10834.misc b/changelog.d/10834.misc deleted file mode 100644 index 037695e6e9c8..000000000000 --- a/changelog.d/10834.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out PNG image data to a constant to be used in several tests. diff --git a/changelog.d/10835.misc b/changelog.d/10835.misc deleted file mode 100644 index 0c3d13477e96..000000000000 --- a/changelog.d/10835.misc +++ /dev/null @@ -1 +0,0 @@ -Add a test to ensure state events sent by modules get persisted correctly. diff --git a/changelog.d/10838.misc b/changelog.d/10838.misc deleted file mode 100644 index b1977d0a2ed7..000000000000 --- a/changelog.d/10838.misc +++ /dev/null @@ -1 +0,0 @@ -Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint. 
diff --git a/changelog.d/10839.misc b/changelog.d/10839.misc deleted file mode 100644 index d0e10f31d52a..000000000000 --- a/changelog.d/10839.misc +++ /dev/null @@ -1 +0,0 @@ -Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. diff --git a/changelog.d/10843.bugfix b/changelog.d/10843.bugfix deleted file mode 100644 index 5027a1dbefa8..000000000000 --- a/changelog.d/10843.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. diff --git a/changelog.d/10845.doc b/changelog.d/10845.doc deleted file mode 100644 index a13c845ae648..000000000000 --- a/changelog.d/10845.doc +++ /dev/null @@ -1 +0,0 @@ -Fix some crashes in the Module API example code, by adding JSON encoding/decoding. diff --git a/changelog.d/10856.misc b/changelog.d/10856.misc deleted file mode 100644 index f09af2e00a3b..000000000000 --- a/changelog.d/10856.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to handlers. diff --git a/changelog.d/10859.bugfix b/changelog.d/10859.bugfix deleted file mode 100644 index c1bfe22d5405..000000000000 --- a/changelog.d/10859.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters. \ No newline at end of file diff --git a/changelog.d/10865.doc b/changelog.d/10865.doc deleted file mode 100644 index deeb0eedf363..000000000000 --- a/changelog.d/10865.doc +++ /dev/null @@ -1 +0,0 @@ -Add developer documentation about experimental configuration flags. diff --git a/changelog.d/10867.misc b/changelog.d/10867.misc deleted file mode 100644 index 01e51fbc6eb9..000000000000 --- a/changelog.d/10867.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse.http.site`. diff --git a/changelog.d/10868.feature b/changelog.d/10868.feature deleted file mode 100644 index 07e7b2c6a75e..000000000000 --- a/changelog.d/10868.feature +++ /dev/null @@ -1 +0,0 @@ -Speed up responding with large JSON objects to requests. diff --git a/changelog.d/10869.doc b/changelog.d/10869.doc deleted file mode 100644 index c117386072c3..000000000000 --- a/changelog.d/10869.doc +++ /dev/null @@ -1 +0,0 @@ -Properly remove deleted files from GitHub pages when generating the documentation. diff --git a/changelog.d/10873.bugfix b/changelog.d/10873.bugfix deleted file mode 100644 index 32b2e50fd915..000000000000 --- a/changelog.d/10873.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database. diff --git a/changelog.d/10875.bugfix b/changelog.d/10875.bugfix deleted file mode 100644 index 6f370da5c7c9..000000000000 --- a/changelog.d/10875.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. diff --git a/changelog.d/10879.misc b/changelog.d/10879.misc deleted file mode 100644 index acc04930fa51..000000000000 --- a/changelog.d/10879.misc +++ /dev/null @@ -1 +0,0 @@ -Include outlier status when we log V2 or V3 events. 
diff --git a/changelog.d/10880.misc b/changelog.d/10880.misc deleted file mode 100644 index 5f58d6198c68..000000000000 --- a/changelog.d/10880.misc +++ /dev/null @@ -1 +0,0 @@ -Break down Grafana's cache expiry time series based on reason for eviction---see #10829. \ No newline at end of file diff --git a/changelog.d/10881.bugfix b/changelog.d/10881.bugfix deleted file mode 100644 index 0a8905cc46ed..000000000000 --- a/changelog.d/10881.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. diff --git a/changelog.d/10883.misc b/changelog.d/10883.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10883.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10884.misc b/changelog.d/10884.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10884.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10885.misc b/changelog.d/10885.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10885.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10887.bugfix b/changelog.d/10887.bugfix deleted file mode 100644 index 2d1f67489a89..000000000000 --- a/changelog.d/10887.bugfix +++ /dev/null @@ -1 +0,0 @@ -Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). diff --git a/changelog.d/10889.misc b/changelog.d/10889.misc deleted file mode 100644 index 6d60188f55a6..000000000000 --- a/changelog.d/10889.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some unnecessary parentheses in places around the codebase. \ No newline at end of file diff --git a/changelog.d/10891.misc b/changelog.d/10891.misc deleted file mode 100644 index 6eecea4065e9..000000000000 --- a/changelog.d/10891.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hinting in the user directory code. \ No newline at end of file diff --git a/changelog.d/10893.misc b/changelog.d/10893.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10893.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10896.misc b/changelog.d/10896.misc deleted file mode 100644 index 41de99584239..000000000000 --- a/changelog.d/10896.misc +++ /dev/null @@ -1 +0,0 @@ - Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10897.misc b/changelog.d/10897.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10897.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10898.feature b/changelog.d/10898.feature deleted file mode 100644 index 97fa39fd0c2b..000000000000 --- a/changelog.d/10898.feature +++ /dev/null @@ -1 +0,0 @@ -Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. diff --git a/changelog.d/10901.misc b/changelog.d/10901.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10901.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. 
diff --git a/changelog.d/10905.feature b/changelog.d/10905.feature deleted file mode 100644 index 07e7b2c6a75e..000000000000 --- a/changelog.d/10905.feature +++ /dev/null @@ -1 +0,0 @@ -Speed up responding with large JSON objects to requests. diff --git a/changelog.d/10906.misc b/changelog.d/10906.misc deleted file mode 100644 index 20a1cbfbd0b7..000000000000 --- a/changelog.d/10906.misc +++ /dev/null @@ -1 +0,0 @@ -Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker. \ No newline at end of file diff --git a/changelog.d/10907.bugfix b/changelog.d/10907.bugfix deleted file mode 100644 index 601b341f9fa6..000000000000 --- a/changelog.d/10907.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected. diff --git a/changelog.d/10911.bugfix b/changelog.d/10911.bugfix deleted file mode 100644 index 96e36bb15a5a..000000000000 --- a/changelog.d/10911.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. diff --git a/changelog.d/10913.bugfix b/changelog.d/10913.bugfix deleted file mode 100644 index a0015c82413c..000000000000 --- a/changelog.d/10913.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix race conditions when creating media store and config directories. diff --git a/changelog.d/10917.misc b/changelog.d/10917.misc deleted file mode 100644 index 9ce6eef94bf1..000000000000 --- a/changelog.d/10917.misc +++ /dev/null @@ -1 +0,0 @@ -Document and summarize changes in schema version `61` - `64`. diff --git a/changelog.d/10925.misc b/changelog.d/10925.misc deleted file mode 100644 index 0c8027ecc29f..000000000000 --- a/changelog.d/10925.misc +++ /dev/null @@ -1 +0,0 @@ -Update release script to sign the newly created git tags. diff --git a/changelog.d/10931.bugfix b/changelog.d/10931.bugfix deleted file mode 100644 index 3f30c9ccf158..000000000000 --- a/changelog.d/10931.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix debian builds due to dh-virtualenv no longer being able to build their docs. diff --git a/debian/changelog b/debian/changelog index 4b07d0412875..191bb97c5ed2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.44.0~rc1) stable; urgency=medium + + * New synapse release 1.44.0~rc1. + + -- Synapse Packaging team Tue, 28 Sep 2021 13:41:28 +0100 + matrix-synapse-py3 (1.43.0) stable; urgency=medium * New synapse release 1.43.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5f5cff1dfdcd..a1fec8ad2bec 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.43.0" +__version__ = "1.44.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From c3ccad7785cd71372673136f329d5fa098ab9f04 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 28 Sep 2021 08:44:19 -0400 Subject: [PATCH 010/111] Only do restricted join rules signature checks for room versions 8/9. (#10927) Otherwise the presence of a (bogus, unused) field could cause auth checks to fail. 
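As a self-contained illustration of the guard this commit adds (a simplified standalone sketch; the real check operates on Synapse's `EventBase` and `RoomVersion` objects, as the diff below shows):

```python
# Sketch of the fixed predicate: the restricted-join signature check should
# only apply when the room version supports MSC3083 join rules (room
# versions 8/9). Previously, a stray `join_authorised_via_users_server`
# field in an older room version would trigger the check and fail auth.
def is_invite_via_allow_rule(
    supports_msc3083_join_rules: bool,
    event_type: str,
    membership: str,
    content: dict,
) -> bool:
    return (
        supports_msc3083_join_rules  # the new guard
        and event_type == "m.room.member"
        and membership == "join"
        and "join_authorised_via_users_server" in content
    )

# An event in an older room version carrying the bogus field is now ignored:
assert not is_invite_via_allow_rule(
    False, "m.room.member", "join", {"join_authorised_via_users_server": "@a:b"}
)
```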
--- changelog.d/10927.bugfix | 1 + synapse/event_auth.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10927.bugfix diff --git a/changelog.d/10927.bugfix b/changelog.d/10927.bugfix new file mode 100644 index 000000000000..fd24288c5499 --- /dev/null +++ b/changelog.d/10927.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8/9 could be applied to earlier room versions in some situations. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index fc50a0e71a7d..5d7c6fa858fb 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -113,7 +113,8 @@ def check( raise AuthError(403, "Event not signed by sending server") is_invite_via_allow_rule = ( - event.type == EventTypes.Member + room_version_obj.msc3083_join_rules + and event.type == EventTypes.Member and event.membership == Membership.JOIN and "join_authorised_via_users_server" in event.content ) From bc69d49362dfa0ee2e917427c61a7b67c0d78b34 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Sep 2021 13:48:42 +0100 Subject: [PATCH 011/111] Fixup changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index da4d98ac2b25..a7a9abf79cee 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -19,8 +19,8 @@ Bugfixes - Fix a case where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810)) - Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827)) - Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843)) -- Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859)) -- Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873)) +- Fix a bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859)) +- Fix a bug introduced in Synapse 1.37.0 which caused `knock` membership events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873)) - Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875)) - Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881)) - Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). 
([\#10887](https://github.com/matrix-org/synapse/issues/10887)) From 2b9d174791833d8eb8ee40d98cc59d187c2eb205 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Sep 2021 13:50:05 +0100 Subject: [PATCH 012/111] Fixup changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index a7a9abf79cee..0b209edd4c92 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -52,7 +52,7 @@ Internal Changes - Add type hints to the state database. ([\#10823](https://github.com/matrix-org/synapse/issues/10823)) - Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826)) -- Track cache eviction rates more finely in Prometheus' monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829)) +- Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829)) - Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856)) - Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833)) - Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834)) @@ -61,7 +61,7 @@ Internal Changes - Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839)) - Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867)) - Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879)) -- Break down Grafana's cache expiry time series based on reason for eviction---see #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880)) +- Break down Grafana's cache expiry time series based on reason for eviction, c.f. #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880)) - Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901)) - Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889)) - Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891)) From eb2c7e51c460a83b7880eefc66eb9ca6a8adab94 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 28 Sep 2021 09:24:40 -0400 Subject: [PATCH 013/111] Clean-up type hints in server config (#10915) By using attrs instead of dicts to store configuration. Also updates some of the attrs classes to use proper type hints and auto_attribs. 
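For context on the attrs idiom being adopted here, a minimal before/after sketch (the class and fields are invented for the example, not taken from the patch):

```python
import attr
from typing import Optional

# Old style: the type is passed to attr.ib() explicitly.
@attr.s(frozen=True)
class OldStyleConfig:
    interval = attr.ib(type=int)
    max_lifetime = attr.ib(type=Optional[int], default=None)

# New style: auto_attribs=True reads the type annotations directly, and
# slots=True avoids a per-instance __dict__.
@attr.s(slots=True, frozen=True, auto_attribs=True)
class NewStyleConfig:
    interval: int
    max_lifetime: Optional[int] = None

assert NewStyleConfig(interval=60).max_lifetime is None
```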
--- changelog.d/10915.misc | 1 + synapse/config/server.py | 100 ++++++++++++++++----------------- synapse/handlers/pagination.py | 8 +-- 3 files changed, 54 insertions(+), 55 deletions(-) create mode 100644 changelog.d/10915.misc diff --git a/changelog.d/10915.misc b/changelog.d/10915.misc new file mode 100644 index 000000000000..1ce2910ffa66 --- /dev/null +++ b/changelog.d/10915.misc @@ -0,0 +1 @@ +Clean-up configuration helper classes for the `ServerConfig` class. diff --git a/synapse/config/server.py b/synapse/config/server.py index ad8715da2967..041412d7ad89 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,7 +19,7 @@ import os.path import re from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import attr import yaml @@ -184,49 +184,74 @@ def generate_ip_set( @attr.s(frozen=True) class HttpResourceConfig: - names = attr.ib( - type=List[str], + names: List[str] = attr.ib( factory=list, validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), # type: ignore ) - compress = attr.ib( - type=bool, + compress: bool = attr.ib( default=False, validator=attr.validators.optional(attr.validators.instance_of(bool)), # type: ignore[arg-type] ) -@attr.s(frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class HttpListenerConfig: """Object describing the http-specific parts of the config of a listener""" - x_forwarded = attr.ib(type=bool, default=False) - resources = attr.ib(type=List[HttpResourceConfig], factory=list) - additional_resources = attr.ib(type=Dict[str, dict], factory=dict) - tag = attr.ib(type=str, default=None) + x_forwarded: bool = False + resources: List[HttpResourceConfig] = attr.ib(factory=list) + additional_resources: Dict[str, dict] = attr.ib(factory=dict) + tag: Optional[str] = None -@attr.s(frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class ListenerConfig: """Object describing the configuration of a single listener.""" - port = attr.ib(type=int, validator=attr.validators.instance_of(int)) - bind_addresses = attr.ib(type=List[str]) - type = attr.ib(type=str, validator=attr.validators.in_(KNOWN_LISTENER_TYPES)) - tls = attr.ib(type=bool, default=False) + port: int = attr.ib(validator=attr.validators.instance_of(int)) + bind_addresses: List[str] + type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES)) + tls: bool = False # http_options is only populated if type=http - http_options = attr.ib(type=Optional[HttpListenerConfig], default=None) + http_options: Optional[HttpListenerConfig] = None -@attr.s(frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class ManholeConfig: """Object describing the configuration of the manhole""" - username = attr.ib(type=str, validator=attr.validators.instance_of(str)) - password = attr.ib(type=str, validator=attr.validators.instance_of(str)) - priv_key = attr.ib(type=Optional[Key]) - pub_key = attr.ib(type=Optional[Key]) + username: str = attr.ib(validator=attr.validators.instance_of(str)) + password: str = attr.ib(validator=attr.validators.instance_of(str)) + priv_key: Optional[Key] + pub_key: Optional[Key] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RetentionConfig: + """Object describing the configuration of a retention purge job""" + + interval: int + shortest_max_lifetime: Optional[int] + longest_max_lifetime: Optional[int] + + +@attr.s(frozen=True) +class LimitRemoteRoomsConfig: + enabled: bool =
attr.ib(validator=attr.validators.instance_of(bool), default=False) + complexity: Union[float, int] = attr.ib( + validator=attr.validators.instance_of( + (float, int) # type: ignore[arg-type] # noqa + ), + default=1.0, + ) + complexity_error: str = attr.ib( + validator=attr.validators.instance_of(str), + default=ROOM_COMPLEXITY_TOO_GREAT, + ) + admins_can_join: bool = attr.ib( + validator=attr.validators.instance_of(bool), default=False + ) class ServerConfig(Config): @@ -519,7 +544,7 @@ def read_config(self, config, **kwargs): " greater than 'allowed_lifetime_max'" ) - self.retention_purge_jobs: List[Dict[str, Optional[int]]] = [] + self.retention_purge_jobs: List[RetentionConfig] = [] for purge_job_config in retention_config.get("purge_jobs", []): interval_config = purge_job_config.get("interval") @@ -553,20 +578,12 @@ def read_config(self, config, **kwargs): ) self.retention_purge_jobs.append( - { - "interval": interval, - "shortest_max_lifetime": shortest_max_lifetime, - "longest_max_lifetime": longest_max_lifetime, - } + RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime) ) if not self.retention_purge_jobs: self.retention_purge_jobs = [ - { - "interval": self.parse_duration("1d"), - "shortest_max_lifetime": None, - "longest_max_lifetime": None, - } + RetentionConfig(self.parse_duration("1d"), None, None) ] self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])] @@ -591,25 +608,6 @@ def read_config(self, config, **kwargs): self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None)) - @attr.s - class LimitRemoteRoomsConfig: - enabled = attr.ib( - validator=attr.validators.instance_of(bool), default=False - ) - complexity = attr.ib( - validator=attr.validators.instance_of( - (float, int) # type: ignore[arg-type] # noqa - ), - default=1.0, - ) - complexity_error = attr.ib( - validator=attr.validators.instance_of(str), - default=ROOM_COMPLEXITY_TOO_GREAT, - ) - admins_can_join = attr.ib( - validator=attr.validators.instance_of(bool), default=False - ) - self.limit_remote_rooms = LimitRemoteRoomsConfig( **(config.get("limit_remote_rooms") or {}) ) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 08b93b3ec138..a5301ece6f3a 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -92,16 +92,16 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.run_background_tasks and hs.config.retention_enabled: # Run the purge jobs described in the configuration file. 
- for job in hs.config.retention_purge_jobs: + for job in hs.config.server.retention_purge_jobs: logger.info("Setting up purge job with config: %s", job) self.clock.looping_call( run_as_background_process, - job["interval"], + job.interval, "purge_history_for_rooms_in_range", self.purge_history_for_rooms_in_range, - job["shortest_max_lifetime"], - job["longest_max_lifetime"], + job.shortest_max_lifetime, + job.longest_max_lifetime, ) async def purge_history_for_rooms_in_range( From 37bb93d1818eeda0d64c02cb772c8dee5596194f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Sep 2021 14:36:19 +0100 Subject: [PATCH 014/111] Fix exception responding to request that has been closed (#10932) Introduced in #10905 --- changelog.d/10932.feature | 1 + synapse/http/server.py | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelog.d/10932.feature diff --git a/changelog.d/10932.feature b/changelog.d/10932.feature new file mode 100644 index 000000000000..07e7b2c6a75e --- /dev/null +++ b/changelog.d/10932.feature @@ -0,0 +1 @@ +Speed up responding with large JSON objects to requests. diff --git a/synapse/http/server.py b/synapse/http/server.py index 1a50305dcfdc..0df1bfbeef7a 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -561,9 +561,17 @@ def __init__( self._iterator = iterator self._paused = False - # Register the producer and start producing data. - self._request.registerProducer(self, True) - self.resumeProducing() + try: + self._request.registerProducer(self, True) + except RuntimeError as e: + logger.info("Connection disconnected before response was written: %r", e) + + # We drop our references to data we'll not use. + self._request = None + self._iterator = iter(()) + else: + # Start producing if `registerProducer` was successful + self.resumeProducing() def _send_data(self, data: List[bytes]) -> None: """ From 2622b28c5cbe38c60c556544aa7502a8684ee60b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 28 Sep 2021 15:25:07 +0100 Subject: [PATCH 015/111] Inline `_check_event_auth` for outliers (#10926) * Inline `_check_event_auth` for outliers When we are persisting an outlier, most of `_check_event_auth` is redundant: * `_update_auth_events_and_context_for_auth` does nothing, because the `input_auth_events` are (now) exactly the event's auth_events, which means that `missing_auth` is empty. * we don't care about soft-fail, kicking guest users or `send_on_behalf_of` for outliers ... so the only thing that matters is the auth itself, so let's just do that. * `_auth_and_persist_fetched_events_inner`: de-async `prep` `prep` no longer calls any `async` methods, so let's make it synchronous. * Simplify `_check_event_auth` We no longer need to support outliers here, which makes things rather simpler. * changelog * lint --- changelog.d/10896.misc | 2 +- changelog.d/10926.misc | 1 + synapse/handlers/federation_event.py | 93 +++++++++++----------------- tests/test_federation.py | 1 - 4 files changed, 38 insertions(+), 59 deletions(-) create mode 100644 changelog.d/10926.misc diff --git a/changelog.d/10896.misc b/changelog.d/10896.misc index 41de99584239..9a765435dbe4 100644 --- a/changelog.d/10896.misc +++ b/changelog.d/10896.misc @@ -1 +1 @@ - Clean up some of the federation event authentication code for clarity. +Clean up some of the federation event authentication code for clarity. 
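To make the outlier/non-outlier split described in the commit message above concrete, here is a rough sketch with hypothetical helper names (not the actual Synapse API):

```python
def auth_events_for_check(event, get_events, calculate_from_state):
    # Hypothetical helper: outliers are authed against the auth events they
    # *claim*, since we may know nothing else about that part of the DAG;
    # normal events are authed against the auth events we *calculate* from
    # our own copy of the room state.
    if event.internal_metadata.is_outlier():
        claimed = get_events(event.auth_event_ids())
        return {(e.type, e.state_key): e for e in claimed}
    return calculate_from_state(event)
```

After this change, the claimed-auth-events path lives inline in `_auth_and_persist_fetched_events_inner`, and `_check_event_auth` handles only the calculated case.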
diff --git a/changelog.d/10926.misc b/changelog.d/10926.misc new file mode 100644 index 000000000000..9a765435dbe4 --- /dev/null +++ b/changelog.d/10926.misc @@ -0,0 +1 @@ +Clean up some of the federation event authentication code for clarity. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 01fd84112252..2c4644b4a32d 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -68,11 +68,7 @@ UserID, get_domain_from_id, ) -from synapse.util.async_helpers import ( - Linearizer, - concurrently_execute, - yieldable_gather_results, -) +from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.iterutils import batch_iter from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr @@ -1189,7 +1185,10 @@ async def _auth_and_persist_fetched_events_inner( allow_rejected=True, ) - async def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: + room_version = await self._store.get_room_version_id(room_id) + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + + def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: with nested_logging_context(suffix=event.event_id): auth = {} for auth_event_id in event.auth_event_ids(): @@ -1207,17 +1206,15 @@ async def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: auth[(ae.type, ae.state_key)] = ae context = EventContext.for_outlier() - context = await self._check_event_auth( - origin, - event, - context, - claimed_auth_event_map=auth, - ) + try: + event_auth.check(room_version_obj, event, auth_events=auth) + except AuthError as e: + logger.warning("Rejecting %r because %s", event, e) + context.rejected = RejectedReason.AUTH_ERROR + return event, context - events_to_persist = ( - x for x in await yieldable_gather_results(prep, fetched_events) if x - ) + events_to_persist = (x for x in (prep(event) for event in fetched_events) if x) await self.persist_events_and_notify(room_id, tuple(events_to_persist)) async def _check_event_auth( @@ -1226,7 +1223,6 @@ async def _check_event_auth( event: EventBase, context: EventContext, state: Optional[Iterable[EventBase]] = None, - claimed_auth_event_map: Optional[StateMap[EventBase]] = None, backfilled: bool = False, ) -> EventContext: """ @@ -1242,42 +1238,36 @@ async def _check_event_auth( The state events used to check the event for soft-fail. If this is not provided the current state events will be used. - claimed_auth_event_map: - A map of (type, state_key) => event for the event's claimed auth_events. - Possibly including events that were rejected, or are in the wrong room. - - Only populated when populating outliers. - backfilled: True if the event was backfilled. Returns: The updated context object. """ - # claimed_auth_event_map should be given iff the event is an outlier - assert bool(claimed_auth_event_map) == event.internal_metadata.outlier + # This method should only be used for non-outliers + assert not event.internal_metadata.outlier room_version = await self._store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - if claimed_auth_event_map: - # if we have a copy of the auth events from the event, use that as the - # basis for auth. 
- auth_events = claimed_auth_event_map - else: - # otherwise, we calculate what the auth events *should* be, and use that - prev_state_ids = await context.get_prev_state_ids() - auth_events_ids = self._event_auth_handler.compute_auth_events( - event, prev_state_ids, for_verification=True - ) - auth_events_x = await self._store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} + # calculate what the auth events *should* be, to use as a basis for auth. + prev_state_ids = await context.get_prev_state_ids() + auth_events_ids = self._event_auth_handler.compute_auth_events( + event, prev_state_ids, for_verification=True + ) + auth_events_x = await self._store.get_events(auth_events_ids) + calculated_auth_event_map = { + (e.type, e.state_key): e for e in auth_events_x.values() + } try: ( context, auth_events_for_auth, ) = await self._update_auth_events_and_context_for_auth( - origin, event, context, auth_events + origin, + event, + context, + calculated_auth_event_map=calculated_auth_event_map, ) except Exception: # We don't really mind if the above fails, so lets not fail @@ -1289,7 +1279,7 @@ async def _check_event_auth( "Ignoring failure and continuing processing of event.", event.event_id, ) - auth_events_for_auth = auth_events + auth_events_for_auth = calculated_auth_event_map try: event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth) @@ -1425,7 +1415,7 @@ async def _update_auth_events_and_context_for_auth( origin: str, event: EventBase, context: EventContext, - input_auth_events: StateMap[EventBase], + calculated_auth_event_map: StateMap[EventBase], ) -> Tuple[EventContext, StateMap[EventBase]]: """Helper for _check_event_auth. See there for docs. @@ -1443,19 +1433,17 @@ async def _update_auth_events_and_context_for_auth( event: context: - input_auth_events: - Map from (event_type, state_key) to event - - Normally, our calculated auth_events based on the state of the room - at the event's position in the DAG, though occasionally (eg if the - event is an outlier), may be the auth events claimed by the remote - server. + calculated_auth_event_map: + Our calculated auth_events based on the state of the room + at the event's position in the DAG. Returns: updated context, updated auth event map """ - # take a copy of input_auth_events before we modify it. - auth_events: MutableStateMap[EventBase] = dict(input_auth_events) + assert not event.internal_metadata.outlier + + # take a copy of calculated_auth_event_map before we modify it. + auth_events: MutableStateMap[EventBase] = dict(calculated_auth_event_map) event_auth_events = set(event.auth_event_ids()) @@ -1496,15 +1484,6 @@ async def _update_auth_events_and_context_for_auth( } ) - if event.internal_metadata.is_outlier(): - # XXX: given that, for an outlier, we'll be working with the - # event's *claimed* auth events rather than those we calculated: - # (a) is there any point in this test, since different_auth below will - # obviously be empty - # (b) alternatively, why don't we do it earlier? 
- logger.info("Skipping auth_event fetch for outlier") - return context, auth_events - different_auth = event_auth_events.difference( e.event_id for e in auth_events.values() ) diff --git a/tests/test_federation.py b/tests/test_federation.py index c51e018da1a1..24fc77d7a772 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -82,7 +82,6 @@ async def _check_event_auth( event, context, state=None, - claimed_auth_event_map=None, backfilled=False, ): return context From 8aaa4b7b5df5e851a5f3dd74cd3062c9f94f0066 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 28 Sep 2021 15:25:36 +0100 Subject: [PATCH 016/111] Drop backwards-compatibility support for "outlier" (#10903) Before Synapse 1.31 (#9411), we relied on `outlier` being stored in the `internal_metadata` column. We can now assume nobody will roll back their deployment that far and drop the legacy support. --- changelog.d/10903.misc | 1 + synapse/storage/databases/main/events.py | 22 +--------------------- synapse/storage/schema/__init__.py | 6 ++---- 3 files changed, 4 insertions(+), 25 deletions(-) create mode 100644 changelog.d/10903.misc diff --git a/changelog.d/10903.misc b/changelog.d/10903.misc new file mode 100644 index 000000000000..2716ccb08c4f --- /dev/null +++ b/changelog.d/10903.misc @@ -0,0 +1 @@ +Drop old functionality which maintained database compatibility with Synapse versions before 1.31. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 584f818ff361..cc4e31ec3011 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1276,13 +1276,6 @@ def _update_outliers_txn(self, txn, events_and_contexts): logger.exception("") raise - # update the stored internal_metadata to update the "outlier" flag. - # TODO: This is unused as of Synapse 1.31. Remove it once we are happy - # to drop backwards-compatibility with 1.30. - metadata_json = json_encoder.encode(event.internal_metadata.get_dict()) - sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?" - txn.execute(sql, (metadata_json, event.event_id)) - # Add an entry to the ex_outlier_stream table to replicate the # change in outlier status to our workers. stream_order = event.internal_metadata.stream_ordering @@ -1327,19 +1320,6 @@ def event_dict(event): d.pop("redacted_because", None) return d - def get_internal_metadata(event): - im = event.internal_metadata.get_dict() - - # temporary hack for database compatibility with Synapse 1.30 and earlier: - # store the `outlier` flag inside the internal_metadata json as well as in - # the `events` table, so that if anyone rolls back to an older Synapse, - # things keep working. 
This can be removed once we are happy to drop support - # for that - if event.internal_metadata.is_outlier(): - im["outlier"] = True - - return im - self.db_pool.simple_insert_many_txn( txn, table="event_json", @@ -1348,7 +1328,7 @@ def get_internal_metadata(event): "event_id": event.event_id, "room_id": event.room_id, "internal_metadata": json_encoder.encode( - get_internal_metadata(event) + event.internal_metadata.get_dict() ), "json": json_encoder.encode(event_dict(event)), "format_version": event.format_version, diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 573e05a482c0..1aee741a8bd6 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,9 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# When updating these values, please leave a short summary of the changes below. - -SCHEMA_VERSION = 64 +SCHEMA_VERSION = 64 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -46,7 +44,7 @@ """ -SCHEMA_COMPAT_VERSION = 59 +SCHEMA_COMPAT_VERSION = 60 # 60: "outlier" not in internal_metadata. """Limit on how far the synapse codebase can be rolled back without breaking db compat This value is stored in the database, and checked on startup. If the value in the From 0f007fe009dde43a3a85aacee12cd51cd603bd1c Mon Sep 17 00:00:00 2001 From: Hillery Shay Date: Tue, 28 Sep 2021 09:13:23 -0700 Subject: [PATCH 017/111] Update utility code to handle C implementations of frozendict (#10902) * update _handle_frozendict to work with c implementations of frozen dict * add changelog * add clarifying comment to _handle_frozendict --- changelog.d/10902.misc | 1 + synapse/util/__init__.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10902.misc diff --git a/changelog.d/10902.misc b/changelog.d/10902.misc new file mode 100644 index 000000000000..2cd79887f6f7 --- /dev/null +++ b/changelog.d/10902.misc @@ -0,0 +1 @@ +Update utility code to handle C implementations of frozendict. \ No newline at end of file diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index bd234549bd85..64daff59df0d 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -50,7 +50,13 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]: if type(obj) is frozendict: # fishing the protected dict out of the object is a bit nasty, # but we don't really want the overhead of copying the dict. - return obj._dict + try: + return obj._dict + except AttributeError: + # When the C implementation of frozendict is used, + # there isn't a `_dict` attribute with a dict + # so we resort to making a copy of the frozendict + return dict(obj) raise TypeError( "Object of type %s is not JSON serializable" % obj.__class__.__name__ ) From 62800a8fe3b531369c09bb859e90f4b97cd98584 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Sep 2021 17:32:31 +0100 Subject: [PATCH 018/111] Add #10932 to release --- changelog.d/10932.feature | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/10932.feature diff --git a/changelog.d/10932.feature b/changelog.d/10932.feature deleted file mode 100644 index 07e7b2c6a75e..000000000000 --- a/changelog.d/10932.feature +++ /dev/null @@ -1 +0,0 @@ -Speed up responding with large JSON objects to requests. 
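A standalone illustration of the frozendict quirk handled by #10902 above (this assumes the `frozendict` package is installed; which branch runs depends on whether the pure-Python or C implementation is in use):

```python
from frozendict import frozendict

fd = frozendict({"a": 1})

# The pure-Python implementation stores its data in a private `_dict`
# attribute; the C implementation has no such attribute, so we fall back to
# copying the mapping, mirroring the patched _handle_frozendict above.
try:
    plain = fd._dict
except AttributeError:
    plain = dict(fd)

assert plain == {"a": 1}
```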
From 9fd057b8c5a8c5748e7d8137d1485c38abd9602f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 28 Sep 2021 21:23:16 -0500 Subject: [PATCH 019/111] Ensure `(room_id, next_batch_id)` is unique to avoid cross-talk/conflicts between batches (MSC2716) (#10877) Part of [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Part of https://github.com/matrix-org/synapse/issues/10737 --- changelog.d/10877.feature | 1 + synapse/handlers/message.py | 34 ++++++++++++++++++++ synapse/rest/client/room_batch.py | 6 ++-- synapse/storage/databases/main/room_batch.py | 6 ++-- 4 files changed, 43 insertions(+), 4 deletions(-) create mode 100644 changelog.d/10877.feature diff --git a/changelog.d/10877.feature b/changelog.d/10877.feature new file mode 100644 index 000000000000..06a246c108a7 --- /dev/null +++ b/changelog.d/10877.feature @@ -0,0 +1 @@ +Ensure `(room_id, next_batch_id)` is unique across [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms to avoid cross-talk/conflicts between batches. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c66aefe2c4c5..07aadf3f3c94 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -16,6 +16,7 @@ # limitations under the License. import logging import random +from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple from canonicaljson import encode_canonical_json @@ -1461,6 +1462,39 @@ async def persist_and_notify_client_event( if prev_state_ids: raise AuthError(403, "Changing the room create event is forbidden") + if event.type == EventTypes.MSC2716_INSERTION: + room_version = await self.store.get_room_version_id(event.room_id) + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + + create_event = await self.store.get_create_event_for_room(event.room_id) + room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + + # Only check an insertion event if the room version + # supports it or the event is from the room creator. + if room_version_obj.msc2716_historical or ( + self.config.experimental.msc2716_enabled + and event.sender == room_creator + ): + next_batch_id = event.content.get( + EventContentFields.MSC2716_NEXT_BATCH_ID + ) + conflicting_insertion_event_id = ( + await self.store.get_insertion_event_by_batch_id( + event.room_id, next_batch_id + ) + ) + if conflicting_insertion_event_id is not None: + # The current insertion event that we're processing is invalid + # because an insertion event already exists in the room with the + # same next_batch_id. We can't allow multiple because the batch + # pointing will get weird, e.g. we can't determine which insertion + # event the batch event is pointing to. + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Another insertion event already exists with the same next_batch_id", + errcode=Codes.INVALID_PARAM, + ) + # Mark any `m.historical` messages as backfilled so they don't appear # in `/sync` and have the proper decrementing `stream_ordering` as we import backfilled = False diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index bf14ec384ea3..1dffcc314793 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -306,11 +306,13 @@ async def on_POST( # Verify the batch_id_from_query corresponds to an actual insertion event # and have the batch connected. 
corresponding_insertion_event_id = ( - await self.store.get_insertion_event_by_batch_id(batch_id_from_query) + await self.store.get_insertion_event_by_batch_id( + room_id, batch_id_from_query + ) ) if corresponding_insertion_event_id is None: raise SynapseError( - 400, + HTTPStatus.BAD_REQUEST, "No insertion event corresponds to the given ?batch_id", errcode=Codes.INVALID_PARAM, ) diff --git a/synapse/storage/databases/main/room_batch.py b/synapse/storage/databases/main/room_batch.py index a383388757aa..300a563c9e09 100644 --- a/synapse/storage/databases/main/room_batch.py +++ b/synapse/storage/databases/main/room_batch.py @@ -18,7 +18,9 @@ class RoomBatchStore(SQLBaseStore): - async def get_insertion_event_by_batch_id(self, batch_id: str) -> Optional[str]: + async def get_insertion_event_by_batch_id( + self, room_id: str, batch_id: str + ) -> Optional[str]: """Retrieve a insertion event ID. Args: @@ -30,7 +32,7 @@ async def get_insertion_event_by_batch_id(self, batch_id: str) -> Optional[str]: """ return await self.db_pool.simple_select_one_onecol( table="insertion_events", - keyvalues={"next_batch_id": batch_id}, + keyvalues={"room_id": room_id, "next_batch_id": batch_id}, retcol="event_id", allow_none=True, ) From 2be0fde3d65c2dec7fb088de20736b9e81ada948 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 29 Sep 2021 10:24:37 +0100 Subject: [PATCH 020/111] Fix empty `url_cache_thumbnails/yyyy-mm-dd/` directories being left behind (#10924) --- changelog.d/10924.bugfix | 1 + synapse/rest/media/v1/preview_url_resource.py | 74 +++++++++++-------- tests/rest/media/v1/test_url_preview.py | 31 ++++++++ 3 files changed, 75 insertions(+), 31 deletions(-) create mode 100644 changelog.d/10924.bugfix diff --git a/changelog.d/10924.bugfix b/changelog.d/10924.bugfix new file mode 100644 index 000000000000..c73a51e32fe2 --- /dev/null +++ b/changelog.d/10924.bugfix @@ -0,0 +1 @@ +Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory. diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 79a42b24556e..044f44a3977e 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -73,6 +73,7 @@ ONE_HOUR = 60 * 60 * 1000 ONE_DAY = 24 * ONE_HOUR +IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -496,6 +497,27 @@ async def _expire_url_cache_data(self) -> None: logger.info("Still running DB updates; skipping expiry") return + def try_remove_parent_dirs(dirs: Iterable[str]) -> None: + """Attempt to remove the given chain of parent directories + + Args: + dirs: The list of directory paths to delete, with children appearing + before their parents. 
+ """ + for dir in dirs: + try: + os.rmdir(dir) + except FileNotFoundError: + # Already deleted, continue with deleting the rest + pass + except OSError as e: + # Failed, skip deleting the rest of the parent dirs + if e.errno != errno.ENOTEMPTY: + logger.warning( + "Failed to remove media directory: %r: %s", dir, e + ) + break + # First we delete expired url cache entries media_ids = await self.store.get_expired_url_cache(now) @@ -504,20 +526,16 @@ async def _expire_url_cache_data(self) -> None: fname = self.filepaths.url_cache_filepath(media_id) try: os.remove(fname) + except FileNotFoundError: + pass # If the path doesn't exist, meh except OSError as e: - # If the path doesn't exist, meh - if e.errno != errno.ENOENT: - logger.warning("Failed to remove media: %r: %s", media_id, e) - continue + logger.warning("Failed to remove media: %r: %s", media_id, e) + continue removed_media.append(media_id) - try: - dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) - for dir in dirs: - os.rmdir(dir) - except Exception: - pass + dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) + try_remove_parent_dirs(dirs) await self.store.delete_url_cache(removed_media) @@ -530,7 +548,7 @@ async def _expire_url_cache_data(self) -> None: # These may be cached for a bit on the client (i.e., they # may have a room open with a preview url thing open). # So we wait a couple of days before deleting, just in case. - expire_before = now - 2 * ONE_DAY + expire_before = now - IMAGE_CACHE_EXPIRY_MS media_ids = await self.store.get_url_cache_media_before(expire_before) removed_media = [] @@ -538,36 +556,30 @@ async def _expire_url_cache_data(self) -> None: fname = self.filepaths.url_cache_filepath(media_id) try: os.remove(fname) + except FileNotFoundError: + pass # If the path doesn't exist, meh except OSError as e: - # If the path doesn't exist, meh - if e.errno != errno.ENOENT: - logger.warning("Failed to remove media: %r: %s", media_id, e) - continue + logger.warning("Failed to remove media: %r: %s", media_id, e) + continue - try: - dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) - for dir in dirs: - os.rmdir(dir) - except Exception: - pass + dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) + try_remove_parent_dirs(dirs) thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id) try: shutil.rmtree(thumbnail_dir) + except FileNotFoundError: + pass # If the path doesn't exist, meh except OSError as e: - # If the path doesn't exist, meh - if e.errno != errno.ENOENT: - logger.warning("Failed to remove media: %r: %s", media_id, e) - continue + logger.warning("Failed to remove media: %r: %s", media_id, e) + continue removed_media.append(media_id) - try: - dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) - for dir in dirs: - os.rmdir(dir) - except Exception: - pass + dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) + # Note that one of the directories to be deleted has already been + # removed by the `rmtree` above. 
+ try_remove_parent_dirs(dirs) await self.store.delete_url_cache_media(removed_media) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 4d09b5d07ef7..ce43de780b51 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -21,11 +21,13 @@ from twisted.test.proto_helpers import AccumulatingProtocol from synapse.config.oembed import OEmbedEndpointConfig +from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS from synapse.util.stringutils import parse_and_validate_mxc_uri from tests import unittest from tests.server import FakeTransport from tests.test_utils import SMALL_PNG +from tests.utils import MockClock try: import lxml @@ -851,3 +853,32 @@ def test_storage_providers_exclude_thumbnails(self): 404, "URL cache thumbnail was unexpectedly retrieved from a storage provider", ) + + def test_cache_expiry(self): + """Test that URL cache files and thumbnails are cleaned up properly on expiry.""" + self.preview_url.clock = MockClock() + + _host, media_id = self._download_image() + + file_path = self.preview_url.filepaths.url_cache_filepath(media_id) + file_dirs = self.preview_url.filepaths.url_cache_filepath_dirs_to_delete( + media_id + ) + thumbnail_dir = self.preview_url.filepaths.url_cache_thumbnail_directory( + media_id + ) + thumbnail_dirs = self.preview_url.filepaths.url_cache_thumbnail_dirs_to_delete( + media_id + ) + + self.assertTrue(os.path.isfile(file_path)) + self.assertTrue(os.path.isdir(thumbnail_dir)) + + self.preview_url.clock.advance_time_msec(IMAGE_CACHE_EXPIRY_MS + 1) + self.get_success(self.preview_url._expire_url_cache_data()) + + for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs: + self.assertFalse( + os.path.exists(path), + f"{os.path.relpath(path, self.media_store_path)} was not deleted", + ) From 5279b9161b323cccdb74dcdf1a68fa7e19f091d4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 Sep 2021 10:57:10 +0100 Subject: [PATCH 021/111] Use `RoomVersion` objects (#10934) Various refactors to use `RoomVersion` objects instead of room version identifiers. --- changelog.d/10934.misc | 1 + synapse/events/builder.py | 20 --------------- synapse/handlers/federation.py | 46 +++++++++++++++++++--------------- synapse/handlers/message.py | 27 +++++++++++++++----- synapse/handlers/room.py | 4 +-- 5 files changed, 50 insertions(+), 48 deletions(-) create mode 100644 changelog.d/10934.misc diff --git a/changelog.d/10934.misc b/changelog.d/10934.misc new file mode 100644 index 000000000000..56c640ec9e91 --- /dev/null +++ b/changelog.d/10934.misc @@ -0,0 +1 @@ +Refactor various parts of the codebase to use `RoomVersion` objects instead of room version identifier strings. 
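As a sketch of the pattern this refactor moves towards (simplified signatures; the real methods live on Synapse's storage and event-builder classes shown in the diffs below): fetch the structured `RoomVersion` once and pass the object through, rather than passing its identifier string and re-resolving it via `KNOWN_ROOM_VERSIONS` at each call site.

```python
async def make_event_builder(store, factory, room_id: str, key_values: dict):
    # One lookup at the boundary: get_room_version returns the full
    # RoomVersion object, which carries capability flags (e.g.
    # msc3083_join_rules) alongside the identifier, so downstream code
    # never needs a second KNOWN_ROOM_VERSIONS lookup.
    room_version_obj = await store.get_room_version(room_id)
    return factory.for_room_version(room_version_obj, key_values)
```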
diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 87e2bb123b6d..50f2a4c1f418 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -18,10 +18,8 @@ from nacl.signing import SigningKey from synapse.api.constants import MAX_DEPTH -from synapse.api.errors import UnsupportedRoomVersionError from synapse.api.room_versions import ( KNOWN_EVENT_FORMAT_VERSIONS, - KNOWN_ROOM_VERSIONS, EventFormatVersions, RoomVersion, ) @@ -197,24 +195,6 @@ def __init__(self, hs: "HomeServer"): self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() - def new(self, room_version: str, key_values: dict) -> EventBuilder: - """Generate an event builder appropriate for the given room version - - Deprecated: use for_room_version with a RoomVersion object instead - - Args: - room_version: Version of the room that we're creating an event builder for - key_values: Fields used as the basis of the new event - - Returns: - EventBuilder - """ - v = KNOWN_ROOM_VERSIONS.get(room_version) - if not v: - # this can happen if support is withdrawn for a room version - raise UnsupportedRoomVersionError() - return self.for_room_version(v, key_values) - def for_room_version( self, room_version: RoomVersion, key_values: dict ) -> EventBuilder: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b17ef2a9a104..16c435ee866a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -718,8 +718,8 @@ async def on_make_join_request( state_ids, ) - builder = self.event_builder_factory.new( - room_version.identifier, + builder = self.event_builder_factory.for_room_version( + room_version, { "type": EventTypes.Member, "content": event_content, @@ -897,9 +897,9 @@ async def on_make_leave_request( ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) - room_version = await self.store.get_room_version_id(room_id) - builder = self.event_builder_factory.new( - room_version, + room_version_obj = await self.store.get_room_version(room_id) + builder = self.event_builder_factory.for_room_version( + room_version_obj, { "type": EventTypes.Member, "content": {"membership": Membership.LEAVE}, @@ -917,7 +917,7 @@ async def on_make_leave_request( # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` await self._event_auth_handler.check_from_context( - room_version, event, context, do_sig_check=False + room_version_obj.identifier, event, context, do_sig_check=False ) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) @@ -949,10 +949,10 @@ async def on_make_knock_request( ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) - room_version = await self.store.get_room_version_id(room_id) + room_version_obj = await self.store.get_room_version(room_id) - builder = self.event_builder_factory.new( - room_version, + builder = self.event_builder_factory.for_room_version( + room_version_obj, { "type": EventTypes.Member, "content": {"membership": Membership.KNOCK}, @@ -979,7 +979,7 @@ async def on_make_knock_request( # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_knock_request` await self._event_auth_handler.check_from_context( - room_version, event, context, do_sig_check=False + room_version_obj.identifier, event, context, do_sig_check=False ) except AuthError as e: logger.warning("Failed to create new knock %r because %s", event, e) @@ -1245,8 +1245,10 @@ async def exchange_third_party_invite( } if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname): - room_version = await self.store.get_room_version_id(room_id) - builder = self.event_builder_factory.new(room_version, event_dict) + room_version_obj = await self.store.get_room_version(room_id) + builder = self.event_builder_factory.for_room_version( + room_version_obj, event_dict + ) EventValidator().validate_builder(builder) event, context = await self.event_creation_handler.create_new_client_event( @@ -1254,7 +1256,7 @@ async def exchange_third_party_invite( ) event, context = await self.add_display_name_to_third_party_invite( - room_version, event_dict, event, context + room_version_obj, event_dict, event, context ) EventValidator().validate_new(event, self.config) @@ -1265,7 +1267,7 @@ async def exchange_third_party_invite( try: await self._event_auth_handler.check_from_context( - room_version, event, context + room_version_obj.identifier, event, context ) except AuthError as e: logger.warning("Denying new third party invite %r because %s", event, e) @@ -1299,22 +1301,24 @@ async def on_exchange_third_party_invite_request( """ assert_params_in_dict(event_dict, ["room_id"]) - room_version = await self.store.get_room_version_id(event_dict["room_id"]) + room_version_obj = await self.store.get_room_version(event_dict["room_id"]) # NB: event_dict has a particular specced format we might need to fudge # if we change event formats too much. - builder = self.event_builder_factory.new(room_version, event_dict) + builder = self.event_builder_factory.for_room_version( + room_version_obj, event_dict + ) event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) event, context = await self.add_display_name_to_third_party_invite( - room_version, event_dict, event, context + room_version_obj, event_dict, event, context ) try: await self._event_auth_handler.check_from_context( - room_version, event, context + room_version_obj.identifier, event, context ) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) @@ -1331,7 +1335,7 @@ async def on_exchange_third_party_invite_request( async def add_display_name_to_third_party_invite( self, - room_version: str, + room_version_obj: RoomVersion, event_dict: JsonDict, event: EventBase, context: EventContext, @@ -1363,7 +1367,9 @@ async def add_display_name_to_third_party_invite( # auth checks. If we need the invite and don't have it then the # auth check code will explode appropriately. 
- builder = self.event_builder_factory.new(room_version, event_dict) + builder = self.event_builder_factory.for_room_version( + room_version_obj, event_dict + ) EventValidator().validate_builder(builder) event, context = await self.event_creation_handler.create_new_client_event( builder=builder diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 07aadf3f3c94..39c18ecf9988 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -40,6 +40,7 @@ NotFoundError, ShadowBanError, SynapseError, + UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.api.urls import ConsentURIBuilder @@ -550,16 +551,22 @@ async def create_event( await self.auth.check_auth_blocking(requester=requester) if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": - room_version = event_dict["content"]["room_version"] + room_version_id = event_dict["content"]["room_version"] + room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not room_version_obj: + # this can happen if support is withdrawn for a room version + raise UnsupportedRoomVersionError(room_version_id) else: try: - room_version = await self.store.get_room_version_id( + room_version_obj = await self.store.get_room_version( event_dict["room_id"] ) except NotFoundError: raise AuthError(403, "Unknown room") - builder = self.event_builder_factory.new(room_version, event_dict) + builder = self.event_builder_factory.for_room_version( + room_version_obj, event_dict + ) self.validator.validate_builder(builder) @@ -1070,9 +1077,17 @@ async def handle_new_client_event( EventTypes.Create, "", ): - room_version = event.content.get("room_version", RoomVersions.V1.identifier) + room_version_id = event.content.get( + "room_version", RoomVersions.V1.identifier + ) + room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not room_version_obj: + raise UnsupportedRoomVersionError( + "Attempt to create a room with unsupported room version %s" + % (room_version_id,) + ) else: - room_version = await self.store.get_room_version_id(event.room_id) + room_version_obj = await self.store.get_room_version(event.room_id) if event.internal_metadata.is_out_of_band_membership(): # the only sort of out-of-band-membership events we expect to see here are @@ -1082,7 +1097,7 @@ async def handle_new_client_event( else: try: await self._event_auth_handler.check_from_context( - room_version, event, context + room_version_obj.identifier, event, context ) except AuthError as err: logger.warning("Denying new event %r because %s", event, err) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 8fede5e935d0..dc4fab22238d 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -237,9 +237,9 @@ async def _upgrade_room( }, }, ) - old_room_version = await self.store.get_room_version_id(old_room_id) + old_room_version = await self.store.get_room_version(old_room_id) await self._event_auth_handler.check_from_context( - old_room_version, tombstone_event, tombstone_context + old_room_version.identifier, tombstone_event, tombstone_context ) await self.clone_existing_room( From 67815cc3db971f3fd191e6e161e88037dee387d3 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 29 Sep 2021 11:00:56 +0100 Subject: [PATCH 022/111] Tweak changelog --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 0b209edd4c92..a8163802c287 100644 --- a/CHANGES.md +++ 
b/CHANGES.md @@ -5,7 +5,7 @@ Features -------- - Only allow the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send?chunk_id=xxx` endpoint to connect to an already existing insertion event. ([\#10776](https://github.com/matrix-org/synapse/issues/10776)) -- Improve oEmbed previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819)) +- Improve oEmbed URL previews by processing the author name, photo, and video information. ([\#10814](https://github.com/matrix-org/synapse/issues/10814), [\#10819](https://github.com/matrix-org/synapse/issues/10819)) - Speed up responding with large JSON objects to requests. ([\#10868](https://github.com/matrix-org/synapse/issues/10868), [\#10905](https://github.com/matrix-org/synapse/issues/10905)) - Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes. ([\#10898](https://github.com/matrix-org/synapse/issues/10898)) @@ -54,14 +54,14 @@ Internal Changes haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826)) - Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829)) - Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856)) -- Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833)) +- Extend the Module API to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833)) - Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834)) - Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835)) - Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint. ([\#10838](https://github.com/matrix-org/synapse/issues/10838)) - Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`. ([\#10839](https://github.com/matrix-org/synapse/issues/10839)) - Add type hints to `synapse.http.site`. ([\#10867](https://github.com/matrix-org/synapse/issues/10867)) - Include outlier status when we log V2 or V3 events. ([\#10879](https://github.com/matrix-org/synapse/issues/10879)) -- Break down Grafana's cache expiry time series based on reason for eviction, c.f. #10829. ([\#10880](https://github.com/matrix-org/synapse/issues/10880)) +- Break down Grafana's cache expiry time series based on reason for eviction, c.f. [\#10829](https://github.com/matrix-org/synapse/issues/10829). ([\#10880](https://github.com/matrix-org/synapse/issues/10880)) - Clean up some of the federation event authentication code for clarity. 
([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901)) - Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889)) - Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891)) From 1b9ce5e8a6ed37484665b595e3ed01a8e26f9dd7 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 29 Sep 2021 11:09:00 +0100 Subject: [PATCH 023/111] Indicate when bugs were introduced and tidy up --- CHANGES.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index a8163802c287..e27b4aa9420e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,19 +15,17 @@ Bugfixes - Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka. ([\#10690](https://github.com/matrix-org/synapse/issues/10690)) - Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory. ([\#10782](https://github.com/matrix-org/synapse/issues/10782)) -- Allow sending a membership event to unban a user. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807)) -- Fix a case where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810)) -- Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827)) +- Fix a long-standing bug that caused unbanning a user by sending a membership event to fail. Contributed by @aaronraimist. ([\#10807](https://github.com/matrix-org/synapse/issues/10807)) +- Fix a long-standing bug where logging contexts would go missing when federation requests time out. ([\#10810](https://github.com/matrix-org/synapse/issues/10810)) +- Fix a long-standing bug causing an error in the deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters. ([\#10827](https://github.com/matrix-org/synapse/issues/10827)) - Fix a bug causing the `remove_stale_pushers` background job to repeatedly fail and log errors. This bug affected Synapse servers that had been upgraded from version 1.28 or older and are using SQLite. ([\#10843](https://github.com/matrix-org/synapse/issues/10843)) -- Fix a bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859)) +- Fix a long-standing bug in Unicode support of the room search admin API breaking search for rooms with non-ASCII characters. ([\#10859](https://github.com/matrix-org/synapse/issues/10859)) - Fix a bug introduced in Synapse 1.37.0 which caused `knock` membership events which we sent to remote servers to be incorrectly stored in the local database. ([\#10873](https://github.com/matrix-org/synapse/issues/10873)) -- Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper. 
([\#10875](https://github.com/matrix-org/synapse/issues/10875)) -- Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881)) -- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887)) +- Fix invalidating one-time key count cache after claiming keys. The bug was introduced in Synapse v1.41.0. Contributed by Tulir at Beeper. ([\#10875](https://github.com/matrix-org/synapse/issues/10875)) +- Fix a long-standing bug causing application service users to be subject to MAU blocking if the MAU limit had been reached, even if configured not to be blocked. ([\#10881](https://github.com/matrix-org/synapse/issues/10881)) - Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected. ([\#10907](https://github.com/matrix-org/synapse/issues/10907)) -- Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911)) -- Fix race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913)) -- Fix debian builds due to dh-virtualenv no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931)) +- Fix a long-standing bug causing URL cache files to be stored in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space. ([\#10911](https://github.com/matrix-org/synapse/issues/10911)) +- Fix a long-standing bug leading to race conditions when creating media store and config directories. ([\#10913](https://github.com/matrix-org/synapse/issues/10913)) Improved Documentation @@ -53,7 +51,7 @@ Internal Changes - Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you haven't synced recently. ([\#10826](https://github.com/matrix-org/synapse/issues/10826)) - Track cache eviction rates more finely in Prometheus's monitoring. ([\#10829](https://github.com/matrix-org/synapse/issues/10829)) -- Add missing type hints to handlers. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856)) +- Add missing type hints to `synapse.handlers`. ([\#10831](https://github.com/matrix-org/synapse/issues/10831), [\#10856](https://github.com/matrix-org/synapse/issues/10856)) - Extend the Module API to let plug-ins check whether an ID is local and to access IP + User Agent data. ([\#10833](https://github.com/matrix-org/synapse/issues/10833)) - Factor out PNG image data to a constant to be used in several tests. ([\#10834](https://github.com/matrix-org/synapse/issues/10834)) - Add a test to ensure state events sent by modules get persisted correctly. ([\#10835](https://github.com/matrix-org/synapse/issues/10835)) @@ -63,11 +61,13 @@ Internal Changes - Include outlier status when we log V2 or V3 events. 
([\#10879](https://github.com/matrix-org/synapse/issues/10879)) - Break down Grafana's cache expiry time series based on reason for eviction, c.f. [\#10829](https://github.com/matrix-org/synapse/issues/10829). ([\#10880](https://github.com/matrix-org/synapse/issues/10880)) - Clean up some of the federation event authentication code for clarity. ([\#10883](https://github.com/matrix-org/synapse/issues/10883), [\#10884](https://github.com/matrix-org/synapse/issues/10884), [\#10896](https://github.com/matrix-org/synapse/issues/10896), [\#10901](https://github.com/matrix-org/synapse/issues/10901)) +- Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). ([\#10887](https://github.com/matrix-org/synapse/issues/10887)) - Clean up some unnecessary parentheses in places around the codebase. ([\#10889](https://github.com/matrix-org/synapse/issues/10889)) - Improve type hinting in the user directory code. ([\#10891](https://github.com/matrix-org/synapse/issues/10891)) - Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker. ([\#10906](https://github.com/matrix-org/synapse/issues/10906)) -- Document and summarize changes in schema version `61` - `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917)) +- Document and summarize changes in schema version `61` – `64`. ([\#10917](https://github.com/matrix-org/synapse/issues/10917)) - Update release script to sign the newly created git tags. ([\#10925](https://github.com/matrix-org/synapse/issues/10925)) +- Fix Debian builds due to `dh-virtualenv` no longer being able to build their docs. ([\#10931](https://github.com/matrix-org/synapse/issues/10931)) Synapse 1.43.0 (2021-09-21) From 13032b6603d91d9960592fe2506bb5dcb4ae1ad8 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 29 Sep 2021 11:13:03 +0100 Subject: [PATCH 024/111] Bump the date because the release ran over --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e27b4aa9420e..271e2271fb24 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -Synapse 1.44.0rc1 (2021-09-28) +Synapse 1.44.0rc1 (2021-09-29) ============================== Features From 8cef1ab2ac8d1602ea6a087384059d104097140f Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 29 Sep 2021 04:32:45 -0600 Subject: [PATCH 025/111] Implement MSC3069: Guest support on whoami (#9655) --- changelog.d/9655.feature | 1 + synapse/rest/client/account.py | 8 +++-- tests/rest/client/test_account.py | 49 +++++++++++++++++++++++++++---- 3 files changed, 51 insertions(+), 7 deletions(-) create mode 100644 changelog.d/9655.feature diff --git a/changelog.d/9655.feature b/changelog.d/9655.feature new file mode 100644 index 000000000000..70cac230d848 --- /dev/null +++ b/changelog.d/9655.feature @@ -0,0 +1 @@ +Add [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069) support to `/account/whoami`. 
\ No newline at end of file diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 6a7608d60b97..bacb82833022 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -878,9 +878,13 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) + requester = await self.auth.get_user_by_req(request, allow_guest=True) - response = {"user_id": requester.user.to_string()} + response = { + "user_id": requester.user.to_string(), + # MSC: https://github.com/matrix-org/matrix-doc/pull/3069 + "org.matrix.msc3069.is_guest": bool(requester.is_guest), + } # Appservices and similar accounts do not have device IDs # that we can report on, so exclude them for compliance. diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 9e9e953cf4b2..64b0b8458b6e 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -470,13 +470,45 @@ class WhoamiTestCase(unittest.HomeserverTestCase): register.register_servlets, ] + def default_config(self): + config = super().default_config() + config["allow_guest_access"] = True + return config + def test_GET_whoami(self): device_id = "wouldgohere" user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test", device_id=device_id) - whoami = self.whoami(tok) - self.assertEqual(whoami, {"user_id": user_id, "device_id": device_id}) + whoami = self._whoami(tok) + self.assertEqual( + whoami, + { + "user_id": user_id, + "device_id": device_id, + # Unstable until MSC3069 enters spec + "org.matrix.msc3069.is_guest": False, + }, + ) + + def test_GET_whoami_guests(self): + channel = self.make_request( + b"POST", b"/_matrix/client/r0/register?kind=guest", b"{}" + ) + tok = channel.json_body["access_token"] + user_id = channel.json_body["user_id"] + device_id = channel.json_body["device_id"] + + whoami = self._whoami(tok) + self.assertEqual( + whoami, + { + "user_id": user_id, + "device_id": device_id, + # Unstable until MSC3069 enters spec + "org.matrix.msc3069.is_guest": True, + }, + ) def test_GET_whoami_appservices(self): user_id = "@as:test" @@ -491,11 +523,18 @@ def test_GET_whoami_appservices(self): ) self.hs.get_datastore().services_cache.append(appservice) - whoami = self.whoami(as_token) - self.assertEqual(whoami, {"user_id": user_id}) + whoami = self._whoami(as_token) + self.assertEqual( + whoami, + { + "user_id": user_id, + # Unstable until MSC3069 enters spec + "org.matrix.msc3069.is_guest": False, + }, + ) self.assertFalse(hasattr(whoami, "device_id")) - def whoami(self, tok): + def _whoami(self, tok): channel = self.make_request("GET", "account/whoami", {}, access_token=tok) self.assertEqual(channel.code, 200) return channel.json_body From 94b620a5edd6b5bc55c8aad6e00a11cc6bf210fa Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 29 Sep 2021 06:44:15 -0400 Subject: [PATCH 026/111] Use direct references for configuration variables (part 6). 
(#10916) --- changelog.d/10916.misc | 1 + synapse/app/_base.py | 8 ++++---- synapse/app/admin_cmd.py | 4 ++-- synapse/app/generic_worker.py | 2 +- synapse/app/homeserver.py | 14 +++++++------- synapse/app/phone_stats_home.py | 8 ++++---- synapse/config/_base.py | 2 +- synapse/config/server.py | 4 +--- synapse/events/presence_router.py | 6 +++--- synapse/events/utils.py | 2 +- synapse/federation/transport/server/__init__.py | 2 +- synapse/handlers/directory.py | 2 +- synapse/handlers/federation.py | 2 +- synapse/handlers/identity.py | 2 +- synapse/handlers/message.py | 14 ++++++++------ synapse/handlers/pagination.py | 14 ++++++++++---- synapse/handlers/profile.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_member.py | 14 +++++++------- synapse/handlers/search.py | 2 +- synapse/handlers/user_directory.py | 2 +- synapse/http/matrixfederationclient.py | 10 +++++----- synapse/replication/tcp/resource.py | 2 +- synapse/rest/client/account.py | 10 +++++----- synapse/rest/client/capabilities.py | 4 ++-- synapse/rest/client/filter.py | 2 +- synapse/rest/client/profile.py | 6 +++--- synapse/rest/client/register.py | 6 +++--- synapse/rest/client/room.py | 2 +- synapse/rest/client/shared_rooms.py | 2 +- synapse/rest/client/sync.py | 2 +- .../resource_limits_server_notices.py | 8 ++++---- synapse/storage/databases/main/censor_events.py | 8 +++++--- synapse/storage/databases/main/client_ips.py | 2 +- synapse/storage/databases/main/events.py | 2 +- .../storage/databases/main/monthly_active_users.py | 12 ++++++------ synapse/storage/databases/main/registration.py | 2 +- synapse/storage/databases/main/room.py | 8 ++++---- synapse/storage/databases/main/search.py | 4 ++-- synapse/storage/prepare_database.py | 2 +- tests/api/test_auth.py | 14 +++++++------- tests/federation/test_federation_server.py | 2 +- tests/handlers/test_register.py | 14 +++++++------- tests/http/test_fedclient.py | 2 +- tests/rest/admin/test_user.py | 6 +++--- tests/rest/client/test_account.py | 2 +- tests/rest/client/test_capabilities.py | 2 +- tests/rest/client/test_presence.py | 2 +- tests/rest/client/test_register.py | 4 ++-- .../test_resource_limits_server_notices.py | 2 +- tests/storage/test_monthly_active_users.py | 14 +++++++------- tests/test_mau.py | 2 +- tests/unittest.py | 2 +- 54 files changed, 141 insertions(+), 132 deletions(-) create mode 100644 changelog.d/10916.misc diff --git a/changelog.d/10916.misc b/changelog.d/10916.misc new file mode 100644 index 000000000000..586a0b3a9670 --- /dev/null +++ b/changelog.d/10916.misc @@ -0,0 +1 @@ +Use direct references to config flags. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 548f6dcde977..749bc1deb913 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -86,11 +86,11 @@ def start_worker_reactor(appname, config, run_command=reactor.run): start_reactor( appname, - soft_file_limit=config.soft_file_limit, - gc_thresholds=config.gc_thresholds, + soft_file_limit=config.server.soft_file_limit, + gc_thresholds=config.server.gc_thresholds, pid_file=config.worker.worker_pid_file, daemonize=config.worker.worker_daemonize, - print_pidfile=config.print_pidfile, + print_pidfile=config.server.print_pidfile, logger=logger, run_command=run_command, ) @@ -298,7 +298,7 @@ def refresh_certificate(hs): Refresh the TLS certificates that Synapse is using by re-reading them from disk and updating the TLS context factories to use them. 
""" - if not hs.config.has_tls_listener(): + if not hs.config.server.has_tls_listener(): return hs.config.read_certificate_from_disk() diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index f2c5b75247b6..556bcc124e38 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -195,14 +195,14 @@ def start(config_options): config.logging.no_redirect_stdio = True # Explicitly disable background processes - config.update_user_directory = False + config.server.update_user_directory = False config.worker.run_background_tasks = False config.start_pushers = False config.pusher_shard_config.instances = [] config.send_federation = False config.federation_shard_config.instances = [] - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts ss = AdminCmdServer( config.server.server_name, diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 3036e1b4a03c..7489f31d9add 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -462,7 +462,7 @@ def start(config_options): # For other worker types we force this to off. config.server.update_user_directory = False - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage if config.server.gc_seconds: diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 205831dcda06..2b2d4bbf83fe 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -248,7 +248,7 @@ def _configure_named_resource(self, name, compress=False): resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": - webclient_loc = self.config.web_client_location + webclient_loc = self.config.server.web_client_location if webclient_loc is None: logger.warning( @@ -343,7 +343,7 @@ def setup(config_options): # generating config files and shouldn't try to continue. 
sys.exit(0) - events.USE_FROZEN_DICTS = config.use_frozen_dicts + events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage if config.server.gc_seconds: @@ -439,11 +439,11 @@ def profiled(*args, **kargs): _base.start_reactor( "synapse-homeserver", - soft_file_limit=hs.config.soft_file_limit, - gc_thresholds=hs.config.gc_thresholds, - pid_file=hs.config.pid_file, - daemonize=hs.config.daemonize, - print_pidfile=hs.config.print_pidfile, + soft_file_limit=hs.config.server.soft_file_limit, + gc_thresholds=hs.config.server.gc_thresholds, + pid_file=hs.config.server.pid_file, + daemonize=hs.config.server.daemonize, + print_pidfile=hs.config.server.print_pidfile, logger=logger, ) diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 49e7a45e5ce2..fcd01e833c84 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -74,7 +74,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process): store = hs.get_datastore() stats["homeserver"] = hs.config.server.server_name - stats["server_context"] = hs.config.server_context + stats["server_context"] = hs.config.server.server_context stats["timestamp"] = now stats["uptime_seconds"] = uptime version = sys.version_info @@ -171,7 +171,7 @@ async def generate_monthly_active_users(): current_mau_count_by_service = {} reserved_users = () store = hs.get_datastore() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: current_mau_count = await store.get_monthly_active_count() current_mau_count_by_service = ( await store.get_monthly_active_count_by_service() @@ -183,9 +183,9 @@ async def generate_monthly_active_users(): current_mau_by_service_gauge.labels(app_service).set(float(count)) registered_reserved_users_mau_gauge.set(float(len(reserved_users))) - max_mau_gauge.set(float(hs.config.max_mau_value)) + max_mau_gauge.set(float(hs.config.server.max_mau_value)) - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: generate_monthly_active_users() clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) # End of monthly active user settings diff --git a/synapse/config/_base.py b/synapse/config/_base.py index d974a1a2a814..26152b092472 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -327,7 +327,7 @@ def __getattr__(self, item: str) -> Any: """ Redirect lookups on this object either to config objects, or values on config objects, so that `config.tls.blah` works, as well as legacy uses - of things like `config.server_name`. It will first look up the config + of things like `config.server.server_name`. It will first look up the config section name, and then values on those config classes. """ if item in self._configs.keys(): diff --git a/synapse/config/server.py b/synapse/config/server.py index 041412d7ad89..818b80635786 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -1,6 +1,4 @@ -# Copyright 2014-2016 OpenMarket Ltd -# Copyright 2017-2018 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2014-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
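The `__getattr__` hunk in `synapse/config/_base.py` above is what keeps legacy flat lookups working while call sites migrate to direct section references. A rough sketch of that redirect mechanism, with hypothetical section and field names, assuming only the shape shown in the hunk:

```python
# A minimal sketch (hypothetical names) of the legacy-lookup redirect that
# lets both the direct `config.server.server_name` and the older flat
# `config.server_name` resolve to the same value during the migration.
from typing import Any


class ServerConfig:
    """Hypothetical section object; the real one has many more fields."""

    def __init__(self) -> None:
        self.server_name = "example.com"


class RootConfig:
    def __init__(self) -> None:
        # Section name -> section object, mirroring `self._configs` above.
        self._configs = {"server": ServerConfig()}

    def __getattr__(self, item: str) -> Any:
        # Only called when normal lookup fails. First treat `item` as a
        # section name (`config.server`), then fall back to searching each
        # section for a legacy flat attribute (`config.server_name`).
        if item in self._configs:
            return self._configs[item]
        for section in self._configs.values():
            if hasattr(section, item):
                return getattr(section, item)
        raise AttributeError(item)


config = RootConfig()
# Both the new direct reference and the legacy flat lookup resolve:
assert config.server.server_name == config.server_name == "example.com"
```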
diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py index eb4556cdc10a..68b8b19024b4 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py @@ -45,11 +45,11 @@ def load_legacy_presence_router(hs: "HomeServer"): configuration, and registers the hooks they implement. """ - if hs.config.presence_router_module_class is None: + if hs.config.server.presence_router_module_class is None: return - module = hs.config.presence_router_module_class - config = hs.config.presence_router_config + module = hs.config.server.presence_router_module_class + config = hs.config.server.presence_router_config api = hs.get_module_api() presence_router = module(config=config, module_api=api) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index f86113a448c3..a13fb0148fc8 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -372,7 +372,7 @@ class EventClientSerializer: def __init__(self, hs): self.store = hs.get_datastore() self.experimental_msc1849_support_enabled = ( - hs.config.experimental_msc1849_support_enabled + hs.config.server.experimental_msc1849_support_enabled ) async def serialize_event( diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 95176ba6f9e8..c32539bf5a52 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -117,7 +117,7 @@ def __init__( ): super().__init__(hs, authenticator, ratelimiter, server_name) self.handler = hs.get_room_list_handler() - self.allow_access = hs.config.allow_public_rooms_over_federation + self.allow_access = hs.config.server.allow_public_rooms_over_federation async def on_GET( self, origin: str, content: Literal[None], query: Dict[bytes, List[bytes]] diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 5cfba3c8176f..9078781d5a3d 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -49,7 +49,7 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self.config = hs.config self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search - self.require_membership = hs.config.require_membership_for_aliases + self.require_membership = hs.config.server.require_membership_for_aliases self.third_party_event_rules = hs.get_third_party_event_rules() self.federation = hs.get_federation_client() diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 16c435ee866a..3b0b895b0777 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -762,7 +762,7 @@ async def on_invite_request( if is_blocked: raise SynapseError(403, "This room has been blocked on this server") - if self.hs.config.block_non_admin_invites: + if self.hs.config.server.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") if not await self.spam_checker.user_may_invite( diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index fe8a9958924b..a0640fcac0c6 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -57,7 +57,7 @@ def __init__(self, hs: "HomeServer"): self.http_client = SimpleHttpClient(hs) # An HTTP client for contacting identity servers specified by clients. 
self.blacklisting_http_client = SimpleHttpClient( - hs, ip_blacklist=hs.config.federation_ip_range_blacklist + hs, ip_blacklist=hs.config.server.federation_ip_range_blacklist ) self.federation_http_client = hs.get_federation_http_client() self.hs = hs diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 39c18ecf9988..3b8cc50ec020 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -81,7 +81,7 @@ def __init__(self, hs: "HomeServer"): self.storage = hs.get_storage() self.state_store = self.storage.state self._event_serializer = hs.get_event_client_serializer() - self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages + self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages # The scheduled call to self._expire_event. None if no call is currently # scheduled. @@ -415,7 +415,9 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname self.notifier = hs.get_notifier() self.config = hs.config - self.require_membership_for_aliases = hs.config.require_membership_for_aliases + self.require_membership_for_aliases = ( + hs.config.server.require_membership_for_aliases + ) self._events_shard_config = self.config.worker.events_shard_config self._instance_name = hs.get_instance_name() @@ -425,7 +427,7 @@ def __init__(self, hs: "HomeServer"): Membership.JOIN, Membership.KNOCK, } - if self.hs.config.include_profile_data_on_invite: + if self.hs.config.server.include_profile_data_on_invite: self.membership_types_to_include_profile_data_in.add(Membership.INVITE) self.send_event = ReplicationSendEventRestServlet.make_client(hs) @@ -461,11 +463,11 @@ def __init__(self, hs: "HomeServer"): # self._rooms_to_exclude_from_dummy_event_insertion: Dict[str, int] = {} # The number of forward extremeities before a dummy event is sent. 
- self._dummy_events_threshold = hs.config.dummy_events_threshold + self._dummy_events_threshold = hs.config.server.dummy_events_threshold if ( self.config.worker.run_background_tasks - and self.config.cleanup_extremities_with_dummy_events + and self.config.server.cleanup_extremities_with_dummy_events ): self.clock.looping_call( lambda: run_as_background_process( @@ -477,7 +479,7 @@ def __init__(self, hs: "HomeServer"): self._message_handler = hs.get_message_handler() - self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages + self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages self._external_cache = hs.get_external_cache() diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a5301ece6f3a..176e4dfdd432 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -85,12 +85,18 @@ def __init__(self, hs: "HomeServer"): self._purges_by_id: Dict[str, PurgeStatus] = {} self._event_serializer = hs.get_event_client_serializer() - self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime + self._retention_default_max_lifetime = ( + hs.config.server.retention_default_max_lifetime + ) - self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min - self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max + self._retention_allowed_lifetime_min = ( + hs.config.server.retention_allowed_lifetime_min + ) + self._retention_allowed_lifetime_max = ( + hs.config.server.retention_allowed_lifetime_max + ) - if hs.config.worker.run_background_tasks and hs.config.retention_enabled: + if hs.config.worker.run_background_tasks and hs.config.server.retention_enabled: # Run the purge jobs described in the configuration file. for job in hs.config.server.retention_purge_jobs: logger.info("Setting up purge job with config: %s", job) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index b23a1541bc33..425c0d4973e6 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -397,7 +397,7 @@ async def check_profile_query_allowed( # when building a membership event. In this case, we must allow the # lookup. 
if ( - not self.hs.config.limit_profile_requests_to_users_who_share_rooms + not self.hs.config.server.limit_profile_requests_to_users_who_share_rooms or not requester ): return diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4f99f137a212..4a7ccb882e43 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -854,7 +854,7 @@ async def post_registration_actions( # Necessary due to auth checks prior to the threepid being # written to the db if is_threepid_reserved( - self.hs.config.mau_limits_reserved_threepids, threepid + self.hs.config.server.mau_limits_reserved_threepids, threepid ): await self.store.upsert_monthly_active_user(user_id) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index dc4fab22238d..bf8a85f563d8 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -666,7 +666,7 @@ async def create_room( await self.ratelimit(requester) room_version_id = config.get( - "room_version", self.config.default_room_version.identifier + "room_version", self.config.server.default_room_version.identifier ) if not isinstance(room_version_id, str): diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 1a56c82fbd9e..02103f6c9aa8 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -90,7 +90,7 @@ def __init__(self, hs: "HomeServer"): self.third_party_event_rules = hs.get_third_party_event_rules() self._server_notices_mxid = self.config.servernotices.server_notices_mxid self._enable_lookup = hs.config.enable_3pid_lookup - self.allow_per_room_profiles = self.config.allow_per_room_profiles + self.allow_per_room_profiles = self.config.server.allow_per_room_profiles self._join_rate_limiter_local = Ratelimiter( store=self.store, @@ -617,7 +617,7 @@ async def update_membership_locked( is_requester_admin = await self.auth.is_server_admin(requester.user) if not is_requester_admin: - if self.config.block_non_admin_invites: + if self.config.server.block_non_admin_invites: logger.info( "Blocking invite: user is not admin and non-admin " "invites disabled" @@ -1222,7 +1222,7 @@ async def do_3pid_invite( Raises: ShadowBanError if the requester has been shadow-banned. """ - if self.config.block_non_admin_invites: + if self.config.server.block_non_admin_invites: is_requester_admin = await self.auth.is_server_admin(requester.user) if not is_requester_admin: raise SynapseError( @@ -1420,7 +1420,7 @@ async def _is_remote_room_too_complex( Returns: bool of whether the complexity is too great, or None if unable to be fetched """ - max_complexity = self.hs.config.limit_remote_rooms.complexity + max_complexity = self.hs.config.server.limit_remote_rooms.complexity complexity = await self.federation_handler.get_room_complexity( remote_room_hosts, room_id ) @@ -1436,7 +1436,7 @@ async def _is_local_room_too_complex(self, room_id: str) -> bool: Args: room_id: The room ID to check for complexity. 
""" - max_complexity = self.hs.config.limit_remote_rooms.complexity + max_complexity = self.hs.config.server.limit_remote_rooms.complexity complexity = await self.store.get_room_complexity(room_id) return complexity["v1"] > max_complexity @@ -1472,7 +1472,7 @@ async def _remote_join( if too_complex is True: raise SynapseError( code=400, - msg=self.hs.config.limit_remote_rooms.complexity_error, + msg=self.hs.config.server.limit_remote_rooms.complexity_error, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, ) @@ -1507,7 +1507,7 @@ async def _remote_join( ) raise SynapseError( code=400, - msg=self.hs.config.limit_remote_rooms.complexity_error, + msg=self.hs.config.server.limit_remote_rooms.complexity_error, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, ) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 8226d6f5a1a8..6d3333ee00f3 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -105,7 +105,7 @@ async def search( dict to be returned to the client with results of search """ - if not self.hs.config.enable_search: + if not self.hs.config.server.enable_search: raise SynapseError(400, "Search is disabled on this homeserver") batch_group = None diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index b91e7cb501c5..f4430ce3c9aa 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -60,7 +60,7 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.notifier = hs.get_notifier() self.is_mine_id = hs.is_mine_id - self.update_user_directory = hs.config.update_user_directory + self.update_user_directory = hs.config.server.update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users self.spam_checker = hs.get_spam_checker() # The current position in the current_state_delta stream diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cdc36b8d2570..4f592246860b 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -327,23 +327,23 @@ def __init__(self, hs, tls_client_options_factory): self.reactor = hs.get_reactor() user_agent = hs.version_string - if hs.config.user_agent_suffix: - user_agent = "%s %s" % (user_agent, hs.config.user_agent_suffix) + if hs.config.server.user_agent_suffix: + user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix) user_agent = user_agent.encode("ascii") federation_agent = MatrixFederationAgent( self.reactor, tls_client_options_factory, user_agent, - hs.config.federation_ip_range_whitelist, - hs.config.federation_ip_range_blacklist, + hs.config.server.federation_ip_range_whitelist, + hs.config.server.federation_ip_range_blacklist, ) # Use a BlacklistingAgentWrapper to prevent circumventing the IP # blacklist via IP literals in server names self.agent = BlacklistingAgentWrapper( federation_agent, - ip_blacklist=hs.config.federation_ip_range_blacklist, + ip_blacklist=hs.config.server.federation_ip_range_blacklist, ) self.clock = hs.get_clock() diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 030852cb5bc9..80f9b23bfd74 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -71,7 +71,7 @@ def __init__(self, hs): self.notifier = hs.get_notifier() self._instance_name = hs.get_instance_name() - self._replication_torture_level = hs.config.replication_torture_level + self._replication_torture_level = hs.config.server.replication_torture_level 
self.notifier.add_replication_callback(self.on_notifier_poke) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index bacb82833022..fff133ef1048 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -119,7 +119,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) if existing_user_id is None: - if self.config.request_token_inhibit_3pid_errors: + if self.config.server.request_token_inhibit_3pid_errors: # Make the client think the operation succeeded. See the rationale in the # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it @@ -403,7 +403,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: existing_user_id = await self.store.get_user_id_by_threepid("email", email) if existing_user_id is not None: - if self.config.request_token_inhibit_3pid_errors: + if self.config.server.request_token_inhibit_3pid_errors: # Make the client think the operation succeeded. See the rationale in the # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it @@ -486,7 +486,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) if existing_user_id is not None: - if self.hs.config.request_token_inhibit_3pid_errors: + if self.hs.config.server.request_token_inhibit_3pid_errors: # Make the client think the operation succeeded. See the rationale in the # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it @@ -857,8 +857,8 @@ def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None: # If the domain whitelist is set, the domain must be in it if ( valid - and hs.config.next_link_domain_whitelist is not None - and next_link_parsed.hostname not in hs.config.next_link_domain_whitelist + and hs.config.server.next_link_domain_whitelist is not None + and next_link_parsed.hostname not in hs.config.server.next_link_domain_whitelist ): valid = False diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 65b3b5ce2cc5..d6b62564132a 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -44,10 +44,10 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await self.auth.get_user_by_req(request, allow_guest=True) change_password = self.auth_handler.can_change_password() - response = { + response: JsonDict = { "capabilities": { "m.room_versions": { - "default": self.config.default_room_version.identifier, + "default": self.config.server.default_room_version.identifier, "available": { v.identifier: v.disposition for v in KNOWN_ROOM_VERSIONS.values() diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index 6ed60c74181f..cc1c2f973140 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -90,7 +90,7 @@ async def on_POST( raise AuthError(403, "Can only create filters for local users") content = parse_json_object_from_request(request) - set_timeline_upper_limit(content, self.hs.config.filter_timeline_limit) + set_timeline_upper_limit(content, self.hs.config.server.filter_timeline_limit) filter_id = await self.filtering.add_user_filter( user_localpart=target_user.localpart, user_filter=content diff --git a/synapse/rest/client/profile.py 
b/synapse/rest/client/profile.py index d0f20de569c0..c684636c0a01 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py @@ -41,7 +41,7 @@ async def on_GET( ) -> Tuple[int, JsonDict]: requester_user = None - if self.hs.config.require_auth_for_profile_requests: + if self.hs.config.server.require_auth_for_profile_requests: requester = await self.auth.get_user_by_req(request) requester_user = requester.user @@ -94,7 +94,7 @@ async def on_GET( ) -> Tuple[int, JsonDict]: requester_user = None - if self.hs.config.require_auth_for_profile_requests: + if self.hs.config.server.require_auth_for_profile_requests: requester = await self.auth.get_user_by_req(request) requester_user = requester.user @@ -146,7 +146,7 @@ async def on_GET( ) -> Tuple[int, JsonDict]: requester_user = None - if self.hs.config.require_auth_for_profile_requests: + if self.hs.config.server.require_auth_for_profile_requests: requester = await self.auth.get_user_by_req(request) requester_user = requester.user diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 48b0062cf434..a6eb6f641067 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -129,7 +129,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) if existing_user_id is not None: - if self.hs.config.request_token_inhibit_3pid_errors: + if self.hs.config.server.request_token_inhibit_3pid_errors: # Make the client think the operation succeeded. See the rationale in the # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it @@ -209,7 +209,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) if existing_user_id is not None: - if self.hs.config.request_token_inhibit_3pid_errors: + if self.hs.config.server.request_token_inhibit_3pid_errors: # Make the client think the operation succeeded. See the rationale in the # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it @@ -682,7 +682,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # written to the db if threepid: if is_threepid_reserved( - self.hs.config.mau_limits_reserved_threepids, threepid + self.hs.config.server.mau_limits_reserved_threepids, threepid ): await self.store.upsert_monthly_active_user(registered_user_id) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index bf46dc60f262..ed95189b6d8b 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -369,7 +369,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # Option to allow servers to require auth when accessing # /publicRooms via CS API. This is especially helpful in private # federations. 
- if not self.hs.config.allow_public_rooms_without_auth: + if not self.hs.config.server.allow_public_rooms_without_auth: raise # We allow people to not be authed if they're just looking at our diff --git a/synapse/rest/client/shared_rooms.py b/synapse/rest/client/shared_rooms.py index 1d90493eb082..09a46737de06 100644 --- a/synapse/rest/client/shared_rooms.py +++ b/synapse/rest/client/shared_rooms.py @@ -42,7 +42,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() - self.user_directory_active = hs.config.update_user_directory + self.user_directory_active = hs.config.server.update_user_directory async def on_GET( self, request: SynapseRequest, user_id: str diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 1259058b9b13..913216a7c49f 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -155,7 +155,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: try: filter_object = json_decoder.decode(filter_id) set_timeline_upper_limit( - filter_object, self.hs.config.filter_timeline_limit + filter_object, self.hs.config.server.filter_timeline_limit ) except Exception: raise SynapseError(400, "Invalid filter JSON") diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 073b0d754fd6..8522930b5048 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -47,9 +47,9 @@ def __init__(self, hs: "HomeServer"): self._notifier = hs.get_notifier() self._enabled = ( - hs.config.limit_usage_by_mau + hs.config.server.limit_usage_by_mau and self._server_notices_manager.is_enabled() - and not hs.config.hs_disabled + and not hs.config.server.hs_disabled ) async def maybe_send_server_notice_to_user(self, user_id: str) -> None: @@ -98,7 +98,7 @@ async def maybe_send_server_notice_to_user(self, user_id: str) -> None: try: if ( limit_type == LimitBlockingTypes.MONTHLY_ACTIVE_USER - and not self._config.mau_limit_alerting + and not self._config.server.mau_limit_alerting ): # We have hit the MAU limit, but MAU alerting is disabled: # reset room if necessary and return @@ -149,7 +149,7 @@ async def _apply_limit_block_notification( "body": event_body, "msgtype": ServerNoticeMsgType, "server_notice_type": ServerNoticeLimitReached, - "admin_contact": self._config.admin_contact, + "admin_contact": self._config.server.admin_contact, "limit_type": event_limit_type, } event = await self._server_notices_manager.send_notice( diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 6305414e3d55..eee07227ef39 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -36,7 +36,7 @@ def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"): if ( hs.config.worker.run_background_tasks - and self.hs.config.redaction_retention_period is not None + and self.hs.config.server.redaction_retention_period is not None ): hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000) @@ -48,7 +48,7 @@ async def _censor_redactions(self): By censor we mean update the event_json table with the redacted event. """ - if self.hs.config.redaction_retention_period is None: + if self.hs.config.server.redaction_retention_period is None: return if not ( @@ -60,7 +60,9 @@ async def _censor_redactions(self): # created. 
return - before_ts = self._clock.time_msec() - self.hs.config.redaction_retention_period + before_ts = ( + self._clock.time_msec() - self.hs.config.server.redaction_retention_period + ) # We fetch all redactions that: # 1. point to an event we have, diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 7e33ae578c7b..0e1d97aaebe0 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -353,7 +353,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self.user_ips_max_age = hs.config.user_ips_max_age + self.user_ips_max_age = hs.config.server.user_ips_max_age if hs.config.worker.run_background_tasks and self.user_ips_max_age: self._clock.looping_call(self._prune_old_user_ips, 5 * 1000) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index cc4e31ec3011..bc7d213fe2a2 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -104,7 +104,7 @@ def __init__( self._clock = hs.get_clock() self._instance_name = hs.get_instance_name() - self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages + self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages self.is_mine_id = hs.is_mine_id # Ideally we'd move these ID gens here, unfortunately some other ID diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index b76ee51a9b46..a14ac03d4b6e 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -32,8 +32,8 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._clock = hs.get_clock() self.hs = hs - self._limit_usage_by_mau = hs.config.limit_usage_by_mau - self._max_mau_value = hs.config.max_mau_value + self._limit_usage_by_mau = hs.config.server.limit_usage_by_mau + self._max_mau_value = hs.config.server.max_mau_value @cached(num_args=0) async def get_monthly_active_count(self) -> int: @@ -96,8 +96,8 @@ async def get_registered_reserved_users(self) -> List[str]: """ users = [] - for tp in self.hs.config.mau_limits_reserved_threepids[ - : self.hs.config.max_mau_value + for tp in self.hs.config.server.mau_limits_reserved_threepids[ + : self.hs.config.server.max_mau_value ]: user_id = await self.hs.get_datastore().get_user_id_by_threepid( tp["medium"], tp["address"] @@ -212,7 +212,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._mau_stats_only = hs.config.mau_stats_only + self._mau_stats_only = hs.config.server.mau_stats_only # Do not add more reserved users than the total allowable number self.db_pool.new_transaction( @@ -221,7 +221,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): [], [], self._initialise_reserved_users, - hs.config.mau_limits_reserved_threepids[: self._max_mau_value], + hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value], ) def _initialise_reserved_users(self, txn, threepids): diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index c83089ee6395..7279b0924e89 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -207,7 
+207,7 @@ async def is_trial_user(self, user_id: str) -> bool: return False now = self._clock.time_msec() - trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000 + trial_duration_ms = self.config.server.mau_trial_days * 24 * 60 * 60 * 1000 is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms return is_trial diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 118b390e9346..d69eaf80cefe 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -679,8 +679,8 @@ def get_retention_policy_for_room_txn(txn): # policy. if not ret: return { - "min_lifetime": self.config.retention_default_min_lifetime, - "max_lifetime": self.config.retention_default_max_lifetime, + "min_lifetime": self.config.server.retention_default_min_lifetime, + "max_lifetime": self.config.server.retention_default_max_lifetime, } row = ret[0] @@ -690,10 +690,10 @@ def get_retention_policy_for_room_txn(txn): # The default values will be None if no default policy has been defined, or if one # of the attributes is missing from the default policy. if row["min_lifetime"] is None: - row["min_lifetime"] = self.config.retention_default_min_lifetime + row["min_lifetime"] = self.config.server.retention_default_min_lifetime if row["max_lifetime"] is None: - row["max_lifetime"] = self.config.retention_default_max_lifetime + row["max_lifetime"] = self.config.server.retention_default_max_lifetime return row diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 2a1e99e17a90..c85383c97542 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -51,7 +51,7 @@ def store_search_entries_txn( txn: entries: entries to be added to the table """ - if not self.hs.config.enable_search: + if not self.hs.config.server.enable_search: return if isinstance(self.database_engine, PostgresEngine): sql = ( @@ -105,7 +105,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - if not hs.config.enable_search: + if not hs.config.server.enable_search: return self.db_pool.updates.register_background_update_handler( diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index f31880b8ec93..a63eaddfdc1c 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -366,7 +366,7 @@ def _upgrade_existing_database( + "new for the server to understand" ) - # some of the deltas assume that config.server_name is set correctly, so now + # some of the deltas assume that server_name is set correctly, so now # is a good time to run the sanity check. 
if not is_empty and "main" in databases: from synapse.storage.databases.main import check_database_before_upgrade diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index cccff7af265a..3aa9ba3c43ac 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -217,7 +217,7 @@ def test_get_user_from_macaroon(self): user_id = "@baldrick:matrix.org" macaroon = pymacaroons.Macaroon( - location=self.hs.config.server_name, + location=self.hs.config.server.server_name, identifier="key", key=self.hs.config.key.macaroon_secret_key, ) @@ -239,7 +239,7 @@ def test_get_guest_user_from_macaroon(self): user_id = "@baldrick:matrix.org" macaroon = pymacaroons.Macaroon( - location=self.hs.config.server_name, + location=self.hs.config.server.server_name, identifier="key", key=self.hs.config.key.macaroon_secret_key, ) @@ -268,7 +268,7 @@ def test_blocking_mau(self): self.store.get_monthly_active_count = simple_async_mock(lots_of_users) e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) - self.assertEquals(e.value.admin_contact, self.hs.config.admin_contact) + self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.value.code, 403) @@ -303,7 +303,7 @@ def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): appservice = ApplicationService( "abcd", - self.hs.config.server_name, + self.hs.config.server.server_name, id="1234", namespaces={ "users": [{"regex": "@_appservice.*:sender", "exclusive": True}] @@ -332,7 +332,7 @@ def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): appservice = ApplicationService( "abcd", - self.hs.config.server_name, + self.hs.config.server.server_name, id="1234", namespaces={ "users": [{"regex": "@_appservice.*:sender", "exclusive": True}] @@ -372,7 +372,7 @@ def test_hs_disabled(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) - self.assertEquals(e.value.admin_contact, self.hs.config.admin_contact) + self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.value.code, 403) @@ -387,7 +387,7 @@ def test_hs_disabled_no_server_notices_user(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) - self.assertEquals(e.value.admin_contact, self.hs.config.admin_contact) + self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.value.code, 403) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 0b60cc426119..03e1e11f492f 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -120,7 +120,7 @@ def test_without_event_id(self): self.assertEqual( channel.json_body["room_version"], - self.hs.config.default_room_version.identifier, + self.hs.config.server.default_room_version.identifier, ) members = set( diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index d3efb67e3ea9..bd05a2c2d15d 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -175,20 +175,20 @@ 
def test_if_user_exists(self): self.assertTrue(result_token is not None) def test_mau_limits_when_disabled(self): - self.hs.config.limit_usage_by_mau = False + self.hs.config.server.limit_usage_by_mau = False # Ensure does not throw exception self.get_success(self.get_or_create_user(self.requester, "a", "display_name")) def test_get_or_create_user_mau_not_blocked(self): - self.hs.config.limit_usage_by_mau = True + self.hs.config.server.limit_usage_by_mau = True self.store.count_monthly_users = Mock( - return_value=make_awaitable(self.hs.config.max_mau_value - 1) + return_value=make_awaitable(self.hs.config.server.max_mau_value - 1) ) # Ensure does not throw exception self.get_success(self.get_or_create_user(self.requester, "c", "User")) def test_get_or_create_user_mau_blocked(self): - self.hs.config.limit_usage_by_mau = True + self.hs.config.server.limit_usage_by_mau = True self.store.get_monthly_active_count = Mock( return_value=make_awaitable(self.lots_of_users) ) @@ -198,7 +198,7 @@ def test_get_or_create_user_mau_blocked(self): ) self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.server.max_mau_value) ) self.get_failure( self.get_or_create_user(self.requester, "b", "display_name"), @@ -206,7 +206,7 @@ def test_get_or_create_user_mau_blocked(self): ) def test_register_mau_blocked(self): - self.hs.config.limit_usage_by_mau = True + self.hs.config.server.limit_usage_by_mau = True self.store.get_monthly_active_count = Mock( return_value=make_awaitable(self.lots_of_users) ) @@ -215,7 +215,7 @@ def test_register_mau_blocked(self): ) self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.server.max_mau_value) ) self.get_failure( self.handler.register_user(localpart="local_part"), ResourceLimitError diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index d9a8b077d390..638babae6995 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -226,7 +226,7 @@ def test_client_ip_range_blacklist(self): """Ensure that Synapse does not try to connect to blacklisted IPs""" # Set up the ip_range blacklist - self.hs.config.federation_ip_range_blacklist = IPSet( + self.hs.config.server.federation_ip_range_blacklist = IPSet( ["127.0.0.0/8", "fe80::/64"] ) self.reactor.lookups["internal"] = "127.0.0.1" diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index ee3ae9cce46b..a285d5a7fea0 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -422,7 +422,7 @@ def test_register_mau_limit_reached(self): # Set monthly active users to the limit store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.server.max_mau_value) ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit @@ -1485,7 +1485,7 @@ def test_create_user_mau_limit_reached_active_admin(self): # Set monthly active users to the limit self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.server.max_mau_value) ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit @@ -1522,7 +1522,7 @@ def test_create_user_mau_limit_reached_passive_admin(self): # 
Set monthly active users to the limit self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.server.max_mau_value) ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 64b0b8458b6e..2f44547bfb4c 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -516,7 +516,7 @@ def test_GET_whoami_appservices(self): appservice = ApplicationService( as_token, - self.hs.config.server_name, + self.hs.config.server.server_name, id="1234", namespaces={"users": [{"regex": user_id, "exclusive": True}]}, sender=user_id, diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index 422361b62a5a..b9e36025520e 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -55,7 +55,7 @@ def test_get_room_version_capabilities(self): self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, "" + room_version) self.assertEqual( - self.config.default_room_version.identifier, + self.config.server.default_room_version.identifier, capabilities["m.room_versions"]["default"], ) diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index 1d152352d176..56fe1a3d0133 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -50,7 +50,7 @@ def test_put_presence(self): PUT to the status endpoint with use_presence enabled will call set_state on the presence handler. """ - self.hs.config.use_presence = True + self.hs.config.server.use_presence = True body = {"presence": "here", "status_msg": "beep boop"} channel = self.make_request( diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 72a5a11b461f..af135d57e196 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -50,7 +50,7 @@ def test_POST_appservice_registration_valid(self): appservice = ApplicationService( as_token, - self.hs.config.server_name, + self.hs.config.server.server_name, id="1234", namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, sender="@as:test", @@ -74,7 +74,7 @@ def test_POST_appservice_registration_no_type(self): appservice = ApplicationService( as_token, - self.hs.config.server_name, + self.hs.config.server.server_name, id="1234", namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, sender="@as:test", diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 7f25200a5d75..36c495954ff3 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -346,7 +346,7 @@ def _trigger_notice_and_join(self): invites = [] # Register as many users as the MAU limit allows. 
- for i in range(self.hs.config.max_mau_value): + for i in range(self.hs.config.server.max_mau_value): localpart = "user%d" % i user_id = self.register_user(localpart, "password") tok = self.login(localpart, "password") diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 944dbc34a26b..d6b4cdd788ff 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -51,7 +51,7 @@ def prepare(self, reactor, clock, homeserver): @override_config({"max_mau_value": 3, "mau_limit_reserved_threepids": gen_3pids(3)}) def test_initialise_reserved_users(self): - threepids = self.hs.config.mau_limits_reserved_threepids + threepids = self.hs.config.server.mau_limits_reserved_threepids # register three users, of which two have reserved 3pids, and a third # which is a support user. @@ -101,9 +101,9 @@ def test_initialise_reserved_users(self): # XXX some of this is redundant. poking things into the config shouldn't # work, and in any case it's not obvious what we expect to happen when # we advance the reactor. - self.hs.config.max_mau_value = 0 + self.hs.config.server.max_mau_value = 0 self.reactor.advance(FORTY_DAYS) - self.hs.config.max_mau_value = 5 + self.hs.config.server.max_mau_value = 5 self.get_success(self.store.reap_monthly_active_users()) @@ -183,7 +183,7 @@ def test_reap_monthly_active_users(self): self.get_success(d) count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(count, self.hs.config.max_mau_value) + self.assertEqual(count, self.hs.config.server.max_mau_value) self.reactor.advance(FORTY_DAYS) @@ -199,7 +199,7 @@ def test_reap_monthly_active_users(self): def test_reap_monthly_active_users_reserved_users(self): """Tests that reaping correctly handles reaping where reserved users are present""" - threepids = self.hs.config.mau_limits_reserved_threepids + threepids = self.hs.config.server.mau_limits_reserved_threepids initial_users = len(threepids) reserved_user_number = initial_users - 1 for i in range(initial_users): @@ -234,7 +234,7 @@ def test_reap_monthly_active_users_reserved_users(self): self.get_success(d) count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(count, self.hs.config.max_mau_value) + self.assertEqual(count, self.hs.config.server.max_mau_value) def test_populate_monthly_users_is_guest(self): # Test that guest users are not added to mau list @@ -294,7 +294,7 @@ def test_get_reserved_real_user_account(self): {"medium": "email", "address": user2_email}, ] - self.hs.config.mau_limits_reserved_threepids = threepids + self.hs.config.server.mau_limits_reserved_threepids = threepids d = self.store.db_pool.runInteraction( "initialise", self.store._initialise_reserved_users, threepids ) diff --git a/tests/test_mau.py b/tests/test_mau.py index 66111eb3674b..80ab40e255ea 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -165,7 +165,7 @@ def test_trial_delay(self): @override_config({"mau_trial_days": 1}) def test_trial_users_cant_come_back(self): - self.hs.config.mau_trial_days = 1 + self.hs.config.server.mau_trial_days = 1 # We should be able to register more than the limit initially token1 = self.create_user("kermit1") diff --git a/tests/unittest.py b/tests/unittest.py index 7a6f5954d06c..6d5d87cb78e2 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -232,7 +232,7 @@ def setUp(self): # Honour the `use_frozen_dicts` config option. 
We have to do this # manually because this is taken care of in the app `start` code, which # we don't run. Plus we want to reset it on tearDown. - events.USE_FROZEN_DICTS = self.hs.config.use_frozen_dicts + events.USE_FROZEN_DICTS = self.hs.config.server.use_frozen_dicts if self.hs is None: raise Exception("No homeserver returned from make_homeserver.") From e32b9f44ee466ad8dad47fdbea7e2711c11b9dc7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 Sep 2021 11:57:53 +0100 Subject: [PATCH 027/111] Update installation instructions (#10919) Various updates to the install docs. --- README.rst | 2 +- changelog.d/10919.doc | 1 + docs/setup/installation.md | 328 +++++++++++++++++++------------------ 3 files changed, 167 insertions(+), 164 deletions(-) create mode 100644 changelog.d/10919.doc diff --git a/README.rst b/README.rst index db977c025f7d..524a3a5142ee 100644 --- a/README.rst +++ b/README.rst @@ -288,7 +288,7 @@ Quick start Before setting up a development environment for synapse, make sure you have the system dependencies (such as the python header files) installed - see -`Installing from source `_. +`Platform-specific prerequisites `_. To check out a synapse for development, clone the git repo into a working directory of your choice:: diff --git a/changelog.d/10919.doc b/changelog.d/10919.doc new file mode 100644 index 000000000000..d0bddc3f1b58 --- /dev/null +++ b/changelog.d/10919.doc @@ -0,0 +1 @@ +Minor updates to the installation instructions. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 06f869cd75fa..874925e92754 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -18,19 +18,179 @@ that your email address is probably `user@example.com` rather than ## Installing Synapse -### Installing from source +### Prebuilt packages + +Prebuilt packages are available for a number of platforms. These are recommended +for most users. + +#### Docker images and Ansible playbooks + +There is an official synapse image available at + which can be used with +the docker-compose file available at +[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker). +Further information on this including configuration options is available in the README +on hub.docker.com. + +Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a +Dockerfile to automate a synapse server in a single Docker image, at + + +Slavi Pantaleev has created an Ansible playbook, +which installs the official Docker image of Matrix Synapse +along with many other Matrix-related services (Postgres database, Element, coturn, +ma1sd, SSL support, etc.). +For more details, see + + +#### Debian/Ubuntu + +##### Matrix.org packages + +Matrix.org provides Debian/Ubuntu packages of Synapse, for the amd64 +architecture via . + +To install the latest release: + +```sh +sudo apt install -y lsb-release wget apt-transport-https +sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg +echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" | + sudo tee /etc/apt/sources.list.d/matrix-org.list +sudo apt update +sudo apt install matrix-synapse-py3 +``` + +Packages are also published for release candidates. To enable the prerelease +channel, add `prerelease` to the `sources.list` line.
For example: + +```sh +sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg +echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main prerelease" | + sudo tee /etc/apt/sources.list.d/matrix-org.list +sudo apt update +sudo apt install matrix-synapse-py3 +``` + +The fingerprint of the repository signing key (as shown by `gpg +/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is +`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`. + +##### Downstream Debian packages + +We do not recommend using the packages from the default Debian `buster` +repository at this time, as they are old and suffer from known security +vulnerabilities. You can install the latest version of Synapse from +[our repository](#matrixorg-packages) or from `buster-backports`. Please +see the [Debian documentation](https://backports.debian.org/Instructions/) +for information on how to use backports. + +If you are using Debian `sid` or testing, Synapse is available in the default +repositories and it should be possible to install it simply with: + +```sh +sudo apt install matrix-synapse +``` + +##### Downstream Ubuntu packages + +We do not recommend using the packages in the default Ubuntu repository +at this time, as they are old and suffer from known security vulnerabilities. +The latest version of Synapse can be installed from [our repository](#matrixorg-packages). + +#### Fedora + +Synapse is in the Fedora repositories as `matrix-synapse`: + +```sh +sudo dnf install matrix-synapse +``` + +Oleg Girko provides Fedora RPMs at + + +#### OpenSUSE + +Synapse is in the OpenSUSE repositories as `matrix-synapse`: + +```sh +sudo zypper install matrix-synapse +``` + +#### SUSE Linux Enterprise Server + +Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at + + +#### ArchLinux + +The quickest way to get up and running with ArchLinux is probably with the community package +, which should pull in most of +the necessary dependencies. + +pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1): + +```sh +sudo pip install --upgrade pip +``` + +If you encounter an error with lib bcrypt causing a Wrong ELF Class: +ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly +compile it under the right architecture. (This should not be needed if +installing under virtualenv): + +```sh +sudo pip uninstall py-bcrypt +sudo pip install py-bcrypt +``` + +#### Void Linux + +Synapse can be found in the void repositories as 'synapse': + +```sh +xbps-install -Su +xbps-install -S synapse +``` + +#### FreeBSD + +Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from: + +- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean` +- Packages: `pkg install py37-matrix-synapse` + +#### OpenBSD + +As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem +underlying the homeserver directory (defaults to `/var/synapse`) has to be +mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem +and mounting it to `/var/synapse` should be taken into consideration. + +Installing Synapse: + +```sh +doas pkg_add synapse +``` + +#### NixOS + +Robin Lambertz has packaged Synapse for NixOS at: + + + +### Installing as a Python module from PyPI -(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)
+It's also possible to install Synapse as a Python module from PyPI. -When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed. +When following this route please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed. System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.5.2 or later, up to Python 3.9. +- Python 3.6 or later, up to Python 3.9. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org - To install the Synapse homeserver run: ```sh @@ -203,164 +363,6 @@ be found at for Windows 10 and for Windows Server. -### Prebuilt packages - -As an alternative to installing from source, prebuilt packages are available -for a number of platforms. - -#### Docker images and Ansible playbooks - -There is an official synapse image available at - which can be used with -the docker-compose file available at -[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker). -Further information on this including configuration options is available in the README -on hub.docker.com. - -Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a -Dockerfile to automate a synapse server in a single Docker image, at - - -Slavi Pantaleev has created an Ansible playbook, -which installs the offical Docker image of Matrix Synapse -along with many other Matrix-related services (Postgres database, Element, coturn, -ma1sd, SSL support, etc.). -For more details, see - - -#### Debian/Ubuntu - -##### Matrix.org packages - -Matrix.org provides Debian/Ubuntu packages of Synapse via -. To install the latest release: - -```sh -sudo apt install -y lsb-release wget apt-transport-https -sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg -echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" | - sudo tee /etc/apt/sources.list.d/matrix-org.list -sudo apt update -sudo apt install matrix-synapse-py3 -``` - -Packages are also published for release candidates. To enable the prerelease -channel, add `prerelease` to the `sources.list` line. For example: - -```sh -sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg -echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main prerelease" | - sudo tee /etc/apt/sources.list.d/matrix-org.list -sudo apt update -sudo apt install matrix-synapse-py3 -``` - -The fingerprint of the repository signing key (as shown by `gpg -/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is -`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`. - -##### Downstream Debian packages - -We do not recommend using the packages from the default Debian `buster` -repository at this time, as they are old and suffer from known security -vulnerabilities. You can install the latest version of Synapse from -[our repository](#matrixorg-packages) or from `buster-backports`. Please -see the [Debian documentation](https://backports.debian.org/Instructions/) -for information on how to use backports. 
- -If you are using Debian `sid` or testing, Synapse is available in the default -repositories and it should be possible to install it simply with: - -```sh -sudo apt install matrix-synapse -``` - -##### Downstream Ubuntu packages - -We do not recommend using the packages in the default Ubuntu repository -at this time, as they are old and suffer from known security vulnerabilities. -The latest version of Synapse can be installed from [our repository](#matrixorg-packages). - -#### Fedora - -Synapse is in the Fedora repositories as `matrix-synapse`: - -```sh -sudo dnf install matrix-synapse -``` - -Oleg Girko provides Fedora RPMs at - - -#### OpenSUSE - -Synapse is in the OpenSUSE repositories as `matrix-synapse`: - -```sh -sudo zypper install matrix-synapse -``` - -#### SUSE Linux Enterprise Server - -Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at - - -#### ArchLinux - -The quickest way to get up and running with ArchLinux is probably with the community package -, which should pull in most of -the necessary dependencies. - -pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ): - -```sh -sudo pip install --upgrade pip -``` - -If you encounter an error with lib bcrypt causing an Wrong ELF Class: -ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly -compile it under the right architecture. (This should not be needed if -installing under virtualenv): - -```sh -sudo pip uninstall py-bcrypt -sudo pip install py-bcrypt -``` - -#### Void Linux - -Synapse can be found in the void repositories as 'synapse': - -```sh -xbps-install -Su -xbps-install -S synapse -``` - -#### FreeBSD - -Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from: - -- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean` -- Packages: `pkg install py37-matrix-synapse` - -#### OpenBSD - -As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem -underlying the homeserver directory (defaults to `/var/synapse`) has to be -mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem -and mounting it to `/var/synapse` should be taken into consideration. - -Installing Synapse: - -```sh -doas pkg_add synapse -``` - -#### NixOS - -Robin Lambertz has packaged Synapse for NixOS at: - - ## Setting up Synapse Once you have installed synapse as above, you will need to configure it. From 176aa55fd5971610727cb10372faf521542653d9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 Sep 2021 11:59:43 +0100 Subject: [PATCH 028/111] add event id to logcontext when handling incoming PDUs (#10936) --- changelog.d/10936.misc | 1 + synapse/federation/federation_server.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10936.misc diff --git a/changelog.d/10936.misc b/changelog.d/10936.misc new file mode 100644 index 000000000000..9d1d6e5b02ef --- /dev/null +++ b/changelog.d/10936.misc @@ -0,0 +1 @@ +Include the event id in the logcontext when handling PDUs received over federation. 
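For readers following the diff below: `nested_logging_context` is an existing helper in `synapse.logging.context` that derives a child logging context from the current one, suffixing its name, so that every log line emitted inside the block can be traced back to the event being handled. A minimal, hypothetical sketch of the pattern (the `handle_pdu` coroutine and its arguments are invented for illustration; only `nested_logging_context` itself is the real API):

```python
import logging

from synapse.events import EventBase
from synapse.logging.context import nested_logging_context

logger = logging.getLogger(__name__)


async def handle_pdu(origin: str, event: EventBase) -> None:
    # Derive a child logcontext whose name is suffixed with the event ID;
    # log lines emitted inside this block carry that name, so they can be
    # correlated with the PDU being processed.
    with nested_logging_context(event.event_id):
        logger.info("handling received PDU from %s", origin)
        # ... process the event ...
```

The actual change applies this pattern inside `_process_incoming_pdus_in_room_inner`, as the following diff shows.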
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 638959cbecdb..83f11d6b8872 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1008,7 +1008,10 @@ async def _process_incoming_pdus_in_room_inner( async with lock: logger.info("handling received PDU: %s", event) try: - await self._federation_event_handler.on_receive_pdu(origin, event) + with nested_logging_context(event.event_id): + await self._federation_event_handler.on_receive_pdu( + origin, event + ) except FederationError as e: # XXX: Ideally we'd inform the remote we failed to process # the event, but we can't return an error in the transaction From 428174f90249ec50f977b5ef5c5cf9f92599ee0a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 Sep 2021 18:59:15 +0100 Subject: [PATCH 029/111] Split `event_auth.check` into two parts (#10940) Broadly, the existing `event_auth.check` function has two parts: * a validation section: checks that the event isn't too big, that it has the right signatures, etc. This bit is independent of the rest of the state in the room, and so need only be done once for each event. * an auth section: ensures that the event is allowed, given the rest of the state in the room. This gets done multiple times, against various sets of room state, because it forms part of the state res algorithm. Currently, this is implemented with `do_sig_check` and `do_size_check` parameters, but I think that makes everything hard to follow. Instead, we split the function in two and call each part separately where it is needed. --- changelog.d/10940.misc | 1 + synapse/event_auth.py | 153 ++++++++++++++++----------- synapse/handlers/event_auth.py | 15 ++- synapse/handlers/federation.py | 30 +++--- synapse/handlers/federation_event.py | 18 +++- synapse/handlers/message.py | 6 +- synapse/handlers/room.py | 6 +- synapse/state/v1.py | 8 +- synapse/state/v2.py | 4 +- tests/test_event_auth.py | 108 +++++++------------ 10 files changed, 177 insertions(+), 172 deletions(-) create mode 100644 changelog.d/10940.misc diff --git a/changelog.d/10940.misc b/changelog.d/10940.misc new file mode 100644 index 000000000000..9a765435dbe4 --- /dev/null +++ b/changelog.d/10940.misc @@ -0,0 +1 @@ +Clean up some of the federation event authentication code for clarity. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 5d7c6fa858fb..eef354de6ea5 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -41,42 +41,112 @@ logger = logging.getLogger(__name__) -def check( - room_version_obj: RoomVersion, - event: EventBase, - auth_events: StateMap[EventBase], - do_sig_check: bool = True, - do_size_check: bool = True, +def validate_event_for_room_version( + room_version_obj: RoomVersion, event: EventBase ) -> None: - """Checks if this event is correctly authed. + """Ensure that the event complies with the limits, and has the right signatures + + NB: does not *validate* the signatures - it assumes that any signatures present + have already been checked. + + NB: it does not check that the event satisfies the auth rules (that is done in + check_auth_rules_for_event) - these tests are independent of the rest of the state + in the room. + + NB: This is used to check events that have been received over federation. As such, + it can only enforce the checks specified in the relevant room version, to avoid + a split-brain situation where some servers accept such events, and others reject +them.
+ + TODO: consider moving this into EventValidator Args: - room_version_obj: the version of the room - event: the event being checked. - auth_events: the existing room state. - do_sig_check: True if it should be verified that the sending server - signed the event. - do_size_check: True if the size of the event fields should be verified. + room_version_obj: the version of the room which contains this event + event: the event to be checked Raises: - AuthError if the checks fail - - Returns: - if the auth checks pass. + SynapseError if there is a problem with the event """ - assert isinstance(auth_events, dict) - - if do_size_check: - _check_size_limits(event) + _check_size_limits(event) if not hasattr(event, "room_id"): raise AuthError(500, "Event has no room_id: %s" % event) - room_id = event.room_id + # check that the event has the correct signatures + sender_domain = get_domain_from_id(event.sender) + + is_invite_via_3pid = ( + event.type == EventTypes.Member + and event.membership == Membership.INVITE + and "third_party_invite" in event.content + ) + + # Check the sender's domain has signed the event + if not event.signatures.get(sender_domain): + # We allow invites via 3pid to have a sender from a different + # HS, as the sender must match the sender of the original + # 3pid invite. This is checked further down with the + # other dedicated membership checks. + if not is_invite_via_3pid: + raise AuthError(403, "Event not signed by sender's server") + + if event.format_version in (EventFormatVersions.V1,): + # Only older room versions have event IDs to check. + event_id_domain = get_domain_from_id(event.event_id) + + # Check the origin domain has signed the event + if not event.signatures.get(event_id_domain): + raise AuthError(403, "Event not signed by sending server") + + is_invite_via_allow_rule = ( + room_version_obj.msc3083_join_rules + and event.type == EventTypes.Member + and event.membership == Membership.JOIN + and "join_authorised_via_users_server" in event.content + ) + if is_invite_via_allow_rule: + authoriser_domain = get_domain_from_id( + event.content["join_authorised_via_users_server"] + ) + if not event.signatures.get(authoriser_domain): + raise AuthError(403, "Event not signed by authorising server") + + +def check_auth_rules_for_event( + room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase] +) -> None: + """Check that an event complies with the auth rules + + Checks whether an event passes the auth rules with a given set of state events + + Assumes that we have already checked that the event is the right shape (it has + enough signatures, has a room ID, etc). In other words: + + - it's fine for use in state resolution, when we have already decided whether to + accept the event or not, and are now trying to decide whether it should make it + into the room state + + - when we're doing the initial event auth, it is only suitable in combination with + a bunch of other tests. + + Args: + room_version_obj: the version of the room + event: the event being checked. + auth_events: the room state to check the events against. + + Raises: + AuthError if the checks fail + """ + assert isinstance(auth_events, dict) # We need to ensure that the auth events are actually for the same room, to # stop people from using powers they've been granted in other rooms for # example. + # + # Arguably we don't need to do this when we're just doing state res, as presumably + # the state res algorithm isn't silly enough to give us events from different rooms. 
+ # Still, it's easier to do it anyway. + room_id = event.room_id for auth_event in auth_events.values(): if auth_event.room_id != room_id: raise AuthError( @@ -86,45 +156,6 @@ def check( % (event.event_id, room_id, auth_event.event_id, auth_event.room_id), ) - if do_sig_check: - sender_domain = get_domain_from_id(event.sender) - - is_invite_via_3pid = ( - event.type == EventTypes.Member - and event.membership == Membership.INVITE - and "third_party_invite" in event.content - ) - - # Check the sender's domain has signed the event - if not event.signatures.get(sender_domain): - # We allow invites via 3pid to have a sender from a different - # HS, as the sender must match the sender of the original - # 3pid invite. This is checked further down with the - # other dedicated membership checks. - if not is_invite_via_3pid: - raise AuthError(403, "Event not signed by sender's server") - - if event.format_version in (EventFormatVersions.V1,): - # Only older room versions have event IDs to check. - event_id_domain = get_domain_from_id(event.event_id) - - # Check the origin domain has signed the event - if not event.signatures.get(event_id_domain): - raise AuthError(403, "Event not signed by sending server") - - is_invite_via_allow_rule = ( - room_version_obj.msc3083_join_rules - and event.type == EventTypes.Member - and event.membership == Membership.JOIN - and "join_authorised_via_users_server" in event.content - ) - if is_invite_via_allow_rule: - authoriser_domain = get_domain_from_id( - event.content["join_authorised_via_users_server"] - ) - if not event.signatures.get(authoriser_domain): - raise AuthError(403, "Event not signed by authorising server") - # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules # # 1. If type is m.room.create: diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index cb81fa0986d5..d089c56286a1 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -22,7 +22,8 @@ RestrictedJoinRuleTypes, ) from synapse.api.errors import AuthError, Codes, SynapseError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion +from synapse.api.room_versions import RoomVersion +from synapse.event_auth import check_auth_rules_for_event from synapse.events import EventBase from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext @@ -45,21 +46,17 @@ def __init__(self, hs: "HomeServer"): self._store = hs.get_datastore() self._server_name = hs.hostname - async def check_from_context( + async def check_auth_rules_from_context( self, - room_version: str, + room_version_obj: RoomVersion, event: EventBase, context: EventContext, - do_sig_check: bool = True, ) -> None: + """Check an event passes the auth rules at its own auth events""" auth_event_ids = event.auth_event_ids() auth_events_by_id = await self._store.get_events(auth_event_ids) auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()} - - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - event_auth.check( - room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check - ) + check_auth_rules_for_event(room_version_obj, event, auth_events) def compute_auth_events( self, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3b0b895b0777..0a10a5c28aec 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -40,6 +40,10 @@ ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from 
synapse.crypto.event_signing import compute_event_signature +from synapse.event_auth import ( + check_auth_rules_for_event, + validate_event_for_room_version, +) from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator @@ -742,10 +746,9 @@ async def on_make_join_request( # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` - await self._event_auth_handler.check_from_context( - room_version.identifier, event, context, do_sig_check=False + await self._event_auth_handler.check_auth_rules_from_context( + room_version, event, context ) - return event async def on_invite_request( @@ -916,8 +919,8 @@ async def on_make_leave_request( try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` - await self._event_auth_handler.check_from_context( - room_version_obj.identifier, event, context, do_sig_check=False + await self._event_auth_handler.check_auth_rules_from_context( + room_version_obj, event, context ) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) @@ -978,8 +981,8 @@ async def on_make_knock_request( try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_knock_request` - await self._event_auth_handler.check_from_context( - room_version_obj.identifier, event, context, do_sig_check=False + await self._event_auth_handler.check_auth_rules_from_context( + room_version_obj, event, context ) except AuthError as e: logger.warning("Failed to create new knock %r because %s", event, e) @@ -1168,7 +1171,8 @@ async def _persist_auth_tree( auth_for_e[(EventTypes.Create, "")] = create_event try: - event_auth.check(room_version, e, auth_events=auth_for_e) + validate_event_for_room_version(room_version, e) + check_auth_rules_for_event(room_version, e, auth_for_e) except SynapseError as err: # we may get SynapseErrors here as well as AuthErrors. 
For # instance, there are a couple of (ancient) events in some @@ -1266,8 +1270,9 @@ async def exchange_third_party_invite( event.internal_metadata.send_on_behalf_of = self.hs.hostname try: - await self._event_auth_handler.check_from_context( - room_version_obj.identifier, event, context + validate_event_for_room_version(room_version_obj, event) + await self._event_auth_handler.check_auth_rules_from_context( + room_version_obj, event, context ) except AuthError as e: logger.warning("Denying new third party invite %r because %s", event, e) @@ -1317,8 +1322,9 @@ async def on_exchange_third_party_invite_request( ) try: - await self._event_auth_handler.check_from_context( - room_version_obj.identifier, event, context + validate_event_for_room_version(room_version_obj, event) + await self._event_auth_handler.check_auth_rules_from_context( + room_version_obj, event, context ) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 2c4644b4a32d..e587b5b3b351 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -29,7 +29,6 @@ from prometheus_client import Counter -from synapse import event_auth from synapse.api.constants import ( EventContentFields, EventTypes, @@ -47,7 +46,11 @@ SynapseError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.event_auth import auth_types_for_event +from synapse.event_auth import ( + auth_types_for_event, + check_auth_rules_for_event, + validate_event_for_room_version, +) from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_client import InvalidResponseError @@ -1207,7 +1210,8 @@ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: context = EventContext.for_outlier() try: - event_auth.check(room_version_obj, event, auth_events=auth) + validate_event_for_room_version(room_version_obj, event) + check_auth_rules_for_event(room_version_obj, event, auth) except AuthError as e: logger.warning("Rejecting %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1282,7 +1286,8 @@ async def _check_event_auth( auth_events_for_auth = calculated_auth_event_map try: - event_auth.check(room_version_obj, event, auth_events=auth_events_for_auth) + validate_event_for_room_version(room_version_obj, event) + check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1394,7 +1399,10 @@ async def _check_for_soft_fail( } try: - event_auth.check(room_version_obj, event, auth_events=current_auth_events) + # TODO: skip the call to validate_event_for_room_version? we should already + # have validated the event. 
+ validate_event_for_room_version(room_version_obj, event) + check_auth_rules_for_event(room_version_obj, event, current_auth_events) except AuthError as e: logger.warning( "Soft-failing %r (from %s) because %s", diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 3b8cc50ec020..cdac53037cb3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -44,6 +44,7 @@ ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.api.urls import ConsentURIBuilder +from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext @@ -1098,8 +1099,9 @@ async def handle_new_client_event( assert event.content["membership"] == Membership.LEAVE else: try: - await self._event_auth_handler.check_from_context( - room_version_obj.identifier, event, context + validate_event_for_room_version(room_version_obj, event) + await self._event_auth_handler.check_auth_rules_from_context( + room_version_obj, event, context ) except AuthError as err: logger.warning("Denying new event %r because %s", event, err) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index bf8a85f563d8..873e08258ea0 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -52,6 +52,7 @@ ) from synapse.api.filtering import Filter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion +from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase from synapse.events.utils import copy_power_levels_contents from synapse.rest.admin._base import assert_user_is_admin @@ -238,8 +239,9 @@ async def _upgrade_room( }, ) old_room_version = await self.store.get_room_version(old_room_id) - await self._event_auth_handler.check_from_context( - old_room_version.identifier, tombstone_event, tombstone_context + validate_event_for_room_version(old_room_version, tombstone_event) + await self._event_auth_handler.check_auth_rules_from_context( + old_room_version, tombstone_event, tombstone_context ) await self.clone_existing_room( diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 92336d7cc8be..017e6fd92d38 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -329,12 +329,10 @@ def _resolve_auth_events( auth_events[(prev_event.type, prev_event.state_key)] = prev_event try: # The signatures have already been checked at this point - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, event, auth_events, - do_sig_check=False, - do_size_check=False, ) prev_event = event except AuthError: @@ -349,12 +347,10 @@ def _resolve_normal_events( for event in _ordered_events(events): try: # The signatures have already been checked at this point - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, event, auth_events, - do_sig_check=False, - do_size_check=False, ) return event except AuthError: diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 7b1e8361def4..586b0e12febe 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -546,12 +546,10 @@ async def _iterative_auth_checks( auth_events[key] = event_map[ev_id] try: - event_auth.check( + event_auth.check_auth_rules_for_event( room_version, event, auth_events, - do_sig_check=False, - do_size_check=False, ) resolved_state[(event.type, event.state_key)] = event_id diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 6ebd01bcbe78..e7a7d008832d 100644 
--- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -37,21 +37,19 @@ def test_random_users_cannot_send_state_before_first_pl(self): } # creator should be able to send state - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _random_state_event(creator), auth_events, - do_sig_check=False, ) # joiner should not be able to send state self.assertRaises( AuthError, - event_auth.check, + event_auth.check_auth_rules_for_event, RoomVersions.V1, _random_state_event(joiner), auth_events, - do_sig_check=False, ) def test_state_default_level(self): @@ -76,19 +74,17 @@ def test_state_default_level(self): # pleb should not be able to send state self.assertRaises( AuthError, - event_auth.check, + event_auth.check_auth_rules_for_event, RoomVersions.V1, _random_state_event(pleb), auth_events, - do_sig_check=False, ), # king should be able to send state - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _random_state_event(king), auth_events, - do_sig_check=False, ) def test_alias_event(self): @@ -101,37 +97,33 @@ def test_alias_event(self): } # creator should be able to send aliases - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _alias_event(creator), auth_events, - do_sig_check=False, ) # Reject an event with no state key. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _alias_event(creator, state_key=""), auth_events, - do_sig_check=False, ) # If the domain of the sender does not match the state key, reject. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _alias_event(creator, state_key="test.com"), auth_events, - do_sig_check=False, ) # Note that the member does *not* need to be in the room. - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _alias_event(other), auth_events, - do_sig_check=False, ) def test_msc2432_alias_event(self): @@ -144,34 +136,30 @@ def test_msc2432_alias_event(self): } # creator should be able to send aliases - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _alias_event(creator), auth_events, - do_sig_check=False, ) # No particular checks are done on the state key. - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _alias_event(creator, state_key=""), auth_events, - do_sig_check=False, ) - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _alias_event(creator, state_key="test.com"), auth_events, - do_sig_check=False, ) # Per standard auth rules, the member must be in the room. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _alias_event(other), auth_events, - do_sig_check=False, ) def test_msc2209(self): @@ -191,20 +179,18 @@ def test_msc2209(self): } # pleb should be able to modify the notifications power level. - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V1, _power_levels_event(pleb, {"notifications": {"room": 100}}), auth_events, - do_sig_check=False, ) # But an MSC2209 room rejects this change. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _power_levels_event(pleb, {"notifications": {"room": 100}}), auth_events, - do_sig_check=False, ) def test_join_rules_public(self): @@ -221,59 +207,53 @@ def test_join_rules_public(self): } # Check join. 
- event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _member_event(pleb, "join", sender=creator), auth_events, - do_sig_check=False, ) # Banned should be rejected. auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user who left can re-join. auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user can send a join if they're in the room. auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( pleb, "invite", sender=creator ) - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) def test_join_rules_invite(self): @@ -291,60 +271,54 @@ def test_join_rules_invite(self): # A join without an invite is rejected. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _member_event(pleb, "join", sender=creator), auth_events, - do_sig_check=False, ) # Banned should be rejected. auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user who left cannot re-join. auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user can send a join if they're in the room. auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( pleb, "invite", sender=creator ) - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) def test_join_rules_msc3083_restricted(self): @@ -369,11 +343,10 @@ def test_join_rules_msc3083_restricted(self): # Older room versions don't understand this join rule with self.assertRaises(AuthError): - event_auth.check( + event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(pleb), auth_events, - do_sig_check=False, ) # A properly formatted join event should work. 
@@ -383,11 +356,10 @@ def test_join_rules_msc3083_restricted(self):
                 "join_authorised_via_users_server": "@creator:example.com"
             },
         )
-        event_auth.check(
+        event_auth.check_auth_rules_for_event(
             RoomVersions.V8,
             authorised_join_event,
             auth_events,
-            do_sig_check=False,
         )
 
         # A join issued by a specific user works (i.e. the power level checks
@@ -399,7 +371,7 @@ def test_join_rules_msc3083_restricted(self):
         pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event(
             "@inviter:foo.test"
         )
-        event_auth.check(
+        event_auth.check_auth_rules_for_event(
             RoomVersions.V8,
             _join_event(
                 pleb,
@@ -408,16 +380,14 @@ def test_join_rules_msc3083_restricted(self):
                 },
             ),
             pl_auth_events,
-            do_sig_check=False,
         )
 
         # A join which is missing an authorised server is rejected.
         with self.assertRaises(AuthError):
-            event_auth.check(
+            event_auth.check_auth_rules_for_event(
                 RoomVersions.V8,
                 _join_event(pleb),
                 auth_events,
-                do_sig_check=False,
             )
 
         # A join authorised by a user who is not in the room is rejected.
@@ -426,7 +396,7 @@ def test_join_rules_msc3083_restricted(self):
             creator, {"invite": 100, "users": {"@other:example.com": 150}}
         )
         with self.assertRaises(AuthError):
-            event_auth.check(
+            event_auth.check_auth_rules_for_event(
                 RoomVersions.V8,
                 _join_event(
                     pleb,
@@ -435,13 +405,12 @@ def test_join_rules_msc3083_restricted(self):
                 },
             ),
             auth_events,
-            do_sig_check=False,
         )
 
         # A user cannot be force-joined to a room. (This uses an event which
         # *would* be valid, but is sent by a different user.)
         with self.assertRaises(AuthError):
-            event_auth.check(
+            event_auth.check_auth_rules_for_event(
                 RoomVersions.V8,
                 _member_event(
                     pleb,
@@ -452,36 +421,32 @@ def test_join_rules_msc3083_restricted(self):
                 },
             ),
             auth_events,
-            do_sig_check=False,
         )
 
         # Banned should be rejected.
         auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban")
         with self.assertRaises(AuthError):
-            event_auth.check(
+            event_auth.check_auth_rules_for_event(
                 RoomVersions.V8,
                 authorised_join_event,
                 auth_events,
-                do_sig_check=False,
             )
 
         # A user who left can re-join.
         auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave")
-        event_auth.check(
+        event_auth.check_auth_rules_for_event(
             RoomVersions.V8,
             authorised_join_event,
             auth_events,
-            do_sig_check=False,
         )
 
         # A user can send a join if they're in the room. (This doesn't need to
         # be authorised since the user is already joined.)
         auth_events[("m.room.member", pleb)] = _member_event(pleb, "join")
-        event_auth.check(
+        event_auth.check_auth_rules_for_event(
             RoomVersions.V8,
             _join_event(pleb),
             auth_events,
-            do_sig_check=False,
         )
 
         # A user can accept an invite. (This doesn't need to be authorised since
@@ -489,11 +454,10 @@ def test_join_rules_msc3083_restricted(self):
         auth_events[("m.room.member", pleb)] = _member_event(
             pleb, "invite", sender=creator
         )
-        event_auth.check(
+        event_auth.check_auth_rules_for_event(
             RoomVersions.V8,
             _join_event(pleb),
             auth_events,
-            do_sig_check=False,
         )

From 3aefc7b66d9c7fb98addc71eaf5ef501a4c6a583 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 30 Sep 2021 11:04:40 +0100
Subject: [PATCH 030/111] Refactor user directory tests (#10935)

* Pull out GetUserDirectoryTables helper

* Don't rebuild the dir in tests that don't need it

In #10796 I changed registering a user to add directory entries under the
hood. This means we don't have to force a directory rebuild in tests of
the user directory search.
* Move test_initial to tests/storage * Add type hints to both test_user_directory files Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/10935.misc | 1 + mypy.ini | 6 + .../storage/databases/main/user_directory.py | 2 +- tests/handlers/test_user_directory.py | 283 ++++++------------ tests/storage/test_user_directory.py | 192 +++++++++++- tests/unittest.py | 4 +- 6 files changed, 288 insertions(+), 200 deletions(-) create mode 100644 changelog.d/10935.misc diff --git a/changelog.d/10935.misc b/changelog.d/10935.misc new file mode 100644 index 000000000000..80529c04cae2 --- /dev/null +++ b/changelog.d/10935.misc @@ -0,0 +1 @@ +Refactor user directory tests in preparation for upcoming changes. diff --git a/mypy.ini b/mypy.ini index 437d0a46a593..568166db3300 100644 --- a/mypy.ini +++ b/mypy.ini @@ -162,6 +162,12 @@ disallow_untyped_defs = True [mypy-synapse.util.wheel_timer] disallow_untyped_defs = True +[mypy-tests.handlers.test_user_directory] +disallow_untyped_defs = True + +[mypy-tests.storage.test_user_directory] +disallow_untyped_defs = True + [mypy-pymacaroons.*] ignore_missing_imports = True diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 90d65edc4267..c26e3e066f9d 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -527,7 +527,7 @@ async def get_user_in_directory(self, user_id: str) -> Optional[Dict[str, str]]: desc="get_user_in_directory", ) - async def update_user_directory_stream_pos(self, stream_id: int) -> None: + async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> None: await self.db_pool.simple_update_one( table="user_directory_stream_pos", keyvalues={}, diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 266333c5532c..2988befb21b4 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -11,26 +11,37 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Tuple -from unittest.mock import Mock +from unittest.mock import Mock, patch from urllib.parse import quote from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import UserTypes from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.rest.client import login, room, user_directory +from synapse.server import HomeServer from synapse.storage.roommember import ProfileInfo from synapse.types import create_requester +from synapse.util import Clock from tests import unittest +from tests.storage.test_user_directory import GetUserDirectoryTables from tests.unittest import override_config class UserDirectoryTestCase(unittest.HomeserverTestCase): - """ - Tests the UserDirectoryHandler. + """Tests the UserDirectoryHandler. + + We're broadly testing two kinds of things here. + + 1. Check that we correctly update the user directory in response + to events (e.g. join a room, leave a room, change name, make public) + 2. Check that the search logic behaves as expected. + + The background process that rebuilds the user directory is tested in + tests/storage/test_user_directory.py. 
""" servlets = [ @@ -39,19 +50,19 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): - + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["update_user_directory"] = True return self.setup_test_homeserver(config=config) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.handler = hs.get_user_directory_handler() self.event_builder_factory = self.hs.get_event_builder_factory() self.event_creation_handler = self.hs.get_event_creation_handler() + self.user_dir_helper = GetUserDirectoryTables(self.store) - def test_handle_local_profile_change_with_support_user(self): + def test_handle_local_profile_change_with_support_user(self) -> None: support_user_id = "@support:test" self.get_success( self.store.register_user( @@ -64,7 +75,9 @@ def test_handle_local_profile_change_with_support_user(self): ) self.get_success( - self.handler.handle_local_profile_change(support_user_id, None) + self.handler.handle_local_profile_change( + support_user_id, ProfileInfo("I love support me", None) + ) ) profile = self.get_success(self.store.get_user_in_directory(support_user_id)) self.assertTrue(profile is None) @@ -77,7 +90,7 @@ def test_handle_local_profile_change_with_support_user(self): profile = self.get_success(self.store.get_user_in_directory(regular_user_id)) self.assertTrue(profile["display_name"] == display_name) - def test_handle_local_profile_change_with_deactivated_user(self): + def test_handle_local_profile_change_with_deactivated_user(self) -> None: # create user r_user_id = "@regular:test" self.get_success( @@ -112,7 +125,7 @@ def test_handle_local_profile_change_with_deactivated_user(self): profile = self.get_success(self.store.get_user_in_directory(r_user_id)) self.assertTrue(profile is None) - def test_handle_user_deactivated_support_user(self): + def test_handle_user_deactivated_support_user(self) -> None: s_user_id = "@support:test" self.get_success( self.store.register_user( @@ -120,20 +133,29 @@ def test_handle_user_deactivated_support_user(self): ) ) - self.store.remove_from_user_dir = Mock(return_value=defer.succeed(None)) - self.get_success(self.handler.handle_local_user_deactivated(s_user_id)) - self.store.remove_from_user_dir.not_called() + mock_remove_from_user_dir = Mock(return_value=defer.succeed(None)) + with patch.object( + self.store, "remove_from_user_dir", mock_remove_from_user_dir + ): + self.get_success(self.handler.handle_local_user_deactivated(s_user_id)) + # BUG: the correct spelling is assert_not_called, but that makes the test fail + # and it's not clear that this is actually the behaviour we want. 
+ mock_remove_from_user_dir.not_called() - def test_handle_user_deactivated_regular_user(self): + def test_handle_user_deactivated_regular_user(self) -> None: r_user_id = "@regular:test" self.get_success( self.store.register_user(user_id=r_user_id, password_hash=None) ) - self.store.remove_from_user_dir = Mock(return_value=defer.succeed(None)) - self.get_success(self.handler.handle_local_user_deactivated(r_user_id)) - self.store.remove_from_user_dir.called_once_with(r_user_id) - def test_reactivation_makes_regular_user_searchable(self): + mock_remove_from_user_dir = Mock(return_value=defer.succeed(None)) + with patch.object( + self.store, "remove_from_user_dir", mock_remove_from_user_dir + ): + self.get_success(self.handler.handle_local_user_deactivated(r_user_id)) + mock_remove_from_user_dir.assert_called_once_with(r_user_id) + + def test_reactivation_makes_regular_user_searchable(self) -> None: user = self.register_user("regular", "pass") user_token = self.login(user, "pass") admin_user = self.register_user("admin", "pass", admin=True) @@ -171,7 +193,7 @@ def test_reactivation_makes_regular_user_searchable(self): self.assertEqual(len(s["results"]), 1) self.assertEqual(s["results"][0]["user_id"], user) - def test_private_room(self): + def test_private_room(self) -> None: """ A user can be searched for only by people that are either in a public room, or that share a private chat. @@ -191,11 +213,16 @@ def test_private_room(self): self.helper.join(room, user=u2, tok=u2_token) # Check we have populated the database correctly. - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) self.assertEqual( - self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)} + self.user_dir_helper._compress_shared(shares_private), + {(u1, u2, room), (u2, u1, room)}, ) self.assertEqual(public_users, []) @@ -215,10 +242,14 @@ def test_private_room(self): self.helper.leave(room, user=u2, tok=u2_token) # Check we have removed the values. - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) - self.assertEqual(self._compress_shared(shares_private), set()) + self.assertEqual(self.user_dir_helper._compress_shared(shares_private), set()) self.assertEqual(public_users, []) # User1 now gets no search results for any of the other users. @@ -228,7 +259,7 @@ def test_private_room(self): s = self.get_success(self.handler.search_users(u1, "user3", 10)) self.assertEqual(len(s["results"]), 0) - def test_spam_checker(self): + def test_spam_checker(self) -> None: """ A user which fails the spam checks will not appear in search results. """ @@ -246,11 +277,16 @@ def test_spam_checker(self): self.helper.join(room, user=u2, tok=u2_token) # Check we have populated the database correctly. 
- shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) self.assertEqual( - self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)} + self.user_dir_helper._compress_shared(shares_private), + {(u1, u2, room), (u2, u1, room)}, ) self.assertEqual(public_users, []) @@ -258,7 +294,7 @@ def test_spam_checker(self): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) - async def allow_all(user_profile): + async def allow_all(user_profile: ProfileInfo) -> bool: # Allow all users. return False @@ -272,7 +308,7 @@ async def allow_all(user_profile): self.assertEqual(len(s["results"]), 1) # Configure a spam checker that filters all users. - async def block_all(user_profile): + async def block_all(user_profile: ProfileInfo) -> bool: # All users are spammy. return True @@ -282,7 +318,7 @@ async def block_all(user_profile): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 0) - def test_legacy_spam_checker(self): + def test_legacy_spam_checker(self) -> None: """ A spam checker without the expected method should be ignored. """ @@ -300,11 +336,16 @@ def test_legacy_spam_checker(self): self.helper.join(room, user=u2, tok=u2_token) # Check we have populated the database correctly. - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) self.assertEqual( - self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)} + self.user_dir_helper._compress_shared(shares_private), + {(u1, u2, room), (u2, u1, room)}, ) self.assertEqual(public_users, []) @@ -317,134 +358,7 @@ def test_legacy_spam_checker(self): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) - def _compress_shared(self, shared): - """ - Compress a list of users who share rooms dicts to a list of tuples. - """ - r = set() - for i in shared: - r.add((i["user_id"], i["other_user_id"], i["room_id"])) - return r - - def get_users_in_public_rooms(self) -> List[Tuple[str, str]]: - r = self.get_success( - self.store.db_pool.simple_select_list( - "users_in_public_rooms", None, ("user_id", "room_id") - ) - ) - retval = [] - for i in r: - retval.append((i["user_id"], i["room_id"])) - return retval - - def get_users_who_share_private_rooms(self) -> List[Tuple[str, str, str]]: - return self.get_success( - self.store.db_pool.simple_select_list( - "users_who_share_private_rooms", - None, - ["user_id", "other_user_id", "room_id"], - ) - ) - - def _add_background_updates(self): - """ - Add the background updates we need to run. 
- """ - # Ugh, have to reset this flag - self.store.db_pool.updates._all_done = False - - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_createtables", - "progress_json": "{}", - }, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_process_rooms", - "progress_json": "{}", - "depends_on": "populate_user_directory_createtables", - }, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_process_users", - "progress_json": "{}", - "depends_on": "populate_user_directory_process_rooms", - }, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_user_directory_cleanup", - "progress_json": "{}", - "depends_on": "populate_user_directory_process_users", - }, - ) - ) - - def test_initial(self): - """ - The user directory's initial handler correctly updates the search tables. - """ - u1 = self.register_user("user1", "pass") - u1_token = self.login(u1, "pass") - u2 = self.register_user("user2", "pass") - u2_token = self.login(u2, "pass") - u3 = self.register_user("user3", "pass") - u3_token = self.login(u3, "pass") - - room = self.helper.create_room_as(u1, is_public=True, tok=u1_token) - self.helper.invite(room, src=u1, targ=u2, tok=u1_token) - self.helper.join(room, user=u2, tok=u2_token) - - private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) - self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token) - self.helper.join(private_room, user=u3, tok=u3_token) - - self.get_success(self.store.update_user_directory_stream_pos(None)) - self.get_success(self.store.delete_all_from_user_dir()) - - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() - - # Nothing updated yet - self.assertEqual(shares_private, []) - self.assertEqual(public_users, []) - - # Do the initial population of the user directory via the background update - self._add_background_updates() - - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) - - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() - - # User 1 and User 2 are in the same public room - self.assertEqual(set(public_users), {(u1, room), (u2, room)}) - - # User 1 and User 3 share private rooms - self.assertEqual( - self._compress_shared(shares_private), - {(u1, u3, private_room), (u3, u1, private_room)}, - ) - - def test_initial_share_all_users(self): + def test_initial_share_all_users(self) -> None: """ Search all users = True means that a user does not have to share a private room with the searching user or be in a public room to be search @@ -457,26 +371,16 @@ def test_initial_share_all_users(self): self.register_user("user2", "pass") u3 = self.register_user("user3", "pass") - # Wipe the user dir - self.get_success(self.store.update_user_directory_stream_pos(None)) - self.get_success(self.store.delete_all_from_user_dir()) - - # Do the initial population of the user directory via the background update - self._add_background_updates() - - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - 
self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) - - shares_private = self.get_users_who_share_private_rooms() - public_users = self.get_users_in_public_rooms() + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) # No users share rooms self.assertEqual(public_users, []) - self.assertEqual(self._compress_shared(shares_private), set()) + self.assertEqual(self.user_dir_helper._compress_shared(shares_private), set()) # Despite not sharing a room, search_all_users means we get a search # result. @@ -501,7 +405,7 @@ def test_initial_share_all_users(self): } } ) - def test_prefer_local_users(self): + def test_prefer_local_users(self) -> None: """Tests that local users are shown higher in search results when user_directory.prefer_local_users is True. """ @@ -535,15 +439,6 @@ def test_prefer_local_users(self): local_users = [local_user_1, local_user_2, local_user_3] remote_users = [remote_user_1, remote_user_2, remote_user_3] - # Populate the user directory via background update - self._add_background_updates() - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) - # The local searching user searches for the term "user", which other users have # in their user id results = self.get_success( @@ -565,7 +460,7 @@ def _add_user_to_room( room_id: str, room_version: RoomVersion, user_id: str, - ): + ) -> None: # Add a user to the room. builder = self.event_builder_factory.for_room_version( room_version, @@ -597,7 +492,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets_for_client_rest_resource, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["update_user_directory"] = True hs = self.setup_test_homeserver(config=config) @@ -606,7 +501,7 @@ def make_homeserver(self, reactor, clock): return hs - def test_disabling_room_list(self): + def test_disabling_room_list(self) -> None: self.config.userdirectory.user_directory_search_enabled = True # First we create a room with another user so that user dir is non-empty diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 222e5d129d73..74c8a8599e7d 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -11,6 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, List, Set, Tuple + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.rest import admin +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.storage import DataStore +from synapse.util import Clock from tests.unittest import HomeserverTestCase, override_config @@ -21,8 +30,183 @@ BELA = "@somenickname:a" +class GetUserDirectoryTables: + """Helper functions that we want to reuse in tests/handlers/test_user_directory.py""" + + def __init__(self, store: DataStore): + self.store = store + + def _compress_shared( + self, shared: List[Dict[str, str]] + ) -> Set[Tuple[str, str, str]]: + """ + Compress a list of users who share rooms dicts to a list of tuples. 
+ """ + r = set() + for i in shared: + r.add((i["user_id"], i["other_user_id"], i["room_id"])) + return r + + async def get_users_in_public_rooms(self) -> List[Tuple[str, str]]: + r = await self.store.db_pool.simple_select_list( + "users_in_public_rooms", None, ("user_id", "room_id") + ) + + retval = [] + for i in r: + retval.append((i["user_id"], i["room_id"])) + return retval + + async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]: + return await self.store.db_pool.simple_select_list( + "users_who_share_private_rooms", + None, + ["user_id", "other_user_id", "room_id"], + ) + + +class UserDirectoryInitialPopulationTestcase(HomeserverTestCase): + """Ensure that rebuilding the directory writes the correct data to the DB. + + See also tests/handlers/test_user_directory.py for similar checks. They + test the incremental updates, rather than the big rebuild. + """ + + servlets = [ + login.register_servlets, + admin.register_servlets_for_client_rest_resource, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastore() + self.user_dir_helper = GetUserDirectoryTables(self.store) + + def _purge_and_rebuild_user_dir(self) -> None: + """Nuke the user directory tables, start the background process to + repopulate them, and wait for the process to complete. This allows us + to inspect the outcome of the background process alone, without any of + the other incremental updates. + """ + self.get_success(self.store.update_user_directory_stream_pos(None)) + self.get_success(self.store.delete_all_from_user_dir()) + + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) + + # Nothing updated yet + self.assertEqual(shares_private, []) + self.assertEqual(public_users, []) + + # Ugh, have to reset this flag + self.store.db_pool.updates._all_done = False + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_createtables", + "progress_json": "{}", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_process_rooms", + "progress_json": "{}", + "depends_on": "populate_user_directory_createtables", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_process_users", + "progress_json": "{}", + "depends_on": "populate_user_directory_process_rooms", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": "populate_user_directory_cleanup", + "progress_json": "{}", + "depends_on": "populate_user_directory_process_users", + }, + ) + ) + + while not self.get_success( + self.store.db_pool.updates.has_completed_background_updates() + ): + self.get_success( + self.store.db_pool.updates.do_next_background_update(100), by=0.1 + ) + + def test_initial(self) -> None: + """ + The user directory's initial handler correctly updates the search tables. 
+ """ + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + u2_token = self.login(u2, "pass") + u3 = self.register_user("user3", "pass") + u3_token = self.login(u3, "pass") + + room = self.helper.create_room_as(u1, is_public=True, tok=u1_token) + self.helper.invite(room, src=u1, targ=u2, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) + + private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token) + self.helper.join(private_room, user=u3, tok=u3_token) + + self.get_success(self.store.update_user_directory_stream_pos(None)) + self.get_success(self.store.delete_all_from_user_dir()) + + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) + + # Nothing updated yet + self.assertEqual(shares_private, []) + self.assertEqual(public_users, []) + + # Do the initial population of the user directory via the background update + self._purge_and_rebuild_user_dir() + + shares_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + public_users = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) + + # User 1 and User 2 are in the same public room + self.assertEqual(set(public_users), {(u1, room), (u2, room)}) + + # User 1 and User 3 share private rooms + self.assertEqual( + self.user_dir_helper._compress_shared(shares_private), + {(u1, u3, private_room), (u3, u1, private_room)}, + ) + + class UserDirectoryStoreTestCase(HomeserverTestCase): - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() # alice and bob are both in !room_id. bobby is not but shares @@ -33,7 +217,7 @@ def prepare(self, reactor, clock, hs): self.get_success(self.store.update_profile_in_user_dir(BELA, "Bela", None)) self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB))) - def test_search_user_dir(self): + def test_search_user_dir(self) -> None: # normally when alice searches the directory she should just find # bob because bobby doesn't share a room with her. r = self.get_success(self.store.search_user_dir(ALICE, "bob", 10)) @@ -44,7 +228,7 @@ def test_search_user_dir(self): ) @override_config({"user_directory": {"search_all_users": True}}) - def test_search_user_dir_all_users(self): + def test_search_user_dir_all_users(self) -> None: r = self.get_success(self.store.search_user_dir(ALICE, "bob", 10)) self.assertFalse(r["limited"]) self.assertEqual(2, len(r["results"])) @@ -58,7 +242,7 @@ def test_search_user_dir_all_users(self): ) @override_config({"user_directory": {"search_all_users": True}}) - def test_search_user_dir_stop_words(self): + def test_search_user_dir_stop_words(self) -> None: """Tests that a user can look up another user by searching for the start if its display name even if that name happens to be a common English word that would usually be ignored in full text searches. 
diff --git a/tests/unittest.py b/tests/unittest.py index 6d5d87cb78e2..5f93ebf1479a 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -28,6 +28,7 @@ from twisted.internet.defer import Deferred, ensureDeferred, succeed from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool +from twisted.test.proto_helpers import MemoryReactor from twisted.trial import unittest from twisted.web.resource import Resource @@ -46,6 +47,7 @@ ) from synapse.server import HomeServer from synapse.types import UserID, create_requester +from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree from synapse.util.ratelimitutils import FederationRateLimiter @@ -371,7 +373,7 @@ def default_config(self): return config - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): """ Prepare for the test. This involves things like mocking out parts of the homeserver, or building test data common across the whole test From a03ed5e6ae23e52941e91ecb892a7b5c88964d90 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 30 Sep 2021 11:06:47 +0100 Subject: [PATCH 031/111] Fix issue causing sending presence to ASes to fail (due to incomplete type annotations) (#10944) --- changelog.d/10944.bugfix | 1 + synapse/handlers/presence.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10944.bugfix diff --git a/changelog.d/10944.bugfix b/changelog.d/10944.bugfix new file mode 100644 index 000000000000..49baff7df148 --- /dev/null +++ b/changelog.d/10944.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.44.0rc1 which prevented sending presence events to application services. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 983c837c662e..404afb9402bc 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -52,6 +52,7 @@ from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState +from synapse.appservice import ApplicationService from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background from synapse.logging.utils import log_function @@ -1521,10 +1522,11 @@ async def get_new_events( user: UserID, from_key: Optional[int], limit: Optional[int] = None, - room_ids: Optional[List[str]] = None, + room_ids: Optional[Collection[str]] = None, is_guest: bool = False, explicit_room_id: Optional[str] = None, include_offline: bool = True, + service: Optional[ApplicationService] = None, ) -> Tuple[List[UserPresenceState], int]: # The process for getting presence events are: # 1. Get the rooms the user is in. From c4bf48ee6fa4662d88a5bf682e79787851fe9cd8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 28 Sep 2021 22:00:04 -0500 Subject: [PATCH 032/111] Fix event context for outliers in important MSC2716 spot (#10938) Fix event context for outlier causing failures in all of the MSC2716 Complement tests. The `EventContext.for_outlier` refactor happened in https://github.com/matrix-org/synapse/pull/10883 and this spot was left out. 
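To make the shape of the fix easier to follow before reading the diff, here is a
rough sketch of the corrected control flow in `create_new_client_event`
(paraphrasing the `synapse/handlers/message.py` change below; `EventContext.for_outlier`
is the constructor introduced by the #10883 refactor):

```python
# Sketch of the fixed logic (see the synapse/handlers/message.py diff below).
# Outliers are events we hold without knowing the surrounding room state, so
# no state resolution should be attempted for them.
if builder.internal_metadata.outlier:
    event.internal_metadata.outlier = True
    context = EventContext.for_outlier()
else:
    # Non-outliers have their event context computed from room state as usual.
    context = await self.state.compute_event_context(event)
```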
--- changelog.d/10938.bugfix | 1 + synapse/handlers/message.py | 13 ++++--------- 2 files changed, 5 insertions(+), 9 deletions(-) create mode 100644 changelog.d/10938.bugfix diff --git a/changelog.d/10938.bugfix b/changelog.d/10938.bugfix new file mode 100644 index 000000000000..9cf0ea8788b9 --- /dev/null +++ b/changelog.d/10938.bugfix @@ -0,0 +1 @@ +Fix bug introduced in Synapse 1.44 which caused the experimental [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint to return a 500 error. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c66aefe2c4c5..fd861e94f8a1 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -952,18 +952,13 @@ async def create_new_client_event( depth=depth, ) - old_state = None - # Pass on the outlier property from the builder to the event # after it is created if builder.internal_metadata.outlier: - event.internal_metadata.outlier = builder.internal_metadata.outlier - - # Calculate the state for outliers that pass in their own `auth_event_ids` - if auth_event_ids: - old_state = await self.store.get_events_as_list(auth_event_ids) - - context = await self.state.compute_event_context(event, old_state=old_state) + event.internal_metadata.outlier = True + context = EventContext.for_outlier() + else: + context = await self.state.compute_event_context(event) if requester: context.app_service = requester.app_service From 3412f5c8d8c8aff5bcf9b0e5012dfa2f4e895464 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Thu, 30 Sep 2021 12:40:24 +0100 Subject: [PATCH 033/111] 1.44.0rc2 --- CHANGES.md | 16 ++++++++++++++++ changelog.d/10919.doc | 1 - changelog.d/10938.bugfix | 1 - changelog.d/10944.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 23 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/10919.doc delete mode 100644 changelog.d/10938.bugfix delete mode 100644 changelog.d/10944.bugfix diff --git a/CHANGES.md b/CHANGES.md index 271e2271fb24..59ff967633c1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +Synapse 1.44.0rc2 (2021-09-30) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in v1.44.0rc1 which caused the experimental [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint to return a 500 error. ([\#10938](https://github.com/matrix-org/synapse/issues/10938)) +- Fix a bug introduced in v1.44.0rc1 which prevented sending presence events to application services. ([\#10944](https://github.com/matrix-org/synapse/issues/10944)) + + +Improved Documentation +---------------------- + +- Minor updates to the installation instructions. ([\#10919](https://github.com/matrix-org/synapse/issues/10919)) + + Synapse 1.44.0rc1 (2021-09-29) ============================== diff --git a/changelog.d/10919.doc b/changelog.d/10919.doc deleted file mode 100644 index d0bddc3f1b58..000000000000 --- a/changelog.d/10919.doc +++ /dev/null @@ -1 +0,0 @@ -Minor updates to the installation instructions. diff --git a/changelog.d/10938.bugfix b/changelog.d/10938.bugfix deleted file mode 100644 index 9cf0ea8788b9..000000000000 --- a/changelog.d/10938.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug introduced in Synapse 1.44 which caused the experimental [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint to return a 500 error. 
diff --git a/changelog.d/10944.bugfix b/changelog.d/10944.bugfix deleted file mode 100644 index 49baff7df148..000000000000 --- a/changelog.d/10944.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.44.0rc1 which prevented sending presence events to application services. diff --git a/debian/changelog b/debian/changelog index 191bb97c5ed2..b08a5927808d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.44.0~rc2) stable; urgency=medium + + * New synapse release 1.44.0~rc2. + + -- Synapse Packaging team Thu, 30 Sep 2021 12:39:10 +0100 + matrix-synapse-py3 (1.44.0~rc1) stable; urgency=medium * New synapse release 1.44.0~rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index a1fec8ad2bec..8791c20e2626 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.44.0rc1" +__version__ = "1.44.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 29364145b29e84c5dcab076c4e0d436ebf77e4cd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 30 Sep 2021 12:51:47 +0100 Subject: [PATCH 034/111] Pass str to twisted's IReactorTCP (#10895) This follows a correction made in twisted/twisted#1664 and should fix our Twisted Trial CI job. Until that change is in a twisted release, we'll have to ignore the type of the `host` argument. I've raised #10899 to remind us to review the issue in a few months' time. --- changelog.d/10895.misc | 1 + synapse/handlers/send_email.py | 9 +++++++-- synapse/replication/tcp/handler.py | 8 ++++++-- synapse/replication/tcp/redis.py | 8 +++++++- tests/replication/_base.py | 4 ++-- tests/server.py | 8 ++++---- 6 files changed, 27 insertions(+), 11 deletions(-) create mode 100644 changelog.d/10895.misc diff --git a/changelog.d/10895.misc b/changelog.d/10895.misc new file mode 100644 index 000000000000..d1c822498016 --- /dev/null +++ b/changelog.d/10895.misc @@ -0,0 +1 @@ +Fix type hints to be compatible with an upcoming change to Twisted. \ No newline at end of file diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 25e6b012b7ca..1a062a784cd4 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -105,8 +105,13 @@ def build_sender_factory(**kwargs: Any) -> ESMTPSenderFactory: # set to enable TLS. 
factory = build_sender_factory(hostname=smtphost if enable_tls else None) - # the IReactorTCP interface claims host has to be a bytes, which seems to be wrong - reactor.connectTCP(smtphost, smtpport, factory, timeout=30, bindAddress=None) # type: ignore[arg-type] + reactor.connectTCP( + smtphost, # type: ignore[arg-type] + smtpport, + factory, + timeout=30, + bindAddress=None, + ) await make_deferred_yieldable(d) diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 1438a82b602d..d64d1dbacd22 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -315,7 +315,7 @@ def start_replication(self, hs): hs, outbound_redis_connection ) hs.get_reactor().connectTCP( - hs.config.redis.redis_host.encode(), + hs.config.redis.redis_host, # type: ignore[arg-type] hs.config.redis.redis_port, self._factory, ) @@ -324,7 +324,11 @@ def start_replication(self, hs): self._factory = DirectTcpReplicationClientFactory(hs, client_name, self) host = hs.config.worker.worker_replication_host port = hs.config.worker.worker_replication_port - hs.get_reactor().connectTCP(host.encode(), port, self._factory) + hs.get_reactor().connectTCP( + host, # type: ignore[arg-type] + port, + self._factory, + ) def get_streams(self) -> Dict[str, Stream]: """Get a map from stream name to all streams.""" diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 8c0df627c8d0..062fe2f33e0c 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -364,6 +364,12 @@ def lazyConnection( factory.continueTrying = reconnect reactor = hs.get_reactor() - reactor.connectTCP(host.encode(), port, factory, timeout=30, bindAddress=None) + reactor.connectTCP( + host, # type: ignore[arg-type] + port, + factory, + timeout=30, + bindAddress=None, + ) return factory.handler diff --git a/tests/replication/_base.py b/tests/replication/_base.py index c7555c26dbdd..cdd6e3d3c1df 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -240,7 +240,7 @@ def setUp(self): if self.hs.config.redis.redis_enabled: # Handle attempts to connect to fake redis server. self.reactor.add_tcp_client_callback( - b"localhost", + "localhost", 6379, self.connect_any_redis_attempts, ) @@ -424,7 +424,7 @@ def connect_any_redis_attempts(self): clients = self.reactor.tcpClients while clients: (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) - self.assertEqual(host, b"localhost") + self.assertEqual(host, "localhost") self.assertEqual(port, 6379) client_protocol = client_factory.buildProtocol(None) diff --git a/tests/server.py b/tests/server.py index 88dfa8058e62..64645651ce5d 100644 --- a/tests/server.py +++ b/tests/server.py @@ -317,7 +317,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): def __init__(self): self.threadpool = ThreadPool(self) - self._tcp_callbacks = {} + self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {} self._udp = [] self.lookups: Dict[str, str] = {} self._thread_callbacks: Deque[Callable[[], None]] = deque() @@ -355,7 +355,7 @@ def callFromThread(self, callback, *args, **kwargs): def getThreadPool(self): return self.threadpool - def add_tcp_client_callback(self, host, port, callback): + def add_tcp_client_callback(self, host: str, port: int, callback: Callable): """Add a callback that will be invoked when we receive a connection attempt to the given IP/port using `connectTCP`. 
@@ -364,7 +364,7 @@ def add_tcp_client_callback(self, host, port, callback): """ self._tcp_callbacks[(host, port)] = callback - def connectTCP(self, host, port, factory, timeout=30, bindAddress=None): + def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None): """Fake L{IReactorTCP.connectTCP}.""" conn = super().connectTCP( @@ -475,7 +475,7 @@ def runInteraction(interaction, *args, **kwargs): return server -def get_clock(): +def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: clock = ThreadedMemoryReactorClock() hs_clock = Clock(clock) return clock, hs_clock From 145cb6d08e2f775da208293a507c1dcd2d4128ce Mon Sep 17 00:00:00 2001 From: Lukas Lihotzki Date: Thu, 30 Sep 2021 14:04:55 +0200 Subject: [PATCH 035/111] Fix getTurnServer response: return an integer ttl (#10922) `ttl` must be an integer according to the OpenAPI spec: https://github.com/matrix-org/matrix-doc/blob/old_master/data/api/client-server/voip.yaml#L70 True division (`/`) returns a float instead (`"ttl": 7200.0`). Floor division (`//`) returns an integer, so the response is spec compliant. Signed-off-by: Lukas Lihotzki --- changelog.d/10922.bugfix | 1 + synapse/rest/client/voip.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10922.bugfix diff --git a/changelog.d/10922.bugfix b/changelog.d/10922.bugfix new file mode 100644 index 000000000000..b7315514e0e7 --- /dev/null +++ b/changelog.d/10922.bugfix @@ -0,0 +1 @@ +Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki. diff --git a/synapse/rest/client/voip.py b/synapse/rest/client/voip.py index ea2b8aa45f4e..ea7e025156da 100644 --- a/synapse/rest/client/voip.py +++ b/synapse/rest/client/voip.py @@ -70,7 +70,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: { "username": username, "password": password, - "ttl": userLifetime / 1000, + "ttl": userLifetime // 1000, "uris": turnUris, }, ) From 7d84d2523a02ce90badb6bdee5ffc182170a57fe Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 30 Sep 2021 11:03:29 -0400 Subject: [PATCH 036/111] Fix errors in Synapse logs from unit tests. (#10939) Fix some harmless errors from background processes (mostly due to awaiting Mock objects) that occurred in the Synapse logs during unit tests. --- changelog.d/10939.misc | 1 + tests/appservice/test_scheduler.py | 40 ++++++++++------------ tests/events/test_presence_router.py | 7 +++- tests/federation/test_federation_sender.py | 6 ++-- tests/module_api/test_api.py | 7 +++- 5 files changed, 35 insertions(+), 26 deletions(-) create mode 100644 changelog.d/10939.misc diff --git a/changelog.d/10939.misc b/changelog.d/10939.misc new file mode 100644 index 000000000000..a7cecf8a5b61 --- /dev/null +++ b/changelog.d/10939.misc @@ -0,0 +1 @@ +Fix logged errors in unit tests. 
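Most of these logged errors came from background processes awaiting a bare
`Mock`, whose call result is not awaitable. A minimal stand-alone illustration
of the failure mode and the fix (a hypothetical example using the stdlib
`AsyncMock` as a stand-in; the diffs below use Synapse's own `simple_async_mock`
helper from `tests/test_utils` instead):

```python
import asyncio
from unittest.mock import AsyncMock, Mock


async def main() -> None:
    store = Mock()
    try:
        # Calling a plain Mock returns another Mock, which cannot be awaited;
        # a background process hitting this logs a TypeError.
        await store.remove_from_user_dir("@user:test")
    except TypeError as e:
        print(f"would be logged as an error: {e}")

    # An async-aware mock resolves cleanly when awaited.
    store.remove_from_user_dir = AsyncMock(return_value=None)
    await store.remove_from_user_dir("@user:test")


asyncio.run(main())
```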
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index a2b5ed2030d0..55f0899bae7d 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -24,7 +24,7 @@ from synapse.logging.context import make_deferred_yieldable from tests import unittest -from tests.test_utils import make_awaitable +from tests.test_utils import simple_async_mock from ..utils import MockClock @@ -49,11 +49,10 @@ def test_single_service_up_txn_sent(self): txn = Mock(id=txn_id, service=service, events=events) # mock methods - self.store.get_appservice_state = Mock( - return_value=defer.succeed(ApplicationServiceState.UP) - ) - txn.send = Mock(return_value=make_awaitable(True)) - self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn)) + self.store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP) + txn.send = simple_async_mock(True) + txn.complete = simple_async_mock(True) + self.store.create_appservice_txn = simple_async_mock(txn) # actual call self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) @@ -71,10 +70,10 @@ def test_single_service_down(self): events = [Mock(), Mock()] txn = Mock(id="idhere", service=service, events=events) - self.store.get_appservice_state = Mock( - return_value=defer.succeed(ApplicationServiceState.DOWN) + self.store.get_appservice_state = simple_async_mock( + ApplicationServiceState.DOWN ) - self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn)) + self.store.create_appservice_txn = simple_async_mock(txn) # actual call self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) @@ -94,12 +93,10 @@ def test_single_service_up_txn_not_sent(self): txn = Mock(id=txn_id, service=service, events=events) # mock methods - self.store.get_appservice_state = Mock( - return_value=defer.succeed(ApplicationServiceState.UP) - ) - self.store.set_appservice_state = Mock(return_value=defer.succeed(True)) - txn.send = Mock(return_value=make_awaitable(False)) # fails to send - self.store.create_appservice_txn = Mock(return_value=defer.succeed(txn)) + self.store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP) + self.store.set_appservice_state = simple_async_mock(True) + txn.send = simple_async_mock(False) # fails to send + self.store.create_appservice_txn = simple_async_mock(txn) # actual call self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) @@ -122,7 +119,7 @@ def setUp(self): self.as_api = Mock() self.store = Mock() self.service = Mock() - self.callback = Mock() + self.callback = simple_async_mock() self.recoverer = _Recoverer( clock=self.clock, as_api=self.as_api, @@ -144,8 +141,8 @@ def take_txn(*args, **kwargs): self.recoverer.recover() # shouldn't have called anything prior to waiting for exp backoff self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count) - txn.send = Mock(return_value=make_awaitable(True)) - txn.complete.return_value = make_awaitable(None) + txn.send = simple_async_mock(True) + txn.complete = simple_async_mock(None) # wait for exp backoff self.clock.advance_time(2) self.assertEquals(1, txn.send.call_count) @@ -170,8 +167,8 @@ def take_txn(*args, **kwargs): self.recoverer.recover() self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count) - txn.send = Mock(return_value=make_awaitable(False)) - txn.complete.return_value = make_awaitable(None) + txn.send = simple_async_mock(False) + txn.complete = simple_async_mock(None) self.clock.advance_time(2) 
self.assertEquals(1, txn.send.call_count) self.assertEquals(0, txn.complete.call_count) @@ -184,7 +181,7 @@ def take_txn(*args, **kwargs): self.assertEquals(3, txn.send.call_count) self.assertEquals(0, txn.complete.call_count) self.assertEquals(0, self.callback.call_count) - txn.send = Mock(return_value=make_awaitable(True)) # successfully send the txn + txn.send = simple_async_mock(True) # successfully send the txn pop_txn = True # returns the txn the first time, then no more. self.clock.advance_time(16) self.assertEquals(1, txn.send.call_count) # new mock reset call count @@ -195,6 +192,7 @@ def take_txn(*args, **kwargs): class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): def setUp(self): self.txn_ctrl = Mock() + self.txn_ctrl.send = simple_async_mock() self.queuer = _ServiceQueuer(self.txn_ctrl, MockClock()) def test_send_single_event_no_queue(self): diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index 3b3866bff8fb..3deb14c308b6 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -26,6 +26,7 @@ from synapse.types import JsonDict, StreamToken, create_requester from tests.handlers.test_sync import generate_sync_config +from tests.test_utils import simple_async_mock from tests.unittest import FederatingHomeserverTestCase, TestCase, override_config @@ -133,8 +134,12 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase): ] def make_homeserver(self, reactor, clock): + # Mock out the calls over federation. + fed_transport_client = Mock(spec=["send_transaction"]) + fed_transport_client.send_transaction = simple_async_mock({}) + hs = self.setup_test_homeserver( - federation_transport_client=Mock(spec=["send_transaction"]), + federation_transport_client=fed_transport_client, ) # Load the modules into the homeserver module_api = hs.get_module_api() diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 65b18fbd7a14..b457dad6d263 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -336,7 +336,7 @@ def test_unreachable_server(self): recovery """ mock_send_txn = self.hs.get_federation_transport_client().send_transaction - mock_send_txn.side_effect = lambda t, cb: defer.fail("fail") + mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) # create devices u1 = self.register_user("user", "pass") @@ -376,7 +376,7 @@ def test_prune_outbound_device_pokes1(self): This case tests the behaviour when the server has never been reachable. 
""" mock_send_txn = self.hs.get_federation_transport_client().send_transaction - mock_send_txn.side_effect = lambda t, cb: defer.fail("fail") + mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) # create devices u1 = self.register_user("user", "pass") @@ -429,7 +429,7 @@ def test_prune_outbound_device_pokes2(self): # now the server goes offline mock_send_txn = self.hs.get_federation_transport_client().send_transaction - mock_send_txn.side_effect = lambda t, cb: defer.fail("fail") + mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) self.login("user", "pass", device_id="D2") self.login("user", "pass", device_id="D3") diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 9d38974fba93..e915dd5c7cd2 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -25,6 +25,7 @@ from tests.events.test_presence_router import send_presence_update, sync_presence from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.test_utils import simple_async_mock from tests.test_utils.event_injection import inject_member_event from tests.unittest import HomeserverTestCase, override_config from tests.utils import USE_POSTGRES_FOR_TESTS @@ -46,8 +47,12 @@ def prepare(self, reactor, clock, homeserver): self.auth_handler = homeserver.get_auth_handler() def make_homeserver(self, reactor, clock): + # Mock out the calls over federation. + fed_transport_client = Mock(spec=["send_transaction"]) + fed_transport_client.send_transaction = simple_async_mock({}) + return self.setup_test_homeserver( - federation_transport_client=Mock(spec=["send_transaction"]), + federation_transport_client=fed_transport_client, ) def test_can_register_user(self): From d1bf5f7c9d669fcf60aadc2c6527447adef2c43c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 30 Sep 2021 11:13:59 -0400 Subject: [PATCH 037/111] Strip "join_authorised_via_users_server" from join events which do not need it. (#10933) This fixes a "Event not signed by authorising server" error when transition room member from join -> join, e.g. when updating a display name or avatar URL for restricted rooms. --- changelog.d/10933.bugfix | 1 + synapse/api/constants.py | 3 +++ synapse/event_auth.py | 12 +++++++----- synapse/events/utils.py | 2 +- synapse/federation/federation_base.py | 6 +++--- synapse/federation/federation_client.py | 6 +++--- synapse/federation/federation_server.py | 6 +++--- synapse/handlers/federation.py | 9 +++++++-- synapse/handlers/room_member.py | 10 +++++++++- tests/events/test_utils.py | 7 ++++--- tests/test_event_auth.py | 9 +++++---- 11 files changed, 46 insertions(+), 25 deletions(-) create mode 100644 changelog.d/10933.bugfix diff --git a/changelog.d/10933.bugfix b/changelog.d/10933.bugfix new file mode 100644 index 000000000000..e0694fea22f5 --- /dev/null +++ b/changelog.d/10933.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 39fd9954d507..a31f037748a3 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -217,6 +217,9 @@ class EventContentFields: # For "marker" events MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" + # The authorising user for joining a restricted room. 
+ AUTHORISING_USER = "join_authorised_via_users_server" + class RoomTypes: """Understood values of the room_type field of m.room.create events.""" diff --git a/synapse/event_auth.py b/synapse/event_auth.py index eef354de6ea5..7a1adc27509e 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -102,11 +102,11 @@ def validate_event_for_room_version( room_version_obj.msc3083_join_rules and event.type == EventTypes.Member and event.membership == Membership.JOIN - and "join_authorised_via_users_server" in event.content + and EventContentFields.AUTHORISING_USER in event.content ) if is_invite_via_allow_rule: authoriser_domain = get_domain_from_id( - event.content["join_authorised_via_users_server"] + event.content[EventContentFields.AUTHORISING_USER] ) if not event.signatures.get(authoriser_domain): raise AuthError(403, "Event not signed by authorising server") @@ -413,7 +413,9 @@ def _is_membership_change_allowed( # Note that if the caller is in the room or invited, then they do # not need to meet the allow rules. if not caller_in_room and not caller_invited: - authorising_user = event.content.get("join_authorised_via_users_server") + authorising_user = event.content.get( + EventContentFields.AUTHORISING_USER + ) if authorising_user is None: raise AuthError(403, "Join event is missing authorising user.") @@ -868,10 +870,10 @@ def auth_types_for_event( auth_types.add(key) if room_version.msc3083_join_rules and membership == Membership.JOIN: - if "join_authorised_via_users_server" in event.content: + if EventContentFields.AUTHORISING_USER in event.content: key = ( EventTypes.Member, - event.content["join_authorised_via_users_server"], + event.content[EventContentFields.AUTHORISING_USER], ) auth_types.add(key) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index a13fb0148fc8..520edbbf61fb 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -105,7 +105,7 @@ def add_fields(*fields): if event_type == EventTypes.Member: add_fields("membership") if room_version.msc3375_redaction_rules: - add_fields("join_authorised_via_users_server") + add_fields(EventContentFields.AUTHORISING_USER) elif event_type == EventTypes.Create: # MSC2176 rules state that create events cannot be redacted. 
if room_version.msc2176_redaction_rules: diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 024e440ff401..0cd424e12aa1 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -15,7 +15,7 @@ import logging from collections import namedtuple -from synapse.api.constants import MAX_DEPTH, EventTypes, Membership +from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import EventFormatVersions, RoomVersion from synapse.crypto.event_signing import check_event_content_hash @@ -184,10 +184,10 @@ async def _check_sigs_on_pdu( room_version.msc3083_join_rules and pdu.type == EventTypes.Member and pdu.membership == Membership.JOIN - and "join_authorised_via_users_server" in pdu.content + and EventContentFields.AUTHORISING_USER in pdu.content ): authorising_server = get_domain_from_id( - pdu.content["join_authorised_via_users_server"] + pdu.content[EventContentFields.AUTHORISING_USER] ) try: await keyring.verify_event_for_server( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 584836c04ad1..2ab4dec88fe6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -37,7 +37,7 @@ import attr from prometheus_client import Counter -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import ( CodeMessageException, Codes, @@ -875,9 +875,9 @@ async def _execute(pdu: EventBase) -> None: # If the join is being authorised via allow rules, we need to send # the /send_join back to the same server that was originally used # with /make_join. - if "join_authorised_via_users_server" in pdu.content: + if EventContentFields.AUTHORISING_USER in pdu.content: destinations = [ - get_domain_from_id(pdu.content["join_authorised_via_users_server"]) + get_domain_from_id(pdu.content[EventContentFields.AUTHORISING_USER]) ] return await self._try_destination_list( diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 83f11d6b8872..d8c0b86f2301 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -34,7 +34,7 @@ from twisted.internet.abstract import isIPAddress from twisted.python import failure -from synapse.api.constants import EduTypes, EventTypes, Membership +from synapse.api.constants import EduTypes, EventContentFields, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -765,11 +765,11 @@ async def _on_send_membership_event( if ( room_version.msc3083_join_rules and event.membership == Membership.JOIN - and "join_authorised_via_users_server" in event.content + and EventContentFields.AUTHORISING_USER in event.content ): # We can only authorise our own users. 
authorising_server = get_domain_from_id( - event.content["join_authorised_via_users_server"] + event.content[EventContentFields.AUTHORISING_USER] ) if authorising_server != self.server_name: raise SynapseError( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0a10a5c28aec..043ca4a224b8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -27,7 +27,12 @@ from twisted.internet import defer from synapse import event_auth -from synapse.api.constants import EventTypes, Membership, RejectedReason +from synapse.api.constants import ( + EventContentFields, + EventTypes, + Membership, + RejectedReason, +) from synapse.api.errors import ( AuthError, CodeMessageException, @@ -716,7 +721,7 @@ async def on_make_join_request( if include_auth_user_id: event_content[ - "join_authorised_via_users_server" + EventContentFields.AUTHORISING_USER ] = await self._event_auth_handler.get_user_which_could_invite( room_id, state_ids, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 02103f6c9aa8..29b3e41cc925 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -573,6 +573,14 @@ async def update_membership_locked( errcode=Codes.BAD_JSON, ) + # The event content should *not* include the authorising user as + # it won't be properly signed. Strip it out since it might come + # back from a client updating a display name / avatar. + # + # This only applies to restricted rooms, but there should be no reason + # for a client to include it. Unconditionally remove it. + content.pop(EventContentFields.AUTHORISING_USER, None) + effective_membership_state = action if action in ["kick", "unban"]: effective_membership_state = "leave" @@ -939,7 +947,7 @@ async def _should_perform_remote_join( # be included in the event content in order to efficiently validate # the event. content[ - "join_authorised_via_users_server" + EventContentFields.AUTHORISING_USER ] = await self.event_auth_handler.get_user_which_could_invite( room_id, current_state_ids, diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 5446fda5e7a3..1dea09e4800d 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.utils import ( @@ -352,7 +353,7 @@ def test_member(self): "event_id": "$test:domain", "content": { "membership": "join", - "join_authorised_via_users_server": "@user:domain", + EventContentFields.AUTHORISING_USER: "@user:domain", "other_key": "stripped", }, }, @@ -372,7 +373,7 @@ def test_member(self): "type": "m.room.member", "content": { "membership": "join", - "join_authorised_via_users_server": "@user:domain", + EventContentFields.AUTHORISING_USER: "@user:domain", "other_key": "stripped", }, }, @@ -380,7 +381,7 @@ def test_member(self): "type": "m.room.member", "content": { "membership": "join", - "join_authorised_via_users_server": "@user:domain", + EventContentFields.AUTHORISING_USER: "@user:domain", }, "signatures": {}, "unsigned": {}, diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e7a7d008832d..cf407c51cff4 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -16,6 +16,7 @@ from typing import Optional from synapse import event_auth +from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict @@ -353,7 +354,7 @@ def test_join_rules_msc3083_restricted(self): authorised_join_event = _join_event( pleb, additional_content={ - "join_authorised_via_users_server": "@creator:example.com" + EventContentFields.AUTHORISING_USER: "@creator:example.com" }, ) event_auth.check_auth_rules_for_event( @@ -376,7 +377,7 @@ def test_join_rules_msc3083_restricted(self): _join_event( pleb, additional_content={ - "join_authorised_via_users_server": "@inviter:foo.test" + EventContentFields.AUTHORISING_USER: "@inviter:foo.test" }, ), pl_auth_events, @@ -401,7 +402,7 @@ def test_join_rules_msc3083_restricted(self): _join_event( pleb, additional_content={ - "join_authorised_via_users_server": "@other:example.com" + EventContentFields.AUTHORISING_USER: "@other:example.com" }, ), auth_events, @@ -417,7 +418,7 @@ def test_join_rules_msc3083_restricted(self): "join", sender=creator, additional_content={ - "join_authorised_via_users_server": "@inviter:foo.test" + EventContentFields.AUTHORISING_USER: "@inviter:foo.test" }, ), auth_events, From 9e5a429c8b082d4cfbc0bd04c1ddde8822fd96b4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 30 Sep 2021 14:06:02 -0400 Subject: [PATCH 038/111] Clean-up registration tests (#10945) Uses `override_config` and fixes test_auto_create_auto_join_where_no_consent to properly configure auto-join rooms. --- changelog.d/10945.misc | 1 + synapse/handlers/register.py | 4 +- tests/handlers/test_register.py | 89 +++++++++++++++++++-------------- 3 files changed, 56 insertions(+), 38 deletions(-) create mode 100644 changelog.d/10945.misc diff --git a/changelog.d/10945.misc b/changelog.d/10945.misc new file mode 100644 index 000000000000..7cf1f02ad612 --- /dev/null +++ b/changelog.d/10945.misc @@ -0,0 +1 @@ +Fix a broken test to ensure that consent configuration works during registration. 
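For reference, the `override_config` pattern this patch rolls out looks roughly like the sketch below: the decorator (exported by `tests.unittest`) merges the given dict into the default homeserver config before the homeserver is built, replacing ad-hoc `self.hs.config.foo = ...` mutation inside test bodies. This is a minimal sketch assuming the standard Synapse test harness; the class and test names are made up for illustration.

```python
# Minimal sketch of the override_config pattern; not code from this patch.
from tests.unittest import HomeserverTestCase, override_config


class ExampleRegistrationTestCase(HomeserverTestCase):  # hypothetical test case
    @override_config({"auto_join_rooms": ["#room:test"]})
    def test_auto_join(self):
        # The decorator merged auto_join_rooms into the default config before
        # the homeserver was built, so registration sees it straight away.
        handler = self.hs.get_registration_handler()
        store = self.hs.get_datastore()
        user_id = self.get_success(handler.register_user(localpart="jeff"))
        rooms = self.get_success(store.get_rooms_for_user(user_id))
        self.assertEqual(len(rooms), 1)
```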
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4a7ccb882e43..cb4eb0720b5e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -340,6 +340,8 @@ async def register_user( auth_provider=(auth_provider_id or ""), ).inc() + # If the user does not need to consent at registration, auto-join any + # configured rooms. if not self.hs.config.consent.user_consent_at_registration: if not self.hs.config.auto_join_rooms_for_guests and make_guest: logger.info( @@ -387,7 +389,7 @@ async def _create_and_join_rooms(self, user_id: str) -> None: "preset": self.hs.config.registration.autocreate_auto_join_room_preset, } - # If the configuration providers a user ID to create rooms with, use + # If the configuration provides a user ID to create rooms with, use # that instead of the first user registered. requires_join = False if self.hs.config.registration.auto_join_user_id: diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index bd05a2c2d15d..db691c4c1c30 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -16,7 +16,12 @@ from synapse.api.auth import Auth from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, ResourceLimitError, SynapseError +from synapse.api.errors import ( + CodeMessageException, + Codes, + ResourceLimitError, + SynapseError, +) from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, RoomID, UserID, create_requester @@ -120,14 +125,24 @@ def make_homeserver(self, reactor, clock): hs_config = self.default_config() # some of the tests rely on us having a user consent version - hs_config["user_consent"] = { - "version": "test_consent_version", - "template_dir": ".", - } + hs_config.setdefault("user_consent", {}).update( + { + "version": "test_consent_version", + "template_dir": ".", + } + ) hs_config["max_mau_value"] = 50 hs_config["limit_usage_by_mau"] = True - hs = self.setup_test_homeserver(config=hs_config) + # Don't attempt to reach out over federation. 
+ self.mock_federation_client = Mock() + self.mock_federation_client.make_query.side_effect = CodeMessageException( + 500, "" + ) + + hs = self.setup_test_homeserver( + config=hs_config, federation_client=self.mock_federation_client + ) load_legacy_spam_checkers(hs) @@ -138,9 +153,6 @@ def make_homeserver(self, reactor, clock): return hs def prepare(self, reactor, clock, hs): - self.mock_distributor = Mock() - self.mock_distributor.declare("registered_user") - self.mock_captcha_client = Mock() self.handler = self.hs.get_registration_handler() self.store = self.hs.get_datastore() self.lots_of_users = 100 @@ -174,21 +186,21 @@ def test_if_user_exists(self): self.assertEquals(result_user_id, user_id) self.assertTrue(result_token is not None) + @override_config({"limit_usage_by_mau": False}) def test_mau_limits_when_disabled(self): - self.hs.config.server.limit_usage_by_mau = False # Ensure does not throw exception self.get_success(self.get_or_create_user(self.requester, "a", "display_name")) + @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_not_blocked(self): - self.hs.config.server.limit_usage_by_mau = True self.store.count_monthly_users = Mock( return_value=make_awaitable(self.hs.config.server.max_mau_value - 1) ) # Ensure does not throw exception self.get_success(self.get_or_create_user(self.requester, "c", "User")) + @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_blocked(self): - self.hs.config.server.limit_usage_by_mau = True self.store.get_monthly_active_count = Mock( return_value=make_awaitable(self.lots_of_users) ) @@ -205,8 +217,8 @@ def test_get_or_create_user_mau_blocked(self): ResourceLimitError, ) + @override_config({"limit_usage_by_mau": True}) def test_register_mau_blocked(self): - self.hs.config.server.limit_usage_by_mau = True self.store.get_monthly_active_count = Mock( return_value=make_awaitable(self.lots_of_users) ) @@ -221,10 +233,10 @@ def test_register_mau_blocked(self): self.handler.register_user(localpart="local_part"), ResourceLimitError ) + @override_config( + {"auto_join_rooms": ["#room:test"], "auto_join_rooms_for_guests": False} + ) def test_auto_join_rooms_for_guests(self): - room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] - self.hs.config.auto_join_rooms_for_guests = False user_id = self.get_success( self.handler.register_user(localpart="jeff", make_guest=True), ) @@ -243,34 +255,33 @@ def test_auto_create_auto_join_rooms(self): self.assertTrue(room_id["room_id"] in rooms) self.assertEqual(len(rooms), 1) + @override_config({"auto_join_rooms": []}) def test_auto_create_auto_join_rooms_with_no_rooms(self): - self.hs.config.auto_join_rooms = [] frank = UserID.from_string("@frank:test") user_id = self.get_success(self.handler.register_user(frank.localpart)) self.assertEqual(user_id, frank.to_string()) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) + @override_config({"auto_join_rooms": ["#room:another"]}) def test_auto_create_auto_join_where_room_is_another_domain(self): - self.hs.config.auto_join_rooms = ["#room:another"] frank = UserID.from_string("@frank:test") user_id = self.get_success(self.handler.register_user(frank.localpart)) self.assertEqual(user_id, frank.to_string()) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) + @override_config( + {"auto_join_rooms": ["#room:test"], "autocreate_auto_join_rooms": False} + ) def 
test_auto_create_auto_join_where_auto_create_is_false(self): - self.hs.config.autocreate_auto_join_rooms = False - room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] user_id = self.get_success(self.handler.register_user(localpart="jeff")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) + @override_config({"auto_join_rooms": ["#room:test"]}) def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self): room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] - self.store.is_real_user = Mock(return_value=make_awaitable(False)) user_id = self.get_success(self.handler.register_user(localpart="support")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -294,10 +305,8 @@ def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self): self.assertTrue(room_id["room_id"] in rooms) self.assertEqual(len(rooms), 1) + @override_config({"auto_join_rooms": ["#room:test"]}) def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user(self): - room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] - self.store.count_real_users = Mock(return_value=make_awaitable(2)) self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) @@ -510,6 +519,17 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self): self.assertEqual(rooms, set()) self.assertEqual(invited_rooms, []) + @override_config( + { + "user_consent": { + "block_events_error": "Error", + "require_at_registration": True, + }, + "form_secret": "53cr3t", + "public_baseurl": "http://test", + "auto_join_rooms": ["#room:test"], + }, + ) def test_auto_create_auto_join_where_no_consent(self): """Test to ensure that the first user is not auto-joined to a room if they have not given general consent. @@ -521,25 +541,20 @@ def test_auto_create_auto_join_where_no_consent(self): # * The server is configured to auto-join to a room # (and autocreate if necessary) - event_creation_handler = self.hs.get_event_creation_handler() - # (Messing with the internals of event_creation_handler is fragile - # but can't see a better way to do this. One option could be to subclass - # the test with custom config.) - event_creation_handler._block_events_without_consent_error = "Error" - event_creation_handler._consent_uri_builder = Mock() - room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] - # When:- - # * the user is registered and post consent actions are called + # * the user is registered user_id = self.get_success(self.handler.register_user(localpart="jeff")) - self.get_success(self.handler.post_consent_actions(user_id)) # Then:- # * Ensure that they have not been joined to the room rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) + # The user provides consent; ensure they are now in the rooms. + self.get_success(self.handler.post_consent_actions(user_id)) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) + self.assertEqual(len(rooms), 1) + def test_register_support_user(self): user_id = self.get_success( self.handler.register_user(localpart="user", user_type=UserTypes.SUPPORT) From 7e440520c9b370ce008c6a65c5dd87a360a6457c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 1 Oct 2021 07:02:32 -0400 Subject: [PATCH 039/111] Add type hints to filtering classes. 
(#10958) --- changelog.d/10958.misc | 1 + synapse/api/filtering.py | 117 +++++++++++++------- synapse/storage/databases/main/filtering.py | 8 +- 3 files changed, 81 insertions(+), 45 deletions(-) create mode 100644 changelog.d/10958.misc diff --git a/changelog.d/10958.misc b/changelog.d/10958.misc new file mode 100644 index 000000000000..409ecc35cbce --- /dev/null +++ b/changelog.d/10958.misc @@ -0,0 +1 @@ +Add type hints to filtering classes. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index ad1ff6a9df6c..20e91a115dd9 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -15,7 +15,17 @@ # See the License for the specific language governing permissions and # limitations under the License. import json -from typing import List +from typing import ( + TYPE_CHECKING, + Awaitable, + Container, + Iterable, + List, + Optional, + Set, + TypeVar, + Union, +) import jsonschema from jsonschema import FormatChecker @@ -23,7 +33,11 @@ from synapse.api.constants import EventContentFields from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState -from synapse.types import RoomID, UserID +from synapse.events import EventBase +from synapse.types import JsonDict, RoomID, UserID + +if TYPE_CHECKING: + from synapse.server import HomeServer FILTER_SCHEMA = { "additionalProperties": False, @@ -120,25 +134,29 @@ @FormatChecker.cls_checks("matrix_room_id") -def matrix_room_id_validator(room_id_str): +def matrix_room_id_validator(room_id_str: str) -> RoomID: return RoomID.from_string(room_id_str) @FormatChecker.cls_checks("matrix_user_id") -def matrix_user_id_validator(user_id_str): +def matrix_user_id_validator(user_id_str: str) -> UserID: return UserID.from_string(user_id_str) class Filtering: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__() self.store = hs.get_datastore() - async def get_user_filter(self, user_localpart, filter_id): + async def get_user_filter( + self, user_localpart: str, filter_id: Union[int, str] + ) -> "FilterCollection": result = await self.store.get_user_filter(user_localpart, filter_id) return FilterCollection(result) - def add_user_filter(self, user_localpart, user_filter): + def add_user_filter( + self, user_localpart: str, user_filter: JsonDict + ) -> Awaitable[int]: self.check_valid_filter(user_filter) return self.store.add_user_filter(user_localpart, user_filter) @@ -146,13 +164,13 @@ def add_user_filter(self, user_localpart, user_filter): # replace_user_filter at some point? There's no REST API specified for # them however - def check_valid_filter(self, user_filter_json): + def check_valid_filter(self, user_filter_json: JsonDict) -> None: """Check if the provided filter is valid. This inspects all definitions contained within the filter. Args: - user_filter_json(dict): The filter + user_filter_json: The filter Raises: SynapseError: If the filter is not valid. """ @@ -167,8 +185,12 @@ def check_valid_filter(self, user_filter_json): raise SynapseError(400, str(e)) +# Filters work across events, presence EDUs, and account data. 
+FilterEvent = TypeVar("FilterEvent", EventBase, UserPresenceState, JsonDict) + + class FilterCollection: - def __init__(self, filter_json): + def __init__(self, filter_json: JsonDict): self._filter_json = filter_json room_filter_json = self._filter_json.get("room", {}) @@ -188,25 +210,25 @@ def __init__(self, filter_json): self.event_fields = filter_json.get("event_fields", []) self.event_format = filter_json.get("event_format", "client") - def __repr__(self): + def __repr__(self) -> str: return "<FilterCollection %s>" % (json.dumps(self._filter_json),) - def get_filter_json(self): + def get_filter_json(self) -> JsonDict: return self._filter_json - def timeline_limit(self): + def timeline_limit(self) -> int: return self._room_timeline_filter.limit() - def presence_limit(self): + def presence_limit(self) -> int: return self._presence_filter.limit() - def ephemeral_limit(self): + def ephemeral_limit(self) -> int: return self._room_ephemeral_filter.limit() - def lazy_load_members(self): + def lazy_load_members(self) -> bool: return self._room_state_filter.lazy_load_members() - def include_redundant_members(self): + def include_redundant_members(self) -> bool: return self._room_state_filter.include_redundant_members() def filter_presence(self, events): @@ -218,29 +240,31 @@ def filter_account_data(self, events): def filter_room_state(self, events): return self._room_state_filter.filter(self._room_filter.filter(events)) - def filter_room_timeline(self, events): + def filter_room_timeline(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: return self._room_timeline_filter.filter(self._room_filter.filter(events)) - def filter_room_ephemeral(self, events): + def filter_room_ephemeral(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: return self._room_ephemeral_filter.filter(self._room_filter.filter(events)) - def filter_room_account_data(self, events): + def filter_room_account_data( + self, events: Iterable[FilterEvent] + ) -> List[FilterEvent]: return self._room_account_data.filter(self._room_filter.filter(events)) - def blocks_all_presence(self): + def blocks_all_presence(self) -> bool: return ( self._presence_filter.filters_all_types() or self._presence_filter.filters_all_senders() ) - def blocks_all_room_ephemeral(self): + def blocks_all_room_ephemeral(self) -> bool: return ( self._room_ephemeral_filter.filters_all_types() or self._room_ephemeral_filter.filters_all_senders() or self._room_ephemeral_filter.filters_all_rooms() ) - def blocks_all_room_timeline(self): + def blocks_all_room_timeline(self) -> bool: return ( self._room_timeline_filter.filters_all_types() or self._room_timeline_filter.filters_all_senders() @@ -249,7 +273,7 @@ def blocks_all_room_timeline(self): class Filter: - def __init__(self, filter_json): + def __init__(self, filter_json: JsonDict): self.filter_json = filter_json self.types = self.filter_json.get("types", None) @@ -266,20 +290,20 @@ def __init__(self, filter_json): self.labels = self.filter_json.get("org.matrix.labels", None) self.not_labels = self.filter_json.get("org.matrix.not_labels", []) - def filters_all_types(self): + def filters_all_types(self) -> bool: return "*" in self.not_types - def filters_all_senders(self): + def filters_all_senders(self) -> bool: return "*" in self.not_senders - def filters_all_rooms(self): + def filters_all_rooms(self) -> bool: return "*" in self.not_rooms - def check(self, event): + def check(self, event: FilterEvent) -> bool: """Checks whether the filter matches the given event.
Returns: - bool: True if the event matches + True if the event matches """ # We usually get the full "events" as dictionaries coming through, # except for presence which actually gets passed around as its own @@ -305,18 +329,25 @@ def check(self, event): room_id = event.get("room_id", None) ev_type = event.get("type", None) - content = event.get("content", {}) + content = event.get("content") or {} # check if there is a string url field in the content for filtering purposes contains_url = isinstance(content.get("url"), str) labels = content.get(EventContentFields.LABELS, []) return self.check_fields(room_id, sender, ev_type, labels, contains_url) - def check_fields(self, room_id, sender, event_type, labels, contains_url): + def check_fields( + self, + room_id: Optional[str], + sender: Optional[str], + event_type: Optional[str], + labels: Container[str], + contains_url: bool, + ) -> bool: """Checks whether the filter matches the given event fields. Returns: - bool: True if the event fields match + True if the event fields match """ literal_keys = { "rooms": lambda v: room_id == v, @@ -343,14 +374,14 @@ def check_fields(self, room_id, sender, event_type, labels, contains_url): return True - def filter_rooms(self, room_ids): + def filter_rooms(self, room_ids: Iterable[str]) -> Set[str]: """Apply the 'rooms' filter to a given list of rooms. Args: - room_ids (list): A list of room_ids. + room_ids: A list of room_ids. Returns: - list: A list of room_ids that match the filter + A list of room_ids that match the filter """ room_ids = set(room_ids) @@ -363,23 +394,23 @@ def filter_rooms(self, room_ids): return room_ids - def filter(self, events): + def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: return list(filter(self.check, events)) - def limit(self): + def limit(self) -> int: return self.filter_json.get("limit", 10) - def lazy_load_members(self): + def lazy_load_members(self) -> bool: return self.filter_json.get("lazy_load_members", False) - def include_redundant_members(self): + def include_redundant_members(self) -> bool: return self.filter_json.get("include_redundant_members", False) - def with_room_ids(self, room_ids): + def with_room_ids(self, room_ids: Iterable[str]) -> "Filter": """Returns a new filter with the given room IDs appended. Args: - room_ids (iterable[unicode]): The room_ids to add + room_ids: The room_ids to add Returns: filter: A new filter including the given rooms and the old @@ -390,8 +421,8 @@ def with_room_ids(self, room_ids): return newFilter -def _matches_wildcard(actual_value, filter_value): - if filter_value.endswith("*"): +def _matches_wildcard(actual_value: Optional[str], filter_value: str) -> bool: + if filter_value.endswith("*") and isinstance(actual_value, str): type_prefix = filter_value[:-1] return actual_value.startswith(type_prefix) else: diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index bb244a03c0a8..434986fa64aa 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Union + from canonicaljson import encode_canonical_json from synapse.api.errors import Codes, SynapseError @@ -22,7 +24,9 @@ class FilteringStore(SQLBaseStore): @cached(num_args=2) - async def get_user_filter(self, user_localpart, filter_id): + async def get_user_filter( + self, user_localpart: str, filter_id: Union[int, str] + ) -> JsonDict: # filter_id is BIGINT UNSIGNED, so if it isn't a number, fail # with a coherent error message rather than 500 M_UNKNOWN. try: @@ -40,7 +44,7 @@ async def get_user_filter(self, user_localpart, filter_id): return db_to_json(def_json) - async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> str: + async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int: def_json = encode_canonical_json(user_filter) # Need an atomic transaction to SELECT the maximal ID so far then From e46ac85d674d90fa01aa49aee9587093ab6d8677 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 1 Oct 2021 12:22:47 +0100 Subject: [PATCH 040/111] type-hint `HomeserverTestcase.setup_test_homeserver` (#10961) * type-hint `HomeserverTestcase.setup_test_homeserver` For better IDE completion. A small drive-by. --- changelog.d/10961.misc | 1 + tests/replication/_base.py | 19 +++++++++++++++---- tests/rest/client/test_login.py | 6 +++--- tests/unittest.py | 4 ++-- 4 files changed, 21 insertions(+), 9 deletions(-) create mode 100644 changelog.d/10961.misc diff --git a/changelog.d/10961.misc b/changelog.d/10961.misc new file mode 100644 index 000000000000..0e35813488dc --- /dev/null +++ b/changelog.d/10961.misc @@ -0,0 +1 @@ +Add type-hint to `HomeserverTestcase.setup_test_homeserver`. \ No newline at end of file diff --git a/tests/replication/_base.py b/tests/replication/_base.py index cdd6e3d3c1df..eac4664b4107 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -70,8 +70,16 @@ def prepare(self, reactor, clock, hs): # databases objects are the same. self.worker_hs.get_datastore().db_pool = hs.get_datastore().db_pool + # Normally we'd pass in the handler to `setup_test_homeserver`, which would + # eventually hit "Install @cache_in_self attributes" in tests/utils.py. + # Unfortunately our handler wants a reference to the homeserver. That leaves + # us with a chicken-and-egg problem. + # We can workaround this: create the homeserver first, create the handler + # and bodge it in after the fact. The bodging requires us to know the + # dirty details of how `cache_in_self` works. We politely ask mypy to + # ignore our dirty dealings. self.test_handler = self._build_replication_data_handler() - self.worker_hs._replication_data_handler = self.test_handler + self.worker_hs._replication_data_handler = self.test_handler # type: ignore[attr-defined] repl_handler = ReplicationCommandHandler(self.worker_hs) self.client = ClientReplicationStreamProtocol( @@ -315,12 +323,15 @@ def make_worker_hs( ) ) + # Copy the port into a new, non-Optional variable so mypy knows we're + # not going to reset `instance_loc` to `None` under its feet. 
See + https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions + port = instance_loc.port + self.reactor.add_tcp_client_callback( self.reactor.lookups[instance_loc.host], instance_loc.port, - lambda: self._handle_http_replication_attempt( - worker_hs, instance_loc.port - ), + lambda: self._handle_http_replication_attempt(worker_hs, port), ) store = worker_hs.get_datastore() diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 371615a015f3..7fd92c94e09d 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -94,9 +94,9 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver() - self.hs.config.enable_registration = True - self.hs.config.registrations_require_3pid = [] - self.hs.config.auto_join_rooms = [] + self.hs.config.registration.enable_registration = True + self.hs.config.registration.registrations_require_3pid = [] + self.hs.config.registration.auto_join_rooms = [] self.hs.config.captcha.enable_registration_captcha = False return self.hs diff --git a/tests/unittest.py b/tests/unittest.py index 5f93ebf1479a..0807467e3943 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -20,7 +20,7 @@ import logging import secrets import time -from typing import Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union +from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union from unittest.mock import Mock, patch from canonicaljson import json @@ -449,7 +449,7 @@ def make_request( client_ip, ) - def setup_test_homeserver(self, *args, **kwargs): + def setup_test_homeserver(self, *args: Any, **kwargs: Any) -> HomeServer: """ Set up the test homeserver, meant to be called by the overridable make_homeserver. It automatically passes through the test class's From 32072dcdac0072049832cda6204cd75be2d4e38f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 30 Sep 2021 11:13:59 -0400 Subject: [PATCH 041/111] Strip "join_authorised_via_users_server" from join events which do not need it. (#10933) This fixes an "Event not signed by authorising server" error when transitioning a room member from join -> join, e.g. when updating a display name or avatar URL for restricted rooms. --- changelog.d/10933.bugfix | 1 + synapse/api/constants.py | 3 +++ synapse/event_auth.py | 12 +++++++----- synapse/events/utils.py | 2 +- synapse/federation/federation_base.py | 6 +++--- synapse/federation/federation_client.py | 6 +++--- synapse/federation/federation_server.py | 6 +++--- synapse/handlers/federation.py | 9 +++++++-- synapse/handlers/room_member.py | 10 +++++++++- tests/events/test_utils.py | 7 ++++--- tests/test_event_auth.py | 9 +++++---- 11 files changed, 46 insertions(+), 25 deletions(-) create mode 100644 changelog.d/10933.bugfix diff --git a/changelog.d/10933.bugfix b/changelog.d/10933.bugfix new file mode 100644 index 000000000000..e0694fea22f5 --- /dev/null +++ b/changelog.d/10933.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 39fd9954d507..a31f037748a3 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -217,6 +217,9 @@ class EventContentFields: # For "marker" events MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" + # The authorising user for joining a restricted room.
+ AUTHORISING_USER = "join_authorised_via_users_server" + class RoomTypes: """Understood values of the room_type field of m.room.create events.""" diff --git a/synapse/event_auth.py b/synapse/event_auth.py index fc50a0e71a7d..650402836ce6 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -115,11 +115,11 @@ def check( is_invite_via_allow_rule = ( event.type == EventTypes.Member and event.membership == Membership.JOIN - and "join_authorised_via_users_server" in event.content + and EventContentFields.AUTHORISING_USER in event.content ) if is_invite_via_allow_rule: authoriser_domain = get_domain_from_id( - event.content["join_authorised_via_users_server"] + event.content[EventContentFields.AUTHORISING_USER] ) if not event.signatures.get(authoriser_domain): raise AuthError(403, "Event not signed by authorising server") @@ -381,7 +381,9 @@ def _is_membership_change_allowed( # Note that if the caller is in the room or invited, then they do # not need to meet the allow rules. if not caller_in_room and not caller_invited: - authorising_user = event.content.get("join_authorised_via_users_server") + authorising_user = event.content.get( + EventContentFields.AUTHORISING_USER + ) if authorising_user is None: raise AuthError(403, "Join event is missing authorising user.") @@ -836,10 +838,10 @@ def auth_types_for_event( auth_types.add(key) if room_version.msc3083_join_rules and membership == Membership.JOIN: - if "join_authorised_via_users_server" in event.content: + if EventContentFields.AUTHORISING_USER in event.content: key = ( EventTypes.Member, - event.content["join_authorised_via_users_server"], + event.content[EventContentFields.AUTHORISING_USER], ) auth_types.add(key) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index f86113a448c3..38fccd1efcf5 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -105,7 +105,7 @@ def add_fields(*fields): if event_type == EventTypes.Member: add_fields("membership") if room_version.msc3375_redaction_rules: - add_fields("join_authorised_via_users_server") + add_fields(EventContentFields.AUTHORISING_USER) elif event_type == EventTypes.Create: # MSC2176 rules state that create events cannot be redacted. 
if room_version.msc2176_redaction_rules: diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 024e440ff401..0cd424e12aa1 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -15,7 +15,7 @@ import logging from collections import namedtuple -from synapse.api.constants import MAX_DEPTH, EventTypes, Membership +from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import EventFormatVersions, RoomVersion from synapse.crypto.event_signing import check_event_content_hash @@ -184,10 +184,10 @@ async def _check_sigs_on_pdu( room_version.msc3083_join_rules and pdu.type == EventTypes.Member and pdu.membership == Membership.JOIN - and "join_authorised_via_users_server" in pdu.content + and EventContentFields.AUTHORISING_USER in pdu.content ): authorising_server = get_domain_from_id( - pdu.content["join_authorised_via_users_server"] + pdu.content[EventContentFields.AUTHORISING_USER] ) try: await keyring.verify_event_for_server( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 584836c04ad1..2ab4dec88fe6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -37,7 +37,7 @@ import attr from prometheus_client import Counter -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import ( CodeMessageException, Codes, @@ -875,9 +875,9 @@ async def _execute(pdu: EventBase) -> None: # If the join is being authorised via allow rules, we need to send # the /send_join back to the same server that was originally used # with /make_join. - if "join_authorised_via_users_server" in pdu.content: + if EventContentFields.AUTHORISING_USER in pdu.content: destinations = [ - get_domain_from_id(pdu.content["join_authorised_via_users_server"]) + get_domain_from_id(pdu.content[EventContentFields.AUTHORISING_USER]) ] return await self._try_destination_list( diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 638959cbecdb..5f4383eebcd3 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -34,7 +34,7 @@ from twisted.internet.abstract import isIPAddress from twisted.python import failure -from synapse.api.constants import EduTypes, EventTypes, Membership +from synapse.api.constants import EduTypes, EventContentFields, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -765,11 +765,11 @@ async def _on_send_membership_event( if ( room_version.msc3083_join_rules and event.membership == Membership.JOIN - and "join_authorised_via_users_server" in event.content + and EventContentFields.AUTHORISING_USER in event.content ): # We can only authorise our own users. 
authorising_server = get_domain_from_id( - event.content["join_authorised_via_users_server"] + event.content[EventContentFields.AUTHORISING_USER] ) if authorising_server != self.server_name: raise SynapseError( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b17ef2a9a104..adbd150e46e5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -27,7 +27,12 @@ from twisted.internet import defer from synapse import event_auth -from synapse.api.constants import EventTypes, Membership, RejectedReason +from synapse.api.constants import ( + EventContentFields, + EventTypes, + Membership, + RejectedReason, +) from synapse.api.errors import ( AuthError, CodeMessageException, @@ -712,7 +717,7 @@ async def on_make_join_request( if include_auth_user_id: event_content[ - "join_authorised_via_users_server" + EventContentFields.AUTHORISING_USER ] = await self._event_auth_handler.get_user_which_could_invite( room_id, state_ids, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 1a56c82fbd9e..afa7e4727dc4 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -573,6 +573,14 @@ async def update_membership_locked( errcode=Codes.BAD_JSON, ) + # The event content should *not* include the authorising user as + # it won't be properly signed. Strip it out since it might come + # back from a client updating a display name / avatar. + # + # This only applies to restricted rooms, but there should be no reason + # for a client to include it. Unconditionally remove it. + content.pop(EventContentFields.AUTHORISING_USER, None) + effective_membership_state = action if action in ["kick", "unban"]: effective_membership_state = "leave" @@ -939,7 +947,7 @@ async def _should_perform_remote_join( # be included in the event content in order to efficiently validate # the event. content[ - "join_authorised_via_users_server" + EventContentFields.AUTHORISING_USER ] = await self.event_auth_handler.get_user_which_could_invite( room_id, current_state_ids, diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 5446fda5e7a3..1dea09e4800d 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.utils import ( @@ -352,7 +353,7 @@ def test_member(self): "event_id": "$test:domain", "content": { "membership": "join", - "join_authorised_via_users_server": "@user:domain", + EventContentFields.AUTHORISING_USER: "@user:domain", "other_key": "stripped", }, }, @@ -372,7 +373,7 @@ def test_member(self): "type": "m.room.member", "content": { "membership": "join", - "join_authorised_via_users_server": "@user:domain", + EventContentFields.AUTHORISING_USER: "@user:domain", "other_key": "stripped", }, }, @@ -380,7 +381,7 @@ def test_member(self): "type": "m.room.member", "content": { "membership": "join", - "join_authorised_via_users_server": "@user:domain", + EventContentFields.AUTHORISING_USER: "@user:domain", }, "signatures": {}, "unsigned": {}, diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 6ebd01bcbe78..1a4d078780ec 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -16,6 +16,7 @@ from typing import Optional from synapse import event_auth +from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict @@ -380,7 +381,7 @@ def test_join_rules_msc3083_restricted(self): authorised_join_event = _join_event( pleb, additional_content={ - "join_authorised_via_users_server": "@creator:example.com" + EventContentFields.AUTHORISING_USER: "@creator:example.com" }, ) event_auth.check( @@ -404,7 +405,7 @@ def test_join_rules_msc3083_restricted(self): _join_event( pleb, additional_content={ - "join_authorised_via_users_server": "@inviter:foo.test" + EventContentFields.AUTHORISING_USER: "@inviter:foo.test" }, ), pl_auth_events, @@ -431,7 +432,7 @@ def test_join_rules_msc3083_restricted(self): _join_event( pleb, additional_content={ - "join_authorised_via_users_server": "@other:example.com" + EventContentFields.AUTHORISING_USER: "@other:example.com" }, ), auth_events, @@ -448,7 +449,7 @@ def test_join_rules_msc3083_restricted(self): "join", sender=creator, additional_content={ - "join_authorised_via_users_server": "@inviter:foo.test" + EventContentFields.AUTHORISING_USER: "@inviter:foo.test" }, ), auth_events, From d1cbad388fc42d483e0e3b107620852f359d2cc8 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 1 Oct 2021 17:22:13 +0100 Subject: [PATCH 042/111] Fix error in `get_user_ip_and_agents` when fetching from the database (#10968) --- changelog.d/10968.bugfix | 1 + synapse/storage/databases/main/client_ips.py | 4 +-- tests/storage/test_client_ips.py | 34 ++++++++++++++++++++ 3 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10968.bugfix diff --git a/changelog.d/10968.bugfix b/changelog.d/10968.bugfix new file mode 100644 index 000000000000..76624ed73c36 --- /dev/null +++ b/changelog.d/10968.bugfix @@ -0,0 +1 @@ +Fix `/admin/whois/{user_id}` endpoint, which was broken in v1.44.0rc1. 
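The fix in the first hunk below is easiest to see in isolation: the rows fetched here are plain tuples rather than dict-like records, so subscripting them by column name raises a `TypeError`. A minimal standalone illustration, with made-up row data:

```python
# Illustrative only: a fake row shaped like the query's column order,
# (access_token, ip, user_agent, last_seen).
rows = [("syt_abc", "10.0.0.1", "Mozilla/5.0", 12345678000)]

# Broken: tuple indices must be integers, so rows[0]["access_token"]
# raises TypeError.
# Fixed, as in this patch: unpack each row positionally instead.
results = {
    (access_token, ip): (user_agent, last_seen)
    for access_token, ip, user_agent, last_seen in rows
}
assert results[("syt_abc", "10.0.0.1")] == ("Mozilla/5.0", 12345678000)
```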
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 7e33ae578c7b..cc192f5c8786 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -591,8 +591,8 @@ def get_recent(txn): ) results.update( - ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"])) - for row in rows + ((access_token, ip), (user_agent, last_seen)) + for access_token, ip, user_agent, last_seen in rows ) return [ { diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 1c2df54ecc53..3cc8038f1e65 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -15,9 +15,12 @@ from unittest.mock import Mock +from parameterized import parameterized + import synapse.rest.admin from synapse.http.site import XForwardedForRequest from synapse.rest.client import login +from synapse.types import UserID from tests import unittest from tests.server import make_request @@ -143,6 +146,37 @@ def test_insert_new_client_ip_none_device_id(self): ], ) + @parameterized.expand([(False,), (True,)]) + def test_get_user_ip_and_agents(self, after_persisting: bool): + """Test `get_user_ip_and_agents` for persisted and unpersisted data""" + self.reactor.advance(12345678) + + user_id = "@user:id" + user = UserID.from_string(user_id) + + # Insert a user IP + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "MY_DEVICE" + ) + ) + + if after_persisting: + # Trigger the storage loop + self.reactor.advance(10) + + self.assertEqual( + self.get_success(self.store.get_user_ip_and_agents(user)), + [ + { + "access_token": "access_token", + "ip": "ip", + "user_agent": "user_agent", + "last_seen": 12345678000, + }, + ], + ) + @override_config({"limit_usage_by_mau": False, "max_mau_value": 50}) def test_disabled_monthly_active_user(self): user_id = "@user:server" From a0f48ee89d88fd7b6da8023dbba607a69073152e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 4 Oct 2021 07:18:54 -0400 Subject: [PATCH 043/111] Use direct references for configuration variables (part 7). (#10959) --- changelog.d/10959.misc | 1 + synapse/handlers/auth.py | 2 +- synapse/handlers/identity.py | 13 +++++++--- synapse/handlers/profile.py | 4 +-- synapse/handlers/register.py | 9 ++++--- synapse/handlers/room_member.py | 2 +- synapse/handlers/ui_auth/checkers.py | 14 +++++----- synapse/rest/admin/users.py | 4 +-- synapse/rest/client/account.py | 22 ++++++++-------- synapse/rest/client/auth.py | 6 +++-- synapse/rest/client/capabilities.py | 6 ++--- synapse/rest/client/login.py | 6 ++--- synapse/rest/client/register.py | 26 +++++++++---------- synapse/rest/well_known.py | 4 +-- .../storage/databases/main/registration.py | 2 +- synapse/util/threepids.py | 4 +-- tests/config/test_load.py | 6 ++--- tests/handlers/test_profile.py | 4 +-- tests/rest/admin/test_user.py | 4 +-- tests/rest/client/test_account.py | 4 +-- tests/rest/client/test_identity.py | 2 +- tests/rest/client/test_register.py | 4 +-- tests/unittest.py | 2 +- 23 files changed, 83 insertions(+), 68 deletions(-) create mode 100644 changelog.d/10959.misc diff --git a/changelog.d/10959.misc b/changelog.d/10959.misc new file mode 100644 index 000000000000..586a0b3a9670 --- /dev/null +++ b/changelog.d/10959.misc @@ -0,0 +1 @@ +Use direct references to config flags. 
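Every hunk in this patch makes the same mechanical change: read each setting from its owning sub-config object (`hs.config.registration.*`) rather than from an attribute hoisted onto the root config. A small self-contained sketch of the shape, using hypothetical stand-in classes rather than Synapse's real config machinery:

```python
# Hypothetical stand-ins for the synapse.config.* classes, for illustration only.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class RegistrationConfig:
    enable_registration: bool = False
    registration_shared_secret: Optional[str] = None


@dataclass
class RootConfig:
    registration: RegistrationConfig = field(default_factory=RegistrationConfig)


config = RootConfig()

# Call sites now name the owning section explicitly, so each flag's
# provenance is obvious and a type checker can verify the attribute exists:
if not config.registration.enable_registration:
    print("Registration is disabled")
```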
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a8c717efd5d0..2d0f3d566c01 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -198,7 +198,7 @@ def __init__(self, hs: "HomeServer"): if inst.is_enabled(): self.checkers[inst.AUTH_TYPE] = inst # type: ignore - self.bcrypt_rounds = hs.config.bcrypt_rounds + self.bcrypt_rounds = hs.config.registration.bcrypt_rounds # we can't use hs.get_module_api() here, because to do so will create an # import loop. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index a0640fcac0c6..c881475c25ac 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -573,9 +573,15 @@ async def validate_threepid_session( # Try to validate as email if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: + # Remote emails will only be used if a valid identity server is provided. + assert ( + self.hs.config.registration.account_threepid_delegate_email is not None + ) + # Ask our delegated email identity server validation_session = await self.threepid_from_creds( - self.hs.config.account_threepid_delegate_email, threepid_creds + self.hs.config.registration.account_threepid_delegate_email, + threepid_creds, ) elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: # Get a validated session matching these details @@ -587,10 +593,11 @@ async def validate_threepid_session( return validation_session # Try to validate as msisdn - if self.hs.config.account_threepid_delegate_msisdn: + if self.hs.config.registration.account_threepid_delegate_msisdn: # Ask our delegated msisdn identity server validation_session = await self.threepid_from_creds( - self.hs.config.account_threepid_delegate_msisdn, threepid_creds + self.hs.config.registration.account_threepid_delegate_msisdn, + threepid_creds, ) return validation_session diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 425c0d4973e6..2e19706c6941 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -178,7 +178,7 @@ async def set_displayname( if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") - if not by_admin and not self.hs.config.enable_set_displayname: + if not by_admin and not self.hs.config.registration.enable_set_displayname: profile = await self.store.get_profileinfo(target_user.localpart) if profile.display_name: raise SynapseError( @@ -268,7 +268,7 @@ async def set_avatar_url( if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") - if not by_admin and not self.hs.config.enable_set_avatar_url: + if not by_admin and not self.hs.config.registration.enable_set_avatar_url: profile = await self.store.get_profileinfo(target_user.localpart) if profile.avatar_url: raise SynapseError( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index cb4eb0720b5e..441af7a84868 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -116,8 +116,8 @@ def __init__(self, hs: "HomeServer"): self._register_device_client = self.register_device_inner self.pusher_pool = hs.get_pusherpool() - self.session_lifetime = hs.config.session_lifetime - self.access_token_lifetime = hs.config.access_token_lifetime + self.session_lifetime = hs.config.registration.session_lifetime + self.access_token_lifetime = hs.config.registration.access_token_lifetime init_counters_for_auth_provider("") @@ -343,7 +343,10 @@ async def 
register_user( # If the user does not need to consent at registration, auto-join any # configured rooms. if not self.hs.config.consent.user_consent_at_registration: - if not self.hs.config.auto_join_rooms_for_guests and make_guest: + if ( + not self.hs.config.registration.auto_join_rooms_for_guests + and make_guest + ): logger.info( "Skipping auto-join for %s because auto-join for guests is disabled", user_id, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 29b3e41cc925..c8fb24a20c2e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -89,7 +89,7 @@ def __init__(self, hs: "HomeServer"): self.spam_checker = hs.get_spam_checker() self.third_party_event_rules = hs.get_third_party_event_rules() self._server_notices_mxid = self.config.servernotices.server_notices_mxid - self._enable_lookup = hs.config.enable_3pid_lookup + self._enable_lookup = hs.config.registration.enable_3pid_lookup self.allow_per_room_profiles = self.config.server.allow_per_room_profiles self._join_rate_limiter_local = Ratelimiter( diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 8f5d465fa1ce..184730ebe8a4 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -153,21 +153,23 @@ async def _check_threepid(self, medium: str, authdict: dict) -> dict: # msisdns are currently always ThreepidBehaviour.REMOTE if medium == "msisdn": - if not self.hs.config.account_threepid_delegate_msisdn: + if not self.hs.config.registration.account_threepid_delegate_msisdn: raise SynapseError( 400, "Phone number verification is not enabled on this homeserver" ) threepid = await identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_msisdn, threepid_creds + self.hs.config.registration.account_threepid_delegate_msisdn, + threepid_creds, ) elif medium == "email": if ( self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE ): - assert self.hs.config.account_threepid_delegate_email + assert self.hs.config.registration.account_threepid_delegate_email threepid = await identity_handler.threepid_from_creds( - self.hs.config.account_threepid_delegate_email, threepid_creds + self.hs.config.registration.account_threepid_delegate_email, + threepid_creds, ) elif ( self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL @@ -240,7 +242,7 @@ def __init__(self, hs: "HomeServer"): _BaseThreepidAuthChecker.__init__(self, hs) def is_enabled(self) -> bool: - return bool(self.hs.config.account_threepid_delegate_msisdn) + return bool(self.hs.config.registration.account_threepid_delegate_msisdn) async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("msisdn", authdict) @@ -252,7 +254,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs - self._enabled = bool(hs.config.registration_requires_token) + self._enabled = bool(hs.config.registration.registration_requires_token) self.store = hs.get_datastore() def is_enabled(self) -> bool: diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 46bfec4623e5..f20aa6530145 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -442,7 +442,7 @@ def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: self._clear_old_nonces() - if not 
self.hs.config.registration_shared_secret: + if not self.hs.config.registration.registration_shared_secret: raise SynapseError(400, "Shared secret registration is not enabled") body = parse_json_object_from_request(request) @@ -498,7 +498,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: got_mac = body["mac"] want_mac_builder = hmac.new( - key=self.hs.config.registration_shared_secret.encode(), + key=self.hs.config.registration.registration_shared_secret.encode(), digestmod=hashlib.sha1, ) want_mac_builder.update(nonce.encode("utf8")) diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index fff133ef1048..6b272658fc3c 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -130,11 +130,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.account_threepid_delegate_email + assert self.hs.config.registration.account_threepid_delegate_email # Have the configured identity server handle the request ret = await self.identity_handler.requestEmailToken( - self.hs.config.account_threepid_delegate_email, + self.hs.config.registration.account_threepid_delegate_email, email, client_secret, send_attempt, @@ -414,11 +414,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.account_threepid_delegate_email + assert self.hs.config.registration.account_threepid_delegate_email # Have the configured identity server handle the request ret = await self.identity_handler.requestEmailToken( - self.hs.config.account_threepid_delegate_email, + self.hs.config.registration.account_threepid_delegate_email, email, client_secret, send_attempt, @@ -496,7 +496,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) - if not self.hs.config.account_threepid_delegate_msisdn: + if not self.hs.config.registration.account_threepid_delegate_msisdn: logger.warning( "No upstream msisdn account_threepid_delegate configured on the server to " "handle this request" @@ -507,7 +507,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) ret = await self.identity_handler.requestMsisdnToken( - self.hs.config.account_threepid_delegate_msisdn, + self.hs.config.registration.account_threepid_delegate_msisdn, country, phone_number, client_secret, @@ -604,7 +604,7 @@ def __init__(self, hs: "HomeServer"): self.identity_handler = hs.get_identity_handler() async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: - if not self.config.account_threepid_delegate_msisdn: + if not self.config.registration.account_threepid_delegate_msisdn: raise SynapseError( 400, "This homeserver is not validating phone numbers. 
Use an identity server " @@ -617,7 +617,7 @@ async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: # Proxy submit_token request to msisdn threepid delegate response = await self.identity_handler.proxy_msisdn_submit_token( - self.config.account_threepid_delegate_msisdn, + self.config.registration.account_threepid_delegate_msisdn, body["client_secret"], body["sid"], body["token"], @@ -644,7 +644,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: return 200, {"threepids": threepids} async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.enable_3pid_changes: + if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN ) @@ -693,7 +693,7 @@ def __init__(self, hs: "HomeServer"): @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.enable_3pid_changes: + if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN ) @@ -801,7 +801,7 @@ def __init__(self, hs: "HomeServer"): self.auth_handler = hs.get_auth_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.enable_3pid_changes: + if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN ) diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index 282861fae21f..c9ad35a3addc 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -49,8 +49,10 @@ def __init__(self, hs: "HomeServer"): self.registration_handler = hs.get_registration_handler() self.recaptcha_template = hs.config.captcha.recaptcha_template self.terms_template = hs.config.terms_template - self.registration_token_template = hs.config.registration_token_template - self.success_template = hs.config.fallback_success_template + self.registration_token_template = ( + hs.config.registration.registration_token_template + ) + self.success_template = hs.config.registration.fallback_success_template async def on_GET(self, request: SynapseRequest, stagetype: str) -> None: session = parse_string(request, "session") diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index d6b62564132a..2a3e24ae7e55 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -64,13 +64,13 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if self.config.experimental.msc3283_enabled: response["capabilities"]["org.matrix.msc3283.set_displayname"] = { - "enabled": self.config.enable_set_displayname + "enabled": self.config.registration.enable_set_displayname } response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = { - "enabled": self.config.enable_set_avatar_url + "enabled": self.config.registration.enable_set_avatar_url } response["capabilities"]["org.matrix.msc3283.3pid_changes"] = { - "enabled": self.config.enable_3pid_changes + "enabled": self.config.registration.enable_3pid_changes } return 200, response diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index fa5c173f4b8e..d49a647b0314 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -79,7 +79,7 @@ def __init__(self, hs: "HomeServer"): self.saml2_enabled = hs.config.saml2.saml2_enabled self.cas_enabled = 
hs.config.cas.cas_enabled self.oidc_enabled = hs.config.oidc.oidc_enabled - self._msc2918_enabled = hs.config.access_token_lifetime is not None + self._msc2918_enabled = hs.config.registration.access_token_lifetime is not None self.auth = hs.get_auth() @@ -447,7 +447,7 @@ class RefreshTokenServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth_handler = hs.get_auth_handler() self._clock = hs.get_clock() - self.access_token_lifetime = hs.config.access_token_lifetime + self.access_token_lifetime = hs.config.registration.access_token_lifetime async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: refresh_submission = parse_json_object_from_request(request) @@ -556,7 +556,7 @@ async def on_GET(self, request: SynapseRequest) -> None: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: LoginRestServlet(hs).register(http_server) - if hs.config.access_token_lifetime is not None: + if hs.config.registration.access_token_lifetime is not None: RefreshTokenServlet(hs).register(http_server) SsoRedirectServlet(hs).register(http_server) if hs.config.cas.cas_enabled: diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index a6eb6f641067..bf3cb3414677 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -140,11 +140,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: - assert self.hs.config.account_threepid_delegate_email + assert self.hs.config.registration.account_threepid_delegate_email # Have the configured identity server handle the request ret = await self.identity_handler.requestEmailToken( - self.hs.config.account_threepid_delegate_email, + self.hs.config.registration.account_threepid_delegate_email, email, client_secret, send_attempt, @@ -221,7 +221,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: 400, "Phone number is already in use", Codes.THREEPID_IN_USE ) - if not self.hs.config.account_threepid_delegate_msisdn: + if not self.hs.config.registration.account_threepid_delegate_msisdn: logger.warning( "No upstream msisdn account_threepid_delegate configured on the server to " "handle this request" @@ -231,7 +231,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) ret = await self.identity_handler.requestMsisdnToken( - self.hs.config.account_threepid_delegate_msisdn, + self.hs.config.registration.account_threepid_delegate_msisdn, country, phone_number, client_secret, @@ -341,7 +341,7 @@ def __init__(self, hs: "HomeServer"): ) async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: - if not self.hs.config.enable_registration: + if not self.hs.config.registration.enable_registration: raise SynapseError( 403, "Registration has been disabled", errcode=Codes.FORBIDDEN ) @@ -391,7 +391,7 @@ def __init__(self, hs: "HomeServer"): async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: await self.ratelimiter.ratelimit(None, (request.getClientIP(),)) - if not self.hs.config.enable_registration: + if not self.hs.config.registration.enable_registration: raise SynapseError( 403, "Registration has been disabled", errcode=Codes.FORBIDDEN ) @@ -419,8 +419,8 @@ def __init__(self, hs: "HomeServer"): self.ratelimiter = hs.get_registration_ratelimiter() self.password_policy_handler = hs.get_password_policy_handler() self.clock = hs.get_clock() - 
self._registration_enabled = self.hs.config.enable_registration - self._msc2918_enabled = hs.config.access_token_lifetime is not None + self._registration_enabled = self.hs.config.registration.enable_registration + self._msc2918_enabled = hs.config.registration.access_token_lifetime is not None self._registration_flows = _calculate_registration_flows( hs.config, self.auth_handler @@ -800,7 +800,7 @@ async def _create_registration_details( async def _do_guest_registration( self, params: JsonDict, address: Optional[str] = None ) -> Tuple[int, JsonDict]: - if not self.hs.config.allow_guest_access: + if not self.hs.config.registration.allow_guest_access: raise SynapseError(403, "Guest access is disabled") user_id = await self.registration_handler.register_user( make_guest=True, address=address @@ -849,13 +849,13 @@ def _calculate_registration_flows( """ # FIXME: need a better error than "no auth flow found" for scenarios # where we required 3PID for registration but the user didn't give one - require_email = "email" in config.registrations_require_3pid - require_msisdn = "msisdn" in config.registrations_require_3pid + require_email = "email" in config.registration.registrations_require_3pid + require_msisdn = "msisdn" in config.registration.registrations_require_3pid show_msisdn = True show_email = True - if config.disable_msisdn_registration: + if config.registration.disable_msisdn_registration: show_msisdn = False require_msisdn = False @@ -909,7 +909,7 @@ def _calculate_registration_flows( flow.insert(0, LoginType.RECAPTCHA) # Prepend registration token to all flows if we're requiring a token - if config.registration_requires_token: + if config.registration.registration_requires_token: for flow in flows: flow.insert(0, LoginType.REGISTRATION_TOKEN) diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index c80a3a99aa2a..7ac01faab4fb 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -39,9 +39,9 @@ def get_well_known(self) -> Optional[JsonDict]: result = {"m.homeserver": {"base_url": self._config.server.public_baseurl}} - if self._config.default_identity_server: + if self._config.registration.default_identity_server: result["m.identity_server"] = { - "base_url": self._config.default_identity_server + "base_url": self._config.registration.default_identity_server } return result diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7279b0924e89..de262fbf5aa2 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1710,7 +1710,7 @@ async def _bg_user_threepids_grandfather(self, progress, batch_size): We do this by grandfathering in existing user threepids assuming that they used one of the server configured trusted identity servers. 
""" - id_servers = set(self.config.trusted_third_party_id_servers) + id_servers = set(self.config.registration.trusted_third_party_id_servers) def _bg_user_threepids_grandfather_txn(txn): sql = """ diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py index baa9190a9af2..389adf00f619 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py @@ -44,8 +44,8 @@ def check_3pid_allowed(hs: "HomeServer", medium: str, address: str) -> bool: bool: whether the 3PID medium/address is allowed to be added to this HS """ - if hs.config.allowed_local_3pids: - for constraint in hs.config.allowed_local_3pids: + if hs.config.registration.allowed_local_3pids: + for constraint in hs.config.registration.allowed_local_3pids: logger.debug( "Checking 3PID %s (%s) against %s (%s)", address, diff --git a/tests/config/test_load.py b/tests/config/test_load.py index ef6c2beec7ae..8e49ca26d90d 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -84,16 +84,16 @@ def test_disable_registration(self): ) # Check that disable_registration clobbers enable_registration. config = HomeServerConfig.load_config("", ["-c", self.file]) - self.assertFalse(config.enable_registration) + self.assertFalse(config.registration.enable_registration) config = HomeServerConfig.load_or_generate_config("", ["-c", self.file]) - self.assertFalse(config.enable_registration) + self.assertFalse(config.registration.enable_registration) # Check that either config value is clobbered by the command line. config = HomeServerConfig.load_or_generate_config( "", ["-c", self.file, "--enable-registration"] ) - self.assertTrue(config.enable_registration) + self.assertTrue(config.registration.enable_registration) def test_stats_enabled(self): self.generate_config_and_remove_lines_containing("enable_metrics") diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 57cc3e264617..c153018fd81b 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -110,7 +110,7 @@ def test_set_my_name(self): ) def test_set_my_name_if_disabled(self): - self.hs.config.enable_set_displayname = False + self.hs.config.registration.enable_set_displayname = False # Setting displayname for the first time is allowed self.get_success( @@ -225,7 +225,7 @@ def test_set_my_avatar(self): ) def test_set_my_avatar_if_disabled(self): - self.hs.config.enable_set_avatar_url = False + self.hs.config.registration.enable_set_avatar_url = False # Setting displayname for the first time is allowed self.get_success( diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index a285d5a7fea0..6ed9e421732b 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -59,7 +59,7 @@ def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver() - self.hs.config.registration_shared_secret = "shared" + self.hs.config.registration.registration_shared_secret = "shared" self.hs.get_media_repository = Mock() self.hs.get_deactivate_account_handler = Mock() @@ -71,7 +71,7 @@ def test_disabled(self): If there is no shared secret, registration through this method will be prevented. 
""" - self.hs.config.registration_shared_secret = None + self.hs.config.registration.registration_shared_secret = None channel = self.make_request("POST", self.url, b"{}") diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 2f44547bfb4c..89d85b0a1701 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -664,7 +664,7 @@ def test_ratelimit_by_ip(self): def test_add_email_if_disabled(self): """Test adding email to profile when doing so is disallowed""" - self.hs.config.enable_3pid_changes = False + self.hs.config.registration.enable_3pid_changes = False client_secret = "foobar" session_id = self._request_token(self.email, client_secret) @@ -734,7 +734,7 @@ def test_delete_email(self): def test_delete_email_if_disabled(self): """Test deleting an email from profile when disallowed""" - self.hs.config.enable_3pid_changes = False + self.hs.config.registration.enable_3pid_changes = False # Add a threepid self.get_success( diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index ca2e8ff8ef01..becb4e8dccf5 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -37,7 +37,7 @@ def make_homeserver(self, reactor, clock): return self.hs def test_3pid_lookup_disabled(self): - self.hs.config.enable_3pid_lookup = False + self.hs.config.registration.enable_3pid_lookup = False self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index af135d57e196..66dcfc9f8897 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -147,7 +147,7 @@ def test_POST_disabled_registration(self): def test_POST_guest_registration(self): self.hs.config.key.macaroon_secret_key = "test" - self.hs.config.allow_guest_access = True + self.hs.config.registration.allow_guest_access = True channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") @@ -156,7 +156,7 @@ def test_POST_guest_registration(self): self.assertDictContainsSubset(det_data, channel.json_body) def test_POST_disabled_guest_registration(self): - self.hs.config.allow_guest_access = False + self.hs.config.registration.allow_guest_access = False channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") diff --git a/tests/unittest.py b/tests/unittest.py index 0807467e3943..1f803564f6c8 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -560,7 +560,7 @@ def register_user( Returns: The MXID of the new user. """ - self.hs.config.registration_shared_secret = "shared" + self.hs.config.registration.registration_shared_secret = "shared" # Create the user channel = self.make_request("GET", "/_synapse/admin/v1/register") From f7b034a24bd5e64f05934453fe7b072894e124db Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 4 Oct 2021 12:45:51 +0100 Subject: [PATCH 044/111] Consistently exclude from user_directory (#10960) * Introduce `should_include_local_users_in_dir` We exclude three kinds of local users from the user_directory tables. At present we don't consistently exclude all three in the same places. This commit introduces a new function to gather those exclusion conditions together. Because we have to handle local and remote users in different ways, I've made that function only consider the case of remote users. It's the caller's responsibility to make the local versus remote distinction clear and correct. A test fixup is required. 
The test now hits a path which makes db queries against the users table. The expected rows were missing, because we were using a dummy user that hadn't actually been registered. We also add new test cases to cover the exclusion logic. ---- By my reading this makes these changes: * When an app service user registers or changes their profile, they will _not_ be added to the user directory. (Previously only support and deactivated users were excluded). This is consistent with the logic that rebuilds the user directory. See also [the discussion here](https://github.com/matrix-org/synapse/pull/10914#discussion_r716859548). * When rebuilding the directory, exclude support and disabled users from room sharing tables. Previously only appservice users were excluded. * Exclude all three categories of local users when rebuilding the directory. Previously `_populate_user_directory_process_users` didn't do any exclusion. Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/10960.bugfix | 1 + synapse/handlers/user_directory.py | 27 +-- .../storage/databases/main/user_directory.py | 46 ++-- tests/handlers/test_user_directory.py | 200 ++++++++++++++++-- tests/rest/client/test_login.py | 17 +- tests/storage/test_user_directory.py | 146 ++++++++++++- tests/unittest.py | 29 +++ 7 files changed, 409 insertions(+), 57 deletions(-) create mode 100644 changelog.d/10960.bugfix diff --git a/changelog.d/10960.bugfix b/changelog.d/10960.bugfix new file mode 100644 index 000000000000..b4f1c228ea0e --- /dev/null +++ b/changelog.d/10960.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and disabled users. \ No newline at end of file diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index f4430ce3c9aa..18d8c8744e75 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -132,12 +132,7 @@ async def handle_local_profile_change( # FIXME(#3714): We should probably do this in the same worker as all # the other changes. - # Support users are for diagnostics and should not appear in the user directory. - is_support = await self.store.is_support_user(user_id) - # When change profile information of deactivated user it should not appear in the user directory. - is_deactivated = await self.store.get_user_deactivated_status(user_id) - - if not (is_support or is_deactivated): + if await self.store.should_include_local_user_in_dir(user_id): await self.store.update_profile_in_user_dir( user_id, profile.display_name, profile.avatar_url ) @@ -229,8 +224,10 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: else: logger.debug("Server is still in room: %r", room_id) - is_support = await self.store.is_support_user(state_key) - if not is_support: + include_in_dir = not self.is_mine_id( + state_key + ) or await self.store.should_include_local_user_in_dir(state_key) + if include_in_dir: if change is MatchChange.no_change: # Handle any profile changes await self._handle_profile_change( @@ -356,13 +353,7 @@ async def _handle_new_user( # First, if they're our user then we need to update for every user if self.is_mine_id(user_id): - - is_appservice = self.store.get_if_app_services_interested_in_user( - user_id - ) - - # We don't care about appservice users.
- if not is_appservice: + if await self.store.should_include_local_user_in_dir(user_id): for other_user_id in other_users_in_room: if user_id == other_user_id: continue @@ -374,10 +365,10 @@ async def _handle_new_user( if user_id == other_user_id: continue - is_appservice = self.store.get_if_app_services_interested_in_user( + include_other_user = self.is_mine_id( other_user_id - ) - if self.is_mine_id(other_user_id) and not is_appservice: + ) and await self.store.should_include_local_user_in_dir(other_user_id) + if include_other_user: to_insert.add((other_user_id, user_id)) if to_insert: diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index c26e3e066f9d..5f538947ecf8 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -40,12 +40,10 @@ logger = logging.getLogger(__name__) - TEMP_TABLE = "_temp_populate_user_directory" class UserDirectoryBackgroundUpdateStore(StateDeltasStore): - # How many records do we calculate before sending it to # add_users_who_share_private_rooms? SHARE_PRIVATE_WORKING_SET = 500 @@ -235,6 +233,13 @@ def _get_next_batch( ) users_with_profile = await self.get_users_in_room_with_profiles(room_id) + # Throw away users excluded from the directory. + users_with_profile = { + user_id: profile + for user_id, profile in users_with_profile.items() + if not self.hs.is_mine_id(user_id) + or await self.should_include_local_user_in_dir(user_id) + } # Update each user in the user directory. for user_id, profile in users_with_profile.items(): @@ -246,9 +251,6 @@ def _get_next_batch( if is_public: for user_id in users_with_profile: - if self.get_if_app_services_interested_in_user(user_id): - continue - to_insert.add(user_id) if to_insert: @@ -256,12 +258,12 @@ def _get_next_batch( to_insert.clear() else: for user_id in users_with_profile: + # We want the set of pairs (L, M) where L and M are + # in `users_with_profile` and L is local. + # Do so by looking for the local user L first. if not self.hs.is_mine_id(user_id): continue - if self.get_if_app_services_interested_in_user(user_id): - continue - for other_user_id in users_with_profile: if user_id == other_user_id: continue @@ -349,10 +351,11 @@ def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: ) for user_id in users_to_work_on: - profile = await self.get_profileinfo(get_localpart_from_id(user_id)) - await self.update_profile_in_user_dir( - user_id, profile.display_name, profile.avatar_url - ) + if await self.should_include_local_user_in_dir(user_id): + profile = await self.get_profileinfo(get_localpart_from_id(user_id)) + await self.update_profile_in_user_dir( + user_id, profile.display_name, profile.avatar_url + ) # We've finished processing a user. Delete it from the table. await self.db_pool.simple_delete_one( @@ -369,6 +372,24 @@ def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: return len(users_to_work_on) + async def should_include_local_user_in_dir(self, user: str) -> bool: + """Certain classes of local user are omitted from the user directory. + Is this user one of them? + """ + # App service users aren't usually contactable, so exclude them. + if self.get_if_app_services_interested_in_user(user): + # TODO we might want to make this configurable for each app service + return False + + # Support users are for diagnostics and should not appear in the user directory. 
+ if await self.is_support_user(user): + return False + + # Deactivated users aren't contactable, so should not appear in the user directory. + if await self.get_user_deactivated_status(user): + return False + return True + async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool: """Check if the room is either world_readable or publically joinable""" @@ -537,7 +558,6 @@ async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> No class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): - # How many records do we calculate before sending it to # add_users_who_share_private_rooms? SHARE_PRIVATE_WORKING_SET = 500 diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 2988befb21b4..b3c3af113b28 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Tuple from unittest.mock import Mock, patch from urllib.parse import quote @@ -20,7 +21,8 @@ import synapse.rest.admin from synapse.api.constants import UserTypes from synapse.api.room_versions import RoomVersion, RoomVersions -from synapse.rest.client import login, room, user_directory +from synapse.appservice import ApplicationService +from synapse.rest.client import login, register, room, user_directory from synapse.server import HomeServer from synapse.storage.roommember import ProfileInfo from synapse.types import create_requester @@ -28,6 +30,7 @@ from tests import unittest from tests.storage.test_user_directory import GetUserDirectoryTables +from tests.test_utils.event_injection import inject_member_event from tests.unittest import override_config @@ -47,13 +50,29 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): servlets = [ login.register_servlets, synapse.rest.admin.register_servlets, + register.register_servlets, room.register_servlets, ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["update_user_directory"] = True - return self.setup_test_homeserver(config=config) + + self.appservice = ApplicationService( + token="i_am_an_app_service", + hostname="test", + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + ) + + mock_load_appservices = Mock(return_value=[self.appservice]) + with patch( + "synapse.storage.databases.main.appservice.load_appservices", + mock_load_appservices, + ): + hs = self.setup_test_homeserver(config=config) + return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() @@ -62,6 +81,137 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.event_creation_handler = self.hs.get_event_creation_handler() self.user_dir_helper = GetUserDirectoryTables(self.store) + def test_normal_user_pair(self) -> None: + """Sanity check that the room-sharing tables are updated correctly.""" + alice = self.register_user("alice", "pass") + alice_token = self.login(alice, "pass") + bob = self.register_user("bob", "pass") + bob_token = self.login(bob, "pass") + + public = self.helper.create_room_as( + alice, + is_public=True, + extra_content={"visibility": "public"}, + tok=alice_token, + ) + private = self.helper.create_room_as(alice, is_public=False, tok=alice_token) + 
self.helper.invite(private, alice, bob, tok=alice_token) + self.helper.join(public, bob, tok=bob_token) + self.helper.join(private, bob, tok=bob_token) + + # Alice also makes a second public room but no-one else joins + public2 = self.helper.create_room_as( + alice, + is_public=True, + extra_content={"visibility": "public"}, + tok=alice_token, + ) + + users = self.get_success(self.user_dir_helper.get_users_in_user_directory()) + in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + in_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + + self.assertEqual(users, {alice, bob}) + self.assertEqual( + set(in_public), {(alice, public), (bob, public), (alice, public2)} + ) + self.assertEqual( + self.user_dir_helper._compress_shared(in_private), + {(alice, bob, private), (bob, alice, private)}, + ) + + # The next three tests (test_population_excludes_*) all setup + # - A normal user included in the user dir + # - A public and private room created by that user + # - A user excluded from the room dir, belonging to both rooms + + # They match similar logic in storage/test_user_directory. But that tests + # rebuilding the directory; this tests updating it incrementally. + + def test_excludes_support_user(self) -> None: + alice = self.register_user("alice", "pass") + alice_token = self.login(alice, "pass") + support = "@support1:test" + self.get_success( + self.store.register_user( + user_id=support, password_hash=None, user_type=UserTypes.SUPPORT + ) + ) + + public, private = self._create_rooms_and_inject_memberships( + alice, alice_token, support + ) + self._check_only_one_user_in_directory(alice, public) + + def test_excludes_deactivated_user(self) -> None: + admin = self.register_user("admin", "pass", admin=True) + admin_token = self.login(admin, "pass") + user = self.register_user("naughty", "pass") + + # Deactivate the user. + channel = self.make_request( + "PUT", + f"/_synapse/admin/v2/users/{user}", + access_token=admin_token, + content={"deactivated": True}, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["deactivated"], True) + + # Join the deactivated user to rooms owned by the admin. + # Is this something that could actually happen outside of a test? + public, private = self._create_rooms_and_inject_memberships( + admin, admin_token, user + ) + self._check_only_one_user_in_directory(admin, public) + + def test_excludes_appservices_user(self) -> None: + # Register an AS user. + user = self.register_user("user", "pass") + token = self.login(user, "pass") + as_user = self.register_appservice_user("as_user_potato", self.appservice.token) + + # Join the AS user to rooms owned by the normal user. + public, private = self._create_rooms_and_inject_memberships( + user, token, as_user + ) + self._check_only_one_user_in_directory(user, public) + + def _create_rooms_and_inject_memberships( + self, creator: str, token: str, joiner: str + ) -> Tuple[str, str]: + """Create a public and private room as a normal user. + Then get the `joiner` into those rooms. + """ + # TODO: Duplicates the same-named method in UserDirectoryInitialPopulationTest. 
+ public_room = self.helper.create_room_as( + creator, + is_public=True, + # See https://github.com/matrix-org/synapse/issues/10951 + extra_content={"visibility": "public"}, + tok=token, + ) + private_room = self.helper.create_room_as(creator, is_public=False, tok=token) + + # HACK: get the user into these rooms + self.get_success(inject_member_event(self.hs, public_room, joiner, "join")) + self.get_success(inject_member_event(self.hs, private_room, joiner, "join")) + + return public_room, private_room + + def _check_only_one_user_in_directory(self, user: str, public: str) -> None: + users = self.get_success(self.user_dir_helper.get_users_in_user_directory()) + in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + in_private = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + + self.assertEqual(users, {user}) + self.assertEqual(set(in_public), {(user, public)}) + self.assertEqual(in_private, []) + def test_handle_local_profile_change_with_support_user(self) -> None: support_user_id = "@support:test" self.get_success( @@ -125,6 +275,26 @@ def test_handle_local_profile_change_with_deactivated_user(self) -> None: profile = self.get_success(self.store.get_user_in_directory(r_user_id)) self.assertTrue(profile is None) + def test_handle_local_profile_change_with_appservice_user(self) -> None: + # create user + as_user_id = self.register_appservice_user( + "as_user_alice", self.appservice.token + ) + + # profile is not in directory + profile = self.get_success(self.store.get_user_in_directory(as_user_id)) + self.assertTrue(profile is None) + + # update profile + profile_info = ProfileInfo(avatar_url="avatar_url", display_name="4L1c3") + self.get_success( + self.handler.handle_local_profile_change(as_user_id, profile_info) + ) + + # profile is still not in directory + profile = self.get_success(self.store.get_user_in_directory(as_user_id)) + self.assertTrue(profile is None) + def test_handle_user_deactivated_support_user(self) -> None: s_user_id = "@support:test" self.get_success( @@ -483,8 +653,6 @@ def _add_user_to_room( class TestUserDirSearchDisabled(unittest.HomeserverTestCase): - user_id = "@test:test" - servlets = [ user_directory.register_servlets, room.register_servlets, @@ -504,16 +672,21 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: def test_disabling_room_list(self) -> None: self.config.userdirectory.user_directory_search_enabled = True - # First we create a room with another user so that user dir is non-empty - # for our user - self.helper.create_room_as(self.user_id) + # Create two users and put them in the same room. + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") u2 = self.register_user("user2", "pass") - room = self.helper.create_room_as(self.user_id) - self.helper.join(room, user=u2) + u2_token = self.login(u2, "pass") + + room = self.helper.create_room_as(u1, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) - # Assert user directory is not empty + # Each should see the other when searching the user directory. 
channel = self.make_request( - "POST", b"user_directory/search", b'{"search_term":"user2"}' + "POST", + b"user_directory/search", + b'{"search_term":"user2"}', + access_token=u1_token, ) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) > 0) @@ -521,7 +694,10 @@ def test_disabling_room_list(self) -> None: # Disable user directory and check search returns nothing self.config.userdirectory.user_directory_search_enabled = False channel = self.make_request( - "POST", b"user_directory/search", b'{"search_term":"user2"}' + "POST", + b"user_directory/search", + b'{"search_term":"user2"}', + access_token=u1_token, ) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 7fd92c94e09d..a63f04bd4167 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -1064,13 +1064,6 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): register.register_servlets, ] - def register_as_user(self, username): - self.make_request( - b"POST", - "/_matrix/client/r0/register?access_token=%s" % (self.service.token,), - {"username": username}, - ) - def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver() @@ -1107,7 +1100,7 @@ def make_homeserver(self, reactor, clock): def test_login_appservice_user(self): """Test that an appservice user can use /login""" - self.register_as_user(AS_USER) + self.register_appservice_user(AS_USER, self.service.token) params = { "type": login.LoginRestServlet.APPSERVICE_TYPE, @@ -1121,7 +1114,7 @@ def test_login_appservice_user(self): def test_login_appservice_user_bot(self): """Test that the appservice bot can use /login""" - self.register_as_user(AS_USER) + self.register_appservice_user(AS_USER, self.service.token) params = { "type": login.LoginRestServlet.APPSERVICE_TYPE, @@ -1135,7 +1128,7 @@ def test_login_appservice_user_bot(self): def test_login_appservice_wrong_user(self): """Test that non-as users cannot login with the as token""" - self.register_as_user(AS_USER) + self.register_appservice_user(AS_USER, self.service.token) params = { "type": login.LoginRestServlet.APPSERVICE_TYPE, @@ -1149,7 +1142,7 @@ def test_login_appservice_wrong_user(self): def test_login_appservice_wrong_as(self): """Test that as users cannot login with wrong as token""" - self.register_as_user(AS_USER) + self.register_appservice_user(AS_USER, self.service.token) params = { "type": login.LoginRestServlet.APPSERVICE_TYPE, @@ -1165,7 +1158,7 @@ def test_login_appservice_no_token(self): """Test that users must provide a token when using the appservice login method """ - self.register_as_user(AS_USER) + self.register_appservice_user(AS_USER, self.service.token) params = { "type": login.LoginRestServlet.APPSERVICE_TYPE, diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 74c8a8599e7d..6884ca9b7a3f 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Dict, List, Set, Tuple +from unittest.mock import Mock, patch from twisted.test.proto_helpers import MemoryReactor +from synapse.api.constants import UserTypes +from synapse.appservice import ApplicationService from synapse.rest import admin -from synapse.rest.client import login, room +from synapse.rest.client import login, register, room from synapse.server import HomeServer from synapse.storage import DataStore from synapse.util import Clock +from tests.test_utils.event_injection import inject_member_event from tests.unittest import HomeserverTestCase, override_config ALICE = "@alice:a" @@ -64,6 +68,14 @@ async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]: ["user_id", "other_user_id", "room_id"], ) + async def get_users_in_user_directory(self) -> Set[str]: + result = await self.store.db_pool.simple_select_list( + "user_directory", + None, + ["user_id"], + ) + return {row["user_id"] for row in result} + class UserDirectoryInitialPopulationTestcase(HomeserverTestCase): """Ensure that rebuilding the directory writes the correct data to the DB. @@ -74,10 +86,28 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase): servlets = [ login.register_servlets, - admin.register_servlets_for_client_rest_resource, + admin.register_servlets, room.register_servlets, + register.register_servlets, ] + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.appservice = ApplicationService( + token="i_am_an_app_service", + hostname="test", + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + ) + + mock_load_appservices = Mock(return_value=[self.appservice]) + with patch( + "synapse.storage.databases.main.appservice.load_appservices", + mock_load_appservices, + ): + hs = super().make_homeserver(reactor, clock) + return hs + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.user_dir_helper = GetUserDirectoryTables(self.store) @@ -204,6 +234,118 @@ def test_initial(self) -> None: {(u1, u3, private_room), (u3, u1, private_room)}, ) + # All three should have entries in the directory + users = self.get_success(self.user_dir_helper.get_users_in_user_directory()) + self.assertEqual(users, {u1, u2, u3}) + + # The next three tests (test_population_excludes_*) all set up + # - A normal user included in the user dir + # - A public and private room created by that user + # - A user excluded from the room dir, belonging to both rooms + + # They match similar logic in handlers/test_user_directory.py But that tests + # updating the directory; this tests rebuilding it from scratch. + + def _create_rooms_and_inject_memberships( + self, creator: str, token: str, joiner: str + ) -> Tuple[str, str]: + """Create a public and private room as a normal user. + Then get the `joiner` into those rooms. 
+ """ + public_room = self.helper.create_room_as( + creator, + is_public=True, + # See https://github.com/matrix-org/synapse/issues/10951 + extra_content={"visibility": "public"}, + tok=token, + ) + private_room = self.helper.create_room_as(creator, is_public=False, tok=token) + + # HACK: get the user into these rooms + self.get_success(inject_member_event(self.hs, public_room, joiner, "join")) + self.get_success(inject_member_event(self.hs, private_room, joiner, "join")) + + return public_room, private_room + + def _check_room_sharing_tables( + self, normal_user: str, public_room: str, private_room: str + ) -> None: + # After rebuilding the directory, we should only see the normal user. + users = self.get_success(self.user_dir_helper.get_users_in_user_directory()) + self.assertEqual(users, {normal_user}) + in_public_rooms = self.get_success( + self.user_dir_helper.get_users_in_public_rooms() + ) + self.assertEqual(set(in_public_rooms), {(normal_user, public_room)}) + in_private_rooms = self.get_success( + self.user_dir_helper.get_users_who_share_private_rooms() + ) + self.assertEqual(in_private_rooms, []) + + def test_population_excludes_support_user(self) -> None: + # Create a normal and support user. + user = self.register_user("user", "pass") + token = self.login(user, "pass") + support = "@support1:test" + self.get_success( + self.store.register_user( + user_id=support, password_hash=None, user_type=UserTypes.SUPPORT + ) + ) + + # Join the support user to rooms owned by the normal user. + public, private = self._create_rooms_and_inject_memberships( + user, token, support + ) + + # Rebuild the directory. + self._purge_and_rebuild_user_dir() + + # Check the support user is not in the directory. + self._check_room_sharing_tables(user, public, private) + + def test_population_excludes_deactivated_user(self) -> None: + user = self.register_user("naughty", "pass") + admin = self.register_user("admin", "pass", admin=True) + admin_token = self.login(admin, "pass") + + # Deactivate the user. + channel = self.make_request( + "PUT", + f"/_synapse/admin/v2/users/{user}", + access_token=admin_token, + content={"deactivated": True}, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["deactivated"], True) + + # Join the deactivated user to rooms owned by the admin. + # Is this something that could actually happen outside of a test? + public, private = self._create_rooms_and_inject_memberships( + admin, admin_token, user + ) + + # Rebuild the user dir. The deactivated user should be missing. + self._purge_and_rebuild_user_dir() + self._check_room_sharing_tables(admin, public, private) + + def test_population_excludes_appservice_user(self) -> None: + # Register an AS user. + user = self.register_user("user", "pass") + token = self.login(user, "pass") + as_user = self.register_appservice_user("as_user_potato", self.appservice.token) + + # Join the AS user to rooms owned by the normal user. + public, private = self._create_rooms_and_inject_memberships( + user, token, as_user + ) + + # Rebuild the directory. + self._purge_and_rebuild_user_dir() + + # Check the AS user is not in the directory. 
+ self._check_room_sharing_tables(user, public, private) + class UserDirectoryStoreTestCase(HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/unittest.py b/tests/unittest.py index 1f803564f6c8..ae393ee53eee 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -596,6 +596,35 @@ def register_user( user_id = channel.json_body["user_id"] return user_id + def register_appservice_user( + self, + username: str, + appservice_token: str, + ) -> str: + """Register an appservice user as an application service. + Requires the client-facing registration API be registered. + + Args: + username: the user to be registered by an application service. + Should be a full username, i.e. "@localpart:hostname" as opposed to just "localpart" + appservice_token: the access token for that application service. + + Raises: if the request to '/register' does not return 200 OK. + + Returns: the MXID of the new user. + """ + channel = self.make_request( + "POST", + "/_matrix/client/r0/register", + { + "username": username, + "type": "m.login.application_service", + }, + access_token=appservice_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + return channel.json_body["user_id"] + def login( self, username, From 2d2c6a41fe69d4dab82a773bbffc52df95b6b542 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 4 Oct 2021 14:57:40 +0100 Subject: [PATCH 045/111] 1.44.0rc3 --- CHANGES.md | 10 ++++++++++ changelog.d/10933.bugfix | 1 - changelog.d/10968.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 5 files changed, 17 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/10933.bugfix delete mode 100644 changelog.d/10968.bugfix diff --git a/CHANGES.md b/CHANGES.md index 59ff967633c1..6c2728d407ac 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,13 @@ +Synapse 1.44.0rc3 (2021-10-04) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error. ([\#10933](https://github.com/matrix-org/synapse/issues/10933)) +- Fix `/admin/whois/{user_id}` endpoint, which was broken in v1.44.0rc1. ([\#10968](https://github.com/matrix-org/synapse/issues/10968)) + + Synapse 1.44.0rc2 (2021-09-30) ============================== diff --git a/changelog.d/10933.bugfix b/changelog.d/10933.bugfix deleted file mode 100644 index e0694fea22f5..000000000000 --- a/changelog.d/10933.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.40.0 where changing a user's display name or avatar in a restricted room would cause an authentication error. diff --git a/changelog.d/10968.bugfix b/changelog.d/10968.bugfix deleted file mode 100644 index 76624ed73c36..000000000000 --- a/changelog.d/10968.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `/admin/whois/{user_id}` endpoint, which was broken in v1.44.0rc1. diff --git a/debian/changelog b/debian/changelog index b08a5927808d..a0f1bcbdf95c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.44.0~rc3) stable; urgency=medium + + * New synapse release 1.44.0~rc3. + + -- Synapse Packaging team Mon, 04 Oct 2021 14:57:22 +0100 + matrix-synapse-py3 (1.44.0~rc2) stable; urgency=medium * New synapse release 1.44.0~rc2.
diff --git a/synapse/__init__.py b/synapse/__init__.py index 8791c20e2626..a9a7b658b763 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.44.0rc2" +__version__ = "1.44.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 30f02404017231ed7e84667f3e1b85e2ed1ae348 Mon Sep 17 00:00:00 2001 From: AndrewFerr Date: Mon, 4 Oct 2021 10:43:03 -0400 Subject: [PATCH 046/111] Make is_public Optional[bool] for create_room_as test util (#10951) (#10963) Signed-off-by: Andrew Ferrazzutti --- changelog.d/10963.misc | 1 + tests/rest/client/utils.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/10963.misc diff --git a/changelog.d/10963.misc b/changelog.d/10963.misc new file mode 100644 index 000000000000..daf40155de56 --- /dev/null +++ b/changelog.d/10963.misc @@ -0,0 +1 @@ +Fix the test utility function `create_room_as` so that `is_public=True` will explicitly set the `visibility` parameter of room creation requests to `public`. Contributed by @AndrewFerr. diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 3075d3f2884a..71fa87ce9291 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -48,7 +48,7 @@ class RestHelper: def create_room_as( self, room_creator: Optional[str] = None, - is_public: bool = True, + is_public: Optional[bool] = None, room_version: Optional[str] = None, tok: Optional[str] = None, expect_code: int = 200, @@ -62,9 +62,10 @@ def create_room_as( Args: room_creator: The user ID to create the room with. - is_public: If True, the `visibility` parameter will be set to the - default (public). Otherwise, the `visibility` parameter will be set - to "private". + is_public: If True, the `visibility` parameter will be set to + "public". If False, it will be set to "private". If left + unspecified, the server will set it to an appropriate default + (which should be "private" as per the CS spec). room_version: The room version to create the room as. Defaults to Synapse's default room version. tok: The access token to use in the request. 
@@ -77,8 +78,8 @@ def create_room_as( self.auth_user_id = room_creator path = "/_matrix/client/r0/createRoom" content = extra_content or {} - if not is_public: - content["visibility"] = "private" + if is_public is not None: + content["visibility"] = "public" if is_public else "private" if room_version: content["room_version"] = room_version if tok: From eda8c88b84ee7506379a71ac2a7a88c08b759d43 Mon Sep 17 00:00:00 2001 From: Hillery Shay Date: Mon, 4 Oct 2021 08:34:42 -0700 Subject: [PATCH 047/111] Add functionality to remove deactivated users from the monthly_active_users table (#10947) * add test * add function to remove user from monthly active table in deactivate code * add function to remove user from monthly active table * add changelog entry * update changelog number * requested changes * update docstring on new function * fix lint error * Update synapse/storage/databases/main/monthly_active_users.py Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/10947.bugfix | 1 + synapse/handlers/deactivate_account.py | 4 ++ .../databases/main/monthly_active_users.py | 24 ++++++++++++ tests/test_mau.py | 37 +++++++++++++++++-- 4 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 changelog.d/10947.bugfix diff --git a/changelog.d/10947.bugfix b/changelog.d/10947.bugfix new file mode 100644 index 000000000000..40c70d3ece9f --- /dev/null +++ b/changelog.d/10947.bugfix @@ -0,0 +1 @@ +Fixes a long-standing bug wherein deactivated users still count towards the MAU limit. \ No newline at end of file diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 9ae5b7750eaf..12bdca744510 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -133,6 +133,10 @@ async def deactivate_account( # delete from user directory await self.user_directory_handler.handle_local_user_deactivated(user_id) + # If the user is present in the monthly active users table + # remove them + await self.store.remove_deactivated_user_from_mau_table(user_id) + # Mark the user as erased, if they asked for that if erase_data: user = UserID.from_string(user_id) diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index a14ac03d4b6e..ec4d47a560aa 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -354,3 +354,27 @@ async def populate_monthly_active_users(self, user_id): await self.upsert_monthly_active_user(user_id) elif now - last_seen_timestamp > LAST_SEEN_GRANULARITY: await self.upsert_monthly_active_user(user_id) + + async def remove_deactivated_user_from_mau_table(self, user_id: str) -> None: + """ + Removes a deactivated user from the monthly active user + table and resets affected caches.
+ + Args: + user_id(str): the user_id to remove + """ + + rows_deleted = await self.db_pool.simple_delete( + table="monthly_active_users", + keyvalues={"user_id": user_id}, + desc="simple_delete", + ) + + if rows_deleted != 0: + await self.invalidate_cache_and_stream( + "user_last_seen_monthly_active", (user_id,) + ) + await self.invalidate_cache_and_stream("get_monthly_active_count", ()) + await self.invalidate_cache_and_stream( + "get_monthly_active_count_by_service", () + ) diff --git a/tests/test_mau.py b/tests/test_mau.py index 80ab40e255ea..c683c8937ebc 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -13,11 +13,11 @@ # limitations under the License. """Tests REST events for /rooms paths.""" - +import synapse.rest.admin from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.appservice import ApplicationService -from synapse.rest.client import register, sync +from synapse.rest.client import login, profile, register, sync from tests import unittest from tests.unittest import override_config @@ -26,7 +26,13 @@ class TestMauLimit(unittest.HomeserverTestCase): - servlets = [register.register_servlets, sync.register_servlets] + servlets = [ + register.register_servlets, + sync.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + profile.register_servlets, + login.register_servlets, + ] def default_config(self): config = default_config("test") @@ -229,6 +235,31 @@ def test_tracked_but_not_limited(self): self.reactor.advance(100) self.assertEqual(2, self.successResultOf(count)) + def test_deactivated_users_dont_count_towards_mau(self): + user1 = self.register_user("madonna", "password") + self.register_user("prince", "password2") + self.register_user("frodo", "onering", True) + + token1 = self.login("madonna", "password") + token2 = self.login("prince", "password2") + admin_token = self.login("frodo", "onering") + + self.do_sync_for_user(token1) + self.do_sync_for_user(token2) + + # Check that mau count is what we expect + count = self.get_success(self.store.get_monthly_active_count()) + self.assertEqual(count, 2) + + # Deactivate user1 + url = "/_synapse/admin/v1/deactivate/%s" % user1 + channel = self.make_request("POST", url, access_token=admin_token) + self.assertIn("success", channel.json_body["id_server_unbind_result"]) + + # Check that deactivated user is no longer counted + count = self.get_success(self.store.get_monthly_active_count()) + self.assertEqual(count, 1) + def create_user(self, localpart, token=None, appservice=False): request_data = { "username": localpart, From 660c8c1415704f0b9c6fe17fe74d40dfefd78f0a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 5 Oct 2021 12:23:25 +0100 Subject: [PATCH 048/111] Log stack traces when a missing opentracing span is detected (#10983) Make it easier to track down where opentracing spans are going missing by including stack traces in the logs. --- changelog.d/10983.misc | 1 + synapse/logging/opentracing.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/10983.misc diff --git a/changelog.d/10983.misc b/changelog.d/10983.misc new file mode 100644 index 000000000000..235899d14f4a --- /dev/null +++ b/changelog.d/10983.misc @@ -0,0 +1 @@ +Log stack traces when a missing opentracing span is detected. 
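For reference, `stack_info=True` here is standard-library `logging` behaviour: the record is emitted with the current call stack appended after the message, under a "Stack (most recent call last):" header. A minimal, self-contained sketch of the effect (a standalone demo, not Synapse code):

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")


def touch_missing_span() -> None:
    # stack_info=True attaches the current call stack to the log record, so
    # the output shows *who* tried to use a span when none was active.
    logger.warning(
        "There was no active span when trying to %s", "set a tag", stack_info=True
    )


touch_missing_span()
```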
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 03d2dd94f6f1..5276c4bfcce8 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -339,6 +339,7 @@ def ensure_active_span_inner_2(*args, **kwargs): "There was no active span when trying to %s." " Did you forget to start one or did a context slip?", message, + stack_info=True, ) return ret From 7036a7a60af54dec2e1ad5e4c31a450817a68147 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 5 Oct 2021 13:35:19 +0200 Subject: [PATCH 049/111] Update links to MSCs in documentation (#10991) Based on matrix-doc switching from master -> main and MSCs being merged. --- changelog.d/10991.doc | 1 + docs/MSC1711_certificates_FAQ.md | 4 ++-- docs/usage/administration/admin_api/registration_tokens.md | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/10991.doc diff --git a/changelog.d/10991.doc b/changelog.d/10991.doc new file mode 100644 index 000000000000..2f9bb24ca726 --- /dev/null +++ b/changelog.d/10991.doc @@ -0,0 +1 @@ +Update links to MSCs in documentation. Contributed by @dklimpel. \ No newline at end of file diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 7d71c190abed..086899a9d836 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -3,7 +3,7 @@ ## Historical Note This document was originally written to guide server admins through the upgrade path towards Synapse 1.0. Specifically, -[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md) +[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md) required that all servers present valid TLS certificates on their federation API. Admins were encouraged to achieve compliance from version 0.99.0 (released in February 2019) ahead of version 1.0 (released June 2019) enforcing the @@ -282,7 +282,7 @@ coffin of the Perspectives project (which was already pretty dead). So, the Spec Core Team decided that a better approach would be to mandate valid TLS certificates for federation alongside the rest of the Web. More details can be found in -[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach). +[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach). This results in a breaking change, which is disruptive, but absolutely critical for the security model. However, the existence of Let's Encrypt as a trivial diff --git a/docs/usage/administration/admin_api/registration_tokens.md b/docs/usage/administration/admin_api/registration_tokens.md index 828c0277d626..c48d060dcc38 100644 --- a/docs/usage/administration/admin_api/registration_tokens.md +++ b/docs/usage/administration/admin_api/registration_tokens.md @@ -1,7 +1,8 @@ # Registration Tokens This API allows you to manage tokens which can be used to authenticate -registration requests, as proposed in [MSC3231](https://github.com/govynnus/matrix-doc/blob/token-registration/proposals/3231-token-authenticated-registration.md). +registration requests, as proposed in +[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md). 
To use it, you will need to enable the `registration_requires_token` config option, and authenticate by providing an `access_token` for a server admin: see [Admin API](../../usage/administration/admin_api). From 6f6e9563387124eb4b4f324e4e1720291015a458 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 5 Oct 2021 12:43:04 +0100 Subject: [PATCH 050/111] Run CI with Python 3.10 and Postgres 14 (#10992) --- .github/workflows/tests.yml | 14 +++++++------- changelog.d/10992.misc | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/10992.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fa9c5e036afe..96c39dd9a4bc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -76,11 +76,11 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.6", "3.7", "3.8", "3.9"] + python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] database: ["sqlite"] include: # Newest Python without optional deps - - python-version: "3.9" + - python-version: "3.10" toxenv: "py-noextras,combine" # Oldest Python with PostgreSQL @@ -88,10 +88,10 @@ jobs: database: "postgres" postgres-version: "9.6" - # Newest Python with PostgreSQL - - python-version: "3.9" + # Newest Python with newest PostgreSQL + - python-version: "3.10" database: "postgres" - postgres-version: "13" + postgres-version: "14" steps: - uses: actions/checkout@v2 @@ -256,8 +256,8 @@ jobs: - python-version: "3.6" postgres-version: "9.6" - - python-version: "3.9" - postgres-version: "13" + - python-version: "3.10" + postgres-version: "14" services: postgres: diff --git a/changelog.d/10992.misc b/changelog.d/10992.misc new file mode 100644 index 000000000000..60432a559c80 --- /dev/null +++ b/changelog.d/10992.misc @@ -0,0 +1 @@ +Update GHA config to run tests against Python 3.10 and PostgreSQL 14. From cb88ed912b3e984e0a409e4e5fd3c22817a4840d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 5 Oct 2021 12:50:07 +0100 Subject: [PATCH 051/111] `_check_event_auth`: move event validation earlier (#10988) There's little point in doing a fancy state reconciliation dance if the event itself is invalid. Likewise, there's no point checking it again in `_check_for_soft_fail`. --- changelog.d/10988.misc | 1 + synapse/handlers/federation_event.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelog.d/10988.misc diff --git a/changelog.d/10988.misc b/changelog.d/10988.misc new file mode 100644 index 000000000000..9a765435dbe4 --- /dev/null +++ b/changelog.d/10988.misc @@ -0,0 +1 @@ +Clean up some of the federation event authentication code for clarity. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index e587b5b3b351..593865433838 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1250,9 +1250,18 @@ async def _check_event_auth( # This method should only be used for non-outliers assert not event.internal_metadata.outlier + # first of all, check that the event itself is valid. room_version = await self._store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + try: + validate_event_for_room_version(room_version_obj, event) + except AuthError as e: + logger.warning("While validating received event %r: %s", event, e) + # TODO: use a different rejected reason here? 
+ context.rejected = RejectedReason.AUTH_ERROR + return context + # calculate what the auth events *should* be, to use as a basis for auth. prev_state_ids = await context.get_prev_state_ids() auth_events_ids = self._event_auth_handler.compute_auth_events( @@ -1286,7 +1295,6 @@ async def _check_event_auth( auth_events_for_auth = calculated_auth_event_map try: - validate_event_for_room_version(room_version_obj, event) check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) @@ -1399,9 +1407,6 @@ async def _check_for_soft_fail( } try: - # TODO: skip the call to validate_event_for_room_version? we should already - # have validated the event. - validate_event_for_room_version(room_version_obj, event) check_auth_rules_for_event(room_version_obj, event, current_auth_events) except AuthError as e: logger.warning( From d099535deb5be31891719c61c3757c5150829053 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 5 Oct 2021 12:50:38 +0100 Subject: [PATCH 052/111] `_update_auth_events_and_context_for_auth`: add some comments (#10987) Add some more comments about wtf is going on here. --- changelog.d/10987.misc | 1 + synapse/handlers/federation_event.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 changelog.d/10987.misc diff --git a/changelog.d/10987.misc b/changelog.d/10987.misc new file mode 100644 index 000000000000..9a765435dbe4 --- /dev/null +++ b/changelog.d/10987.misc @@ -0,0 +1 @@ +Clean up some of the federation event authentication code for clarity. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 593865433838..aa20d755508d 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1476,6 +1476,11 @@ async def _update_auth_events_and_context_for_auth( logger.debug("Events %s are in the store", have_events) missing_auth.difference_update(have_events) + # missing_auth is now the set of event_ids which: + # a. are listed in event.auth_events, *and* + # b. are *not* part of our calculated auth events based on room state, *and* + # c. are *not* yet in our database. + if missing_auth: # If we don't have all the auth events, we need to get them. logger.info("auth_events contains unknown events: %s", missing_auth) @@ -1497,10 +1502,31 @@ async def _update_auth_events_and_context_for_auth( } ) + # auth_events now contains + # 1. our *calculated* auth events based on the room state, plus: + # 2. any events which: + # a. are listed in `event.auth_events`, *and* + # b. are not part of our calculated auth events, *and* + # c. were not in our database before the call to /event_auth + # d. have since been added to our database (most likely by /event_auth). + different_auth = event_auth_events.difference( e.event_id for e in auth_events.values() ) + # different_auth is the set of events which *are* in `event.auth_events`, but + # which are *not* in `auth_events`. Comparing with (2.) above, this means + # exclusively the set of `event.auth_events` which we already had in our + # database before any call to /event_auth. 
+    #
+    # I'm reasonably sure that the fact that events returned by /event_auth are
+    # blindly added to auth_events (and hence excluded from different_auth) is a bug
+    # - though it's a very long-standing one (see
+    # https://github.com/matrix-org/synapse/commit/78015948a7febb18e000651f72f8f58830a55b93#diff-0bc92da3d703202f5b9be2d3f845e375f5b1a6bc6ba61705a8af9be1121f5e42R786
+    # from Jan 2015 which seems to add it, though it actually just moves it from
+    # elsewhere (before that, it gets lost in a mess of huge "various bug fixes"
+    # PRs).
+
     if not different_auth:
         return context, auth_events
 

From 787af4a1062fecc350fa14fe2abfc0e9d2f1555e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Oct 2021 13:01:41 +0100
Subject: [PATCH 053/111] Hoist `cache_joined_hosts_for_event` to caller
 (#10986)

`_check_event_auth` is only called in two places, and only one of those sets
`send_on_behalf_of`. Warming the cache isn't really part of auth anyway, so
moving it out makes a lot more sense.
---
 changelog.d/10986.misc               |  1 +
 synapse/handlers/federation_event.py | 18 ++++++++----------
 2 files changed, 9 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/10986.misc

diff --git a/changelog.d/10986.misc b/changelog.d/10986.misc
new file mode 100644
index 000000000000..9a765435dbe4
--- /dev/null
+++ b/changelog.d/10986.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index aa20d755508d..9269cb444d46 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -356,6 +356,11 @@ async def on_send_membership_event(
         )
 
         # all looks good, we can persist the event.
+
+        # First, precalculate the joined hosts so that the federation sender doesn't
+        # need to.
+        await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
+
         await self._run_push_actions_and_persist_event(event, context)
         return event, context
 
@@ -1299,17 +1304,10 @@ async def _check_event_auth(
         except AuthError as e:
             logger.warning("Failed auth resolution for %r because %s", event, e)
             context.rejected = RejectedReason.AUTH_ERROR
+            return context
 
-        if not context.rejected:
-            await self._check_for_soft_fail(event, state, backfilled, origin=origin)
-            await self._maybe_kick_guest_users(event)
-
-        # If we are going to send this event over federation we precaclculate
-        # the joined hosts.
-        if event.internal_metadata.get_send_on_behalf_of():
-            await self._event_creation_handler.cache_joined_hosts_for_event(
-                event, context
-            )
+        await self._check_for_soft_fail(event, state, backfilled, origin=origin)
+        await self._maybe_kick_guest_users(event)
 
         return context
 

From 3a5b0cbe7ade000245695ec97c13ab5cb3565dc2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Oct 2021 13:23:29 +0100
Subject: [PATCH 054/111] Ensure that we reject events which use rejected
 events for auth (#10956)

When we consider whether to accept events, we should not accept those which
depend on rejected events for their auth events.

This (together with earlier changes such as
https://github.com/matrix-org/synapse/pull/10771 and
https://github.com/matrix-org/synapse/pull/10896) forms a partial fix to
https://github.com/matrix-org/synapse/issues/9595. There still remain code
paths where we do not check the `auth_events` at all.
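The rule this patch enforces (an event must not be accepted if any of its auth events was itself rejected) can be illustrated in isolation. The following is a minimal, hedged sketch rather than Synapse's implementation; `StubEvent`, `check_auth_events_not_rejected`, and the simplified `AuthError` are hypothetical names invented for this example.

```python
# Minimal sketch of the rule added in this patch: an event whose auth_events
# include a rejected event must itself be rejected. StubEvent and
# check_auth_events_not_rejected are hypothetical stand-ins, not Synapse code.
from dataclasses import dataclass, field
from typing import List, Optional


class AuthError(Exception):
    """Simplified stand-in for synapse.api.errors.AuthError."""


@dataclass
class StubEvent:
    event_id: str
    auth_events: List["StubEvent"] = field(default_factory=list)
    rejected_reason: Optional[str] = None  # non-None means the event was rejected


def check_auth_events_not_rejected(event: StubEvent) -> None:
    """Raise if any of the event's auth events was itself rejected."""
    for auth_event in event.auth_events:
        if auth_event.rejected_reason is not None:
            raise AuthError(
                "During auth for event %s: found rejected event %s in the state"
                % (event.event_id, auth_event.event_id)
            )


# A rejected membership event taints anything that lists it in auth_events.
rejected_member = StubEvent("$member", rejected_reason="AUTH_ERROR")
message = StubEvent("$message", auth_events=[rejected_member])
try:
    check_auth_events_not_rejected(message)
except AuthError as e:
    print(e)  # the message event is not accepted
```

In the real code the equivalent check lives inside `check_auth_rules_for_event`, as the diff below shows.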
--- changelog.d/10956.bugfix | 1 + synapse/event_auth.py | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/10956.bugfix diff --git a/changelog.d/10956.bugfix b/changelog.d/10956.bugfix new file mode 100644 index 000000000000..13b8e5983b73 --- /dev/null +++ b/changelog.d/10956.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 7a1adc27509e..ca0293a3dc86 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -155,6 +155,12 @@ def check_auth_rules_for_event( "which is in room %s" % (event.event_id, room_id, auth_event.event_id, auth_event.room_id), ) + if auth_event.rejected_reason: + raise AuthError( + 403, + "During auth for event %s: found rejected event %s in the state" + % (event.event_id, auth_event.event_id), + ) # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules # From b2c5e79291b9f93cdb39c9a6f7de50e62f45e64e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 5 Oct 2021 13:45:24 +0100 Subject: [PATCH 055/111] 1.44.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 6c2728d407ac..3f048ba881ae 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.44.0 (2021-10-05) +=========================== + +No significant changes since 1.44.0rc3. + + Synapse 1.44.0rc3 (2021-10-04) ============================== diff --git a/debian/changelog b/debian/changelog index a0f1bcbdf95c..9e878fbc2da3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.44.0) stable; urgency=medium + + * New synapse release 1.44.0. + + -- Synapse Packaging team Tue, 05 Oct 2021 13:43:57 +0100 + matrix-synapse-py3 (1.44.0~rc3) stable; urgency=medium * New synapse release 1.44.0~rc3. diff --git a/synapse/__init__.py b/synapse/__init__.py index a9a7b658b763..b8979c365ee7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.44.0rc3" +__version__ = "1.44.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 392863fbf1ee31f8a1997446ab31919a7b6d9a14 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 5 Oct 2021 11:51:57 -0500 Subject: [PATCH 056/111] Fix logic flaw preventing tracking of MSC2716 events in existing room versions (#10962) We correctly allowed using the MSC2716 batch endpoint for the room creator in existing room versions but accidentally didn't track the events because of a logic flaw. This prevented you from connecting subsequent chunks together because it would throw the unknown batch ID error. 
We only want to process MSC2716 events when:

 - The room version supports MSC2716
 - Any room where the homeserver has the `msc2716_enabled` experimental
   feature enabled and the event is from the room creator
---
 changelog.d/10962.bugfix                 |  1 +
 synapse/handlers/federation_event.py     |  5 ++---
 synapse/storage/databases/main/events.py | 10 ++++------
 3 files changed, 7 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/10962.bugfix

diff --git a/changelog.d/10962.bugfix b/changelog.d/10962.bugfix
new file mode 100644
index 000000000000..9b0760d7315f
--- /dev/null
+++ b/changelog.d/10962.bugfix
@@ -0,0 +1 @@
+Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches with unknown batch ID error in existing room versions from the room creator.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9269cb444d46..243be46267cd 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1015,9 +1015,8 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No
         room_version = await self._store.get_room_version(marker_event.room_id)
         create_event = await self._store.get_create_event_for_room(marker_event.room_id)
         room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
-        if (
-            not room_version.msc2716_historical
-            or not self._config.experimental.msc2716_enabled
+        if not room_version.msc2716_historical and (
+            not self._config.experimental.msc2716_enabled
             or marker_event.sender != room_creator
         ):
             return
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index bc7d213fe2a2..19f55c19c5bf 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1763,9 +1763,8 @@ def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
             retcol="creator",
             allow_none=True,
         )
-        if (
-            not room_version.msc2716_historical
-            or not self.hs.config.experimental.msc2716_enabled
+        if not room_version.msc2716_historical and (
+            not self.hs.config.experimental.msc2716_enabled
             or event.sender != room_creator
         ):
             return
@@ -1825,9 +1824,8 @@ def _handle_batch_event(self, txn: LoggingTransaction, event: EventBase):
             retcol="creator",
             allow_none=True,
         )
-        if (
-            not room_version.msc2716_historical
-            or not self.hs.config.experimental.msc2716_enabled
+        if not room_version.msc2716_historical and (
+            not self.hs.config.experimental.msc2716_enabled
             or event.sender != room_creator
         ):
             return

From 4f00432ce1a5571dd43f9ddc3ae128c58ae4d063 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 5 Oct 2021 18:35:25 +0100
Subject: [PATCH 057/111] Fix potential leak of per-room profiles when the
 user dir is rebuilt. (#10981)

There are two steps to rebuilding the user directory:

1. a scan over rooms, followed by
2. a scan over local users.

The former reads avatars and display names from the `room_memberships` table
and therefore contains potentially private avatars and display names. The
latter reads from the `profiles` table which only contains public data;
moreover it will overwrite any private profiles that the rooms scan may have
written to the user directory. This means that the rebuild could leak private
user profiles while the rebuild was in progress, only to later cover up the
leaks once the rebuild had completed.

This change skips over local users when writing user_directory rows when
scanning rooms.
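A rough, self-contained sketch of that skip follows. It is not the actual storage code; `users_to_upsert_from_room_scan`, the `Profile` tuple, and the domain-suffix locality check are assumptions invented for this example.

```python
# Sketch of the room-scan behaviour after this change: remote users' per-room
# profiles are upserted, while local users are deliberately skipped so that
# only the later scan over the public `profiles` table populates them.
from typing import Dict, NamedTuple, Optional


class Profile(NamedTuple):
    display_name: Optional[str]
    avatar_url: Optional[str]


def users_to_upsert_from_room_scan(
    users_with_profile: Dict[str, Profile], server_name: str
) -> Dict[str, Profile]:
    def is_local(user_id: str) -> bool:
        # e.g. "@alice:example.com" is local iff the domain matches ours
        return user_id.rsplit(":", 1)[-1] == server_name

    # Writing local users' per-room (possibly private) profiles here is
    # exactly the leak being fixed, so they are filtered out.
    return {u: p for u, p in users_with_profile.items() if not is_local(u)}


room_members = {
    "@alice:example.com": Profile("Alice (private nick)", None),
    "@bob:elsewhere.org": Profile("Bob", None),
}
assert users_to_upsert_from_room_scan(room_members, "example.com") == {
    "@bob:elsewhere.org": Profile("Bob", None)
}
```

The later scan over the `profiles` table then fills in local users from public data only.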
Doing so means that it'll take longer for a rebuild to make local users searchable, which is unfortunate. I think a future PR can improve this by swapping the order of the two steps above. (And indeed there's more to do here, e.g. copying from `profiles` without going via Python.) Small tidy-ups while I'm here: * Remove duplicated code from test_initial. This was meant to be pulled into `purge_and_rebuild_user_dir`. * Move `is_public` before updating sharing tables. No functional change; it's still before the first read of `is_public`. * Don't bother creating a set from dict keys. Slightly nicer and makes the code simpler. Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/10981.bugfix | 1 + .../storage/databases/main/user_directory.py | 33 ++++--- tests/storage/test_user_directory.py | 94 +++++++++++++++---- 3 files changed, 99 insertions(+), 29 deletions(-) create mode 100644 changelog.d/10981.bugfix diff --git a/changelog.d/10981.bugfix b/changelog.d/10981.bugfix new file mode 100644 index 000000000000..d7bf66034882 --- /dev/null +++ b/changelog.d/10981.bugfix @@ -0,0 +1 @@ +Fix a bug that could leak local users' per-room nicknames and avatars when the user directory is rebuilt. \ No newline at end of file diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 5f538947ecf8..5c713a732ee9 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -228,10 +228,6 @@ def _get_next_batch( is_in_room = await self.is_host_joined(room_id, self.server_name) if is_in_room: - is_public = await self.is_room_world_readable_or_publicly_joinable( - room_id - ) - users_with_profile = await self.get_users_in_room_with_profiles(room_id) # Throw away users excluded from the directory. users_with_profile = { @@ -241,22 +237,33 @@ def _get_next_batch( or await self.should_include_local_user_in_dir(user_id) } - # Update each user in the user directory. + # Upsert a user_directory record for each remote user we see. for user_id, profile in users_with_profile.items(): + # Local users are processed separately in + # `_populate_user_directory_users`; there we can read from + # the `profiles` table to ensure we don't leak their per-room + # profiles. It also means we write local users to this table + # exactly once, rather than once for every room they're in. + if self.hs.is_mine_id(user_id): + continue + # TODO `users_with_profile` above reads from the `user_directory` + # table, meaning that `profile` is bespoke to this room. + # and this leaks remote users' per-room profiles to the user directory. await self.update_profile_in_user_dir( user_id, profile.display_name, profile.avatar_url ) - to_insert = set() - + # Now update the room sharing tables to include this room. + is_public = await self.is_room_world_readable_or_publicly_joinable( + room_id + ) if is_public: - for user_id in users_with_profile: - to_insert.add(user_id) - - if to_insert: - await self.add_users_in_public_rooms(room_id, to_insert) - to_insert.clear() + if users_with_profile: + await self.add_users_in_public_rooms( + room_id, users_with_profile.keys() + ) else: + to_insert = set() for user_id in users_with_profile: # We want the set of pairs (L, M) where L and M are # in `users_with_profile` and L is local. 
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 6884ca9b7a3f..fddfb8db2875 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -11,17 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Set, Tuple +from typing import Any, Dict, List, Set, Tuple +from unittest import mock from unittest.mock import Mock, patch from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import UserTypes +from synapse.api.constants import EventTypes, Membership, UserTypes from synapse.appservice import ApplicationService from synapse.rest import admin from synapse.rest.client import login, register, room from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.storage.roommember import ProfileInfo from synapse.util import Clock from tests.test_utils.event_injection import inject_member_event @@ -52,6 +54,11 @@ def _compress_shared( return r async def get_users_in_public_rooms(self) -> List[Tuple[str, str]]: + """Fetch the entire `users_in_public_rooms` table. + + Returns a list of tuples (user_id, room_id) where room_id is public and + contains the user with the given id. + """ r = await self.store.db_pool.simple_select_list( "users_in_public_rooms", None, ("user_id", "room_id") ) @@ -62,6 +69,13 @@ async def get_users_in_public_rooms(self) -> List[Tuple[str, str]]: return retval async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]: + """Fetch the entire `users_who_share_private_rooms` table. + + Returns a dict containing "user_id", "other_user_id" and "room_id" keys. + The dicts can be flattened to Tuples with the `_compress_shared` method. + (This seems a little awkward---maybe we could clean this up.) + """ + return await self.store.db_pool.simple_select_list( "users_who_share_private_rooms", None, @@ -69,6 +83,10 @@ async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]: ) async def get_users_in_user_directory(self) -> Set[str]: + """Fetch the set of users in the `user_directory` table. + + This is useful when checking we've correctly excluded users from the directory. + """ result = await self.store.db_pool.simple_select_list( "user_directory", None, @@ -76,6 +94,25 @@ async def get_users_in_user_directory(self) -> Set[str]: ) return {row["user_id"] for row in result} + async def get_profiles_in_user_directory(self) -> Dict[str, ProfileInfo]: + """Fetch users and their profiles from the `user_directory` table. + + This is useful when we want to inspect display names and avatars. + It's almost the entire contents of the `user_directory` table: the only + thing missing is an unused room_id column. + """ + rows = await self.store.db_pool.simple_select_list( + "user_directory", + None, + ("user_id", "display_name", "avatar_url"), + ) + return { + row["user_id"]: ProfileInfo( + display_name=row["display_name"], avatar_url=row["avatar_url"] + ) + for row in rows + } + class UserDirectoryInitialPopulationTestcase(HomeserverTestCase): """Ensure that rebuilding the directory writes the correct data to the DB. 
@@ -201,20 +238,6 @@ def test_initial(self) -> None:
         self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token)
         self.helper.join(private_room, user=u3, tok=u3_token)
 
-        self.get_success(self.store.update_user_directory_stream_pos(None))
-        self.get_success(self.store.delete_all_from_user_dir())
-
-        shares_private = self.get_success(
-            self.user_dir_helper.get_users_who_share_private_rooms()
-        )
-        public_users = self.get_success(
-            self.user_dir_helper.get_users_in_public_rooms()
-        )
-
-        # Nothing updated yet
-        self.assertEqual(shares_private, [])
-        self.assertEqual(public_users, [])
-
         # Do the initial population of the user directory via the background update
         self._purge_and_rebuild_user_dir()
 
@@ -346,6 +369,45 @@ def test_population_excludes_appservice_user(self) -> None:
         # Check the AS user is not in the directory.
         self._check_room_sharing_tables(user, public, private)
 
+    def test_population_conceals_private_nickname(self) -> None:
+        # Make a private room, and set a nickname within
+        user = self.register_user("aaaa", "pass")
+        user_token = self.login(user, "pass")
+        private_room = self.helper.create_room_as(user, is_public=False, tok=user_token)
+        self.helper.send_state(
+            private_room,
+            EventTypes.Member,
+            state_key=user,
+            body={"membership": Membership.JOIN, "displayname": "BBBB"},
+            tok=user_token,
+        )
+
+        # Rebuild the user directory. Make the rescan of the `users` table a no-op
+        # so we only see the effect of scanning the `room_memberships` table.
+        async def mocked_process_users(*args: Any, **kwargs: Any) -> int:
+            await self.store.db_pool.updates._end_background_update(
+                "populate_user_directory_process_users"
+            )
+            return 1
+
+        with mock.patch.dict(
+            self.store.db_pool.updates._background_update_handlers,
+            populate_user_directory_process_users=mocked_process_users,
+        ):
+            self._purge_and_rebuild_user_dir()
+
+        # Local users are ignored by the scan over rooms
+        users = self.get_success(self.user_dir_helper.get_profiles_in_user_directory())
+        self.assertEqual(users, {})
+
+        # Do a full rebuild including the scan over the `users` table. The local
+        # user should appear with their profile name.
+        self._purge_and_rebuild_user_dir()
+        users = self.get_success(self.user_dir_helper.get_profiles_in_user_directory())
+        self.assertEqual(
+            users, {user: ProfileInfo(display_name="aaaa", avatar_url=None)}
+        )
+
 
 class UserDirectoryStoreTestCase(HomeserverTestCase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:

From 6744273f0b92457247166299981d69e9f4219601 Mon Sep 17 00:00:00 2001
From: Max Kratz
Date: Wed, 6 Oct 2021 12:05:07 +0200
Subject: [PATCH 058/111] Remove "reference" wording regarding the Synapse
 homeserver (#10971)

---
 README.rst                   | 7 ++-----
 changelog.d/10971.doc        | 1 +
 docs/README.md               | 6 +++---
 docs/welcome_and_overview.md | 5 +++--
 4 files changed, 9 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/10971.doc

diff --git a/README.rst b/README.rst
index 524a3a5142ee..63deb06eac2b 100644
--- a/README.rst
+++ b/README.rst
@@ -55,11 +55,8 @@ solutions. The hope is for Matrix to act as the building blocks for a new
 generation of fully open and interoperable messaging and VoIP apps for the
 internet.
 
-Synapse is a reference "homeserver" implementation of Matrix from the core
-development team at matrix.org, written in Python/Twisted. It is intended to
-showcase the concept of Matrix and let folks see the spec in the context of a
-codebase and let you run your own homeserver and generally help bootstrap the
-ecosystem.
+Synapse is a Matrix "homeserver" implementation developed by the matrix.org core +team, written in Python 3/Twisted. In Matrix, every user runs one or more Matrix clients, which connect through to a Matrix homeserver. The homeserver stores all their personal chat history and diff --git a/changelog.d/10971.doc b/changelog.d/10971.doc new file mode 100644 index 000000000000..cc6cfe416454 --- /dev/null +++ b/changelog.d/10971.doc @@ -0,0 +1 @@ +Change wording ("reference homeserver") in Synapse repository documentation. Contributed by @maxkratz. diff --git a/docs/README.md b/docs/README.md index e113f55d2a70..6d70f5afff18 100644 --- a/docs/README.md +++ b/docs/README.md @@ -6,9 +6,9 @@ Please update any links to point to the new website instead. ## About This directory currently holds a series of markdown files documenting how to install, use -and develop Synapse, the reference Matrix homeserver. The documentation is readable directly -from this repository, but it is recommended to instead browse through the -[website](https://matrix-org.github.io/synapse) for easier discoverability. +and develop Synapse. The documentation is readable directly from this repository, but it is +recommended to instead browse through the [website](https://matrix-org.github.io/synapse) for +easier discoverability. ## Adding to the documentation diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md index 30e75984d1ef..9882d9f15942 100644 --- a/docs/welcome_and_overview.md +++ b/docs/welcome_and_overview.md @@ -1,4 +1,5 @@ # Introduction -Welcome to the documentation repository for Synapse, the reference -[Matrix](https://matrix.org) homeserver implementation. \ No newline at end of file +Welcome to the documentation repository for Synapse, a +[Matrix](https://matrix.org) homeserver implementation developed by the matrix.org core +team. From f8d0f72b27e158738f3c75a38399b967f7478011 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 6 Oct 2021 11:20:49 +0100 Subject: [PATCH 059/111] More types for synapse.util, part 1 (#10888) The following modules now pass `disallow_untyped_defs`: * synapse.util.caches.cached_call * synapse.util.caches.lrucache * synapse.util.caches.response_cache * synapse.util.caches.stream_change_cache * synapse.util.caches.ttlcache pass * synapse.util.daemonize * synapse.util.patch_inline_callbacks pass `no-untyped-defs` * synapse.util.versionstring Additional typing in synapse.util.metrics. Didn't get this to pass `no-untyped-defs`, think I'll need to watch #10847 --- changelog.d/10888.misc | 1 + mypy.ini | 24 +++++++++ synapse/util/caches/cached_call.py | 2 +- synapse/util/caches/deferred_cache.py | 11 +++-- synapse/util/caches/lrucache.py | 57 ++++++++++------------ synapse/util/caches/response_cache.py | 6 +-- synapse/util/caches/stream_change_cache.py | 6 +-- synapse/util/caches/ttlcache.py | 12 ++--- synapse/util/daemonize.py | 8 ++- synapse/util/metrics.py | 27 +++++++--- synapse/util/patch_inline_callbacks.py | 28 ++++++++--- synapse/util/versionstring.py | 25 +++++++--- 12 files changed, 134 insertions(+), 73 deletions(-) create mode 100644 changelog.d/10888.misc diff --git a/changelog.d/10888.misc b/changelog.d/10888.misc new file mode 100644 index 000000000000..d9c991788125 --- /dev/null +++ b/changelog.d/10888.misc @@ -0,0 +1 @@ +Improve type hinting in `synapse.util`. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 568166db3300..86459bdcb62d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -102,9 +102,27 @@ disallow_untyped_defs = True [mypy-synapse.util.batching_queue] disallow_untyped_defs = True +[mypy-synapse.util.caches.cached_call] +disallow_untyped_defs = True + [mypy-synapse.util.caches.dictionary_cache] disallow_untyped_defs = True +[mypy-synapse.util.caches.lrucache] +disallow_untyped_defs = True + +[mypy-synapse.util.caches.response_cache] +disallow_untyped_defs = True + +[mypy-synapse.util.caches.stream_change_cache] +disallow_untyped_defs = True + +[mypy-synapse.util.caches.ttl_cache] +disallow_untyped_defs = True + +[mypy-synapse.util.daemonize] +disallow_untyped_defs = True + [mypy-synapse.util.file_consumer] disallow_untyped_defs = True @@ -141,6 +159,9 @@ disallow_untyped_defs = True [mypy-synapse.util.msisdn] disallow_untyped_defs = True +[mypy-synapse.util.patch_inline_callbacks] +disallow_untyped_defs = True + [mypy-synapse.util.ratelimitutils] disallow_untyped_defs = True @@ -162,6 +183,9 @@ disallow_untyped_defs = True [mypy-synapse.util.wheel_timer] disallow_untyped_defs = True +[mypy-synapse.util.versionstring] +disallow_untyped_defs = True + [mypy-tests.handlers.test_user_directory] disallow_untyped_defs = True diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py index e58dd91eda7b..470f4f91a59b 100644 --- a/synapse/util/caches/cached_call.py +++ b/synapse/util/caches/cached_call.py @@ -85,7 +85,7 @@ async def get(self) -> TV: # result in the deferred, since `awaiting` a deferred destroys its result. # (Also, if it's a Failure, GCing the deferred would log a critical error # about unhandled Failures) - def got_result(r): + def got_result(r: Union[TV, Failure]) -> None: self._result = r self._deferred.addBoth(got_result) diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 6262efe07235..da502aec114d 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -31,6 +31,7 @@ from twisted.internet import defer from twisted.python import failure +from twisted.python.failure import Failure from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.lrucache import LruCache @@ -112,7 +113,7 @@ def metrics_cb() -> None: self.thread: Optional[threading.Thread] = None @property - def max_entries(self): + def max_entries(self) -> int: return self.cache.max_size def check_thread(self) -> None: @@ -258,7 +259,7 @@ def compare_and_pop() -> bool: return False - def cb(result) -> None: + def cb(result: VT) -> None: if compare_and_pop(): self.cache.set(key, result, entry.callbacks) else: @@ -270,7 +271,7 @@ def cb(result) -> None: # not have been. Either way, let's double-check now. 
entry.invalidate() - def eb(_fail) -> None: + def eb(_fail: Failure) -> None: compare_and_pop() entry.invalidate() @@ -284,11 +285,11 @@ def eb(_fail) -> None: def prefill( self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None - ): + ) -> None: callbacks = [callback] if callback else [] self.cache.set(key, value, callbacks=callbacks) - def invalidate(self, key): + def invalidate(self, key) -> None: """Delete a key, or tree of entries If the cache is backed by a regular dict, then "key" must be of diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 4ff62b403ff1..a0a7a9de3299 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -52,7 +52,7 @@ try: from pympler.asizeof import Asizer - def _get_size_of(val: Any, *, recurse=True) -> int: + def _get_size_of(val: Any, *, recurse: bool = True) -> int: """Get an estimate of the size in bytes of the object. Args: @@ -71,7 +71,7 @@ def _get_size_of(val: Any, *, recurse=True) -> int: except ImportError: - def _get_size_of(val: Any, *, recurse=True) -> int: + def _get_size_of(val: Any, *, recurse: bool = True) -> int: return 0 @@ -85,15 +85,6 @@ def _get_size_of(val: Any, *, recurse=True) -> int: # a general type var, distinct from either KT or VT T = TypeVar("T") - -def enumerate_leaves(node, depth): - if depth == 0: - yield node - else: - for n in node.values(): - yield from enumerate_leaves(n, depth - 1) - - P = TypeVar("P") @@ -102,7 +93,7 @@ class _TimedListNode(ListNode[P]): __slots__ = ["last_access_ts_secs"] - def update_last_access(self, clock: Clock): + def update_last_access(self, clock: Clock) -> None: self.last_access_ts_secs = int(clock.time()) @@ -115,7 +106,7 @@ def update_last_access(self, clock: Clock): @wrap_as_background_process("LruCache._expire_old_entries") -async def _expire_old_entries(clock: Clock, expiry_seconds: int): +async def _expire_old_entries(clock: Clock, expiry_seconds: int) -> None: """Walks the global cache list to find cache entries that haven't been accessed in the given number of seconds. """ @@ -163,7 +154,7 @@ async def _expire_old_entries(clock: Clock, expiry_seconds: int): logger.info("Dropped %d items from caches", i) -def setup_expire_lru_cache_entries(hs: "HomeServer"): +def setup_expire_lru_cache_entries(hs: "HomeServer") -> None: """Start a background job that expires all cache entries if they have not been accessed for the given number of seconds. 
""" @@ -183,7 +174,7 @@ def setup_expire_lru_cache_entries(hs: "HomeServer"): ) -class _Node: +class _Node(Generic[KT, VT]): __slots__ = [ "_list_node", "_global_list_node", @@ -197,8 +188,8 @@ class _Node: def __init__( self, root: "ListNode[_Node]", - key, - value, + key: KT, + value: VT, cache: "weakref.ReferenceType[LruCache]", clock: Clock, callbacks: Collection[Callable[[], None]] = (), @@ -409,7 +400,7 @@ def evict() -> None: def synchronized(f: FT) -> FT: @wraps(f) - def inner(*args, **kwargs): + def inner(*args: Any, **kwargs: Any) -> Any: with lock: return f(*args, **kwargs) @@ -418,17 +409,19 @@ def inner(*args, **kwargs): cached_cache_len = [0] if size_callback is not None: - def cache_len(): + def cache_len() -> int: return cached_cache_len[0] else: - def cache_len(): + def cache_len() -> int: return len(cache) self.len = synchronized(cache_len) - def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): + def add_node( + key: KT, value: VT, callbacks: Collection[Callable[[], None]] = () + ) -> None: node = _Node( list_root, key, @@ -446,7 +439,7 @@ def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): if caches.TRACK_MEMORY_USAGE and metrics: metrics.inc_memory_usage(node.memory) - def move_node_to_front(node: _Node): + def move_node_to_front(node: _Node) -> None: node.move_to_front(real_clock, list_root) def delete_node(node: _Node) -> int: @@ -488,7 +481,7 @@ def cache_get( default: Optional[T] = None, callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, - ): + ) -> Union[None, T, VT]: node = cache.get(key, None) if node is not None: move_node_to_front(node) @@ -502,7 +495,9 @@ def cache_get( return default @synchronized - def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): + def cache_set( + key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = () + ) -> None: node = cache.get(key, None) if node is not None: # We sometimes store large objects, e.g. dicts, which cause @@ -547,7 +542,7 @@ def cache_pop(key: KT, default: T) -> Union[T, VT]: ... 
@synchronized - def cache_pop(key: KT, default: Optional[T] = None): + def cache_pop(key: KT, default: Optional[T] = None) -> Union[None, T, VT]: node = cache.get(key, None) if node: delete_node(node) @@ -612,25 +607,25 @@ def cache_contains(key: KT) -> bool: self.contains = cache_contains self.clear = cache_clear - def __getitem__(self, key): + def __getitem__(self, key: KT) -> VT: result = self.get(key, self.sentinel) if result is self.sentinel: raise KeyError() else: - return result + return cast(VT, result) - def __setitem__(self, key, value): + def __setitem__(self, key: KT, value: VT) -> None: self.set(key, value) - def __delitem__(self, key, value): + def __delitem__(self, key: KT, value: VT) -> None: result = self.pop(key, self.sentinel) if result is self.sentinel: raise KeyError() - def __len__(self): + def __len__(self) -> int: return self.len() - def __contains__(self, key): + def __contains__(self, key: KT) -> bool: return self.contains(key) def set_cache_factor(self, factor: float) -> bool: diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index ed7204336f7f..88ccf443377c 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -104,8 +104,8 @@ def get(self, key: KV) -> Optional[defer.Deferred]: return None def _set( - self, context: ResponseCacheContext[KV], deferred: defer.Deferred - ) -> defer.Deferred: + self, context: ResponseCacheContext[KV], deferred: "defer.Deferred[RV]" + ) -> "defer.Deferred[RV]": """Set the entry for the given key to the given deferred. *deferred* should run its callbacks in the sentinel logcontext (ie, @@ -126,7 +126,7 @@ def _set( key = context.cache_key self.pending_result_cache[key] = result - def on_complete(r): + def on_complete(r: RV) -> RV: # if this cache has a non-zero timeout, and the callback has not cleared # the should_cache bit, we leave it in the cache for now and schedule # its removal later. diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 27b1da235ef3..330709b8b778 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -40,10 +40,10 @@ def __init__( self, name: str, current_stream_pos: int, - max_size=10000, + max_size: int = 10000, prefilled_cache: Optional[Mapping[EntityType, int]] = None, - ): - self._original_max_size = max_size + ) -> None: + self._original_max_size: int = max_size self._max_size = math.floor(max_size) self._entity_to_key: Dict[EntityType, int] = {} diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 46afe3f934ab..0b9ac26b6949 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -159,12 +159,12 @@ def expire(self) -> None: del self._expiry_list[0] -@attr.s(frozen=True, slots=True) -class _CacheEntry: +@attr.s(frozen=True, slots=True, auto_attribs=True) +class _CacheEntry: # Should be Generic[KT, VT]. See python-attrs/attrs#313 """TTLCache entry""" # expiry_time is the first attribute, so that entries are sorted by expiry. 
- expiry_time = attr.ib(type=float) - ttl = attr.ib(type=float) - key = attr.ib() - value = attr.ib() + expiry_time: float + ttl: float + key: Any # should be KT + value: Any # should be VT diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index f1a351cfd4a6..de04f34e4e5c 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -19,6 +19,8 @@ import os import signal import sys +from types import FrameType, TracebackType +from typing import NoReturn, Type def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None: @@ -97,7 +99,9 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") - # (we don't normally expect reactor.run to raise any exceptions, but this will # also catch any other uncaught exceptions before we get that far.) - def excepthook(type_, value, traceback): + def excepthook( + type_: Type[BaseException], value: BaseException, traceback: TracebackType + ) -> None: logger.critical("Unhanded exception", exc_info=(type_, value, traceback)) sys.excepthook = excepthook @@ -119,7 +123,7 @@ def excepthook(type_, value, traceback): sys.exit(1) # write a log line on SIGTERM. - def sigterm(signum, frame): + def sigterm(signum: signal.Signals, frame: FrameType) -> NoReturn: logger.warning("Caught signal %s. Stopping daemon." % signum) sys.exit(0) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 1b82dca81b08..1e784b3f1f8d 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -14,9 +14,11 @@ import logging from functools import wraps -from typing import Any, Callable, Optional, TypeVar, cast +from types import TracebackType +from typing import Any, Callable, Optional, Type, TypeVar, cast from prometheus_client import Counter +from typing_extensions import Protocol from synapse.logging.context import ( ContextResourceUsage, @@ -24,6 +26,7 @@ current_context, ) from synapse.metrics import InFlightGauge +from synapse.util import Clock logger = logging.getLogger(__name__) @@ -64,6 +67,10 @@ T = TypeVar("T", bound=Callable[..., Any]) +class HasClock(Protocol): + clock: Clock + + def measure_func(name: Optional[str] = None) -> Callable[[T], T]: """ Used to decorate an async function with a `Measure` context manager. @@ -86,7 +93,7 @@ def wrapper(func: T) -> T: block_name = func.__name__ if name is None else name @wraps(func) - async def measured_func(self, *args, **kwargs): + async def measured_func(self: HasClock, *args: Any, **kwargs: Any) -> Any: with Measure(self.clock, block_name): r = await func(self, *args, **kwargs) return r @@ -104,10 +111,10 @@ class Measure: "start", ] - def __init__(self, clock, name: str): + def __init__(self, clock: Clock, name: str) -> None: """ Args: - clock: A n object with a "time()" method, which returns the current + clock: An object with a "time()" method, which returns the current time in seconds. name: The name of the metric to report. 
""" @@ -124,7 +131,7 @@ def __init__(self, clock, name: str): assert isinstance(curr_context, LoggingContext) parent_context = curr_context self._logging_context = LoggingContext(str(curr_context), parent_context) - self.start: Optional[int] = None + self.start: Optional[float] = None def __enter__(self) -> "Measure": if self.start is not None: @@ -138,7 +145,12 @@ def __enter__(self) -> "Measure": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: if self.start is None: raise RuntimeError("Measure() block exited without being entered") @@ -168,8 +180,9 @@ def get_resource_usage(self) -> ContextResourceUsage: """ return self._logging_context.get_resource_usage() - def _update_in_flight(self, metrics): + def _update_in_flight(self, metrics) -> None: """Gets called when processing in flight metrics""" + assert self.start is not None duration = self.clock.time() - self.start metrics.real_time_max = max(metrics.real_time_max, duration) diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index 9dd010af3b0e..1f18654d47e7 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -14,7 +14,7 @@ import functools import sys -from typing import Any, Callable, List +from typing import Any, Callable, Generator, List, TypeVar from twisted.internet import defer from twisted.internet.defer import Deferred @@ -24,6 +24,9 @@ _already_patched = False +T = TypeVar("T") + + def do_patch() -> None: """ Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit @@ -37,15 +40,19 @@ def do_patch() -> None: if _already_patched: return - def new_inline_callbacks(f): + def new_inline_callbacks( + f: Callable[..., Generator["Deferred[object]", object, T]] + ) -> Callable[..., "Deferred[T]"]: @functools.wraps(f) - def wrapped(*args, **kwargs): + def wrapped(*args: Any, **kwargs: Any) -> "Deferred[T]": start_context = current_context() changes: List[str] = [] - orig = orig_inline_callbacks(_check_yield_points(f, changes)) + orig: Callable[..., "Deferred[T]"] = orig_inline_callbacks( + _check_yield_points(f, changes) + ) try: - res = orig(*args, **kwargs) + res: "Deferred[T]" = orig(*args, **kwargs) except Exception: if current_context() != start_context: for err in changes: @@ -84,7 +91,7 @@ def wrapped(*args, **kwargs): print(err, file=sys.stderr) raise Exception(err) - def check_ctx(r): + def check_ctx(r: T) -> T: if current_context() != start_context: for err in changes: print(err, file=sys.stderr) @@ -107,7 +114,10 @@ def check_ctx(r): _already_patched = True -def _check_yield_points(f: Callable, changes: List[str]) -> Callable: +def _check_yield_points( + f: Callable[..., Generator["Deferred[object]", object, T]], + changes: List[str], +) -> Callable: """Wraps a generator that is about to be passed to defer.inlineCallbacks checking that after every yield the log contexts are correct. 
@@ -127,7 +137,9 @@ def _check_yield_points(f: Callable, changes: List[str]) -> Callable: from synapse.logging.context import current_context @functools.wraps(f) - def check_yield_points_inner(*args, **kwargs): + def check_yield_points_inner( + *args: Any, **kwargs: Any + ) -> Generator["Deferred[object]", object, T]: gen = f(*args, **kwargs) last_yield_line_no = gen.gi_frame.f_lineno diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index 1c20b24bbe68..899ee0adc803 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -15,14 +15,18 @@ import logging import os import subprocess +from types import ModuleType +from typing import Dict logger = logging.getLogger(__name__) +version_cache: Dict[ModuleType, str] = {} -def get_version_string(module) -> str: + +def get_version_string(module: ModuleType) -> str: """Given a module calculate a git-aware version string for it. - If called on a module not in a git checkout will return `__verison__`. + If called on a module not in a git checkout will return `__version__`. Args: module (module) @@ -31,11 +35,13 @@ def get_version_string(module) -> str: str """ - cached_version = getattr(module, "_synapse_version_string_cache", None) - if cached_version: + cached_version = version_cache.get(module) + if cached_version is not None: return cached_version - version_string = module.__version__ + # We want this to fail loudly with an AttributeError. Type-ignore this so + # mypy only considers the happy path. + version_string = module.__version__ # type: ignore[attr-defined] try: null = open(os.devnull, "w") @@ -97,10 +103,15 @@ def get_version_string(module) -> str: s for s in (git_branch, git_tag, git_commit, git_dirty) if s ) - version_string = "%s (%s)" % (module.__version__, git_version) + version_string = "%s (%s)" % ( + # If the __version__ attribute doesn't exist, we'll have failed + # loudly above. + module.__version__, # type: ignore[attr-defined] + git_version, + ) except Exception as e: logger.info("Failed to check for git repository: %s", e) - module._synapse_version_string_cache = version_string + version_cache[module] = version_string return version_string From c80878d22a013ed68d3929025bbd40074e66af01 Mon Sep 17 00:00:00 2001 From: Nick Barrett Date: Wed, 6 Oct 2021 11:26:18 +0100 Subject: [PATCH 060/111] Add `--run-background-updates` option to `update_database` script. (#10954) Signed-off-by: Nick Barrett --- .ci/scripts/test_synapse_port_db.sh | 4 +- changelog.d/10954.feature | 1 + debian/changelog | 7 +++ debian/matrix-synapse-py3.links | 1 + scripts-dev/lint.sh | 2 +- scripts-dev/make_full_schema.sh | 2 +- .../update_synapse_database | 48 ++++++++++++------- tox.ini | 2 +- 8 files changed, 46 insertions(+), 21 deletions(-) create mode 100644 changelog.d/10954.feature rename scripts-dev/update_database => scripts/update_synapse_database (86%) diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh index 2b4e5ec1707d..50115b3079a8 100755 --- a/.ci/scripts/test_synapse_port_db.sh +++ b/.ci/scripts/test_synapse_port_db.sh @@ -25,7 +25,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. -scripts-dev/update_database --database-config .ci/sqlite-config.yaml +scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Create the PostgreSQL database. 
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse" @@ -46,7 +46,7 @@ echo "--- Prepare empty SQLite database" # we do this by deleting the sqlite db, and then doing the same again. rm .ci/test_db.db -scripts-dev/update_database --database-config .ci/sqlite-config.yaml +scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # re-create the PostgreSQL database. .ci/scripts/postgres_exec.py \ diff --git a/changelog.d/10954.feature b/changelog.d/10954.feature new file mode 100644 index 000000000000..94dfa7175c31 --- /dev/null +++ b/changelog.d/10954.feature @@ -0,0 +1 @@ +Include an `update_synapse_database` script in the distribution. Contributed by @Fizzadar at Beeper. diff --git a/debian/changelog b/debian/changelog index 9e878fbc2da3..8e80c78ee7a0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +matrix-synapse-py3 (1.44.0~rc2+nmu1) UNRELEASED; urgency=medium + + [ Nick @ Beeper ] + * Include an `update_synapse_database` script in the distribution. + + -- root Mon, 04 Oct 2021 13:29:26 +0000 + matrix-synapse-py3 (1.44.0) stable; urgency=medium * New synapse release 1.44.0. diff --git a/debian/matrix-synapse-py3.links b/debian/matrix-synapse-py3.links index 53e29654187a..7eeba180d903 100644 --- a/debian/matrix-synapse-py3.links +++ b/debian/matrix-synapse-py3.links @@ -3,3 +3,4 @@ opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matri opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl +opt/venvs/matrix-synapse/bin/update_synapse_database usr/bin/update_synapse_database diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 809eff166ab2..b6554a73c115 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -90,10 +90,10 @@ else "scripts/hash_password" "scripts/register_new_matrix_user" "scripts/synapse_port_db" + "scripts/update_synapse_database" "scripts-dev" "scripts-dev/build_debian_packages" "scripts-dev/sign_json" - "scripts-dev/update_database" "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci" ) fi diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index 39bf30d258bd..c3c90f4ec637 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -scripts-dev/update_database --database-config "$SQLITE_CONFIG" +scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." diff --git a/scripts-dev/update_database b/scripts/update_synapse_database similarity index 86% rename from scripts-dev/update_database rename to scripts/update_synapse_database index 87f709b6ed43..26b29b0b4593 100755 --- a/scripts-dev/update_database +++ b/scripts/update_synapse_database @@ -42,10 +42,29 @@ class MockHomeserver(HomeServer): self.version_string = "Synapse/" + get_version_string(synapse) -if __name__ == "__main__": +def run_background_updates(hs): + store = hs.get_datastore() + + async def run_background_updates(): + await store.db_pool.updates.run_background_updates(sleep=False) + # Stop the reactor to exit the script once every background update is run. 
+ reactor.stop() + + def run(): + # Apply all background updates on the database. + defer.ensureDeferred( + run_as_background_process("background_updates", run_background_updates) + ) + + reactor.callWhenRunning(run) + + reactor.run() + + +def main(): parser = argparse.ArgumentParser( description=( - "Updates a synapse database to the latest schema and runs background updates" + "Updates a synapse database to the latest schema and optionally runs background updates" " on it." ) ) @@ -54,7 +73,13 @@ if __name__ == "__main__": "--database-config", type=argparse.FileType("r"), required=True, - help="A database config file for either a SQLite3 database or a PostgreSQL one.", + help="Synapse configuration file, giving the details of the database to be updated", + ) + parser.add_argument( + "--run-background-updates", + action="store_true", + required=False, + help="run background updates after upgrading the database schema", ) args = parser.parse_args() @@ -82,19 +107,10 @@ if __name__ == "__main__": # Setup instantiates the store within the homeserver object and updates the # DB. hs.setup() - store = hs.get_datastore() - async def run_background_updates(): - await store.db_pool.updates.run_background_updates(sleep=False) - # Stop the reactor to exit the script once every background update is run. - reactor.stop() + if args.run_background_updates: + run_background_updates(hs) - def run(): - # Apply all background updates on the database. - defer.ensureDeferred( - run_as_background_process("background_updates", run_background_updates) - ) - reactor.callWhenRunning(run) - - reactor.run() +if __name__ == "__main__": + main() diff --git a/tox.ini b/tox.ini index 5a62ec76c23f..cfe6a0694269 100644 --- a/tox.ini +++ b/tox.ini @@ -41,10 +41,10 @@ lint_targets = scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db + scripts/update_synapse_database scripts-dev scripts-dev/build_debian_packages scripts-dev/sign_json - scripts-dev/update_database stubs contrib synctl From 38b7db58859d80f06b8dc94e6a6dd19600778caa Mon Sep 17 00:00:00 2001 From: Max Kratz Date: Wed, 6 Oct 2021 13:20:41 +0200 Subject: [PATCH 061/111] Updated development doc on samling environment for testing. (#10973) --- changelog.d/10973.doc | 1 + docs/development/saml.md | 11 +++++------ 2 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/10973.doc diff --git a/changelog.d/10973.doc b/changelog.d/10973.doc new file mode 100644 index 000000000000..d7429a9da6db --- /dev/null +++ b/changelog.d/10973.doc @@ -0,0 +1 @@ +Fix a dead URL in development documentation (SAML) and change wording from "Riot" to "Element". Contributed by @maxkratz. diff --git a/docs/development/saml.md b/docs/development/saml.md index a9bfd2dc05d6..60a431d68650 100644 --- a/docs/development/saml.md +++ b/docs/development/saml.md @@ -1,10 +1,9 @@ # How to test SAML as a developer without a server -https://capriza.github.io/samling/samling.html (https://github.com/capriza/samling) is a great -resource for being able to tinker with the SAML options within Synapse without needing to -deploy and configure a complicated software stack. +https://fujifish.github.io/samling/samling.html (https://github.com/fujifish/samling) is a great resource for being able to tinker with the +SAML options within Synapse without needing to deploy and configure a complicated software stack. -To make Synapse (and therefore Riot) use it: +To make Synapse (and therefore Element) use it: 1. 
Use the samling.html URL above or deploy your own and visit the IdP Metadata tab. 2. Copy the XML to your clipboard. @@ -26,9 +25,9 @@ To make Synapse (and therefore Riot) use it: the dependencies are installed and ready to go. 7. Restart Synapse. -Then in Riot: +Then in Element: -1. Visit the login page with a Riot pointing at your homeserver. +1. Visit the login page and point Element towards your homeserver using the `public_baseurl` above. 2. Click the Single Sign-On button. 3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`. The response must also be signed. From 370bca32e60a854ab063f1abedb087dacae37e5a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 6 Oct 2021 13:56:45 +0100 Subject: [PATCH 062/111] Don't drop user dir deltas when server leaves room (#10982) Fix a long-standing bug where a batch of user directory changes would be silently dropped if the server left a room early in the batch. * Pull out `wait_for_background_update` in tests Co-authored-by: Patrick Cloke Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/10982.bugfix | 1 + synapse/handlers/user_directory.py | 2 +- tests/handlers/test_stats.py | 21 ++---------- tests/handlers/test_user_directory.py | 39 +++++++++++++++++++++++ tests/storage/databases/main/test_room.py | 7 +--- tests/storage/test_cleanup_extrems.py | 7 +--- tests/storage/test_client_ips.py | 21 ++---------- tests/storage/test_event_chain.py | 14 ++------ tests/storage/test_roommember.py | 14 ++------ tests/storage/test_user_directory.py | 7 +--- tests/unittest.py | 9 ++++++ 11 files changed, 63 insertions(+), 79 deletions(-) create mode 100644 changelog.d/10982.bugfix diff --git a/changelog.d/10982.bugfix b/changelog.d/10982.bugfix new file mode 100644 index 000000000000..5c9e15eeaa42 --- /dev/null +++ b/changelog.d/10982.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the remainder of a batch of user directory changes would be silently dropped if the server left a room early in the batch. 
\ No newline at end of file diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 18d8c8744e75..97f60b58068a 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -220,7 +220,7 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: for user_id in user_ids: await self._handle_remove_user(room_id, user_id) - return + continue else: logger.debug("Server is still in room: %r", room_id) diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index 24b7ef6efcf8..56207f4db6f6 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -103,12 +103,7 @@ def _perform_background_initial_update(self): # Do the initial population of the stats via the background update self._add_background_updates() - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() def test_initial_room(self): """ @@ -140,12 +135,7 @@ def test_initial_room(self): # Do the initial population of the user directory via the background update self._add_background_updates() - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() r = self.get_success(self.get_all_room_state()) @@ -568,12 +558,7 @@ def test_incomplete_stats(self): ) ) - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() r1stats_complete = self._get_current_stats("room", r1) u1stats_complete = self._get_current_stats("user", u1) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index b3c3af113b28..03fd5a3e2c52 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -363,6 +363,45 @@ def test_reactivation_makes_regular_user_searchable(self) -> None: self.assertEqual(len(s["results"]), 1) self.assertEqual(s["results"][0]["user_id"], user) + def test_process_join_after_server_leaves_room(self) -> None: + alice = self.register_user("alice", "pass") + alice_token = self.login(alice, "pass") + bob = self.register_user("bob", "pass") + bob_token = self.login(bob, "pass") + + # Alice makes two rooms. Bob joins one of them. + room1 = self.helper.create_room_as(alice, tok=alice_token) + room2 = self.helper.create_room_as(alice, tok=alice_token) + print("room1=", room1) + print("room2=", room2) + self.helper.join(room1, bob, tok=bob_token) + + # The user sharing tables should have been updated. + public1 = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + self.assertEqual(set(public1), {(alice, room1), (alice, room2), (bob, room1)}) + + # Alice leaves room1. The user sharing tables should be updated. + self.helper.leave(room1, alice, tok=alice_token) + public2 = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + self.assertEqual(set(public2), {(alice, room2), (bob, room1)}) + + # Pause the processing of new events. + dir_handler = self.hs.get_user_directory_handler() + dir_handler.update_user_directory = False + + # Bob leaves one room and joins the other. 
+ self.helper.leave(room1, bob, tok=bob_token) + self.helper.join(room2, bob, tok=bob_token) + + # Process the leave and join in one go. + dir_handler.update_user_directory = True + dir_handler.notify_new_event() + self.wait_for_background_updates() + + # The user sharing tables should have been updated. + public3 = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + self.assertEqual(set(public3), {(alice, room2), (bob, room2)}) + def test_private_room(self) -> None: """ A user can be searched for only by people that are either in a public diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py index ffee70715342..7496974da3a8 100644 --- a/tests/storage/databases/main/test_room.py +++ b/tests/storage/databases/main/test_room.py @@ -79,12 +79,7 @@ def test_background_populate_rooms_creator_column(self): self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() # Make sure the background update filled in the room creator room_creator_after = self.get_success( diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 7cc5e621ba9a..a59c28f89681 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -66,12 +66,7 @@ def run_delta_file(txn): # Ugh, have to reset this flag self.store.db_pool.updates._all_done = False - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() def test_soft_failed_extremities_handled_correctly(self): """Test that extremities are correctly calculated in the presence of diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 3cc8038f1e65..dada4f98c934 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -242,12 +242,7 @@ def test_updating_monthly_active_user_when_space(self): def test_devices_last_seen_bg_update(self): # First make sure we have completed all updates. - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() user_id = "@user:id" device_id = "MY_DEVICE" @@ -311,12 +306,7 @@ def test_devices_last_seen_bg_update(self): self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() # We should now get the correct result again result = self.get_success( @@ -337,12 +327,7 @@ def test_devices_last_seen_bg_update(self): def test_old_user_ips_pruned(self): # First make sure we have completed all updates. 
- while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() user_id = "@user:id" device_id = "MY_DEVICE" diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index 93136f071793..b31c5eb5ecc6 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -578,12 +578,7 @@ def test_background_update_single_room(self): # Ugh, have to reset this flag self.store.db_pool.updates._all_done = False - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() # Test that the `has_auth_chain_index` has been set self.assertTrue(self.get_success(self.store.has_auth_chain_index(room_id))) @@ -619,12 +614,7 @@ def test_background_update_multiple_rooms(self): # Ugh, have to reset this flag self.store.db_pool.updates._all_done = False - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() # Test that the `has_auth_chain_index` has been set self.assertTrue(self.get_success(self.store.has_auth_chain_index(room_id1))) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index c72dc40510a4..2873e22ccf8c 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -169,12 +169,7 @@ def prepare(self, reactor, clock, homeserver): def test_can_rerun_update(self): # First make sure we have completed all updates. 
- while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() # Now let's create a room, which will insert a membership user = UserID("alice", "test") @@ -197,9 +192,4 @@ def test_can_rerun_update(self): self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index fddfb8db2875..9f483ad681c6 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -212,12 +212,7 @@ def _purge_and_rebuild_user_dir(self) -> None: ) ) - while not self.get_success( - self.store.db_pool.updates.has_completed_background_updates() - ): - self.get_success( - self.store.db_pool.updates.do_next_background_update(100), by=0.1 - ) + self.wait_for_background_updates() def test_initial(self) -> None: """ diff --git a/tests/unittest.py b/tests/unittest.py index ae393ee53eee..81c1a9e9d2d2 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -317,6 +317,15 @@ def wait_on_thread(self, deferred, timeout=10): self.reactor.advance(0.01) time.sleep(0.01) + def wait_for_background_updates(self) -> None: + """Block until all background database updates have completed.""" + while not self.get_success( + self.store.db_pool.updates.has_completed_background_updates() + ): + self.get_success( + self.store.db_pool.updates.do_next_background_update(100), by=0.1 + ) + def make_homeserver(self, reactor, clock): """ Make and return a homeserver. From b0460936c8e31a2e0d160d4bba69223036ae26fe Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 6 Oct 2021 16:03:17 +0200 Subject: [PATCH 063/111] Add the synapse-core team as code owners (#10994) Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- .github/CODEOWNERS | 2 ++ changelog.d/10994.misc | 1 + 2 files changed, 3 insertions(+) create mode 100644 .github/CODEOWNERS create mode 100644 changelog.d/10994.misc diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000000..d6cd75f1d076 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +# Automatically request reviews from the synapse-core team when a pull request comes in. +* @matrix-org/synapse-core \ No newline at end of file diff --git a/changelog.d/10994.misc b/changelog.d/10994.misc new file mode 100644 index 000000000000..0a8538b01e18 --- /dev/null +++ b/changelog.d/10994.misc @@ -0,0 +1 @@ +Add a `CODEOWNERS` file to automatically request reviews from the `@matrix-org/synapse-core` team on new pull requests. 
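The catch-all rule in the `CODEOWNERS` file above routes every pull request to a single team. For reference, CODEOWNERS files use gitignore-style patterns and the last matching pattern wins, so per-path rules can be layered on later. A hypothetical sketch (the `docs/` rule and the second team name are illustrative only, not part of this patch):

```
# Hypothetical layered CODEOWNERS -- illustrative only.
# The last matching pattern takes precedence for any given file.
*       @matrix-org/synapse-core
docs/   @matrix-org/docs-team
```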
From 829f2a82b042d944fef3df55faec924502cdf20d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 6 Oct 2021 16:32:16 +0200 Subject: [PATCH 064/111] Add a spamchecker callback to allow or deny room joins (#10910) Co-authored-by: Erik Johnston --- changelog.d/10910.feature | 1 + docs/modules/spam_checker_callbacks.md | 15 ++++ synapse/events/spamcheck.py | 24 ++++++ synapse/handlers/room.py | 2 + synapse/handlers/room_member.py | 31 ++++++++ tests/rest/client/test_rooms.py | 101 +++++++++++++++++++++++++ 6 files changed, 174 insertions(+) create mode 100644 changelog.d/10910.feature diff --git a/changelog.d/10910.feature b/changelog.d/10910.feature new file mode 100644 index 000000000000..aee139f8b6f1 --- /dev/null +++ b/changelog.d/10910.feature @@ -0,0 +1 @@ +Add a spam checker callback to allow or deny room joins. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 7920ac5f8fc3..92376df9938f 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -19,6 +19,21 @@ either a `bool` to indicate whether the event must be rejected because of spam, to indicate the event must be rejected because of spam and to give a rejection reason to forward to clients. +### `user_may_join_room` + +```python +async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool +``` + +Called when a user is trying to join a room. The module must return a `bool` to indicate +whether the user can join the room. The user is represented by their Matrix user ID (e.g. +`@alice:example.com`) and the room is represented by its Matrix ID (e.g. +`!room:example.com`). The module is also given a boolean to indicate whether the user +currently has a pending invite in the room. + +This callback isn't called if the join is performed by a server administrator, or in the +context of a room creation. 
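As an illustration of how a module might use this callback, here is a minimal sketch. It assumes the standard module API method `register_spam_checker_callbacks`; the module itself and its `blocked_rooms` config key are hypothetical, not something shipped with Synapse:

```python
# blocked_rooms.py -- hypothetical example module, not shipped with Synapse.
from typing import Any, Dict, Set


class BlockedRoomsModule:
    def __init__(self, config: Dict[str, Any], api: Any):
        # Room IDs that may only be joined via an invite, from this module's config.
        self._blocked_rooms: Set[str] = set(config.get("blocked_rooms", []))
        api.register_spam_checker_callbacks(user_may_join_room=self.user_may_join_room)

    async def user_may_join_room(self, user: str, room: str, is_invited: bool) -> bool:
        # Invited users may always join; otherwise consult the block list.
        return is_invited or room not in self._blocked_rooms
```

Returning `True` for invited users keeps the callback from interfering with invite flows, mirroring the `is_invited` flag described above.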
+
 ### `user_may_invite`
 
 ```python
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index c389f70b8d70..ec8863e39715 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -44,6 +44,7 @@
     ["synapse.events.EventBase"],
     Awaitable[Union[bool, str]],
 ]
+USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]]
 USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
 USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
 USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
@@ -165,6 +166,7 @@ def run(*args, **kwargs):
 class SpamChecker:
     def __init__(self):
         self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
+        self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
         self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
         self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
         self._user_may_create_room_with_invites_callbacks: List[
@@ -187,6 +189,7 @@ def __init__(self):
     def register_callbacks(
         self,
         check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
+        user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
         user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
         user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
         user_may_create_room_with_invites: Optional[
@@ -206,6 +209,9 @@ def register_callbacks(
         if check_event_for_spam is not None:
             self._check_event_for_spam_callbacks.append(check_event_for_spam)
 
+        if user_may_join_room is not None:
+            self._user_may_join_room_callbacks.append(user_may_join_room)
+
         if user_may_invite is not None:
             self._user_may_invite_callbacks.append(user_may_invite)
 
@@ -259,6 +265,24 @@ async def check_event_for_spam(
 
         return False
 
+    async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool):
+        """Checks if a given user is allowed to join a room.
+        Not called when a user creates a room.
+
+        Args:
+            user_id: The ID of the user wanting to join the room
+            room_id: The ID of the room the user wants to join
+            is_invited: Whether the user is invited into the room
+
+        Returns:
+            bool: Whether the user may join the room
+        """
+        for callback in self._user_may_join_room_callbacks:
+            if await callback(user_id, room_id, is_invited) is False:
+                return False
+
+        return True
+
     async def user_may_invite(
         self, inviter_userid: str, invitee_userid: str, room_id: str
     ) -> bool:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 873e08258ea0..d40dbd761d80 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -860,6 +860,7 @@ async def create_room(
             "invite",
             ratelimit=False,
             content=content,
+            new_room=True,
         )
 
         for invite_3pid in invite_3pid_list:
@@ -962,6 +963,7 @@ async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
             "join",
             ratelimit=ratelimit,
             content=creator_join_profile,
+            new_room=True,
         )
 
         # We treat the power levels override specially as this needs to be one
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index c8fb24a20c2e..0b79dbcf8d83 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -434,6 +434,7 @@ async def update_membership(
         third_party_signed: Optional[dict] = None,
         ratelimit: bool = True,
         content: Optional[dict] = None,
+        new_room: bool = False,
         require_consent: bool = True,
         outlier: bool = False,
         prev_event_ids: Optional[List[str]] = None,
@@ -451,6 +452,8 @@ async def update_membership(
             third_party_signed: Information from a 3PID invite.
             ratelimit: Whether to rate limit the request.
             content: The content of the created event.
+            new_room: Whether the membership update is happening in the context of a room
+                creation.
             require_consent: Whether consent is required.
             outlier: Indicates whether the event is an `outlier`, i.e. if it's from
                an arbitrary point and floating in the DAG as
@@ -485,6 +488,7 @@ async def update_membership(
                 third_party_signed=third_party_signed,
                 ratelimit=ratelimit,
                 content=content,
+                new_room=new_room,
                 require_consent=require_consent,
                 outlier=outlier,
                 prev_event_ids=prev_event_ids,
@@ -504,6 +508,7 @@ async def update_membership_locked(
         third_party_signed: Optional[dict] = None,
         ratelimit: bool = True,
         content: Optional[dict] = None,
+        new_room: bool = False,
         require_consent: bool = True,
         outlier: bool = False,
         prev_event_ids: Optional[List[str]] = None,
@@ -523,6 +528,8 @@ async def update_membership_locked(
             third_party_signed:
             ratelimit:
             content:
+            new_room: Whether the membership update is happening in the context of a room
+                creation.
             require_consent:
             outlier: Indicates whether the event is an `outlier`, i.e. if it's from
                an arbitrary point and floating in the DAG as
@@ -726,6 +733,30 @@ async def update_membership_locked(
             # so don't really fit into the general auth process.
             raise AuthError(403, "Guest access not allowed")
 
+        # Figure out whether the user is a server admin to determine whether they
+        # should be able to bypass the spam checker.
+        if (
+            self._server_notices_mxid is not None
+            and requester.user.to_string() == self._server_notices_mxid
+        ):
+            # allow the server notices mxid to join rooms
+            bypass_spam_checker = True
+
+        else:
+            bypass_spam_checker = await self.auth.is_server_admin(requester.user)
+
+        inviter = await self._get_inviter(target.to_string(), room_id)
+        if (
+            not bypass_spam_checker
+            # We assume that if the spam checker allowed the user to create
+            # a room then they're allowed to join it.
+ and not new_room + and not await self.spam_checker.user_may_join_room( + target.to_string(), room_id, is_invited=inviter is not None + ) + ): + raise SynapseError(403, "Not allowed to join this room") + # Check if a remote join should be performed. remote_join, remote_room_hosts = await self._should_perform_remote_join( target.to_string(), room_id, remote_room_hosts, content, is_host_in_room diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 30bdaa9c2712..a41ec6a98fc8 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -784,6 +784,30 @@ async def user_may_create_room_with_invites( # Check that do_3pid_invite wasn't called this time. self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids)) + def test_spam_checker_may_join_room(self): + """Tests that the user_may_join_room spam checker callback is correctly bypassed + when creating a new room. + """ + + async def user_may_join_room( + mxid: str, + room_id: str, + is_invite: bool, + ) -> bool: + return False + + join_mock = Mock(side_effect=user_may_join_room) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) + + channel = self.make_request( + "POST", + "/createRoom", + {}, + ) + self.assertEquals(channel.code, 200, channel.json_body) + + self.assertEquals(join_mock.call_count, 0) + class RoomTopicTestCase(RoomBase): """Tests /rooms/$room_id/topic REST events.""" @@ -975,6 +999,83 @@ def test_invites_by_users_ratelimit(self): self.helper.invite(room_id, self.user_id, "@other-users:red", expect_code=429) +class RoomJoinTestCase(RoomBase): + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.user1 = self.register_user("thomas", "hackme") + self.tok1 = self.login("thomas", "hackme") + + self.user2 = self.register_user("teresa", "hackme") + self.tok2 = self.login("teresa", "hackme") + + self.room1 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) + self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) + self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) + + def test_spam_checker_may_join_room(self): + """Tests that the user_may_join_room spam checker callback is correctly called + and blocks room joins when needed. + """ + + # Register a dummy callback. Make it allow all room joins for now. + return_value = True + + async def user_may_join_room( + userid: str, + room_id: str, + is_invited: bool, + ) -> bool: + return return_value + + callback_mock = Mock(side_effect=user_may_join_room) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) + + # Join a first room, without being invited to it. + self.helper.join(self.room1, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room1, + False, + ), + ) + self.assertEquals( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Join a second room, this time with an invite for it. + self.helper.invite(self.room2, self.user1, self.user2, tok=self.tok1) + self.helper.join(self.room2, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. 
+ expected_call_args = ( + ( + self.user2, + self.room2, + True, + ), + ) + self.assertEquals( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Now make the callback deny all room joins, and check that a join actually fails. + return_value = False + self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + + class RoomJoinRatelimitTestCase(RoomBase): user_id = "@sid1:red" From f4b1a9a527273ef71b2f7d970642b7af45462e0f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 6 Oct 2021 10:47:41 -0400 Subject: [PATCH 065/111] Require direct references to configuration variables. (#10985) This removes the magic allowing accessing configurable variables directly from the config object. It is now required that a specific configuration class is used (e.g. `config.foo` must be replaced with `config.server.foo`). --- changelog.d/10985.misc | 1 + scripts/synapse_port_db | 4 +- scripts/update_synapse_database | 2 +- synapse/app/_base.py | 2 +- synapse/app/admin_cmd.py | 4 +- synapse/app/homeserver.py | 2 +- synapse/config/_base.py | 64 +++---------------- synapse/config/account_validity.py | 2 +- synapse/config/cas.py | 2 +- synapse/config/emailconfig.py | 9 ++- synapse/config/key.py | 6 +- synapse/config/oidc.py | 2 +- synapse/config/registration.py | 7 +- synapse/config/repository.py | 2 +- synapse/config/saml2.py | 2 +- synapse/config/server_notices.py | 4 +- synapse/config/sso.py | 6 +- synapse/handlers/account_validity.py | 8 +-- synapse/handlers/room_member.py | 7 +- synapse/replication/tcp/client.py | 2 +- synapse/replication/tcp/handler.py | 7 +- synapse/rest/client/auth.py | 2 +- synapse/rest/client/push_rule.py | 4 +- synapse/storage/databases/main/push_rule.py | 4 +- .../storage/databases/main/registration.py | 4 +- tests/config/test_base.py | 21 +++--- tests/config/test_cache.py | 50 ++++++--------- tests/config/test_load.py | 12 ++-- tests/config/test_tls.py | 38 +++++------ tests/storage/test_appservice.py | 2 +- tests/storage/test_txn_limit.py | 2 +- 31 files changed, 124 insertions(+), 160 deletions(-) create mode 100644 changelog.d/10985.misc diff --git a/changelog.d/10985.misc b/changelog.d/10985.misc new file mode 100644 index 000000000000..586a0b3a9670 --- /dev/null +++ b/changelog.d/10985.misc @@ -0,0 +1 @@ +Use direct references to config flags. 
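To make the new access pattern concrete, here is a small sketch of the before and after. `server_summary` is a hypothetical helper, and the attribute paths are taken from the diffs below:

```python
from synapse.server import HomeServer


def server_summary(hs: HomeServer) -> str:
    # Old style (now removed): `hs.config.server_name` relied on
    # RootConfig.__getattr__ searching every config section, and now
    # raises AttributeError. The owning section must be named explicitly:
    return "%s (registration open: %s)" % (
        hs.config.server.server_name,
        hs.config.registration.enable_registration,
    )
```

Any remaining `hs.config.foo` call sites now fail fast with an `AttributeError` instead of silently resolving against an arbitrary section.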
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index fa6ac6d93aa5..a947d9e49e42 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -215,7 +215,7 @@ class MockHomeserver: def __init__(self, config): self.clock = Clock(reactor) self.config = config - self.hostname = config.server_name + self.hostname = config.server.server_name self.version_string = "Synapse/" + get_version_string(synapse) def get_clock(self): @@ -583,7 +583,7 @@ class Porter(object): return self.postgres_store = self.build_db_store( - self.hs_config.get_single_database() + self.hs_config.database.get_single_database() ) await self.run_background_updates_on_postgres() diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database index 26b29b0b4593..6c088bad9366 100755 --- a/scripts/update_synapse_database +++ b/scripts/update_synapse_database @@ -36,7 +36,7 @@ class MockHomeserver(HomeServer): def __init__(self, config, **kwargs): super(MockHomeserver, self).__init__( - config.server_name, reactor=reactor, config=config, **kwargs + config.server.server_name, reactor=reactor, config=config, **kwargs ) self.version_string = "Synapse/" + get_version_string(synapse) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 749bc1deb913..4a204a582373 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -301,7 +301,7 @@ def refresh_certificate(hs): if not hs.config.server.has_tls_listener(): return - hs.config.read_certificate_from_disk() + hs.config.tls.read_certificate_from_disk() hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config) if hs._listening_services: diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 556bcc124e38..13d20af45795 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -197,9 +197,9 @@ def start(config_options): # Explicitly disable background processes config.server.update_user_directory = False config.worker.run_background_tasks = False - config.start_pushers = False + config.worker.start_pushers = False config.pusher_shard_config.instances = [] - config.send_federation = False + config.worker.send_federation = False config.federation_shard_config.instances = [] synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 2b2d4bbf83fe..422f03cc0464 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -234,7 +234,7 @@ def _configure_named_resource(self, name, compress=False): ) if name in ["media", "federation", "client"]: - if self.config.media.enable_media_repo: + if self.config.server.enable_media_repo: media_repo = self.get_media_repository_resource() resources.update( {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo} diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 26152b092472..7c4428a138c3 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -118,21 +118,6 @@ def __init__(self, root_config=None): "synapse", "res/templates" ) - def __getattr__(self, item: str) -> Any: - """ - Try and fetch a configuration option that does not exist on this class. - - This is so that existing configs that rely on `self.value`, where value - is actually from a different config section, continue to work. 
- """ - if item in ["generate_config_section", "read_config"]: - raise AttributeError(item) - - if self.root is None: - raise AttributeError(item) - else: - return self.root._get_unclassed_config(self.section, item) - @staticmethod def parse_size(value): if isinstance(value, int): @@ -289,7 +274,9 @@ def read_templates( env.filters.update( { "format_ts": _format_ts_filter, - "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl), + "mxc_to_http": _create_mxc_to_http_filter( + self.root.server.public_baseurl + ), } ) @@ -311,8 +298,6 @@ class RootConfig: config_classes = [] def __init__(self): - self._configs = OrderedDict() - for config_class in self.config_classes: if config_class.section is None: raise ValueError("%r requires a section name" % (config_class,)) @@ -321,42 +306,7 @@ def __init__(self): conf = config_class(self) except Exception as e: raise Exception("Failed making %s: %r" % (config_class.section, e)) - self._configs[config_class.section] = conf - - def __getattr__(self, item: str) -> Any: - """ - Redirect lookups on this object either to config objects, or values on - config objects, so that `config.tls.blah` works, as well as legacy uses - of things like `config.server.server_name`. It will first look up the config - section name, and then values on those config classes. - """ - if item in self._configs.keys(): - return self._configs[item] - - return self._get_unclassed_config(None, item) - - def _get_unclassed_config(self, asking_section: Optional[str], item: str): - """ - Fetch a config value from one of the instantiated config classes that - has not been fetched directly. - - Args: - asking_section: If this check is coming from a Config child, which - one? This section will not be asked if it has the value. - item: The configuration value key. - - Raises: - AttributeError if no config classes have the config key. The body - will contain what sections were checked. - """ - for key, val in self._configs.items(): - if key == asking_section: - continue - - if item in dir(val): - return getattr(val, item) - - raise AttributeError(item, "not found in %s" % (list(self._configs.keys()),)) + setattr(self, config_class.section, conf) def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[str, Any]: """ @@ -373,9 +323,11 @@ def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[str, Any """ res = OrderedDict() - for name, config in self._configs.items(): + for config_class in self.config_classes: + config = getattr(self, config_class.section) + if hasattr(config, func_name): - res[name] = getattr(config, func_name)(*args, **kwargs) + res[config_class.section] = getattr(config, func_name)(*args, **kwargs) return res diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py index ffaffc4931a3..b56c2a24dfc6 100644 --- a/synapse/config/account_validity.py +++ b/synapse/config/account_validity.py @@ -76,7 +76,7 @@ def read_config(self, config, **kwargs): ) if self.account_validity_renew_by_email_enabled: - if not self.public_baseurl: + if not self.root.server.public_baseurl: raise ConfigError("Can't send renewal emails without 'public_baseurl'") # Load account validity templates. diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 901f4123e187..9b58ecf3d839 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -37,7 +37,7 @@ def read_config(self, config, **kwargs): # The public baseurl is required because it is used by the redirect # template. 
- public_baseurl = self.public_baseurl + public_baseurl = self.root.server.public_baseurl if not public_baseurl: raise ConfigError("cas_config requires a public_baseurl to be set") diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 936abe6178cb..8ff59aa2f8db 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -19,7 +19,6 @@ import logging import os from enum import Enum -from typing import Optional import attr @@ -135,7 +134,7 @@ def read_config(self, config, **kwargs): # msisdn is currently always remote while Synapse does not support any method of # sending SMS messages ThreepidBehaviour.REMOTE - if self.account_threepid_delegate_email + if self.root.registration.account_threepid_delegate_email else ThreepidBehaviour.LOCAL ) # Prior to Synapse v1.4.0, there was another option that defined whether Synapse would @@ -144,7 +143,7 @@ def read_config(self, config, **kwargs): # identity server in the process. self.using_identity_server_from_trusted_list = False if ( - not self.account_threepid_delegate_email + not self.root.registration.account_threepid_delegate_email and config.get("trust_identity_server_for_password_resets", False) is True ): # Use the first entry in self.trusted_third_party_id_servers instead @@ -156,7 +155,7 @@ def read_config(self, config, **kwargs): # trusted_third_party_id_servers does not contain a scheme whereas # account_threepid_delegate_email is expected to. Presume https - self.account_threepid_delegate_email: Optional[str] = ( + self.root.registration.account_threepid_delegate_email = ( "https://" + first_trusted_identity_server ) self.using_identity_server_from_trusted_list = True @@ -335,7 +334,7 @@ def read_config(self, config, **kwargs): "client_base_url", email_config.get("riot_base_url", None) ) - if self.account_validity_renew_by_email_enabled: + if self.root.account_validity.account_validity_renew_by_email_enabled: expiry_template_html = email_config.get( "expiry_template_html", "notice_expiry.html" ) diff --git a/synapse/config/key.py b/synapse/config/key.py index 94a9063043a2..015dbb8a678b 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -145,11 +145,13 @@ def read_config(self, config, config_dir_path, **kwargs): # list of TrustedKeyServer objects self.key_servers = list( - _parse_key_servers(key_servers, self.federation_verify_certificates) + _parse_key_servers( + key_servers, self.root.tls.federation_verify_certificates + ) ) self.macaroon_secret_key = config.get( - "macaroon_secret_key", self.registration_shared_secret + "macaroon_secret_key", self.root.registration.registration_shared_secret ) if not self.macaroon_secret_key: diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index 7e67fbada18f..10f579633016 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -58,7 +58,7 @@ def read_config(self, config, **kwargs): "Multiple OIDC providers have the idp_id %r." 
% idp_id ) - public_baseurl = self.public_baseurl + public_baseurl = self.root.server.public_baseurl if public_baseurl is None: raise ConfigError("oidc_config requires a public_baseurl to be set") self.oidc_callback_url = public_baseurl + "_synapse/client/oidc/callback" diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 7cffdacfa5ce..a3d2a38c4c17 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -45,7 +45,10 @@ def read_config(self, config, **kwargs): account_threepid_delegates = config.get("account_threepid_delegates") or {} self.account_threepid_delegate_email = account_threepid_delegates.get("email") self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") - if self.account_threepid_delegate_msisdn and not self.public_baseurl: + if ( + self.account_threepid_delegate_msisdn + and not self.root.server.public_baseurl + ): raise ConfigError( "The configuration option `public_baseurl` is required if " "`account_threepid_delegate.msisdn` is set, such that " @@ -85,7 +88,7 @@ def read_config(self, config, **kwargs): if mxid_localpart: # Convert the localpart to a full mxid. self.auto_join_user_id = UserID( - mxid_localpart, self.server_name + mxid_localpart, self.root.server.server_name ).to_string() if self.autocreate_auto_join_rooms: diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 7481f3bf5f0f..69906a98d48a 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -94,7 +94,7 @@ def read_config(self, config, **kwargs): # Only enable the media repo if either the media repo is enabled or the # current worker app is the media repo. if ( - self.enable_media_repo is False + self.root.server.enable_media_repo is False and config.get("worker_app") != "synapse.app.media_repository" ): self.can_load_media_repo = False diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index 05e983625dc8..9c51b6a25a22 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -199,7 +199,7 @@ def _default_saml_config_dict( """ import saml2 - public_baseurl = self.public_baseurl + public_baseurl = self.root.server.public_baseurl if public_baseurl is None: raise ConfigError("saml2_config requires a public_baseurl to be set") diff --git a/synapse/config/server_notices.py b/synapse/config/server_notices.py index 48bf3241b659..bde4e879d9e9 100644 --- a/synapse/config/server_notices.py +++ b/synapse/config/server_notices.py @@ -73,7 +73,9 @@ def read_config(self, config, **kwargs): return mxid_localpart = c["system_mxid_localpart"] - self.server_notices_mxid = UserID(mxid_localpart, self.server_name).to_string() + self.server_notices_mxid = UserID( + mxid_localpart, self.root.server.server_name + ).to_string() self.server_notices_mxid_display_name = c.get("system_mxid_display_name", None) self.server_notices_mxid_avatar_url = c.get("system_mxid_avatar_url", None) # todo: i18n diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 524a7ff3aaf3..11a9b76aa033 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -103,8 +103,10 @@ def read_config(self, config, **kwargs): # the client's. # public_baseurl is an optional setting, so we only add the fallback's URL to the # list if it's provided (because we can't figure out what that URL is otherwise). 
- if self.public_baseurl: - login_fallback_url = self.public_baseurl + "_matrix/static/client/login" + if self.root.server.public_baseurl: + login_fallback_url = ( + self.root.server.public_baseurl + "_matrix/static/client/login" + ) self.sso_client_whitelist.append(login_fallback_url) def generate_config_section(self, **kwargs): diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 5a5f124ddf2a..87e415df75e8 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -67,12 +67,8 @@ def __init__(self, hs: "HomeServer"): and self._account_validity_renew_by_email_enabled ): # Don't do email-specific configuration if renewal by email is disabled. - self._template_html = ( - hs.config.account_validity.account_validity_template_html - ) - self._template_text = ( - hs.config.account_validity.account_validity_template_text - ) + self._template_html = hs.config.email.account_validity_template_html + self._template_text = hs.config.email.account_validity_template_text self._renew_email_subject = ( hs.config.account_validity.account_validity_renew_email_subject ) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 0b79dbcf8d83..c05461bf2a5a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1499,8 +1499,11 @@ async def _remote_join( if len(remote_room_hosts) == 0: raise SynapseError(404, "No known servers") - check_complexity = self.hs.config.limit_remote_rooms.enabled - if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join: + check_complexity = self.hs.config.server.limit_remote_rooms.enabled + if ( + check_complexity + and self.hs.config.server.limit_remote_rooms.admins_can_join + ): check_complexity = not await self.auth.is_server_admin(user) if check_complexity: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 37769ace48c5..961c17762ede 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -117,7 +117,7 @@ def __init__(self, hs: "HomeServer"): self._instance_name = hs.get_instance_name() self._typing_handler = hs.get_typing_handler() - self._notify_pushers = hs.config.start_pushers + self._notify_pushers = hs.config.worker.start_pushers self._pusher_pool = hs.get_pusherpool() self._presence_handler = hs.get_presence_handler() diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index d64d1dbacd22..6aa931802776 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -171,7 +171,10 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.worker_app is not None: continue - if stream.NAME == FederationStream.NAME and hs.config.send_federation: + if ( + stream.NAME == FederationStream.NAME + and hs.config.worker.send_federation + ): # We only support federation stream if federation sending # has been disabled on the master. 
continue @@ -225,7 +228,7 @@ def __init__(self, hs: "HomeServer"): self._is_master = hs.config.worker.worker_app is None self._federation_sender = None - if self._is_master and not hs.config.send_federation: + if self._is_master and not hs.config.worker.send_federation: self._federation_sender = hs.get_federation_sender() self._server_notices_sender = None diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index c9ad35a3addc..9c15a04338b1 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -48,7 +48,7 @@ def __init__(self, hs: "HomeServer"): self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() self.recaptcha_template = hs.config.captcha.recaptcha_template - self.terms_template = hs.config.terms_template + self.terms_template = hs.config.consent.terms_template self.registration_token_template = ( hs.config.registration.registration_token_template ) diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index ecebc46e8d0c..6f796d5e5096 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -61,7 +61,9 @@ def __init__(self, hs: "HomeServer"): self.notifier = hs.get_notifier() self._is_worker = hs.config.worker.worker_app is not None - self._users_new_default_push_rules = hs.config.users_new_default_push_rules + self._users_new_default_push_rules = ( + hs.config.server.users_new_default_push_rules + ) async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: if self._is_worker: diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index a7fb8cd84889..b81e33964ac7 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -101,7 +101,9 @@ def __init__(self, database: DatabasePool, db_conn, hs): prefilled_cache=push_rules_prefill, ) - self._users_new_default_push_rules = hs.config.users_new_default_push_rules + self._users_new_default_push_rules = ( + hs.config.server.users_new_default_push_rules + ) @abc.abstractmethod def get_max_push_rules_stream_id(self): diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index de262fbf5aa2..7de4ad7f9b3c 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1778,7 +1778,9 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): super().__init__(database, db_conn, hs) - self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors + self._ignore_unknown_session_error = ( + hs.config.server.request_token_inhibit_3pid_errors + ) self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id") self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id") diff --git a/tests/config/test_base.py b/tests/config/test_base.py index baa5313fb3cc..6a52f862f488 100644 --- a/tests/config/test_base.py +++ b/tests/config/test_base.py @@ -14,23 +14,28 @@ import os.path import tempfile +from unittest.mock import Mock from synapse.config import ConfigError +from synapse.config._base import Config from synapse.util.stringutils import random_string from tests import unittest -class BaseConfigTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, hs): - self.hs = hs +class BaseConfigTestCase(unittest.TestCase): + def setUp(self): 
+ # The root object needs a server property with a public_baseurl. + root = Mock() + root.server.public_baseurl = "http://test" + self.config = Config(root) def test_loading_missing_templates(self): # Use a temporary directory that exists on the system, but that isn't likely to # contain template files with tempfile.TemporaryDirectory() as tmp_dir: # Attempt to load an HTML template from our custom template directory - template = self.hs.config.read_templates(["sso_error.html"], (tmp_dir,))[0] + template = self.config.read_templates(["sso_error.html"], (tmp_dir,))[0] # If no errors, we should've gotten the default template instead @@ -60,7 +65,7 @@ def test_loading_custom_templates(self): # Attempt to load the template from our custom template directory template = ( - self.hs.config.read_templates([template_filename], (tmp_dir,)) + self.config.read_templates([template_filename], (tmp_dir,)) )[0] # Render the template @@ -97,7 +102,7 @@ def test_multiple_custom_template_directories(self): # Retrieve the template. template = ( - self.hs.config.read_templates( + self.config.read_templates( [template_filename], (td.name for td in tempdirs), ) @@ -118,7 +123,7 @@ def test_multiple_custom_template_directories(self): # Retrieve the template. template = ( - self.hs.config.read_templates( + self.config.read_templates( [other_template_name], (td.name for td in tempdirs), ) @@ -134,6 +139,6 @@ def test_multiple_custom_template_directories(self): def test_loading_template_from_nonexistent_custom_directory(self): with self.assertRaises(ConfigError): - self.hs.config.read_templates( + self.config.read_templates( ["some_filename.html"], ("a_nonexistent_directory",) ) diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py index 857d9cd0969b..f518abdb7a0a 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py @@ -12,39 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.config._base import Config, RootConfig from synapse.config.cache import CacheConfig, add_resizable_cache from synapse.util.caches.lrucache import LruCache from tests.unittest import TestCase -class FakeServer(Config): - section = "server" - - -class TestConfig(RootConfig): - config_classes = [FakeServer, CacheConfig] - - class CacheConfigTests(TestCase): def setUp(self): # Reset caches before each test - TestConfig().caches.reset() + self.config = CacheConfig() + + def tearDown(self): + self.config.reset() def test_individual_caches_from_environ(self): """ Individual cache factors will be loaded from the environment. """ config = {} - t = TestConfig() - t.caches._environ = { + self.config._environ = { "SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2", "SYNAPSE_NOT_CACHE": "BLAH", } - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(dict(t.caches.cache_factors), {"something_or_other": 2.0}) + self.assertEqual(dict(self.config.cache_factors), {"something_or_other": 2.0}) def test_config_overrides_environ(self): """ @@ -52,15 +45,14 @@ def test_config_overrides_environ(self): over those in the config. 
""" config = {"caches": {"per_cache_factors": {"foo": 2, "bar": 3}}} - t = TestConfig() - t.caches._environ = { + self.config._environ = { "SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2", "SYNAPSE_CACHE_FACTOR_FOO": 1, } - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") self.assertEqual( - dict(t.caches.cache_factors), + dict(self.config.cache_factors), {"foo": 1.0, "bar": 3.0, "something_or_other": 2.0}, ) @@ -76,8 +68,7 @@ def test_individual_instantiated_before_config_load(self): self.assertEqual(cache.max_size, 50) config = {"caches": {"per_cache_factors": {"foo": 3}}} - t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config) self.assertEqual(cache.max_size, 300) @@ -88,8 +79,7 @@ def test_individual_instantiated_after_config_load(self): there is one. """ config = {"caches": {"per_cache_factors": {"foo": 2}}} - t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") cache = LruCache(100) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) @@ -106,8 +96,7 @@ def test_global_instantiated_before_config_load(self): self.assertEqual(cache.max_size, 50) config = {"caches": {"global_factor": 4}} - t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") self.assertEqual(cache.max_size, 400) @@ -118,8 +107,7 @@ def test_global_instantiated_after_config_load(self): is no per-cache factor. """ config = {"caches": {"global_factor": 1.5}} - t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") cache = LruCache(100) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) @@ -133,12 +121,11 @@ def test_cache_with_asterisk_in_name(self): "per_cache_factors": {"*cache_a*": 5, "cache_b": 6, "cache_c": 2} } } - t = TestConfig() - t.caches._environ = { + self.config._environ = { "SYNAPSE_CACHE_FACTOR_CACHE_A": "2", "SYNAPSE_CACHE_FACTOR_CACHE_B": 3, } - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") cache_a = LruCache(100) add_resizable_cache("*cache_a*", cache_resize_callback=cache_a.set_cache_factor) @@ -158,11 +145,10 @@ def test_apply_cache_factor_from_config(self): """ config = {"caches": {"event_cache_size": "10k"}} - t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + self.config.read_config(config, config_dir_path="", data_dir_path="") cache = LruCache( - max_size=t.caches.event_cache_size, + max_size=self.config.event_cache_size, apply_cache_factor_from_config=False, ) add_resizable_cache("event_cache", cache_resize_callback=cache.set_cache_factor) diff --git a/tests/config/test_load.py b/tests/config/test_load.py index 8e49ca26d90d..59635de205bb 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -49,7 +49,7 @@ def test_generates_and_loads_macaroon_secret_key(self): config = HomeServerConfig.load_config("", ["-c", self.file]) self.assertTrue( - hasattr(config, "macaroon_secret_key"), + hasattr(config.key, "macaroon_secret_key"), "Want config to have attr macaroon_secret_key", ) if len(config.key.macaroon_secret_key) < 5: @@ -60,7 +60,7 @@ def test_generates_and_loads_macaroon_secret_key(self): config = 
HomeServerConfig.load_or_generate_config("", ["-c", self.file]) self.assertTrue( - hasattr(config, "macaroon_secret_key"), + hasattr(config.key, "macaroon_secret_key"), "Want config to have attr macaroon_secret_key", ) if len(config.key.macaroon_secret_key) < 5: @@ -74,8 +74,12 @@ def test_load_succeeds_if_macaroon_secret_key_missing(self): config1 = HomeServerConfig.load_config("", ["-c", self.file]) config2 = HomeServerConfig.load_config("", ["-c", self.file]) config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file]) - self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key) - self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key) + self.assertEqual( + config1.key.macaroon_secret_key, config2.key.macaroon_secret_key + ) + self.assertEqual( + config1.key.macaroon_secret_key, config3.key.macaroon_secret_key + ) def test_disable_registration(self): self.generate_config() diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index b6bc1876b511..9ba5781573fc 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -42,9 +42,9 @@ def test_tls_client_minimum_default(self): """ config = {} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + t.tls.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1") def test_tls_client_minimum_set(self): """ @@ -52,29 +52,29 @@ def test_tls_client_minimum_set(self): """ config = {"federation_client_minimum_tls_version": 1} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1") + t.tls.read_config(config, config_dir_path="", data_dir_path="") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1") config = {"federation_client_minimum_tls_version": 1.1} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1.1") + t.tls.read_config(config, config_dir_path="", data_dir_path="") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.1") config = {"federation_client_minimum_tls_version": 1.2} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1.2") + t.tls.read_config(config, config_dir_path="", data_dir_path="") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.2") # Also test a string version config = {"federation_client_minimum_tls_version": "1"} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1") + t.tls.read_config(config, config_dir_path="", data_dir_path="") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1") config = {"federation_client_minimum_tls_version": "1.2"} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1.2") + t.tls.read_config(config, config_dir_path="", data_dir_path="") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.2") def test_tls_client_minimum_1_point_3_missing(self): """ @@ -91,7 +91,7 @@ def test_tls_client_minimum_1_point_3_missing(self): config = {"federation_client_minimum_tls_version": 1.3} t = TestConfig() with self.assertRaises(ConfigError) as e: - t.read_config(config, 
config_dir_path="", data_dir_path="") + t.tls.read_config(config, config_dir_path="", data_dir_path="") self.assertEqual( e.exception.args[0], ( @@ -112,8 +112,8 @@ def test_tls_client_minimum_1_point_3_exists(self): config = {"federation_client_minimum_tls_version": 1.3} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") - self.assertEqual(t.federation_client_minimum_tls_version, "1.3") + t.tls.read_config(config, config_dir_path="", data_dir_path="") + self.assertEqual(t.tls.federation_client_minimum_tls_version, "1.3") def test_tls_client_minimum_set_passed_through_1_2(self): """ @@ -121,7 +121,7 @@ def test_tls_client_minimum_set_passed_through_1_2(self): """ config = {"federation_client_minimum_tls_version": 1.2} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + t.tls.read_config(config, config_dir_path="", data_dir_path="") cf = FederationPolicyForHTTPS(t) options = _get_ssl_context_options(cf._verify_ssl_context) @@ -137,7 +137,7 @@ def test_tls_client_minimum_set_passed_through_1_0(self): """ config = {"federation_client_minimum_tls_version": 1} t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + t.tls.read_config(config, config_dir_path="", data_dir_path="") cf = FederationPolicyForHTTPS(t) options = _get_ssl_context_options(cf._verify_ssl_context) @@ -159,7 +159,7 @@ def test_whitelist_idna_failure(self): } t = TestConfig() e = self.assertRaises( - ConfigError, t.read_config, config, config_dir_path="", data_dir_path="" + ConfigError, t.tls.read_config, config, config_dir_path="", data_dir_path="" ) self.assertIn("IDNA domain names", str(e)) @@ -174,7 +174,7 @@ def test_whitelist_idna_result(self): ] } t = TestConfig() - t.read_config(config, config_dir_path="", data_dir_path="") + t.tls.read_config(config, config_dir_path="", data_dir_path="") cf = FederationPolicyForHTTPS(t) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index cf9748f21897..f26d5acf9c29 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -126,7 +126,7 @@ def setUp(self): self.db_pool = database._db_pool self.engine = database.engine - db_config = hs.config.get_single_database() + db_config = hs.config.database.get_single_database() self.store = TestTransactionStore( database, make_conn(db_config, self.engine, "test"), hs ) diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py index 6ff3ebb13780..ace82cbf420f 100644 --- a/tests/storage/test_txn_limit.py +++ b/tests/storage/test_txn_limit.py @@ -22,7 +22,7 @@ def make_homeserver(self, reactor, clock): return self.setup_test_homeserver(db_txn_limit=1000) def test_config(self): - db_config = self.hs.config.get_single_database() + db_config = self.hs.config.database.get_single_database() self.assertEqual(db_config.config["txn_limit"], 1000) def test_select(self): From 4e5162106436f3fddd12561d316d19fd23148800 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 6 Oct 2021 17:18:13 +0200 Subject: [PATCH 066/111] Add a spamchecker method to allow or deny 3pid invites (#10894) This is in the context of creating new module callbacks that modules in https://github.com/matrix-org/synapse-dinsic can use, in an effort to reconcile the spam checker API in synapse-dinsic with the one in mainline. 
Note that a module callback already exists for 3pid invites (https://matrix-org.github.io/synapse/develop/modules/third_party_rules_callbacks.html#check_threepid_can_be_invited) but it doesn't check whether the sender of the invite is allowed to send it. --- changelog.d/10894.feature | 1 + docs/modules/spam_checker_callbacks.md | 35 +++++++++++++ synapse/events/spamcheck.py | 35 +++++++++++++ synapse/handlers/room_member.py | 12 +++++ tests/rest/client/test_rooms.py | 70 ++++++++++++++++++++++++++ 5 files changed, 153 insertions(+) create mode 100644 changelog.d/10894.feature diff --git a/changelog.d/10894.feature b/changelog.d/10894.feature new file mode 100644 index 000000000000..a4f968bed100 --- /dev/null +++ b/changelog.d/10894.feature @@ -0,0 +1 @@ +Add a `user_may_send_3pid_invite` spam checker callback for modules to allow or deny 3PID invites. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 92376df9938f..787e99074af2 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -44,6 +44,41 @@ Called when processing an invitation. The module must return a `bool` indicating the inviter can invite the invitee to the given room. Both inviter and invitee are represented by their Matrix user ID (e.g. `@alice:example.com`). +### `user_may_send_3pid_invite` + +```python +async def user_may_send_3pid_invite( + inviter: str, + medium: str, + address: str, + room_id: str, +) -> bool +``` + +Called when processing an invitation using a third-party identifier (also called a 3PID, +e.g. an email address or a phone number). The module must return a `bool` indicating +whether the inviter can invite the invitee to the given room. + +The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the +invitee is represented by its medium (e.g. "email") and its address +(e.g. `alice@example.com`). See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types) +for more information regarding third-party identifiers. + +For example, a call to this callback to send an invitation to the email address +`alice@example.com` would look like this: + +```python +await user_may_send_3pid_invite( + "@bob:example.com", # The inviter's user ID + "email", # The medium of the 3PID to invite + "alice@example.com", # The address of the 3PID to invite + "!some_room:example.com", # The ID of the room to send the invite into +) +``` + +**Note**: If the third-party identifier is already associated with a matrix user ID, +[`user_may_invite`](#user_may_invite) will be used instead. 
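A module could use this callback to, say, restrict email invites to an allow-list of domains. A minimal sketch, again assuming the module API's `register_spam_checker_callbacks`; the module and its `allowed_domains` config key are hypothetical:

```python
# email_invite_rules.py -- hypothetical example module, not shipped with Synapse.
from typing import Any, Dict, Set


class EmailInviteRulesModule:
    def __init__(self, config: Dict[str, Any], api: Any):
        # Email domains to which local users may send 3PID invites.
        self._allowed_domains: Set[str] = set(config.get("allowed_domains", []))
        api.register_spam_checker_callbacks(
            user_may_send_3pid_invite=self.user_may_send_3pid_invite,
        )

    async def user_may_send_3pid_invite(
        self, inviter: str, medium: str, address: str, room_id: str
    ) -> bool:
        if medium != "email":
            # This module only constrains email invites.
            return True
        return address.rsplit("@", 1)[-1] in self._allowed_domains
```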
+ ### `user_may_create_room` ```python diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index ec8863e39715..ae4c8ab257fd 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -46,6 +46,7 @@ ] USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]] +USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]] USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[ [str, List[str], List[Dict[str, str]]], Awaitable[bool] @@ -168,6 +169,9 @@ def __init__(self): self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = [] self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = [] self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = [] + self._user_may_send_3pid_invite_callbacks: List[ + USER_MAY_SEND_3PID_INVITE_CALLBACK + ] = [] self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = [] self._user_may_create_room_with_invites_callbacks: List[ USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK @@ -191,6 +195,7 @@ def register_callbacks( check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None, user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None, user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, + user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, user_may_create_room_with_invites: Optional[ USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK @@ -215,6 +220,11 @@ def register_callbacks( if user_may_invite is not None: self._user_may_invite_callbacks.append(user_may_invite) + if user_may_send_3pid_invite is not None: + self._user_may_send_3pid_invite_callbacks.append( + user_may_send_3pid_invite, + ) + if user_may_create_room is not None: self._user_may_create_room_callbacks.append(user_may_create_room) @@ -304,6 +314,31 @@ async def user_may_invite( return True + async def user_may_send_3pid_invite( + self, inviter_userid: str, medium: str, address: str, room_id: str + ) -> bool: + """Checks if a given user may invite a given threepid into the room + + If this method returns false, the threepid invite will be rejected. + + Note that if the threepid is already associated with a Matrix user ID, Synapse + will call user_may_invite with said user ID instead. + + Args: + inviter_userid: The user ID of the sender of the invitation + medium: The 3PID's medium (e.g. "email") + address: The 3PID's address (e.g. "alice@example.com") + room_id: The room ID + + Returns: + True if the user may send the invite, otherwise False + """ + for callback in self._user_may_send_3pid_invite_callbacks: + if await callback(inviter_userid, medium, address, room_id) is False: + return False + + return True + async def user_may_create_room(self, userid: str) -> bool: """Checks if a given user may create a room diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index c05461bf2a5a..eef337feeb8d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1299,10 +1299,22 @@ async def do_3pid_invite( if invitee: # Note that update_membership with an action of "invite" can raise # a ShadowBanError, but this was done above already. 
+ # We don't check the invite against the spamchecker(s) here (through + # user_may_invite) because we'll do it further down the line anyway (in + # update_membership_locked). _, stream_id = await self.update_membership( requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id ) else: + # Check if the spamchecker(s) allow this invite to go through. + if not await self.spam_checker.user_may_send_3pid_invite( + inviter_userid=requester.user.to_string(), + medium=medium, + address=address, + room_id=room_id, + ): + raise SynapseError(403, "Cannot send threepid invite") + stream_id = await self._make_and_store_3pid_invite( requester, id_server, diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index a41ec6a98fc8..376853fd6538 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -2531,3 +2531,73 @@ def test_bad_alias(self): """An alias which does not point to the room raises a SynapseError.""" self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) + + +class ThreepidInviteTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.user_id = self.register_user("thomas", "hackme") + self.tok = self.login("thomas", "hackme") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + def test_threepid_invite_spamcheck(self): + # Mock a few functions to prevent the test from failing due to failing to talk to + # a remote IS. We keep the mock for _mock_make_and_store_3pid_invite around so we + # can check its call_count later on during the test. + make_invite_mock = Mock(return_value=make_awaitable(0)) + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock + self.hs.get_identity_handler().lookup_3pid = Mock( + return_value=make_awaitable(None), + ) + + # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it + # allow everything for now. + mock = Mock(return_value=make_awaitable(True)) + self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) + + # Send a 3PID invite into the room and check that it succeeded. + email_to_invite = "teresa@example.com" + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEquals(channel.code, 200) + + # Check that the callback was called with the right params. + mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) + + # Check that the call to send the invite was made. + make_invite_mock.assert_called_once() + + # Now change the return value of the callback to deny any invite and test that + # we can't send the invite. + mock.return_value = make_awaitable(False) + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEquals(channel.code, 403) + + # Also check that it stopped before calling _make_and_store_3pid_invite. 
+        make_invite_mock.assert_called_once()

From e564bdd1276d8eb8ea3eabc0442a58fb18cd8731 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 6 Oct 2021 18:09:35 +0100
Subject: [PATCH 067/111] Add content to the Synapse documentation intro page
 (#10990)

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/10990.doc                  |  1 +
 docs/development/contributing_guide.md |  2 +-
 docs/welcome_and_overview.md           | 74 ++++++++++++++++++++++++++
 3 files changed, 76 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/10990.doc

diff --git a/changelog.d/10990.doc b/changelog.d/10990.doc
new file mode 100644
index 000000000000..51290d620098
--- /dev/null
+++ b/changelog.d/10990.doc
@@ -0,0 +1 @@
+Add additional content to the Welcome and Overview page of the documentation.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 713366368cbe..580a4f7f9854 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -63,7 +63,7 @@ TBD
 
 # 5. Get in touch.
 
-Join our developer community on Matrix: #synapse-dev:matrix.org !
+Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!
 
 # 6. Pick an issue.
 
diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md
index 9882d9f15942..aab2d6b4f0f6 100644
--- a/docs/welcome_and_overview.md
+++ b/docs/welcome_and_overview.md
@@ -3,3 +3,77 @@
 Welcome to the documentation repository for Synapse, a
 [Matrix](https://matrix.org) homeserver implementation developed by the
 matrix.org core team.
+
+## Installing and using Synapse
+
+This documentation covers topics for **installation**, **configuration** and
+**maintenance** of your Synapse process:
+
+* Learn how to [install](setup/installation.md) and
+  [configure](usage/configuration/index.html) your own instance, perhaps with [Single
+  Sign-On](usage/configuration/user_authentication/index.html).
+
+* See how to [upgrade](upgrade.md) between Synapse versions.
+
+* Administer your instance using the [Admin
+  API](usage/administration/admin_api/index.html), installing [pluggable
+  modules](modules/index.html), or by accessing the [manhole](manhole.md).
+
+* Learn how to [read log lines](usage/administration/request_log.md), configure
+  [logging](usage/configuration/logging_sample_config.md) or set up [structured
+  logging](structured_logging.md).
+
+* Scale Synapse through additional [worker processes](workers.md).
+
+* Set up [monitoring and metrics](metrics-howto.md) to keep an eye on your
+  Synapse instance's performance.
+
+## Developing on Synapse
+
+Contributions are welcome! Synapse is primarily written in
+[Python](https://python.org). As a developer, you may be interested in the
+following documentation:
+
+* Read the [Contributing Guide](development/contributing_guide.md). It is meant
+  to walk new contributors through the process of developing and submitting a
+  change to the Synapse codebase (which is [hosted on
+  GitHub](https://github.com/matrix-org/synapse)).
+
+* Set up your [development
+  environment](development/contributing_guide.md#2-what-do-i-need), then learn
+  how to [lint](development/contributing_guide.md#run-the-linters) and
+  [test](development/contributing_guide.md#8-test-test-test) your code.
+
+* Look at [the issue tracker](https://github.com/matrix-org/synapse/issues) for
+  bugs to fix or features to add.
+  If you're new, it may be best to start with
+  those labeled [good first
+  issue](https://github.com/matrix-org/synapse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).
+
+* Understand [how Synapse is
+  built](development/internal_documentation/index.html), how to [migrate
+  database schemas](development/database_schema.md), learn about
+  [federation](federate.md) and how to [set up a local
+  federation](federate.md#running-a-demo-federation-of-synapses) for development.
+
+* We like to keep our `git` history clean. [Learn](development/git.md) how to
+  do so!
+
+* And finally, contribute to this documentation! Its source is
+  [located here](https://github.com/matrix-org/synapse/tree/develop/docs).
+
+## Donating to Synapse development
+
+Want to help keep Synapse going but don't know how to code? Synapse is a
+[Matrix.org Foundation](https://matrix.org) project. Consider becoming a
+supporter on [Liberapay](https://liberapay.com/matrixdotorg),
+[Patreon](https://patreon.com/matrixdotorg) or through
+[PayPal](https://paypal.me/matrixdotorg) via a one-time donation.
+
+If you are an organisation or enterprise and would like to sponsor development,
+reach out to us over email at: support (at) matrix.org
+
+## Reporting a security vulnerability
+
+If you've found a security issue in Synapse or any other Matrix.org Foundation
+project, please report it to us in accordance with our [Security Disclosure
+Policy](https://www.matrix.org/security-disclosure-policy/). Thank you!

From f563676c097b830346acc7a4ce3e910c6b10c4c3 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 6 Oct 2021 18:55:25 +0100
Subject: [PATCH 068/111] `disallow-untyped-defs` for `synapse.state` (#11004)

* `disallow-untyped-defs` for `synapse.state`

Much smaller than I was expecting!
---
 changelog.d/11004.misc    | 1 +
 mypy.ini                  | 3 +++
 synapse/state/__init__.py | 2 +-
 synapse/state/v1.py       | 4 ++--
 synapse/state/v2.py       | 2 +-
 5 files changed, 8 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/11004.misc

diff --git a/changelog.d/11004.misc b/changelog.d/11004.misc
new file mode 100644
index 000000000000..821033710a3a
--- /dev/null
+++ b/changelog.d/11004.misc
@@ -0,0 +1 @@
+Add further type hints to `synapse.state`.
\ No newline at end of file
diff --git a/mypy.ini b/mypy.ini
index 86459bdcb62d..a052d49c7133 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -99,6 +99,9 @@ disallow_untyped_defs = True
 [mypy-synapse.rest.*]
 disallow_untyped_defs = True
 
+[mypy-synapse.state.*]
+disallow_untyped_defs = True
+
 [mypy-synapse.util.batching_queue]
 disallow_untyped_defs = True
 
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index c981df3f18b3..5cf2e1257587 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -118,7 +118,7 @@ def __init__(
         else:
             self.state_id = _gen_state_id()
 
-    def __len__(self):
+    def __len__(self) -> int:
         return len(self.state)
 
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 017e6fd92d38..ffe6207a3c2d 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -225,7 +225,7 @@ def _resolve_with_state(
     conflicted_state_ids: StateMap[Set[str]],
     auth_event_ids: StateMap[str],
     state_map: Dict[str, EventBase],
-):
+) -> MutableStateMap[str]:
     conflicted_state = {}
     for key, event_ids in conflicted_state_ids.items():
         events = [state_map[ev_id] for ev_id in event_ids if ev_id in state_map]
@@ -362,7 +362,7 @@ def _resolve_normal_events(
 
 def _ordered_events(events: Iterable[EventBase]) -> List[EventBase]:
-    def key_func(e):
+    def key_func(e: EventBase) -> Tuple[int, str]:
         # we have to use utf-8 rather than ascii here because it turns out we allow
         # people to send us events with non-ascii event IDs :/
         return -int(e.depth), hashlib.sha1(e.event_id.encode("utf-8")).hexdigest()
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 586b0e12febe..bd18eefd582b 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -481,7 +481,7 @@ async def _reverse_topological_power_sort(
         if idx % _AWAIT_AFTER_ITERATIONS == 0:
             await clock.sleep(0)
 
-    def _get_power_order(event_id):
+    def _get_power_order(event_id: str) -> Tuple[int, int, str]:
         ev = event_map[event_id]
         pl = event_to_pl[event_id]
 

From 52aefd50860f9b44f48a9b465d42f26faa4eb84f Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Thu, 7 Oct 2021 12:37:10 +0200
Subject: [PATCH 069/111] Catch AttributeErrors when calling registerProducer
 (#10995)

Looks like the wrong exception type was caught in #10932.
---
 changelog.d/10995.bugfix | 1 +
 synapse/http/server.py   | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/10995.bugfix

diff --git a/changelog.d/10995.bugfix b/changelog.d/10995.bugfix
new file mode 100644
index 000000000000..3eef96f3db72
--- /dev/null
+++ b/changelog.d/10995.bugfix
@@ -0,0 +1 @@
+Correct a bugfix introduced in Synapse v1.44.0 that wouldn't catch every error raised when the connection breaks before a response could be written to it.
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 0df1bfbeef7a..897ba5e4531b 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -563,7 +563,10 @@ def __init__(
 
         try:
             self._request.registerProducer(self, True)
-        except RuntimeError as e:
+        except AttributeError as e:
+            # Calling self._request.registerProducer might raise an AttributeError since
+            # the underlying Twisted code calls self._request.channel.registerProducer,
+            # however self._request.channel will be None if the connection was lost.
             logger.info("Connection disconnected before response was written: %r", e)
 
             # We drop our references to data we'll not use.
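The mechanics behind the patch above, in miniature: Twisted's `Request.registerProducer` delegates to `self.channel.registerProducer`, and Twisted sets `channel` to `None` once the connection has gone away, so the call fails with `AttributeError` rather than `RuntimeError`. A self-contained sketch of that failure mode, using a simplified stand-in class rather than Twisted's real ones:

```python
class FakeDisconnectedRequest:
    """Stand-in for a twisted.web request whose connection has been lost."""

    channel = None  # Twisted nulls out the channel on disconnect

    def registerProducer(self, producer, streaming):
        # Delegating to a None channel raises AttributeError.
        self.channel.registerProducer(producer, streaming)


try:
    FakeDisconnectedRequest().registerProducer(object(), True)
except AttributeError as e:
    print("Connection disconnected before response was written: %r" % (e,))
```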
From 86af6b2f0ef92a317900fd4a4f6d3436ff8a011c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 7 Oct 2021 12:20:03 +0100
Subject: [PATCH 070/111] Add a comment in _process_received_pdu (#11011)

---
 changelog.d/11011.misc               | 1 +
 synapse/handlers/federation_event.py | 3 +++
 2 files changed, 4 insertions(+)
 create mode 100644 changelog.d/11011.misc

diff --git a/changelog.d/11011.misc b/changelog.d/11011.misc
new file mode 100644
index 000000000000..9a765435dbe4
--- /dev/null
+++ b/changelog.d/11011.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 243be46267cd..0645ce93927e 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -894,6 +894,9 @@ async def _process_received_pdu(
                 backfilled=backfilled,
             )
         except AuthError as e:
+            # FIXME richvdh 2021/10/07 I don't think this is reachable. Let's log it
+            # for now
+            logger.exception("Unexpected AuthError from _check_event_auth")
             raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
 
         await self._run_push_actions_and_persist_event(event, context, backfilled)

From 96fe77c2546598449c1d423c125f84c92620b155 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 7 Oct 2021 12:43:25 +0100
Subject: [PATCH 071/111] Improve the logging in _auth_and_persist_outliers
 (#11010)

Include the event IDs being persisted
---
 changelog.d/11010.misc               | 1 +
 synapse/handlers/federation_event.py | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11010.misc

diff --git a/changelog.d/11010.misc b/changelog.d/11010.misc
new file mode 100644
index 000000000000..9a765435dbe4
--- /dev/null
+++ b/changelog.d/11010.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 0645ce93927e..f640b417b39a 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -1161,7 +1161,10 @@ async def _auth_and_persist_fetched_events(
             return
 
         logger.info(
-            "Persisting %i of %i remaining events", len(roots), len(event_map)
+            "Persisting %i of %i remaining outliers: %s",
+            len(roots),
+            len(event_map),
+            shortstr(e.event_id for e in roots),
         )
 
         await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)

From e0bf34dada709776ae00843e47cd811d1cd195c6 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 7 Oct 2021 13:26:11 +0100
Subject: [PATCH 072/111] Don't alter directory entries for local users when
 setting a per-room nickname (#11002)

Co-authored-by: Patrick Cloke
---
 changelog.d/11002.bugfix              |  1 +
 synapse/handlers/user_directory.py    | 20 ++++++++++------
 tests/handlers/test_user_directory.py | 34 +++++++++++++++++++++++++++
 3 files changed, 48 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/11002.bugfix

diff --git a/changelog.d/11002.bugfix b/changelog.d/11002.bugfix
new file mode 100644
index 000000000000..cf894a6314b4
--- /dev/null
+++ b/changelog.d/11002.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see them in the user_directory.
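For context, the "per-room nickname" in question is carried in the `content` of an `m.room.member` state event, alongside the membership itself. A sketch of such an event body (values invented; the test below sends a similar one):

```python
# Content of an m.room.member event that sets a room-specific profile.
# These fields change how the user appears in this room only; the bug was
# that processing them also overwrote the user's global directory entry.
per_room_member_content = {
    "membership": "join",
    "displayname": "Freddy Mercury",         # room-specific nickname
    "avatar_url": "mxc://example.com/abcd",  # hypothetical room-specific avatar
}
```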
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 97f60b58068a..b7b19733461e 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -203,6 +203,7 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: public_value=Membership.JOIN, ) + is_remote = not self.is_mine_id(state_key) if change is MatchChange.now_false: # Need to check if the server left the room entirely, if so # we might need to remove all the users in that room @@ -224,15 +225,20 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: else: logger.debug("Server is still in room: %r", room_id) - include_in_dir = not self.is_mine_id( - state_key - ) or await self.store.should_include_local_user_in_dir(state_key) + include_in_dir = ( + is_remote + or await self.store.should_include_local_user_in_dir(state_key) + ) if include_in_dir: if change is MatchChange.no_change: - # Handle any profile changes - await self._handle_profile_change( - state_key, room_id, prev_event_id, event_id - ) + # Handle any profile changes for remote users. + # (For local users we are not forced to scan membership + # events; instead the rest of the application calls + # `handle_local_profile_change`.) + if is_remote: + await self._handle_profile_change( + state_key, room_id, prev_event_id, event_id + ) continue if change is MatchChange.now_true: # The user joined diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 03fd5a3e2c52..47217f054202 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -402,6 +402,40 @@ def test_process_join_after_server_leaves_room(self) -> None: public3 = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) self.assertEqual(set(public3), {(alice, room2), (bob, room2)}) + def test_per_room_profile_doesnt_alter_directory_entry(self) -> None: + alice = self.register_user("alice", "pass") + alice_token = self.login(alice, "pass") + bob = self.register_user("bob", "pass") + + # Alice should have a user directory entry created at registration. + users = self.get_success(self.user_dir_helper.get_profiles_in_user_directory()) + self.assertEqual( + users[alice], ProfileInfo(display_name="alice", avatar_url=None) + ) + + # Alice makes a room for herself. + room = self.helper.create_room_as(alice, is_public=True, tok=alice_token) + + # Alice sets a nickname unique to that room. + self.helper.send_state( + room, + "m.room.member", + { + "displayname": "Freddy Mercury", + "membership": "join", + }, + alice_token, + state_key=alice, + ) + + # Alice's display name remains the same in the user directory. + search_result = self.get_success(self.handler.search_users(bob, alice, 10)) + self.assertEqual( + search_result["results"], + [{"display_name": "alice", "avatar_url": None, "user_id": alice}], + 0, + ) + def test_private_room(self) -> None: """ A user can be searched for only by people that are either in a public From 7301019d48f1a4ca7683b1745be55cecc6fe4be3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 7 Oct 2021 09:38:31 -0400 Subject: [PATCH 073/111] Ensure each cache config test uses separate state. (#11019) Hopefully this fixes these tests sometimes failing in CI. 
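The isolation mechanism used here is standard `unittest.mock` behaviour: decorating a `TestCase` class with `patch` applies the patch to every `test_*` method and passes the replacement object in as an extra argument, and `new_callable=dict` builds a fresh, empty dict for each test. A generic, self-contained sketch (the module-level global here is invented):

```python
from unittest import TestCase, main
from unittest.mock import patch

_GLOBAL_STATE = {}  # stand-in for a module-level global shared between tests


@patch(f"{__name__}._GLOBAL_STATE", new_callable=dict)
class IsolatedStateTests(TestCase):
    # Each test method receives the fresh dict as an extra argument.
    def test_starts_empty(self, state):
        self.assertEqual(state, {})

    def test_mutations_do_not_leak(self, state):
        state["foo"] = 1  # visible only for the duration of this test


if __name__ == "__main__":
    main()
```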
--- changelog.d/11019.misc | 1 + tests/config/test_cache.py | 20 ++++++++++++-------- 2 files changed, 13 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11019.misc diff --git a/changelog.d/11019.misc b/changelog.d/11019.misc new file mode 100644 index 000000000000..aae5ee62b2e8 --- /dev/null +++ b/changelog.d/11019.misc @@ -0,0 +1 @@ +Ensure that cache config tests do not share state. diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py index f518abdb7a0a..79d417568d7c 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from unittest.mock import patch + from synapse.config.cache import CacheConfig, add_resizable_cache from synapse.util.caches.lrucache import LruCache from tests.unittest import TestCase +# Patch the global _CACHES so that each test runs against its own state. +@patch("synapse.config.cache._CACHES", new_callable=dict) class CacheConfigTests(TestCase): def setUp(self): # Reset caches before each test @@ -26,7 +30,7 @@ def setUp(self): def tearDown(self): self.config.reset() - def test_individual_caches_from_environ(self): + def test_individual_caches_from_environ(self, _caches): """ Individual cache factors will be loaded from the environment. """ @@ -39,7 +43,7 @@ def test_individual_caches_from_environ(self): self.assertEqual(dict(self.config.cache_factors), {"something_or_other": 2.0}) - def test_config_overrides_environ(self): + def test_config_overrides_environ(self, _caches): """ Individual cache factors defined in the environment will take precedence over those in the config. @@ -56,7 +60,7 @@ def test_config_overrides_environ(self): {"foo": 1.0, "bar": 3.0, "something_or_other": 2.0}, ) - def test_individual_instantiated_before_config_load(self): + def test_individual_instantiated_before_config_load(self, _caches): """ If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized once the config @@ -72,7 +76,7 @@ def test_individual_instantiated_before_config_load(self): self.assertEqual(cache.max_size, 300) - def test_individual_instantiated_after_config_load(self): + def test_individual_instantiated_after_config_load(self, _caches): """ If a cache is instantiated after the config is read, it will be immediately resized to the correct size given the per_cache_factor if @@ -85,7 +89,7 @@ def test_individual_instantiated_after_config_load(self): add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 200) - def test_global_instantiated_before_config_load(self): + def test_global_instantiated_before_config_load(self, _caches): """ If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized to the new @@ -100,7 +104,7 @@ def test_global_instantiated_before_config_load(self): self.assertEqual(cache.max_size, 400) - def test_global_instantiated_after_config_load(self): + def test_global_instantiated_after_config_load(self, _caches): """ If a cache is instantiated after the config is read, it will be immediately resized to the correct size given the global factor if there @@ -113,7 +117,7 @@ def test_global_instantiated_after_config_load(self): add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 150) - def test_cache_with_asterisk_in_name(self): + def 
test_cache_with_asterisk_in_name(self, _caches): """Some caches have asterisks in their name, test that they are set correctly.""" config = { @@ -139,7 +143,7 @@ def test_cache_with_asterisk_in_name(self): add_resizable_cache("*cache_c*", cache_resize_callback=cache_c.set_cache_factor) self.assertEqual(cache_c.max_size, 200) - def test_apply_cache_factor_from_config(self): + def test_apply_cache_factor_from_config(self, _caches): """Caches can disable applying cache factor updates, mainly used by event cache size. """ From e79ee48313404abf8fbb7c88361e4ab1efa29a81 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 7 Oct 2021 19:55:15 +0100 Subject: [PATCH 074/111] disallow-untyped-defs for synapse.server_notices (#11021) --- changelog.d/11021.misc | 1 + mypy.ini | 3 +++ synapse/server_notices/server_notices_manager.py | 8 ++------ 3 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/11021.misc diff --git a/changelog.d/11021.misc b/changelog.d/11021.misc new file mode 100644 index 000000000000..8ac1bfcf226b --- /dev/null +++ b/changelog.d/11021.misc @@ -0,0 +1 @@ +Add additional type hints to `synapse.server_notices`. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index a052d49c7133..68437e5ce11b 100644 --- a/mypy.ini +++ b/mypy.ini @@ -99,6 +99,9 @@ disallow_untyped_defs = True [mypy-synapse.rest.*] disallow_untyped_defs = True +[mypy-synapse.server_notices.*] +disallow_untyped_defs = True + [mypy-synapse.state.*] disallow_untyped_defs = True diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index cd1c5ff6f468..0cf60236f8b4 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -41,12 +41,8 @@ def __init__(self, hs: "HomeServer"): self._notifier = hs.get_notifier() self.server_notices_mxid = self._config.servernotices.server_notices_mxid - def is_enabled(self): - """Checks if server notices are enabled on this server. 
- - Returns: - bool - """ + def is_enabled(self) -> bool: + """Checks if server notices are enabled on this server.""" return self.server_notices_mxid is not None async def send_notice( From 0b4d5ce5e34ab46b5b55976bfdd0d1d0b105cf13 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 Oct 2021 10:05:48 +0100 Subject: [PATCH 075/111] Fix CI to run the unit tests without optional deps (#11017) This also turns off calculating code coverage, as we didn't use it and it was a lot of noise --- .github/workflows/tests.yml | 9 ++++++--- changelog.d/11017.misc | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11017.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 96c39dd9a4bc..30a911fdbd34 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -78,20 +78,23 @@ jobs: matrix: python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] database: ["sqlite"] + toxenv: ["py"] include: # Newest Python without optional deps - python-version: "3.10" - toxenv: "py-noextras,combine" + toxenv: "py-noextras" # Oldest Python with PostgreSQL - python-version: "3.6" database: "postgres" postgres-version: "9.6" + toxenv: "py" # Newest Python with newest PostgreSQL - python-version: "3.10" database: "postgres" postgres-version: "14" + toxenv: "py" steps: - uses: actions/checkout@v2 @@ -111,7 +114,7 @@ jobs: if: ${{ matrix.postgres-version }} timeout-minutes: 2 run: until pg_isready -h localhost; do sleep 1; done - - run: tox -e py,combine + - run: tox -e ${{ matrix.toxenv }} env: TRIAL_FLAGS: "--jobs=2" SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }} @@ -169,7 +172,7 @@ jobs: with: python-version: ${{ matrix.python-version }} - run: pip install tox - - run: tox -e py,combine + - run: tox -e py env: TRIAL_FLAGS: "--jobs=2" - name: Dump logs diff --git a/changelog.d/11017.misc b/changelog.d/11017.misc new file mode 100644 index 000000000000..f05530ac94c7 --- /dev/null +++ b/changelog.d/11017.misc @@ -0,0 +1 @@ +Fix CI to run the unit tests without optional deps. From bb228f35237879b0cae93e3b5efab468b94a1e5b Mon Sep 17 00:00:00 2001 From: Nick Barrett Date: Fri, 8 Oct 2021 12:08:25 +0100 Subject: [PATCH 076/111] Include exception in json logging (#11028) --- changelog.d/11028.feature | 1 + synapse/logging/_terse_json.py | 6 ++++++ tests/logging/test_terse_json.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+) create mode 100644 changelog.d/11028.feature diff --git a/changelog.d/11028.feature b/changelog.d/11028.feature new file mode 100644 index 000000000000..48798356b7d1 --- /dev/null +++ b/changelog.d/11028.feature @@ -0,0 +1 @@ +Include exception information in JSON logging output. Contributed by @Fizzadar at Beeper. 
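The effect of the change below, in practice: a `logger.exception(...)` call made inside an `except` block now produces two extra keys in the JSON line. A rough sketch (the exact key names come from the formatter and tests below):

```python
import logging
import sys

from synapse.logging._terse_json import JsonFormatter

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(JsonFormatter())
logger = logging.getLogger("demo")
logger.addHandler(handler)

try:
    1 / 0
except ZeroDivisionError:
    logger.exception("request failed")

# Emits, roughly:
# {"log": "request failed", "namespace": "demo", "level": "ERROR",
#  "exc_type": "ZeroDivisionError", "exc_value": "division by zero"}
```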
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 6e82f7c7f17a..b78d6e17c93c 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -65,6 +65,12 @@ def _format(self, record: logging.LogRecord, event: dict) -> str: if key not in _IGNORED_LOG_RECORD_ATTRIBUTES: event[key] = value + if record.exc_info: + exc_type, exc_value, _ = record.exc_info + if exc_type: + event["exc_type"] = f"{exc_type.__name__}" + event["exc_value"] = f"{exc_value}" + return _encoder.encode(event) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index f73fcd684e0e..96f399b7abf4 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -198,3 +198,31 @@ def test_with_request_context(self): self.assertEqual(log["url"], "/_matrix/client/versions") self.assertEqual(log["protocol"], "1.1") self.assertEqual(log["user_agent"], "") + + def test_with_exception(self): + """ + The logging exception type & value should be added to the JSON response. + """ + handler = logging.StreamHandler(self.output) + handler.setFormatter(JsonFormatter()) + logger = self.get_logger(handler) + + try: + raise ValueError("That's wrong, you wally!") + except ValueError: + logger.exception("Hello there, %s!", "wally") + + log = self.get_log_line() + + # The terse logger should give us these keys. + expected_log_keys = [ + "log", + "level", + "namespace", + "exc_type", + "exc_value", + ] + self.assertCountEqual(log.keys(), expected_log_keys) + self.assertEqual(log["log"], "Hello there, wally!") + self.assertEqual(log["exc_type"], "ValueError") + self.assertEqual(log["exc_value"], "That's wrong, you wally!") From 49a683d871add82fb1a8125c6803ac15ec7d341b Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 8 Oct 2021 12:27:16 +0100 Subject: [PATCH 077/111] Fix long-standing bug where `ReadWriteLock` could drop logging contexts (#10993) Use `PreserveLoggingContext()` to ensure that logging contexts are not lost when exiting a read/write lock. When exiting a read/write lock, callbacks on a `Deferred` are triggered as a signal to any waiting coroutines. Any waiting coroutine that becomes runnable is likely to follow the Synapse logging context rules and will restore its own logging context, then either run to completion or await another `Deferred`, resetting the logging context in the process. --- changelog.d/10993.misc | 1 + synapse/util/async_helpers.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10993.misc diff --git a/changelog.d/10993.misc b/changelog.d/10993.misc new file mode 100644 index 000000000000..23c73dbac5c1 --- /dev/null +++ b/changelog.d/10993.misc @@ -0,0 +1 @@ +Fix a long-standing bug where `ReadWriteLock`s could drop logging contexts on exit. 
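The idiom the fix applies generalises beyond `ReadWriteLock`: whenever code fires a deferred to wake other coroutines, the `callback` call should be made from the sentinel logging context, since each woken coroutine restores its own context as it resumes. A condensed sketch of the pattern:

```python
from typing import List

from twisted.internet.defer import Deferred

from synapse.logging.context import PreserveLoggingContext


def wake_waiters(waiters: List[Deferred]) -> None:
    """Fire the deferreds that waiting coroutines are blocked on."""
    for waiter in waiters:
        # Drop to the sentinel context before firing: the woken coroutine
        # restores its own logging context, so ours must not leak into
        # the reactor.
        with PreserveLoggingContext():
            waiter.callback(None)
```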
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 82d918a05fd0..5df80ea8e7b4 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -438,7 +438,8 @@ def _ctx_manager(): try: yield finally: - new_defer.callback(None) + with PreserveLoggingContext(): + new_defer.callback(None) self.key_to_current_readers.get(key, set()).discard(new_defer) return _ctx_manager() @@ -466,7 +467,8 @@ def _ctx_manager(): try: yield finally: - new_defer.callback(None) + with PreserveLoggingContext(): + new_defer.callback(None) if self.key_to_current_writer[key] == new_defer: self.key_to_current_writer.pop(key) From eb9ddc8c2e807e691fd1820f88f7c0bf43822661 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 8 Oct 2021 07:44:43 -0400 Subject: [PATCH 078/111] Remove the deprecated BaseHandler. (#11005) The shared ratelimit function was replaced with a dedicated RequestRatelimiter class (accessible from the HomeServer object). Other properties were copied to each sub-class that inherited from BaseHandler. --- changelog.d/11005.misc | 1 + synapse/api/ratelimiting.py | 86 ++++++++++++++++++ synapse/handlers/_base.py | 120 ------------------------- synapse/handlers/admin.py | 7 +- synapse/handlers/auth.py | 8 +- synapse/handlers/deactivate_account.py | 6 +- synapse/handlers/device.py | 10 +-- synapse/handlers/directory.py | 9 +- synapse/handlers/events.py | 12 ++- synapse/handlers/federation.py | 6 +- synapse/handlers/identity.py | 7 +- synapse/handlers/initial_sync.py | 8 +- synapse/handlers/message.py | 7 +- synapse/handlers/profile.py | 11 +-- synapse/handlers/read_marker.py | 5 +- synapse/handlers/receipts.py | 6 +- synapse/handlers/register.py | 9 +- synapse/handlers/room.py | 15 ++-- synapse/handlers/room_list.py | 7 +- synapse/handlers/room_member.py | 8 +- synapse/handlers/saml.py | 7 +- synapse/handlers/search.py | 9 +- synapse/handlers/set_password.py | 6 +- synapse/server.py | 11 ++- 24 files changed, 166 insertions(+), 215 deletions(-) create mode 100644 changelog.d/11005.misc delete mode 100644 synapse/handlers/_base.py diff --git a/changelog.d/11005.misc b/changelog.d/11005.misc new file mode 100644 index 000000000000..a893591971a3 --- /dev/null +++ b/changelog.d/11005.misc @@ -0,0 +1 @@ +Remove the deprecated `BaseHandler` object. 
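The migration pattern for handlers, distilled from the changes below: instead of inheriting a `ratelimit` method from `BaseHandler`, a handler now fetches the shared `RequestRatelimiter` from the `HomeServer` and calls it explicitly. A schematic before/after (the handler name is invented):

```python
# Before: ratelimiting arrived via inheritance.
#
#     class FooHandler(BaseHandler):
#         async def do_foo(self, requester):
#             await self.ratelimit(requester)


# After: the ratelimiter is an explicit, shared dependency.
class FooHandler:
    def __init__(self, hs):
        self.request_ratelimiter = hs.get_request_ratelimiter()

    async def do_foo(self, requester):
        await self.request_ratelimiter.ratelimit(requester)
```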
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index cbdd74025b35..e8964097d31f 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -17,6 +17,7 @@ from typing import Hashable, Optional, Tuple from synapse.api.errors import LimitExceededError +from synapse.config.ratelimiting import RateLimitConfig from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util import Clock @@ -233,3 +234,88 @@ async def ratelimit( raise LimitExceededError( retry_after_ms=int(1000 * (time_allowed - time_now_s)) ) + + +class RequestRatelimiter: + def __init__( + self, + store: DataStore, + clock: Clock, + rc_message: RateLimitConfig, + rc_admin_redaction: Optional[RateLimitConfig], + ): + self.store = store + self.clock = clock + + # The rate_hz and burst_count are overridden on a per-user basis + self.request_ratelimiter = Ratelimiter( + store=self.store, clock=self.clock, rate_hz=0, burst_count=0 + ) + self._rc_message = rc_message + + # Check whether ratelimiting room admin message redaction is enabled + # by the presence of rate limits in the config + if rc_admin_redaction: + self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter( + store=self.store, + clock=self.clock, + rate_hz=rc_admin_redaction.per_second, + burst_count=rc_admin_redaction.burst_count, + ) + else: + self.admin_redaction_ratelimiter = None + + async def ratelimit( + self, + requester: Requester, + update: bool = True, + is_admin_redaction: bool = False, + ) -> None: + """Ratelimits requests. + + Args: + requester + update: Whether to record that a request is being processed. + Set to False when doing multiple checks for one request (e.g. + to check up front if we would reject the request), and set to + True for the last call for a given request. + is_admin_redaction: Whether this is a room admin/moderator + redacting an event. If so then we may apply different + ratelimits depending on config. + + Raises: + LimitExceededError if the request should be ratelimited + """ + user_id = requester.user.to_string() + + # The AS user itself is never rate limited. + app_service = self.store.get_app_service_by_user_id(user_id) + if app_service is not None: + return # do not ratelimit app service senders + + messages_per_second = self._rc_message.per_second + burst_count = self._rc_message.burst_count + + # Check if there is a per user override in the DB. 
+ override = await self.store.get_ratelimit_for_user(user_id) + if override: + # If overridden with a null Hz then ratelimiting has been entirely + # disabled for the user + if not override.messages_per_second: + return + + messages_per_second = override.messages_per_second + burst_count = override.burst_count + + if is_admin_redaction and self.admin_redaction_ratelimiter: + # If we have separate config for admin redactions, use a separate + # ratelimiter as to not have user_ids clash + await self.admin_redaction_ratelimiter.ratelimit(requester, update=update) + else: + # Override rate and burst count per-user + await self.request_ratelimiter.ratelimit( + requester, + rate_hz=messages_per_second, + burst_count=burst_count, + update=update, + ) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py deleted file mode 100644 index 0ccef884e76a..000000000000 --- a/synapse/handlers/_base.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2014 - 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import TYPE_CHECKING, Optional - -from synapse.api.ratelimiting import Ratelimiter -from synapse.types import Requester - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class BaseHandler: - """ - Common base class for the event handlers. - - Deprecated: new code should not use this. Instead, Handler classes should define the - fields they actually need. The utility methods should either be factored out to - standalone helper functions, or to different Handler classes. - """ - - def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() - self.auth = hs.get_auth() - self.notifier = hs.get_notifier() - self.state_handler = hs.get_state_handler() - self.distributor = hs.get_distributor() - self.clock = hs.get_clock() - self.hs = hs - - # The rate_hz and burst_count are overridden on a per-user basis - self.request_ratelimiter = Ratelimiter( - store=self.store, clock=self.clock, rate_hz=0, burst_count=0 - ) - self._rc_message = self.hs.config.ratelimiting.rc_message - - # Check whether ratelimiting room admin message redaction is enabled - # by the presence of rate limits in the config - if self.hs.config.ratelimiting.rc_admin_redaction: - self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter( - store=self.store, - clock=self.clock, - rate_hz=self.hs.config.ratelimiting.rc_admin_redaction.per_second, - burst_count=self.hs.config.ratelimiting.rc_admin_redaction.burst_count, - ) - else: - self.admin_redaction_ratelimiter = None - - self.server_name = hs.hostname - - self.event_builder_factory = hs.get_event_builder_factory() - - async def ratelimit( - self, - requester: Requester, - update: bool = True, - is_admin_redaction: bool = False, - ) -> None: - """Ratelimits requests. - - Args: - requester - update: Whether to record that a request is being processed. - Set to False when doing multiple checks for one request (e.g. 
- to check up front if we would reject the request), and set to - True for the last call for a given request. - is_admin_redaction: Whether this is a room admin/moderator - redacting an event. If so then we may apply different - ratelimits depending on config. - - Raises: - LimitExceededError if the request should be ratelimited - """ - user_id = requester.user.to_string() - - # The AS user itself is never rate limited. - app_service = self.store.get_app_service_by_user_id(user_id) - if app_service is not None: - return # do not ratelimit app service senders - - messages_per_second = self._rc_message.per_second - burst_count = self._rc_message.burst_count - - # Check if there is a per user override in the DB. - override = await self.store.get_ratelimit_for_user(user_id) - if override: - # If overridden with a null Hz then ratelimiting has been entirely - # disabled for the user - if not override.messages_per_second: - return - - messages_per_second = override.messages_per_second - burst_count = override.burst_count - - if is_admin_redaction and self.admin_redaction_ratelimiter: - # If we have separate config for admin redactions, use a separate - # ratelimiter as to not have user_ids clash - await self.admin_redaction_ratelimiter.ratelimit(requester, update=update) - else: - # Override rate and burst count per-user - await self.request_ratelimiter.ratelimit( - requester, - rate_hz=messages_per_second, - burst_count=burst_count, - update=update, - ) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index bfa7f2c545c6..a53cd62d3ca1 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -21,18 +21,15 @@ from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID from synapse.visibility import filter_events_for_client -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -class AdminHandler(BaseHandler): +class AdminHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.store = hs.get_datastore() self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2d0f3d566c01..f4612a5b9223 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -52,7 +52,6 @@ UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.handlers._base import BaseHandler from synapse.handlers.ui_auth import ( INTERACTIVE_AUTH_CHECKERS, UIAuthSessionDataConstants, @@ -186,12 +185,13 @@ class LoginTokenAttributes: auth_provider_id = attr.ib(type=str) -class AuthHandler(BaseHandler): +class AuthHandler: SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.store = hs.get_datastore() + self.auth = hs.get_auth() + self.clock = hs.get_clock() self.checkers: Dict[str, UserInteractiveAuthChecker] = {} for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: inst = auth_checker_class(hs) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 12bdca744510..e88c3c27ce80 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -19,19 +19,17 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import Requester, UserID, create_requester -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -class 
DeactivateAccountHandler(BaseHandler): +class DeactivateAccountHandler: """Handler which deals with deactivating user accounts.""" def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() self.hs = hs self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 35334725d76b..75e60197603c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -40,8 +40,6 @@ from synapse.util.metrics import measure_func from synapse.util.retryutils import NotRetryingDestination -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -50,14 +48,16 @@ MAX_DEVICE_DISPLAY_NAME_LEN = 100 -class DeviceWorkerHandler(BaseHandler): +class DeviceWorkerHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.clock = hs.get_clock() self.hs = hs + self.store = hs.get_datastore() + self.notifier = hs.get_notifier() self.state = hs.get_state_handler() self.state_store = hs.get_storage().state self._auth_handler = hs.get_auth_handler() + self.server_name = hs.hostname @trace async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 9078781d5a3d..14ed7d987963 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -31,18 +31,16 @@ from synapse.storage.databases.main.directory import RoomAliasMapping from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -class DirectoryHandler(BaseHandler): +class DirectoryHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.auth = hs.get_auth() + self.hs = hs self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() @@ -51,6 +49,7 @@ def __init__(self, hs: "HomeServer"): self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.require_membership = hs.config.server.require_membership_for_aliases self.third_party_event_rules = hs.get_third_party_event_rules() + self.server_name = hs.hostname self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 4b3f037072d9..1f64534a8a78 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -25,8 +25,6 @@ from synapse.types import JsonDict, UserID from synapse.visibility import filter_events_for_client -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -34,11 +32,11 @@ logger = logging.getLogger(__name__) -class EventStreamHandler(BaseHandler): +class EventStreamHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.store = hs.get_datastore() self.clock = hs.get_clock() + self.hs = hs self.notifier = hs.get_notifier() self.state = hs.get_state_handler() @@ -138,9 +136,9 @@ async def get_stream( return chunk -class EventHandler(BaseHandler): +class EventHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() self.storage = hs.get_storage() async def get_event( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 043ca4a224b8..3e341bd287bf 
100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -53,7 +53,6 @@ from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError -from synapse.handlers._base import BaseHandler from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, @@ -78,15 +77,13 @@ logger = logging.getLogger(__name__) -class FederationHandler(BaseHandler): +class FederationHandler: """Handles general incoming federation requests Incoming events are *not* handled here, for which see FederationEventHandler. """ def __init__(self, hs: "HomeServer"): - super().__init__(hs) - self.hs = hs self.store = hs.get_datastore() @@ -99,6 +96,7 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() + self.event_builder_factory = hs.get_event_builder_factory() self._event_auth_handler = hs.get_event_auth_handler() self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self.config = hs.config diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index c881475c25ac..9c319b538323 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -39,8 +39,6 @@ valid_id_server_location, ) -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -49,10 +47,9 @@ id_server_scheme = "https://" -class IdentityHandler(BaseHandler): +class IdentityHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.store = hs.get_datastore() # An HTTP client for contacting trusted URLs. self.http_client = SimpleHttpClient(hs) # An HTTP client for contacting identity servers specified by clients. diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 9ad39a65d8b6..d4e45561555c 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -31,8 +31,6 @@ from synapse.util.caches.response_cache import ResponseCache from synapse.visibility import filter_events_for_client -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -40,9 +38,11 @@ logger = logging.getLogger(__name__) -class InitialSyncHandler(BaseHandler): +class InitialSyncHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() + self.auth = hs.get_auth() + self.state_handler = hs.get_state_handler() self.hs = hs self.state = hs.get_state_handler() self.clock = hs.get_clock() diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ccd7827207a6..4de9f4b8288a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -62,8 +62,6 @@ from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.server import HomeServer @@ -433,8 +431,7 @@ def __init__(self, hs: "HomeServer"): self.send_event = ReplicationSendEventRestServlet.make_client(hs) - # This is only used to get at ratelimit function - self.base_handler = BaseHandler(hs) + self.request_ratelimiter = hs.get_request_ratelimiter() # We arbitrarily limit concurrent event creation for a room to 5. # This is to stop us from diverging history *too* much. 
@@ -1322,7 +1319,7 @@ async def persist_and_notify_client_event( original_event and event.sender != original_event.sender ) - await self.base_handler.ratelimit( + await self.request_ratelimiter.ratelimit( requester, is_admin_redaction=is_admin_redaction ) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 2e19706c6941..e6c3cf585b17 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -32,8 +32,6 @@ get_domain_from_id, ) -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -43,7 +41,7 @@ MAX_AVATAR_URL_LEN = 1000 -class ProfileHandler(BaseHandler): +class ProfileHandler: """Handles fetching and updating user profile information. ProfileHandler can be instantiated directly on workers and will @@ -54,7 +52,9 @@ class ProfileHandler(BaseHandler): PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() + self.clock = hs.get_clock() + self.hs = hs self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -62,6 +62,7 @@ def __init__(self, hs: "HomeServer"): ) self.user_directory_handler = hs.get_user_directory_handler() + self.request_ratelimiter = hs.get_request_ratelimiter() if hs.config.worker.run_background_tasks: self.clock.looping_call( @@ -346,7 +347,7 @@ async def _update_join_states( if not self.hs.is_mine(target_user): return - await self.ratelimit(requester) + await self.request_ratelimiter.ratelimit(requester) # Do not actually update the room state for shadow-banned users. if requester.shadow_banned: diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index bd8160e7ed43..58593e570e1d 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -17,17 +17,14 @@ from synapse.util.async_helpers import Linearizer -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -class ReadMarkerHandler(BaseHandler): +class ReadMarkerHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) self.server_name = hs.config.server.server_name self.store = hs.get_datastore() self.account_data_handler = hs.get_account_data_handler() diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index f21f33ada28c..374e961e3bf8 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -16,7 +16,6 @@ from synapse.api.constants import ReadReceiptEventFields from synapse.appservice import ApplicationService -from synapse.handlers._base import BaseHandler from synapse.streams import EventSource from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id @@ -26,10 +25,9 @@ logger = logging.getLogger(__name__) -class ReceiptsHandler(BaseHandler): +class ReceiptsHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.notifier = hs.get_notifier() self.server_name = hs.config.server.server_name self.store = hs.get_datastore() self.event_auth_handler = hs.get_event_auth_handler() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 441af7a84868..a0e6a01775d4 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -41,8 +41,6 @@ from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID, create_requester -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -85,9 +83,10 @@ class 
LoginDict(TypedDict): refresh_token: Optional[str] -class RegistrationHandler(BaseHandler): +class RegistrationHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() + self.clock = hs.get_clock() self.hs = hs self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() @@ -515,7 +514,7 @@ async def _join_rooms(self, user_id: str) -> None: # we don't have a local user in the room to craft up an invite with. requires_invite = await self.store.is_host_joined( room_id, - self.server_name, + self._server_name, ) if requires_invite: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d40dbd761d80..7072bca1fcc5 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -76,8 +76,6 @@ from synapse.util.stringutils import parse_and_validate_server_name from synapse.visibility import filter_events_for_client -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -88,15 +86,18 @@ FIVE_MINUTES_IN_MS = 5 * 60 * 1000 -class RoomCreationHandler(BaseHandler): +class RoomCreationHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) - + self.store = hs.get_datastore() + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.hs = hs self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self._event_auth_handler = hs.get_event_auth_handler() self.config = hs.config + self.request_ratelimiter = hs.get_request_ratelimiter() # Room state based off defined presets self._presets_dict: Dict[str, Dict[str, Any]] = { @@ -162,7 +163,7 @@ async def upgrade_room( Raises: ShadowBanError if the requester is shadow-banned. """ - await self.ratelimit(requester) + await self.request_ratelimiter.ratelimit(requester) user_id = requester.user.to_string() @@ -665,7 +666,7 @@ async def create_room( raise SynapseError(403, "You are not permitted to create rooms") if ratelimit: - await self.ratelimit(requester) + await self.request_ratelimiter.ratelimit(requester) room_version_id = config.get( "room_version", self.config.server.default_room_version.identifier diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index c3d4199ed13c..ba7a14d651c5 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -36,8 +36,6 @@ from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.caches.response_cache import ResponseCache -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -49,9 +47,10 @@ EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None) -class RoomListHandler(BaseHandler): +class RoomListHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() + self.hs = hs self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.response_cache: ResponseCache[ Tuple[Optional[int], Optional[str], Optional[ThirdPartyInstanceID]] diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index eef337feeb8d..74e6c7eca6b1 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -51,8 +51,6 @@ from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_left_room -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer @@ -118,9 +116,7 @@ def __init__(self, hs: "HomeServer"): 
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count, ) - # This is only used to get at the ratelimit function. It's fine there are - # multiple of these as it doesn't store state. - self.base_handler = BaseHandler(hs) + self.request_ratelimiter = hs.get_request_ratelimiter() @abc.abstractmethod async def _remote_join( @@ -1275,7 +1271,7 @@ async def do_3pid_invite( # We need to rate limit *before* we send out any 3PID invites, so we # can't just rely on the standard ratelimiting of events. - await self.base_handler.ratelimit(requester) + await self.request_ratelimiter.ratelimit(requester) can_invite = await self.third_party_event_rules.check_threepid_can_be_invited( medium, address, room_id diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 2fed9f377a5e..727d75a50c6c 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -22,7 +22,6 @@ from synapse.api.errors import SynapseError from synapse.config import ConfigError -from synapse.handlers._base import BaseHandler from synapse.handlers.sso import MappingException, UserAttributes from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest @@ -51,9 +50,11 @@ class Saml2SessionData: ui_auth_session_id: Optional[str] = None -class SamlHandler(BaseHandler): +class SamlHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() + self.clock = hs.get_clock() + self.server_name = hs.hostname self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config) self._saml_idp_entityid = hs.config.saml2.saml2_idp_entityid diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 6d3333ee00f3..a3ffa26be860 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -26,17 +26,18 @@ from synapse.types import JsonDict, UserID from synapse.visibility import filter_events_for_client -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -class SearchHandler(BaseHandler): +class SearchHandler: def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() + self.state_handler = hs.get_state_handler() + self.clock = hs.get_clock() + self.hs = hs self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index a63fac828342..706ad72761a3 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -17,19 +17,17 @@ from synapse.api.errors import Codes, StoreError, SynapseError from synapse.types import Requester -from ._base import BaseHandler - if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -class SetPasswordHandler(BaseHandler): +class SetPasswordHandler: """Handler which deals with changing user account passwords""" def __init__(self, hs: "HomeServer"): - super().__init__(hs) + self.store = hs.get_datastore() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/server.py b/synapse/server.py index 637eb15b786d..0783df41d486 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -39,7 +39,7 @@ from synapse.api.auth import Auth from synapse.api.filtering import Filtering -from synapse.api.ratelimiting import Ratelimiter +from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter from synapse.appservice.api import 
ApplicationServiceApi from synapse.appservice.scheduler import ApplicationServiceScheduler from synapse.config.homeserver import HomeServerConfig @@ -816,3 +816,12 @@ def get_outbound_redis_connection(self) -> Optional["RedisProtocol"]: def should_send_federation(self) -> bool: "Should this server be sending federation traffic directly?" return self.config.worker.send_federation + + @cache_in_self + def get_request_ratelimiter(self) -> RequestRatelimiter: + return RequestRatelimiter( + self.get_datastore(), + self.get_clock(), + self.config.ratelimiting.rc_message, + self.config.ratelimiting.rc_admin_redaction, + ) From 670a8d9a1e18159917ca1b4f8e5af48a0b258f5e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Oct 2021 12:52:48 +0100 Subject: [PATCH 079/111] Fix overwriting profile when making room public (#11003) This splits apart `handle_new_user` into a function which adds an entry to the `user_directory` and a function which updates the room sharing tables. I plan to continue doing more of this kind of refactoring to clarify the implementation. --- changelog.d/11003.bugfix | 1 + synapse/handlers/user_directory.py | 63 +++++++++++++----------- tests/handlers/test_user_directory.py | 71 ++++++++++++++++++++++++++- 3 files changed, 104 insertions(+), 31 deletions(-) create mode 100644 changelog.d/11003.bugfix diff --git a/changelog.d/11003.bugfix b/changelog.d/11003.bugfix new file mode 100644 index 000000000000..0786f1b886ac --- /dev/null +++ b/changelog.d/11003.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public. \ No newline at end of file diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index b7b19733461e..8810f048ba4a 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -242,18 +242,15 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: continue if change is MatchChange.now_true: # The user joined - event = await self.store.get_event(event_id, allow_none=True) - # It isn't expected for this event to not exist, but we - # don't want the entire background process to break. - if event is None: - continue - - profile = ProfileInfo( - avatar_url=event.content.get("avatar_url"), - display_name=event.content.get("displayname"), - ) - - await self._handle_new_user(room_id, state_key, profile) + # This may be the first time we've seen a remote user. If + # so, ensure we have a directory entry for them. (We don't + # need to do this for local users: their directory entry + # is created at the point of registration. 
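+ # Either way, the join is recorded in the room-sharing tables below.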
+ if is_remote: + await self._upsert_directory_entry_for_remote_user( + state_key, event_id + ) + await self._track_user_joined_room(room_id, state_key) else: # The user left await self._handle_remove_user(room_id, state_key) else: @@ -303,7 +300,7 @@ async def _handle_room_publicity_change( room_id ) - logger.debug("Change: %r, publicness: %r", publicness, is_public) + logger.debug("Publicness change: %r, is_public: %r", publicness, is_public) if publicness is MatchChange.now_true and not is_public: # If we became world readable but room isn't currently public then @@ -314,42 +311,50 @@ async def _handle_room_publicity_change( # ignore the change return - other_users_in_room_with_profiles = ( - await self.store.get_users_in_room_with_profiles(room_id) - ) + users_in_room = await self.store.get_users_in_room(room_id) # Remove every user from the sharing tables for that room. - for user_id in other_users_in_room_with_profiles.keys(): + for user_id in users_in_room: await self.store.remove_user_who_share_room(user_id, room_id) # Then, re-add them to the tables. - # NOTE: this is not the most efficient method, as handle_new_user sets + # NOTE: this is not the most efficient method, as _track_user_joined_room sets # up local_user -> other_user and other_user_whos_local -> local_user, # which when ran over an entire room, will result in the same values # being added multiple times. The batching upserts shouldn't make this # too bad, though. - for user_id, profile in other_users_in_room_with_profiles.items(): - await self._handle_new_user(room_id, user_id, profile) + for user_id in users_in_room: + await self._track_user_joined_room(room_id, user_id) - async def _handle_new_user( - self, room_id: str, user_id: str, profile: ProfileInfo + async def _upsert_directory_entry_for_remote_user( + self, user_id: str, event_id: str ) -> None: - """Called when we might need to add user to directory - - Args: - room_id: The room ID that user joined or started being public - user_id + """A remote user has just joined a room. Ensure they have an entry in + the user directory. The caller is responsible for making sure they're + remote. """ + event = await self.store.get_event(event_id, allow_none=True) + # It isn't expected for this event to not exist, but we + # don't want the entire background process to break. + if event is None: + return + logger.debug("Adding new user to dir, %r", user_id) await self.store.update_profile_in_user_dir( - user_id, profile.display_name, profile.avatar_url + user_id, event.content.get("displayname"), event.content.get("avatar_url") ) + async def _track_user_joined_room(self, room_id: str, user_id: str) -> None: + """Someone's just joined a room. Update `users_in_public_rooms` or + `users_who_share_private_rooms` as appropriate. + + The caller is responsible for ensuring that the given user is not excluded + from the user directory. + """ is_public = await self.store.is_room_world_readable_or_publicly_joinable( room_id ) - # Now we update users who share rooms with users. other_users_in_room = await self.store.get_users_in_room(room_id) if is_public: diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 47217f054202..db65253773c6 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -372,8 +372,6 @@ def test_process_join_after_server_leaves_room(self) -> None: # Alice makes two rooms. Bob joins one of them. 
room1 = self.helper.create_room_as(alice, tok=alice_token) room2 = self.helper.create_room_as(alice, tok=alice_token) - print("room1=", room1) - print("room2=", room2) self.helper.join(room1, bob, tok=bob_token) # The user sharing tables should have been updated. @@ -436,6 +434,75 @@ def test_per_room_profile_doesnt_alter_directory_entry(self) -> None: 0, ) + def test_making_room_public_doesnt_alter_directory_entry(self) -> None: + """Per-room names shouldn't go to the directory when the room becomes public. + + This isn't about preventing a leak (the room is now public, so the nickname + is too). It's about preserving the invariant that we only show a user's public + profile in the user directory results. + + I made this a Synapse test case rather than a Complement one because + I think this is (strictly speaking) an implementation choice. Synapse + has chosen to only ever use the public profile when responding to a user + directory search. There's no privacy leak here, because making the room + public discloses the per-room name. + + The spec doesn't mandate anything about _how_ a user + should appear in a /user_directory/search result. Hypothetical example: + suppose Bob searches for Alice. When representing Alice in a search + result, it's reasonable to use any of Alice's nicknames that Bob is + aware of. Heck, maybe we even want to use lots of them in a combined + displayname like `Alice (aka "ali", "ally", "41iC3")`. + """ + + # TODO the same should apply when Alice is a remote user. + alice = self.register_user("alice", "pass") + alice_token = self.login(alice, "pass") + bob = self.register_user("bob", "pass") + bob_token = self.login(bob, "pass") + + # Alice and Bob are in a private room. + room = self.helper.create_room_as(alice, is_public=False, tok=alice_token) + self.helper.invite(room, src=alice, targ=bob, tok=alice_token) + self.helper.join(room, user=bob, tok=bob_token) + + # Alice has a nickname unique to that room. + + self.helper.send_state( + room, + "m.room.member", + { + "displayname": "Freddy Mercury", + "membership": "join", + }, + alice_token, + state_key=alice, + ) + + # Check Alice isn't recorded as being in a public room. + public = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + self.assertNotIn((alice, room), public) + + # One of them makes the room public. + self.helper.send_state( + room, + "m.room.join_rules", + {"join_rule": "public"}, + alice_token, + ) + + # Check that Alice is now recorded as being in a public room + public = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + self.assertIn((alice, room), public) + + # Alice's display name remains the same in the user directory. + search_result = self.get_success(self.handler.search_users(bob, alice, 10)) + self.assertEqual( + search_result["results"], + [{"display_name": "alice", "avatar_url": None, "user_id": alice}], + 0, + ) + def test_private_room(self) -> None: """ A user can be searched for only by people that are either in a public From 797ee7812db28f6cf130d68e2d10911c826b0be5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Oct 2021 14:49:41 +0100 Subject: [PATCH 080/111] Relax `ignore-missing-imports` for modules that have stubs now and update mypy (#11006) Updating mypy past version 0.9 means that third-party stubs are no-longer distributed with typeshed. See http://mypy-lang.blogspot.com/2021/06/mypy-0900-released.html for details. 
We therefore pull in stub packages in setup.py Additionally, some modules that we were previously ignoring import failures for now have stubs. So let's use them. The rest of this change consists of fixups to make the newer mypy + stubs pass CI. Co-authored-by: Patrick Cloke --- changelog.d/11006.misc | 1 + mypy.ini | 69 +++++++++---------- setup.py | 11 ++- synapse/config/tls.py | 9 ++- synapse/http/client.py | 2 +- synapse/logging/context.py | 16 ++--- synapse/metrics/background_process_metrics.py | 2 +- synapse/push/mailer.py | 2 +- synapse/rest/media/v1/__init__.py | 38 ++++------ synapse/rest/media/v1/thumbnailer.py | 21 ++++-- synapse/storage/prepare_database.py | 4 ++ synapse/util/__init__.py | 5 +- 12 files changed, 100 insertions(+), 80 deletions(-) create mode 100644 changelog.d/11006.misc diff --git a/changelog.d/11006.misc b/changelog.d/11006.misc new file mode 100644 index 000000000000..7b4abae76a63 --- /dev/null +++ b/changelog.d/11006.misc @@ -0,0 +1 @@ +Bump mypy version for CI to 0.910, and pull in new type stubs for dependencies. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 68437e5ce11b..e7cb80b6eb19 100644 --- a/mypy.ini +++ b/mypy.ini @@ -198,98 +198,97 @@ disallow_untyped_defs = True [mypy-tests.storage.test_user_directory] disallow_untyped_defs = True -[mypy-pymacaroons.*] -ignore_missing_imports = True +;; Dependencies without annotations +;; Before ignoring a module, check to see if type stubs are available. +;; The `typeshed` project maintains stubs here: +;; https://github.com/python/typeshed/tree/master/stubs +;; and for each package `foo` there's a corresponding `types-foo` package on PyPI, +;; which we can pull in as a dev dependency by adding to `setup.py`'s +;; `CONDITIONAL_REQUIREMENTS["mypy"]` list. 
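+;; For example, the `bleach` module is covered by the `types-bleach` stub package.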
-[mypy-zope] +[mypy-authlib.*] ignore_missing_imports = True [mypy-bcrypt] ignore_missing_imports = True -[mypy-constantly] -ignore_missing_imports = True - -[mypy-twisted.*] +[mypy-canonicaljson] ignore_missing_imports = True -[mypy-treq.*] +[mypy-constantly] ignore_missing_imports = True -[mypy-hyperlink] +[mypy-daemonize] ignore_missing_imports = True [mypy-h11] ignore_missing_imports = True -[mypy-msgpack] -ignore_missing_imports = True - -[mypy-opentracing] +[mypy-hiredis] ignore_missing_imports = True -[mypy-OpenSSL.*] +[mypy-hyperlink] ignore_missing_imports = True -[mypy-netaddr] +[mypy-ijson.*] ignore_missing_imports = True -[mypy-saml2.*] +[mypy-jaeger_client.*] ignore_missing_imports = True -[mypy-canonicaljson] +[mypy-josepy.*] ignore_missing_imports = True -[mypy-jaeger_client.*] +[mypy-jwt.*] ignore_missing_imports = True -[mypy-jsonschema] +[mypy-lxml] ignore_missing_imports = True -[mypy-signedjson.*] +[mypy-msgpack] ignore_missing_imports = True -[mypy-prometheus_client.*] +[mypy-nacl.*] ignore_missing_imports = True -[mypy-service_identity.*] +[mypy-netaddr] ignore_missing_imports = True -[mypy-daemonize] +[mypy-opentracing] ignore_missing_imports = True -[mypy-sentry_sdk] +[mypy-phonenumbers.*] ignore_missing_imports = True -[mypy-PIL.*] +[mypy-prometheus_client.*] ignore_missing_imports = True -[mypy-lxml] +[mypy-pymacaroons.*] ignore_missing_imports = True -[mypy-jwt.*] +[mypy-pympler.*] ignore_missing_imports = True -[mypy-authlib.*] +[mypy-rust_python_jaeger_reporter.*] ignore_missing_imports = True -[mypy-rust_python_jaeger_reporter.*] +[mypy-saml2.*] ignore_missing_imports = True -[mypy-nacl.*] +[mypy-sentry_sdk] ignore_missing_imports = True -[mypy-hiredis] +[mypy-service_identity.*] ignore_missing_imports = True -[mypy-josepy.*] +[mypy-signedjson.*] ignore_missing_imports = True -[mypy-pympler.*] +[mypy-treq.*] ignore_missing_imports = True -[mypy-phonenumbers.*] +[mypy-twisted.*] ignore_missing_imports = True -[mypy-ijson.*] +[mypy-zope] ignore_missing_imports = True diff --git a/setup.py b/setup.py index c47856351081..f8b4487bc19b 100755 --- a/setup.py +++ b/setup.py @@ -112,7 +112,16 @@ def exec_file(path_segments): "pygithub==1.55", ] -CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"] +CONDITIONAL_REQUIREMENTS["mypy"] = [ + "mypy==0.910", + "mypy-zope==0.3.2", + "types-bleach>=4.1.0", + "types-jsonschema>=3.2.0", + "types-Pillow>=8.3.4", + "types-pyOpenSSL>=20.0.7", + "types-PyYAML>=5.4.10", + "types-setuptools>=57.4.0", +] # Dependencies which are exclusively required by unit test code. This is # NOT a list of all modules that are necessary to run the unit tests. 
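A knock-on effect of pulling in the stubs (a sketch, assuming `bleach` and the `types-bleach` package from the `mypy` extra above are installed): calls into these modules are now genuinely type-checked instead of being treated as `Any`, which is what drives small fixups such as the `mailer.py` change further down.

```python
# With types-bleach installed, mypy checks this call against a real
# signature rather than ignoring the import. The call shape mirrors
# synapse/push/mailer.py after this patch: empty allow-lists for tags and
# attributes, with linkification handled separately by bleach.linkify().
import bleach

cleaned: str = bleach.clean("<b>hi</b>", tags=[], attributes=[], strip=False)
print(cleaned)  # -> "&lt;b&gt;hi&lt;/b&gt;"
```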
diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 5679f05e4270..6227434bac68 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -172,9 +172,12 @@ def is_disk_cert_valid(self, allow_self_signed=True): ) # YYYYMMDDhhmmssZ -- in UTC - expires_on = datetime.strptime( - tls_certificate.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ" - ) + expiry_data = tls_certificate.get_notAfter() + if expiry_data is None: + raise ValueError( + "TLS Certificate has no expiry date, and this is not permitted" + ) + expires_on = datetime.strptime(expiry_data.decode("ascii"), "%Y%m%d%H%M%SZ") now = datetime.utcnow() days_remaining = (expires_on - now).days return days_remaining diff --git a/synapse/http/client.py b/synapse/http/client.py index 5204c3d08ccf..b5a2d333a6ce 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -912,7 +912,7 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory): def __init__(self): self._context = SSL.Context(SSL.SSLv23_METHOD) - self._context.set_verify(VERIFY_NONE, lambda *_: None) + self._context.set_verify(VERIFY_NONE, lambda *_: False) def getContext(self, hostname=None, port=None): return self._context diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 02e5ddd2ef2a..bdc018774381 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -52,7 +52,7 @@ is_thread_resource_usage_supported = True - def get_thread_resource_usage() -> "Optional[resource._RUsage]": + def get_thread_resource_usage() -> "Optional[resource.struct_rusage]": return resource.getrusage(RUSAGE_THREAD) @@ -61,7 +61,7 @@ def get_thread_resource_usage() -> "Optional[resource._RUsage]": # won't track resource usage. is_thread_resource_usage_supported = False - def get_thread_resource_usage() -> "Optional[resource._RUsage]": + def get_thread_resource_usage() -> "Optional[resource.struct_rusage]": return None @@ -226,10 +226,10 @@ def __str__(self): def copy_to(self, record): pass - def start(self, rusage: "Optional[resource._RUsage]"): + def start(self, rusage: "Optional[resource.struct_rusage]"): pass - def stop(self, rusage: "Optional[resource._RUsage]"): + def stop(self, rusage: "Optional[resource.struct_rusage]"): pass def add_database_transaction(self, duration_sec): @@ -289,7 +289,7 @@ def __init__( # The thread resource usage when the logcontext became active. None # if the context is not currently active. - self.usage_start: Optional[resource._RUsage] = None + self.usage_start: Optional[resource.struct_rusage] = None self.main_thread = get_thread_id() self.request = None @@ -410,7 +410,7 @@ def copy_to(self, record) -> None: # we also track the current scope: record.scope = self.scope - def start(self, rusage: "Optional[resource._RUsage]") -> None: + def start(self, rusage: "Optional[resource.struct_rusage]") -> None: """ Record that this logcontext is currently running. @@ -435,7 +435,7 @@ def start(self, rusage: "Optional[resource._RUsage]") -> None: else: self.usage_start = rusage - def stop(self, rusage: "Optional[resource._RUsage]") -> None: + def stop(self, rusage: "Optional[resource.struct_rusage]") -> None: """ Record that this logcontext is no longer running. 
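The `resource` annotations above work because `resource.struct_rusage` is the public runtime type returned by `getrusage`; the old `resource._RUsage` name only ever existed as a private alias in typeshed. A minimal illustration (Unix-only, since the `resource` module is POSIX-specific):

```python
import resource

# getrusage returns an instance of the public struct_rusage type, so the
# logcontext annotations can name it directly instead of a private alias.
usage = resource.getrusage(resource.RUSAGE_SELF)
assert isinstance(usage, resource.struct_rusage)
print(usage.ru_utime, usage.ru_stime)  # user/system CPU time in seconds
```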
@@ -490,7 +490,7 @@ def get_resource_usage(self) -> ContextResourceUsage: return res - def _get_cputime(self, current: "resource._RUsage") -> Tuple[float, float]: + def _get_cputime(self, current: "resource.struct_rusage") -> Tuple[float, float]: """Get the cpu usage time between start() and the given rusage Args: diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 3a14260752ed..2ab599a33479 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -265,7 +265,7 @@ def __init__(self, name: str, instance_id: Optional[Union[int, str]] = None): super().__init__("%s-%s" % (name, instance_id)) self._proc = _BackgroundProcess(name, self) - def start(self, rusage: "Optional[resource._RUsage]"): + def start(self, rusage: "Optional[resource.struct_rusage]"): """Log context has started running (again).""" super().start(rusage) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index e38e3c5d44e6..ce299ba3da16 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -892,7 +892,7 @@ def safe_text(raw_text: str) -> jinja2.Markup: A Markup object ready to safely use in a Jinja template. """ return jinja2.Markup( - bleach.linkify(bleach.clean(raw_text, tags=[], attributes={}, strip=False)) + bleach.linkify(bleach.clean(raw_text, tags=[], attributes=[], strip=False)) ) diff --git a/synapse/rest/media/v1/__init__.py b/synapse/rest/media/v1/__init__.py index 3dd16d4bb542..d5b74cddf125 100644 --- a/synapse/rest/media/v1/__init__.py +++ b/synapse/rest/media/v1/__init__.py @@ -12,33 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -import PIL.Image +from PIL.features import check_codec # check for JPEG support. -try: - PIL.Image._getdecoder("rgb", "jpeg", None) -except OSError as e: - if str(e).startswith("decoder jpeg not available"): - raise Exception( - "FATAL: jpeg codec not supported. Install pillow correctly! " - " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&" - " pip install pillow --user'" - ) -except Exception: - # any other exception is fine - pass +if not check_codec("jpg"): + raise Exception( + "FATAL: jpeg codec not supported. Install pillow correctly! " + " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&" + " pip install pillow --user'" + ) # check for PNG support. -try: - PIL.Image._getdecoder("rgb", "zip", None) -except OSError as e: - if str(e).startswith("decoder zip not available"): - raise Exception( - "FATAL: zip codec not supported. Install pillow correctly! " - " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&" - " pip install pillow --user'" - ) -except Exception: - # any other exception is fine - pass +if not check_codec("zlib"): + raise Exception( + "FATAL: zip codec not supported. Install pillow correctly! 
" + " 'sudo apt-get install libjpeg-dev' then 'pip uninstall pillow &&" + " pip install pillow --user'" + ) diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index df54a4064990..46701a8b8364 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -61,9 +61,19 @@ def __init__(self, input_path: str): self.transpose_method = None try: # We don't use ImageOps.exif_transpose since it crashes with big EXIF - image_exif = self.image._getexif() + # + # Ignore safety: Pillow seems to acknowledge that this method is + # "private, experimental, but generally widely used". Pillow 6 + # includes a public getexif() method (no underscore) that we might + # consider using instead when we can bump that dependency. + # + # At the time of writing, Debian buster (currently oldstable) + # provides version 5.4.1. It's expected to EOL in mid-2022, see + # https://wiki.debian.org/DebianReleases#Production_Releases + image_exif = self.image._getexif() # type: ignore if image_exif is not None: image_orientation = image_exif.get(EXIF_ORIENTATION_TAG) + assert isinstance(image_orientation, int) self.transpose_method = EXIF_TRANSPOSE_MAPPINGS.get(image_orientation) except Exception as e: # A lot of parsing errors can happen when parsing EXIF @@ -76,7 +86,10 @@ def transpose(self) -> Tuple[int, int]: A tuple containing the new image size in pixels as (width, height). """ if self.transpose_method is not None: - self.image = self.image.transpose(self.transpose_method) + # Safety: `transpose` takes an int rather than e.g. an IntEnum. + # self.transpose_method is set above to be a value in + # EXIF_TRANSPOSE_MAPPINGS, and that only contains correct values. + self.image = self.image.transpose(self.transpose_method) # type: ignore[arg-type] self.width, self.height = self.image.size self.transpose_method = None # We don't need EXIF any more @@ -101,7 +114,7 @@ def aspect(self, max_width: int, max_height: int) -> Tuple[int, int]: else: return (max_height * self.width) // self.height, max_height - def _resize(self, width: int, height: int) -> Image: + def _resize(self, width: int, height: int) -> Image.Image: # 1-bit or 8-bit color palette images need converting to RGB # otherwise they will be scaled using nearest neighbour which # looks awful. 
@@ -151,7 +164,7 @@ def crop(self, width: int, height: int, output_type: str) -> BytesIO: cropped = scaled_image.crop((crop_left, 0, crop_right, height)) return self._encode_image(cropped, output_type) - def _encode_image(self, output_image: Image, output_type: str) -> BytesIO: + def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO: output_bytes_io = BytesIO() fmt = self.FORMATS[output_type] if fmt == "JPEG": diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index a63eaddfdc1c..11ca47ea2825 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -487,6 +487,10 @@ def _upgrade_existing_database( spec = importlib.util.spec_from_file_location( module_name, absolute_path ) + if spec is None: + raise RuntimeError( + f"Could not build a module spec for {module_name} at {absolute_path}" + ) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) # type: ignore diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 64daff59df0d..abf53d149dba 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -51,7 +51,10 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]: # fishing the protected dict out of the object is a bit nasty, # but we don't really want the overhead of copying the dict. try: - return obj._dict + # Safety: we catch the AttributeError immediately below. + # See https://github.com/matrix-org/python-canonicaljson/issues/36#issuecomment-927816293 + # for discussion on how frozendict's internals have changed over time. + return obj._dict # type: ignore[attr-defined] except AttributeError: # When the C implementation of frozendict is used, # there isn't a `_dict` attribute with a dict From 51a5da74ccd383806378b53ee8a09e27a8829f31 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Oct 2021 15:25:16 +0100 Subject: [PATCH 081/111] Annotate synapse.storage.util (#10892) Also mark `synapse.streams` as having no untyped defs Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/10892.misc | 1 + mypy.ini | 6 + .../slave/storage/_slaved_id_tracker.py | 4 +- synapse/replication/slave/storage/pushers.py | 10 +- synapse/storage/databases/main/pusher.py | 10 +- .../storage/databases/main/registration.py | 9 +- synapse/storage/util/id_generators.py | 143 +++++++++++------- synapse/storage/util/sequence.py | 6 +- 8 files changed, 124 insertions(+), 65 deletions(-) create mode 100644 changelog.d/10892.misc diff --git a/changelog.d/10892.misc b/changelog.d/10892.misc new file mode 100644 index 000000000000..c8c471159b19 --- /dev/null +++ b/changelog.d/10892.misc @@ -0,0 +1 @@ +Add further type hints to `synapse.storage.util`. diff --git a/mypy.ini b/mypy.ini index e7cb80b6eb19..bc2b59ff5622 100644 --- a/mypy.ini +++ b/mypy.ini @@ -105,6 +105,12 @@ disallow_untyped_defs = True [mypy-synapse.state.*] disallow_untyped_defs = True +[mypy-synapse.storage.util.*] +disallow_untyped_defs = True + +[mypy-synapse.streams.*] +disallow_untyped_defs = True + [mypy-synapse.util.batching_queue] disallow_untyped_defs = True diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py index 2cb7489047f7..8c1bf9227ac6 100644 --- a/synapse/replication/slave/storage/_slaved_id_tracker.py +++ b/synapse/replication/slave/storage/_slaved_id_tracker.py @@ -13,14 +13,14 @@ # limitations under the License.
from typing import List, Optional, Tuple -from synapse.storage.types import Connection +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.util.id_generators import _load_current_id class SlavedIdTracker: def __init__( self, - db_conn: Connection, + db_conn: LoggingDatabaseConnection, table: str, column: str, extra_tables: Optional[List[Tuple[str, str]]] = None, diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index 2672a2c94b15..cea90c0f1bf4 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -15,9 +15,8 @@ from typing import TYPE_CHECKING from synapse.replication.tcp.streams import PushersStream -from synapse.storage.database import DatabasePool +from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.pusher import PusherWorkerStore -from synapse.storage.types import Connection from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker @@ -27,7 +26,12 @@ class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): - def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): super().__init__(database, db_conn, hs) self._pushers_id_gen = SlavedIdTracker( # type: ignore db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")] diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index a93caae8d02c..b73ce53c9156 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -18,8 +18,7 @@ from synapse.push import PusherConfig, ThrottleParams from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import DatabasePool -from synapse.storage.types import Connection +from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder @@ -32,7 +31,12 @@ class PusherWorkerStore(SQLBaseStore): - def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): super().__init__(database, db_conn, hs) self._pushers_id_gen = StreamIdGenerator( db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")] diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7de4ad7f9b3c..181841ee0659 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -26,7 +26,7 @@ from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.stats import StatsStore -from synapse.storage.types import Connection, Cursor +from synapse.storage.types import Cursor from synapse.storage.util.id_generators import IdGenerator from synapse.storage.util.sequence import build_sequence_generator from synapse.types import UserID, UserInfo @@ -1775,7 +1775,12 @@ async def is_guest(self, user_id: str) -> bool: class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): - def __init__(self, database: DatabasePool, 
db_conn: Connection, hs: "HomeServer"): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): super().__init__(database, db_conn, hs) self._ignore_unknown_session_error = ( diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 6f7cbe40f498..852bd79fee85 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -16,42 +16,62 @@ import threading from collections import OrderedDict from contextlib import contextmanager -from typing import Dict, Iterable, List, Optional, Set, Tuple, Union +from types import TracebackType +from typing import ( + AsyncContextManager, + ContextManager, + Dict, + Generator, + Generic, + Iterable, + List, + Optional, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, +) import attr from sortedcontainers import SortedSet from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator logger = logging.getLogger(__name__) +T = TypeVar("T") + + class IdGenerator: - def __init__(self, db_conn, table, column): + def __init__( + self, + db_conn: LoggingDatabaseConnection, + table: str, + column: str, + ): self._lock = threading.Lock() self._next_id = _load_current_id(db_conn, table, column) - def get_next(self): + def get_next(self) -> int: with self._lock: self._next_id += 1 return self._next_id -def _load_current_id(db_conn, table, column, step=1): - """ - - Args: - db_conn (object): - table (str): - column (str): - step (int): - - Returns: - int - """ +def _load_current_id( + db_conn: LoggingDatabaseConnection, table: str, column: str, step: int = 1 +) -> int: # debug logging for https://github.com/matrix-org/synapse/issues/7968 logger.info("initialising stream generator for %s(%s)", table, column) cur = db_conn.cursor(txn_name="_load_current_id") @@ -59,7 +79,9 @@ def _load_current_id(db_conn, table, column, step=1): cur.execute("SELECT MAX(%s) FROM %s" % (column, table)) else: cur.execute("SELECT MIN(%s) FROM %s" % (column, table)) - (val,) = cur.fetchone() + result = cur.fetchone() + assert result is not None + (val,) = result cur.close() current_id = int(val) if val else step return (max if step > 0 else min)(current_id, step) @@ -93,16 +115,16 @@ class StreamIdGenerator: def __init__( self, - db_conn, - table, - column, + db_conn: LoggingDatabaseConnection, + table: str, + column: str, extra_tables: Iterable[Tuple[str, str]] = (), - step=1, - ): + step: int = 1, + ) -> None: assert step != 0 self._lock = threading.Lock() - self._step = step - self._current = _load_current_id(db_conn, table, column, step) + self._step: int = step + self._current: int = _load_current_id(db_conn, table, column, step) for table, column in extra_tables: self._current = (max if step > 0 else min)( self._current, _load_current_id(db_conn, table, column, step) @@ -115,7 +137,7 @@ def __init__( # The key and values are the same, but we never look at the values. 
self._unfinished_ids: OrderedDict[int, int] = OrderedDict() - def get_next(self): + def get_next(self) -> AsyncContextManager[int]: """ Usage: async with stream_id_gen.get_next() as stream_id: @@ -128,7 +150,7 @@ def get_next(self): self._unfinished_ids[next_id] = next_id @contextmanager - def manager(): + def manager() -> Generator[int, None, None]: try: yield next_id finally: @@ -137,7 +159,7 @@ def manager(): return _AsyncCtxManagerWrapper(manager()) - def get_next_mult(self, n): + def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]: """ Usage: async with stream_id_gen.get_next(n) as stream_ids: @@ -155,7 +177,7 @@ def get_next_mult(self, n): self._unfinished_ids[next_id] = next_id @contextmanager - def manager(): + def manager() -> Generator[Sequence[int], None, None]: try: yield next_ids finally: @@ -215,7 +237,7 @@ class MultiWriterIdGenerator: def __init__( self, - db_conn, + db_conn: LoggingDatabaseConnection, db: DatabasePool, stream_name: str, instance_name: str, @@ -223,7 +245,7 @@ def __init__( sequence_name: str, writers: List[str], positive: bool = True, - ): + ) -> None: self._db = db self._stream_name = stream_name self._instance_name = instance_name @@ -285,9 +307,9 @@ def __init__( def _load_current_ids( self, - db_conn, + db_conn: LoggingDatabaseConnection, tables: List[Tuple[str, str, str]], - ): + ) -> None: cur = db_conn.cursor(txn_name="_load_current_ids") # Load the current positions of all writers for the stream. @@ -335,7 +357,9 @@ def _load_current_ids( "agg": "MAX" if self._positive else "-MIN", } cur.execute(sql) - (stream_id,) = cur.fetchone() + result = cur.fetchone() + assert result is not None + (stream_id,) = result max_stream_id = max(max_stream_id, stream_id) @@ -354,7 +378,7 @@ def _load_current_ids( self._persisted_upto_position = min_stream_id - rows = [] + rows: List[Tuple[str, int]] = [] for table, instance_column, id_column in tables: sql = """ SELECT %(instance)s, %(id)s FROM %(table)s @@ -367,7 +391,8 @@ def _load_current_ids( } cur.execute(sql, (min_stream_id * self._return_factor,)) - rows.extend(cur) + # Cast safety: this corresponds to the types returned by the query above. + rows.extend(cast(Iterable[Tuple[str, int]], cur)) # Sort so that we handle rows in order for each instance. rows.sort() @@ -385,13 +410,13 @@ def _load_current_ids( cur.close() - def _load_next_id_txn(self, txn) -> int: + def _load_next_id_txn(self, txn: Cursor) -> int: return self._sequence_gen.get_next_id_txn(txn) - def _load_next_mult_id_txn(self, txn, n: int) -> List[int]: + def _load_next_mult_id_txn(self, txn: Cursor, n: int) -> List[int]: return self._sequence_gen.get_next_mult_txn(txn, n) - def get_next(self): + def get_next(self) -> AsyncContextManager[int]: """ Usage: async with stream_id_gen.get_next() as stream_id: @@ -403,9 +428,12 @@ def get_next(self): if self._writers and self._instance_name not in self._writers: raise Exception("Tried to allocate stream ID on non-writer") - return _MultiWriterCtxManager(self) + # Cast safety: the second argument to _MultiWriterCtxManager, multiple_ids, + # controls the return type. If `None` or omitted, the context manager yields + # a single integer stream_id; otherwise it yields a list of stream_ids. 
+ return cast(AsyncContextManager[int], _MultiWriterCtxManager(self)) - def get_next_mult(self, n: int): + def get_next_mult(self, n: int) -> AsyncContextManager[List[int]]: """ Usage: async with stream_id_gen.get_next_mult(5) as stream_ids: @@ -417,9 +445,10 @@ def get_next_mult(self, n: int): if self._writers and self._instance_name not in self._writers: raise Exception("Tried to allocate stream ID on non-writer") - return _MultiWriterCtxManager(self, n) + # Cast safety: see get_next. + return cast(AsyncContextManager[List[int]], _MultiWriterCtxManager(self, n)) - def get_next_txn(self, txn: LoggingTransaction): + def get_next_txn(self, txn: LoggingTransaction) -> int: """ Usage: @@ -457,7 +486,7 @@ def get_next_txn(self, txn: LoggingTransaction): return self._return_factor * next_id - def _mark_id_as_finished(self, next_id: int): + def _mark_id_as_finished(self, next_id: int) -> None: """The ID has finished being processed so we should advance the current position if possible. """ @@ -534,7 +563,7 @@ def get_positions(self) -> Dict[str, int]: for name, i in self._current_positions.items() } - def advance(self, instance_name: str, new_id: int): + def advance(self, instance_name: str, new_id: int) -> None: """Advance the position of the named writer to the given ID, if greater than existing entry. """ @@ -560,7 +589,7 @@ def get_persisted_upto_position(self) -> int: with self._lock: return self._return_factor * self._persisted_upto_position - def _add_persisted_position(self, new_id: int): + def _add_persisted_position(self, new_id: int) -> None: """Record that we have persisted a position. This is used to keep the `_current_positions` up to date. @@ -606,7 +635,7 @@ def _add_persisted_position(self, new_id: int): # do. break - def _update_stream_positions_table_txn(self, txn: Cursor): + def _update_stream_positions_table_txn(self, txn: Cursor) -> None: """Update the `stream_positions` table with newly persisted position.""" if not self._writers: @@ -628,20 +657,25 @@ def _update_stream_positions_table_txn(self, txn: Cursor): txn.execute(sql, (self._stream_name, self._instance_name, pos)) -@attr.s(slots=True) -class _AsyncCtxManagerWrapper: +@attr.s(frozen=True, auto_attribs=True) +class _AsyncCtxManagerWrapper(Generic[T]): """Helper class to convert a plain context manager to an async one. This is mainly useful if you have a plain context manager but the interface requires an async one. 
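Usage (an illustrative sketch; `plain_cm` is any synchronous context manager): async with _AsyncCtxManagerWrapper(plain_cm) as value: ...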
""" - inner = attr.ib() + inner: ContextManager[T] - async def __aenter__(self): + async def __aenter__(self) -> T: return self.inner.__enter__() - async def __aexit__(self, exc_type, exc, tb): + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> Optional[bool]: return self.inner.__exit__(exc_type, exc, tb) @@ -671,7 +705,12 @@ async def __aenter__(self) -> Union[int, List[int]]: else: return [i * self.id_gen._return_factor for i in self.stream_ids] - async def __aexit__(self, exc_type, exc, tb): + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> bool: for i in self.stream_ids: self.id_gen._mark_id_as_finished(i) diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index bb33e04fb10a..75268cbe1595 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -81,7 +81,7 @@ def check_consistency( id_column: str, stream_name: Optional[str] = None, positive: bool = True, - ): + ) -> None: """Should be called during start up to test that the current value of the sequence is greater than or equal to the maximum ID in the table. @@ -122,7 +122,7 @@ def check_consistency( id_column: str, stream_name: Optional[str] = None, positive: bool = True, - ): + ) -> None: """See SequenceGenerator.check_consistency for docstring.""" txn = db_conn.cursor(txn_name="sequence.check_consistency") @@ -244,7 +244,7 @@ def check_consistency( id_column: str, stream_name: Optional[str] = None, positive: bool = True, - ): + ) -> None: # There is nothing to do for in memory sequences pass From c576598a6834c59e7e6e51eb72c2967b00762666 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 8 Oct 2021 17:11:14 +0100 Subject: [PATCH 082/111] Include the requirements for [mypy,lint] in [dev] --- setup.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/setup.py b/setup.py index f8b4487bc19b..220084a49d46 100755 --- a/setup.py +++ b/setup.py @@ -103,15 +103,6 @@ def exec_file(path_segments): "flake8", ] -CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [ - # The following are used by the release script - "click==7.1.2", - "redbaron==0.9.2", - "GitPython==3.1.14", - "commonmark==0.9.1", - "pygithub==1.55", -] - CONDITIONAL_REQUIREMENTS["mypy"] = [ "mypy==0.910", "mypy-zope==0.3.2", @@ -130,6 +121,20 @@ def exec_file(path_segments): # parameterized_class decorator was introduced in parameterized 0.7.0 CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"] +CONDITIONAL_REQUIREMENTS["dev"] = ( + CONDITIONAL_REQUIREMENTS["lint"] + + CONDITIONAL_REQUIREMENTS["mypy"] + + CONDITIONAL_REQUIREMENTS["test"] + + [ + # The following are used by the release script + "click==7.1.2", + "redbaron==0.9.2", + "GitPython==3.1.14", + "commonmark==0.9.1", + "pygithub==1.55", + ] +) + setup( name="matrix-synapse", version=version, From 9f23ff78da69c84b9ab6f1dacd4a3fd31d17a812 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 8 Oct 2021 17:11:32 +0100 Subject: [PATCH 083/111] Update contributing guide to use [all,dev] --- docs/development/contributing_guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 580a4f7f9854..3bf08a72bb21 100644 --- a/docs/development/contributing_guide.md +++ 
b/docs/development/contributing_guide.md @@ -50,7 +50,7 @@ setup a *virtualenv*, as follows: cd path/where/you/have/cloned/the/repository python3 -m venv ./env source ./env/bin/activate -pip install -e ".[all,lint,mypy,test]" +pip install -e ".[all,dev]" pip install tox ``` From d51a3400196763c2de38918719d50ab75f3d1bc5 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 8 Oct 2021 17:12:40 +0100 Subject: [PATCH 084/111] Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/11034.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/11034.misc diff --git a/changelog.d/11034.misc b/changelog.d/11034.misc new file mode 100644 index 000000000000..b15fd66ac360 --- /dev/null +++ b/changelog.d/11034.misc @@ -0,0 +1 @@ +When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing. From 593eeac19ea8ecc1344933f91fb4fc18a8a97221 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 8 Oct 2021 17:15:32 +0100 Subject: [PATCH 085/111] Revert accidental push to develop. --- changelog.d/11034.misc | 1 - docs/development/contributing_guide.md | 2 +- setup.py | 23 +++++++++-------------- 3 files changed, 10 insertions(+), 16 deletions(-) delete mode 100644 changelog.d/11034.misc diff --git a/changelog.d/11034.misc b/changelog.d/11034.misc deleted file mode 100644 index b15fd66ac360..000000000000 --- a/changelog.d/11034.misc +++ /dev/null @@ -1 +0,0 @@ -When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 3bf08a72bb21..580a4f7f9854 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -50,7 +50,7 @@ setup a *virtualenv*, as follows: cd path/where/you/have/cloned/the/repository python3 -m venv ./env source ./env/bin/activate -pip install -e ".[all,dev]" +pip install -e ".[all,lint,mypy,test]" pip install tox ``` diff --git a/setup.py b/setup.py index 220084a49d46..f8b4487bc19b 100755 --- a/setup.py +++ b/setup.py @@ -103,6 +103,15 @@ def exec_file(path_segments): "flake8", ] +CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [ + # The following are used by the release script + "click==7.1.2", + "redbaron==0.9.2", + "GitPython==3.1.14", + "commonmark==0.9.1", + "pygithub==1.55", +] + CONDITIONAL_REQUIREMENTS["mypy"] = [ "mypy==0.910", "mypy-zope==0.3.2", @@ -121,20 +130,6 @@ def exec_file(path_segments): # parameterized_class decorator was introduced in parameterized 0.7.0 CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"] -CONDITIONAL_REQUIREMENTS["dev"] = ( - CONDITIONAL_REQUIREMENTS["lint"] - + CONDITIONAL_REQUIREMENTS["mypy"] - + CONDITIONAL_REQUIREMENTS["test"] - + [ - # The following are used by the release script - "click==7.1.2", - "redbaron==0.9.2", - "GitPython==3.1.14", - "commonmark==0.9.1", - "pygithub==1.55", - ] -) - setup( name="matrix-synapse", version=version, From 1b112840d2c6dafa131eba4f0285409bb7345661 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 8 Oct 2021 14:14:42 -0400 Subject: [PATCH 086/111] Autodiscover oEmbed endpoint from returned HTML (#10822) Searches the returned HTML for an oEmbed endpoint using the autodiscovery mechanism (``), and will request it to generate the preview. 
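For reference, the discovery element that the new `autodiscover_from_html` helper looks for carries `rel="alternate"` (or the Flickr-style `rel="alternative"`) together with `type="application/json+oembed"`. A minimal sketch of the same lookup outside Synapse, using the lxml approach the patch itself takes (the sample `href` is made up):

```python
from lxml import etree

# A page advertising its oEmbed endpoint; the href is illustrative.
body = b"""<html><head>
<link rel="alternate" type="application/json+oembed"
      href="https://publish.example.com/oembed?url=...">
</head></html>"""

tree = etree.fromstring(body, etree.HTMLParser(recover=True, encoding="utf-8"))
links = tree.xpath("//link[@rel='alternate'][@type='application/json+oembed']")
print(links[0].attrib["href"])  # the URL to fetch the oEmbed JSON from
```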
--- changelog.d/10822.feature | 1 + synapse/rest/media/v1/oembed.py | 26 ++++ synapse/rest/media/v1/preview_url_resource.py | 112 ++++++++++++------ tests/rest/media/v1/test_url_preview.py | 100 +++++++++++++++- tests/test_preview.py | 40 ++++--- 5 files changed, 224 insertions(+), 55 deletions(-) create mode 100644 changelog.d/10822.feature diff --git a/changelog.d/10822.feature b/changelog.d/10822.feature new file mode 100644 index 000000000000..72566e31ec9e --- /dev/null +++ b/changelog.d/10822.feature @@ -0,0 +1 @@ +Support autodiscovery of oEmbed previews. diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py index e04671fb95cf..6d7e1f9064af 100644 --- a/synapse/rest/media/v1/oembed.py +++ b/synapse/rest/media/v1/oembed.py @@ -96,6 +96,32 @@ def get_oembed_url(self, url: str) -> Optional[str]: # No match. return None + def autodiscover_from_html(self, tree: "etree.Element") -> Optional[str]: + """ + Search an HTML document for oEmbed autodiscovery information. + + Args: + tree: The parsed HTML body. + + Returns: + The URL to use for oEmbed information, or None if no URL was found. + """ + # Search for link elements with the proper rel and type attributes. + for tag in tree.xpath( + "//link[@rel='alternate'][@type='application/json+oembed']" + ): + if "href" in tag.attrib: + return tag.attrib["href"] + + # Some providers (e.g. Flickr) use alternative instead of alternate. + for tag in tree.xpath( + "//link[@rel='alternative'][@type='application/json+oembed']" + ): + if "href" in tag.attrib: + return tag.attrib["href"] + + return None + def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult: """ Parse the oEmbed response into an Open Graph response. diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 044f44a3977e..1fe0fc8aa9e5 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -22,7 +22,7 @@ import shutil import sys import traceback -from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Union +from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Tuple, Union from urllib import parse as urlparse import attr @@ -296,22 +296,32 @@ async def _do_preview(self, url: str, user: str, ts: int) -> bytes: body = file.read() encoding = get_html_media_encoding(body, media_info.media_type) - og = decode_and_calc_og(body, media_info.uri, encoding) - - await self._precache_image_url(user, media_info, og) - - elif oembed_url and _is_json(media_info.media_type): - # Handle an oEmbed response. - with open(media_info.filename, "rb") as file: - body = file.read() - - oembed_response = self._oembed.parse_oembed_response(url, body) - og = oembed_response.open_graph_result - - # Use the cache age from the oEmbed result, instead of the HTTP response. - if oembed_response.cache_age is not None: - expiration_ms = oembed_response.cache_age + tree = decode_body(body, encoding) + if tree is not None: + # Check if this HTML document points to oEmbed information and + # defer to that. + oembed_url = self._oembed.autodiscover_from_html(tree) + og = {} + if oembed_url: + oembed_info = await self._download_url(oembed_url, user) + og, expiration_ms = await self._handle_oembed_response( + url, oembed_info, expiration_ms + ) + + # If there was no oEmbed URL (or oEmbed parsing failed), attempt + # to generate the Open Graph information from the HTML. 
+ if not oembed_url or not og: + og = _calc_og(tree, media_info.uri) + + await self._precache_image_url(user, media_info, og) + else: + og = {} + elif oembed_url: + # Handle the oEmbed information. + og, expiration_ms = await self._handle_oembed_response( + url, media_info, expiration_ms + ) await self._precache_image_url(user, media_info, og) else: @@ -479,6 +489,39 @@ async def _precache_image_url( else: del og["og:image"] + async def _handle_oembed_response( + self, url: str, media_info: MediaInfo, expiration_ms: int + ) -> Tuple[JsonDict, int]: + """ + Parse the downloaded oEmbed info. + + Args: + url: The URL which is being previewed (not the one which was + requested). + media_info: The media being previewed. + expiration_ms: The length of time, in milliseconds, the media is valid for. + + Returns: + A tuple of: + The Open Graph dictionary, if the oEmbed info can be parsed. + The (possibly updated) length of time, in milliseconds, the media is valid for. + """ + # If JSON was not returned, there's nothing to do. + if not _is_json(media_info.media_type): + return {}, expiration_ms + + with open(media_info.filename, "rb") as file: + body = file.read() + + oembed_response = self._oembed.parse_oembed_response(url, body) + open_graph_result = oembed_response.open_graph_result + + # Use the cache age from the oEmbed result, if one was given. + if open_graph_result and oembed_response.cache_age is not None: + expiration_ms = oembed_response.cache_age + + return open_graph_result, expiration_ms + def _start_expire_url_cache_data(self) -> Deferred: return run_as_background_process( "expire_url_cache_data", self._expire_url_cache_data @@ -631,26 +674,22 @@ def get_html_media_encoding(body: bytes, content_type: str) -> str: return "utf-8" -def decode_and_calc_og( - body: bytes, media_uri: str, request_encoding: Optional[str] = None -) -> JsonDict: +def decode_body( + body: bytes, request_encoding: Optional[str] = None +) -> Optional["etree.Element"]: """ - Calculate metadata for an HTML document. - - This uses lxml to parse the HTML document into the OG response. If errors - occur during processing of the document, an empty response is returned. + This uses lxml to parse the HTML document. Args: body: The HTML document, as bytes. - media_url: The URI used to download the body. request_encoding: The character encoding of the body, as a string. Returns: - The OG response as a dictionary. + The parsed HTML body, or None if an error occurred during processing. """ # If there's no body, nothing useful is going to be found. if not body: - return {} + return None from lxml import etree @@ -662,25 +701,22 @@ def decode_and_calc_og( parser = etree.HTMLParser(recover=True, encoding="utf-8") except Exception as e: logger.warning("Unable to create HTML parser: %s" % (e,)) - return {} - - def _attempt_calc_og(body_attempt: Union[bytes, str]) -> Dict[str, Optional[str]]: - # Attempt to parse the body. If this fails, log and return no metadata. - tree = etree.fromstring(body_attempt, parser) - - # The data was successfully parsed, but no tree was found. - if tree is None: - return {} + return None - return _calc_og(tree, media_uri) + def _attempt_decode_body( + body_attempt: Union[bytes, str] + ) -> Optional["etree.Element"]: + # Attempt to parse the body. Returns None if the body was successfully + # parsed, but no tree was found. + return etree.fromstring(body_attempt, parser) # Attempt to parse the body. If this fails, log and return no metadata.
try: - return _attempt_calc_og(body) + return _attempt_decode_body(body) except UnicodeDecodeError: # blindly try decoding the body as utf-8, which seems to fix # the charset mismatches on https://google.com - return _attempt_calc_og(body.decode("utf-8", "ignore")) + return _attempt_decode_body(body.decode("utf-8", "ignore")) def _calc_og(tree: "etree.Element", media_uri: str) -> Dict[str, Optional[str]]: diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index ce43de780b51..8698135a769d 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -725,9 +725,107 @@ def test_oembed_format(self): }, ) + def test_oembed_autodiscovery(self): + """ + Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL. + 1. Request a preview of a URL which is not known to the oEmbed code. + 2. It returns HTML including a link to an oEmbed preview. + 3. The oEmbed preview is requested and returns a URL for an image. + 4. The image is requested for thumbnailing. + """ + # This is a little cheesy in that we use the www subdomain (which isn't the + # list of oEmbed patterns) to get "raw" HTML response. + self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")] + self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")] + self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")] + + result = b""" + + """ + + channel = self.make_request( + "GET", + "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b'Content-Type: text/html; charset="utf8"\r\n\r\n' + ) + % (len(result),) + + result + ) + + self.pump() + + # The oEmbed response. + result2 = { + "version": "1.0", + "type": "photo", + "url": "http://cdn.twitter.com/matrixdotorg", + } + oembed_content = json.dumps(result2).encode("utf-8") + + # Ensure a second request is made to the oEmbed URL. + client = self.reactor.tcpClients[1][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b'Content-Type: application/json; charset="utf8"\r\n\r\n' + ) + % (len(oembed_content),) + + oembed_content + ) + + self.pump() + + # Ensure the URL is what was requested. + self.assertIn(b"/oembed?", server.data) + + # Ensure a third request is made to the photo URL. + client = self.reactor.tcpClients[2][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b"Content-Type: image/png\r\n\r\n" + ) + % (len(SMALL_PNG),) + + SMALL_PNG + ) + + self.pump() + + # Ensure the URL is what was requested. 
+ self.assertIn(b"/matrixdotorg", server.data) + + self.assertEqual(channel.code, 200) + body = channel.json_body + self.assertEqual( + body["og:url"], "http://www.twitter.com/matrixdotorg/status/12345" + ) + self.assertTrue(body["og:image"].startswith("mxc://")) + self.assertEqual(body["og:image:height"], 1) + self.assertEqual(body["og:image:width"], 1) + self.assertEqual(body["og:image:type"], "image/png") + def _download_image(self): """Downloads an image into the URL cache. - Returns: A (host, media_id) tuple representing the MXC URI of the image. """ diff --git a/tests/test_preview.py b/tests/test_preview.py index 48e792b55b52..09e017b4d94c 100644 --- a/tests/test_preview.py +++ b/tests/test_preview.py @@ -13,7 +13,8 @@ # limitations under the License. from synapse.rest.media.v1.preview_url_resource import ( - decode_and_calc_og, + _calc_og, + decode_body, get_html_media_encoding, summarize_paragraphs, ) @@ -158,7 +159,8 @@ def test_simple(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -173,7 +175,8 @@ def test_comment(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -191,7 +194,8 @@ def test_comment2(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual( og, @@ -212,7 +216,8 @@ def test_script(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -225,7 +230,8 @@ def test_missing_title(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -239,7 +245,8 @@ def test_h1_as_title(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) @@ -253,21 +260,22 @@ def test_missing_title_and_broken_h1(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) def test_empty(self): """Test a body with no data in it.""" html = b"" - og = decode_and_calc_og(html, "http://example.com/test.html") - self.assertEqual(og, {}) + tree = decode_body(html) + self.assertIsNone(tree) def test_no_tree(self): """A valid body with no tree in it.""" html = b"\x00" - og = decode_and_calc_og(html, "http://example.com/test.html") - self.assertEqual(og, {}) + tree = decode_body(html) + self.assertIsNone(tree) def test_invalid_encoding(self): """An invalid character encoding should be ignored and treated as UTF-8, if possible.""" @@ -279,9 +287,8 @@ def test_invalid_encoding(self): """ - og = decode_and_calc_og( - html, "http://example.com/test.html", "invalid-encoding" - ) + tree = decode_body(html, "invalid-encoding") + og = _calc_og(tree, 
"http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding2(self): @@ -295,7 +302,8 @@ def test_invalid_encoding2(self): """ - og = decode_and_calc_og(html, "http://example.com/test.html") + tree = decode_body(html) + og = _calc_og(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) From a7d22c36dbbbdd396aeb8938b57b5fd7edb689f3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 8 Oct 2021 18:35:00 -0500 Subject: [PATCH 087/111] Refactor MSC2716 `/batch_send` endpoint into separate handler functions (#10974) --- changelog.d/10974.misc | 1 + synapse/handlers/room_batch.py | 423 ++++++++++++++++++++++++++++++ synapse/rest/client/room_batch.py | 339 ++++-------------------- synapse/server.py | 5 + 4 files changed, 485 insertions(+), 283 deletions(-) create mode 100644 changelog.d/10974.misc create mode 100644 synapse/handlers/room_batch.py diff --git a/changelog.d/10974.misc b/changelog.d/10974.misc new file mode 100644 index 000000000000..8695b378aabb --- /dev/null +++ b/changelog.d/10974.misc @@ -0,0 +1 @@ +Refactor [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` mega function into smaller handler functions. diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py new file mode 100644 index 000000000000..51dd4e755570 --- /dev/null +++ b/synapse/handlers/room_batch.py @@ -0,0 +1,423 @@ +import logging +from typing import TYPE_CHECKING, List, Tuple + +from synapse.api.constants import EventContentFields, EventTypes +from synapse.appservice import ApplicationService +from synapse.http.servlet import assert_params_in_dict +from synapse.types import JsonDict, Requester, UserID, create_requester +from synapse.util.stringutils import random_string + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class RoomBatchHandler: + def __init__(self, hs: "HomeServer"): + self.hs = hs + self.store = hs.get_datastore() + self.state_store = hs.get_storage().state + self.event_creation_handler = hs.get_event_creation_handler() + self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() + + async def inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int: + """Finds the depth which would sort it after the most-recent + prev_event_id but before the successors of those events. If no + successors are found, we assume it's an historical extremity part of the + current batch and use the same depth of the prev_event_ids. + + Args: + prev_event_ids: List of prev event IDs + + Returns: + Inherited depth + """ + ( + most_recent_prev_event_id, + most_recent_prev_event_depth, + ) = await self.store.get_max_depth_of(prev_event_ids) + + # We want to insert the historical event after the `prev_event` but before the successor event + # + # We inherit depth from the successor event instead of the `prev_event` + # because events returned from `/messages` are first sorted by `topological_ordering` + # which is just the `depth` and then tie-break with `stream_ordering`. + # + # We mark these inserted historical events as "backfilled" which gives them a + # negative `stream_ordering`. If we use the same depth as the `prev_event`, + # then our historical event will tie-break and be sorted before the `prev_event` + # when it should come after. 
+        #
+        # We want to use the successor event depth so they appear after `prev_event` because
+        # it has a larger `depth` but before the successor event because the `stream_ordering`
+        # is negative before the successor event.
+        successor_event_ids = await self.store.get_successor_events(
+            [most_recent_prev_event_id]
+        )
+
+        # If we can't find any successor events, then it's a forward extremity of
+        # historical messages and we can just inherit from the previous historical
+        # event which we can already assume has the correct depth where we want
+        # to insert into.
+        if not successor_event_ids:
+            depth = most_recent_prev_event_depth
+        else:
+            (
+                _,
+                oldest_successor_depth,
+            ) = await self.store.get_min_depth_of(successor_event_ids)
+
+            depth = oldest_successor_depth
+
+        return depth
+
+    def create_insertion_event_dict(
+        self, sender: str, room_id: str, origin_server_ts: int
+    ) -> JsonDict:
+        """Creates an event dict for an "insertion" event with the proper fields
+        and a random batch ID.
+
+        Args:
+            sender: The event author MXID
+            room_id: The room ID that the event belongs to
+            origin_server_ts: Timestamp when the event was sent
+
+        Returns:
+            The new event dictionary to insert.
+        """
+
+        next_batch_id = random_string(8)
+        insertion_event = {
+            "type": EventTypes.MSC2716_INSERTION,
+            "sender": sender,
+            "room_id": room_id,
+            "content": {
+                EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id,
+                EventContentFields.MSC2716_HISTORICAL: True,
+            },
+            "origin_server_ts": origin_server_ts,
+        }
+
+        return insertion_event
+
+    async def create_requester_for_user_id_from_app_service(
+        self, user_id: str, app_service: ApplicationService
+    ) -> Requester:
+        """Creates a new requester for the given user_id
+        and validates that the app service is allowed to control
+        the given user.
+
+        Args:
+            user_id: The author MXID that the app service is controlling
+            app_service: The app service that controls the user
+
+        Returns:
+            Requester object
+        """
+
+        await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
+
+        return create_requester(user_id, app_service=app_service)
+
+    async def get_most_recent_auth_event_ids_from_event_id_list(
+        self, event_ids: List[str]
+    ) -> List[str]:
+        """Find the most recent auth event IDs (derived from state events) that
+        allowed that message to be sent. We will use this as a base
+        to auth our historical messages against.
+
+        Args:
+            event_ids: List of event IDs to look at
+
+        Returns:
+            List of event IDs
+        """
+
+        (
+            most_recent_prev_event_id,
+            _,
+        ) = await self.store.get_max_depth_of(event_ids)
+        # mapping from (type, state_key) -> state_event_id
+        prev_state_map = await self.state_store.get_state_ids_for_event(
+            most_recent_prev_event_id
+        )
+        # List of state event IDs
+        prev_state_ids = list(prev_state_map.values())
+        auth_event_ids = prev_state_ids
+
+        return auth_event_ids
+
+    async def persist_state_events_at_start(
+        self,
+        state_events_at_start: List[JsonDict],
+        room_id: str,
+        initial_auth_event_ids: List[str],
+        app_service_requester: Requester,
+    ) -> List[str]:
+        """Takes all `state_events_at_start` event dictionaries and creates/persists
+        them as floating state events which don't resolve into the current room state.
+        They are floating because they reference a fake prev_event which doesn't connect
+        to the normal DAG at all.
+
+        Args:
+            state_events_at_start: The list of state event dictionaries to persist.
+            room_id: Room in which you want the events persisted.
+            initial_auth_event_ids: These will be the auth_events for the first
+                state event created.
Each event created afterwards will be
+                added to the list of auth events for the next state event
+                created.
+            app_service_requester: The requester of an application service.
+
+        Returns:
+            List of state event IDs we just persisted
+        """
+        assert app_service_requester.app_service
+
+        state_event_ids_at_start = []
+        auth_event_ids = initial_auth_event_ids.copy()
+        for state_event in state_events_at_start:
+            assert_params_in_dict(
+                state_event, ["type", "origin_server_ts", "content", "sender"]
+            )
+
+            logger.debug(
+                "RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s",
+                state_event,
+                auth_event_ids,
+            )
+
+            event_dict = {
+                "type": state_event["type"],
+                "origin_server_ts": state_event["origin_server_ts"],
+                "content": state_event["content"],
+                "room_id": room_id,
+                "sender": state_event["sender"],
+                "state_key": state_event["state_key"],
+            }
+
+            # Mark all events as historical
+            event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
+
+            # Make the state events float off on their own so we don't have a
+            # bunch of `@mxid joined the room` noise between each batch.
+            fake_prev_event_id = "$" + random_string(43)
+
+            # TODO: This is pretty much the same as some other code to handle inserting state in this file
+            if event_dict["type"] == EventTypes.Member:
+                membership = event_dict["content"].get("membership", None)
+                event_id, _ = await self.room_member_handler.update_membership(
+                    await self.create_requester_for_user_id_from_app_service(
+                        state_event["sender"], app_service_requester.app_service
+                    ),
+                    target=UserID.from_string(event_dict["state_key"]),
+                    room_id=room_id,
+                    action=membership,
+                    content=event_dict["content"],
+                    outlier=True,
+                    prev_event_ids=[fake_prev_event_id],
+                    # Make sure to use a copy of this list because we modify it
+                    # later in the loop here. Otherwise it will be the same
+                    # reference and will also be updated when we append later.
+                    auth_event_ids=auth_event_ids.copy(),
+                )
+            else:
+                # TODO: Add some Complement tests that add state that is not a
+                # member join and will use this code path. Maybe we only want to
+                # support join state events and can get rid of this `else`?
+                (
+                    event,
+                    _,
+                ) = await self.event_creation_handler.create_and_send_nonmember_event(
+                    await self.create_requester_for_user_id_from_app_service(
+                        state_event["sender"], app_service_requester.app_service
+                    ),
+                    event_dict,
+                    outlier=True,
+                    prev_event_ids=[fake_prev_event_id],
+                    # Make sure to use a copy of this list because we modify it
+                    # later in the loop here. Otherwise it will be the same
+                    # reference and will also be updated when we append later.
+                    auth_event_ids=auth_event_ids.copy(),
+                )
+                event_id = event.event_id
+
+            state_event_ids_at_start.append(event_id)
+            auth_event_ids.append(event_id)
+
+        return state_event_ids_at_start
+
+    async def persist_historical_events(
+        self,
+        events_to_create: List[JsonDict],
+        room_id: str,
+        initial_prev_event_ids: List[str],
+        inherited_depth: int,
+        auth_event_ids: List[str],
+        app_service_requester: Requester,
+    ) -> List[str]:
+        """Creates and persists all of the events provided, sequentially. Handles
+        the complexity of creating events in chronological order so they can
+        reference each other by prev_event, while still persisting them in
+        reverse-chronological order so they have the correct
+        (topological_ordering, stream_ordering) and sort correctly from
+        /messages.
+
+        Args:
+            events_to_create: List of historical events to create in JSON
+                dictionary format.
+            room_id: Room in which you want the events persisted.
+            initial_prev_event_ids: These will be the prev_events for the first
+                event created. Each event created afterwards will point to the
+                previous event created.
+            inherited_depth: The depth to create the events at (you will
+                probably get this by calling inherit_depth_from_prev_ids(...)).
+            auth_event_ids: Define which events allow you to create the given
+                event in the room.
+            app_service_requester: The requester of an application service.
+
+        Returns:
+            List of persisted event IDs
+        """
+        assert app_service_requester.app_service
+
+        prev_event_ids = initial_prev_event_ids.copy()
+
+        event_ids = []
+        events_to_persist = []
+        for ev in events_to_create:
+            assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
+
+            event_dict = {
+                "type": ev["type"],
+                "origin_server_ts": ev["origin_server_ts"],
+                "content": ev["content"],
+                "room_id": room_id,
+                "sender": ev["sender"],  # requester.user.to_string(),
+                "prev_events": prev_event_ids.copy(),
+            }
+
+            # Mark all events as historical
+            event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
+
+            event, context = await self.event_creation_handler.create_event(
+                await self.create_requester_for_user_id_from_app_service(
+                    ev["sender"], app_service_requester.app_service
+                ),
+                event_dict,
+                prev_event_ids=event_dict.get("prev_events"),
+                auth_event_ids=auth_event_ids,
+                historical=True,
+                depth=inherited_depth,
+            )
+            logger.debug(
+                "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s",
+                event,
+                prev_event_ids,
+                auth_event_ids,
+            )
+
+            assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
+                event.sender,
+            )
+
+            events_to_persist.append((event, context))
+            event_id = event.event_id
+
+            event_ids.append(event_id)
+            prev_event_ids = [event_id]
+
+        # Persist events in reverse-chronological order so they have the
+        # correct stream_ordering as they are backfilled (which decrements).
+        # Events are sorted by (topological_ordering, stream_ordering)
+        # where topological_ordering is just depth.
+        for (event, context) in reversed(events_to_persist):
+            await self.event_creation_handler.handle_new_client_event(
+                await self.create_requester_for_user_id_from_app_service(
+                    event["sender"], app_service_requester.app_service
+                ),
+                event=event,
+                context=context,
+            )
+
+        return event_ids
+
+    async def handle_batch_of_events(
+        self,
+        events_to_create: List[JsonDict],
+        room_id: str,
+        batch_id_to_connect_to: str,
+        initial_prev_event_ids: List[str],
+        inherited_depth: int,
+        auth_event_ids: List[str],
+        app_service_requester: Requester,
+    ) -> Tuple[List[str], str]:
+        """
+        Handles creating and persisting all of the historical events as well
+        as insertion and batch meta events to make the batch navigable in the DAG.
+
+        Args:
+            events_to_create: List of historical events to create in JSON
+                dictionary format.
+            room_id: Room in which you want the events created.
+            batch_id_to_connect_to: The batch_id from the insertion event you
+                want this batch to connect to.
+            initial_prev_event_ids: These will be the prev_events for the first
+                event created. Each event created afterwards will point to the
+                previous event created.
+            inherited_depth: The depth to create the events at (you will
+                probably get this by calling inherit_depth_from_prev_ids(...)).
+            auth_event_ids: Define which events allow you to create the given
+                event in the room.
+            app_service_requester: The requester of an application service.
+ + Returns: + Tuple containing a list of created events and the next_batch_id + """ + + # Connect this current batch to the insertion event from the previous batch + last_event_in_batch = events_to_create[-1] + batch_event = { + "type": EventTypes.MSC2716_BATCH, + "sender": app_service_requester.user.to_string(), + "room_id": room_id, + "content": { + EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to, + EventContentFields.MSC2716_HISTORICAL: True, + }, + # Since the batch event is put at the end of the batch, + # where the newest-in-time event is, copy the origin_server_ts from + # the last event we're inserting + "origin_server_ts": last_event_in_batch["origin_server_ts"], + } + # Add the batch event to the end of the batch (newest-in-time) + events_to_create.append(batch_event) + + # Add an "insertion" event to the start of each batch (next to the oldest-in-time + # event in the batch) so the next batch can be connected to this one. + insertion_event = self.create_insertion_event_dict( + sender=app_service_requester.user.to_string(), + room_id=room_id, + # Since the insertion event is put at the start of the batch, + # where the oldest-in-time event is, copy the origin_server_ts from + # the first event we're inserting + origin_server_ts=events_to_create[0]["origin_server_ts"], + ) + next_batch_id = insertion_event["content"][ + EventContentFields.MSC2716_NEXT_BATCH_ID + ] + # Prepend the insertion event to the start of the batch (oldest-in-time) + events_to_create = [insertion_event] + events_to_create + + # Create and persist all of the historical events + event_ids = await self.persist_historical_events( + events_to_create=events_to_create, + room_id=room_id, + initial_prev_event_ids=initial_prev_event_ids, + inherited_depth=inherited_depth, + auth_event_ids=auth_event_ids, + app_service_requester=app_service_requester, + ) + + return event_ids, next_batch_id diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index 1dffcc314793..38ad4c24475b 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -15,13 +15,12 @@ import logging import re from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, List, Tuple +from typing import TYPE_CHECKING, Awaitable, Tuple from twisted.web.server import Request -from synapse.api.constants import EventContentFields, EventTypes +from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError, Codes, SynapseError -from synapse.appservice import ApplicationService from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -32,7 +31,7 @@ ) from synapse.http.site import SynapseRequest from synapse.rest.client.transactions import HttpTransactionCache -from synapse.types import JsonDict, Requester, UserID, create_requester +from synapse.types import JsonDict from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -77,102 +76,12 @@ class RoomBatchSendEventRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.hs = hs self.store = hs.get_datastore() - self.state_store = hs.get_storage().state self.event_creation_handler = hs.get_event_creation_handler() - self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() + self.room_batch_handler = hs.get_room_batch_handler() self.txns = HttpTransactionCache(hs) - async def _inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int: - ( - most_recent_prev_event_id, - 
most_recent_prev_event_depth, - ) = await self.store.get_max_depth_of(prev_event_ids) - - # We want to insert the historical event after the `prev_event` but before the successor event - # - # We inherit depth from the successor event instead of the `prev_event` - # because events returned from `/messages` are first sorted by `topological_ordering` - # which is just the `depth` and then tie-break with `stream_ordering`. - # - # We mark these inserted historical events as "backfilled" which gives them a - # negative `stream_ordering`. If we use the same depth as the `prev_event`, - # then our historical event will tie-break and be sorted before the `prev_event` - # when it should come after. - # - # We want to use the successor event depth so they appear after `prev_event` because - # it has a larger `depth` but before the successor event because the `stream_ordering` - # is negative before the successor event. - successor_event_ids = await self.store.get_successor_events( - [most_recent_prev_event_id] - ) - - # If we can't find any successor events, then it's a forward extremity of - # historical messages and we can just inherit from the previous historical - # event which we can already assume has the correct depth where we want - # to insert into. - if not successor_event_ids: - depth = most_recent_prev_event_depth - else: - ( - _, - oldest_successor_depth, - ) = await self.store.get_min_depth_of(successor_event_ids) - - depth = oldest_successor_depth - - return depth - - def _create_insertion_event_dict( - self, sender: str, room_id: str, origin_server_ts: int - ) -> JsonDict: - """Creates an event dict for an "insertion" event with the proper fields - and a random batch ID. - - Args: - sender: The event author MXID - room_id: The room ID that the event belongs to - origin_server_ts: Timestamp when the event was sent - - Returns: - The new event dictionary to insert. - """ - - next_batch_id = random_string(8) - insertion_event = { - "type": EventTypes.MSC2716_INSERTION, - "sender": sender, - "room_id": room_id, - "content": { - EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id, - EventContentFields.MSC2716_HISTORICAL: True, - }, - "origin_server_ts": origin_server_ts, - } - - return insertion_event - - async def _create_requester_for_user_id_from_app_service( - self, user_id: str, app_service: ApplicationService - ) -> Requester: - """Creates a new requester for the given user_id - and validates that the app service is allowed to control - the given user. - - Args: - user_id: The author MXID that the app service is controlling - app_service: The app service that controls the user - - Returns: - Requester object - """ - - await self.auth.validate_appservice_can_control_user_id(app_service, user_id) - - return create_requester(user_id, app_service=app_service) - async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: @@ -200,123 +109,62 @@ async def on_POST( errcode=Codes.MISSING_PARAM, ) + # Verify the batch_id_from_query corresponds to an actual insertion event + # and have the batch connected. 
+ if batch_id_from_query: + corresponding_insertion_event_id = ( + await self.store.get_insertion_event_by_batch_id( + room_id, batch_id_from_query + ) + ) + if corresponding_insertion_event_id is None: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "No insertion event corresponds to the given ?batch_id", + errcode=Codes.INVALID_PARAM, + ) + # For the event we are inserting next to (`prev_event_ids_from_query`), # find the most recent auth events (derived from state events) that # allowed that message to be sent. We will use that as a base # to auth our historical messages against. - ( - most_recent_prev_event_id, - _, - ) = await self.store.get_max_depth_of(prev_event_ids_from_query) - # mapping from (type, state_key) -> state_event_id - prev_state_map = await self.state_store.get_state_ids_for_event( - most_recent_prev_event_id + auth_event_ids = await self.room_batch_handler.get_most_recent_auth_event_ids_from_event_id_list( + prev_event_ids_from_query ) - # List of state event ID's - prev_state_ids = list(prev_state_map.values()) - auth_event_ids = prev_state_ids - - state_event_ids_at_start = [] - for state_event in body["state_events_at_start"]: - assert_params_in_dict( - state_event, ["type", "origin_server_ts", "content", "sender"] - ) - logger.debug( - "RoomBatchSendEventRestServlet inserting state_event=%s, auth_event_ids=%s", - state_event, - auth_event_ids, + # Create and persist all of the state events that float off on their own + # before the batch. These will most likely be all of the invite/member + # state events used to auth the upcoming historical messages. + state_event_ids_at_start = ( + await self.room_batch_handler.persist_state_events_at_start( + state_events_at_start=body["state_events_at_start"], + room_id=room_id, + initial_auth_event_ids=auth_event_ids, + app_service_requester=requester, ) + ) + # Update our ongoing auth event ID list with all of the new state we + # just created + auth_event_ids.extend(state_event_ids_at_start) - event_dict = { - "type": state_event["type"], - "origin_server_ts": state_event["origin_server_ts"], - "content": state_event["content"], - "room_id": room_id, - "sender": state_event["sender"], - "state_key": state_event["state_key"], - } - - # Mark all events as historical - event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True - - # Make the state events float off on their own - fake_prev_event_id = "$" + random_string(43) - - # TODO: This is pretty much the same as some other code to handle inserting state in this file - if event_dict["type"] == EventTypes.Member: - membership = event_dict["content"].get("membership", None) - event_id, _ = await self.room_member_handler.update_membership( - await self._create_requester_for_user_id_from_app_service( - state_event["sender"], requester.app_service - ), - target=UserID.from_string(event_dict["state_key"]), - room_id=room_id, - action=membership, - content=event_dict["content"], - outlier=True, - prev_event_ids=[fake_prev_event_id], - # Make sure to use a copy of this list because we modify it - # later in the loop here. Otherwise it will be the same - # reference and also update in the event when we append later. - auth_event_ids=auth_event_ids.copy(), - ) - else: - # TODO: Add some complement tests that adds state that is not member joins - # and will use this code path. Maybe we only want to support join state events - # and can get rid of this `else`? 
- ( - event, - _, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - await self._create_requester_for_user_id_from_app_service( - state_event["sender"], requester.app_service - ), - event_dict, - outlier=True, - prev_event_ids=[fake_prev_event_id], - # Make sure to use a copy of this list because we modify it - # later in the loop here. Otherwise it will be the same - # reference and also update in the event when we append later. - auth_event_ids=auth_event_ids.copy(), - ) - event_id = event.event_id - - state_event_ids_at_start.append(event_id) - auth_event_ids.append(event_id) - - events_to_create = body["events"] - - inherited_depth = await self._inherit_depth_from_prev_ids( + inherited_depth = await self.room_batch_handler.inherit_depth_from_prev_ids( prev_event_ids_from_query ) + events_to_create = body["events"] + # Figure out which batch to connect to. If they passed in # batch_id_from_query let's use it. The batch ID passed in comes # from the batch_id in the "insertion" event from the previous batch. last_event_in_batch = events_to_create[-1] - batch_id_to_connect_to = batch_id_from_query base_insertion_event = None if batch_id_from_query: + batch_id_to_connect_to = batch_id_from_query # All but the first base insertion event should point at a fake # event, which causes the HS to ask for the state at the start of # the batch later. + fake_prev_event_id = "$" + random_string(43) prev_event_ids = [fake_prev_event_id] - - # Verify the batch_id_from_query corresponds to an actual insertion event - # and have the batch connected. - corresponding_insertion_event_id = ( - await self.store.get_insertion_event_by_batch_id( - room_id, batch_id_from_query - ) - ) - if corresponding_insertion_event_id is None: - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "No insertion event corresponds to the given ?batch_id", - errcode=Codes.INVALID_PARAM, - ) - pass # Otherwise, create an insertion event to act as a starting point. 
# # We don't always have an insertion event to start hanging more history @@ -327,10 +175,12 @@ async def on_POST( else: prev_event_ids = prev_event_ids_from_query - base_insertion_event_dict = self._create_insertion_event_dict( - sender=requester.user.to_string(), - room_id=room_id, - origin_server_ts=last_event_in_batch["origin_server_ts"], + base_insertion_event_dict = ( + self.room_batch_handler.create_insertion_event_dict( + sender=requester.user.to_string(), + room_id=room_id, + origin_server_ts=last_event_in_batch["origin_server_ts"], + ) ) base_insertion_event_dict["prev_events"] = prev_event_ids.copy() @@ -338,7 +188,7 @@ async def on_POST( base_insertion_event, _, ) = await self.event_creation_handler.create_and_send_nonmember_event( - await self._create_requester_for_user_id_from_app_service( + await self.room_batch_handler.create_requester_for_user_id_from_app_service( base_insertion_event_dict["sender"], requester.app_service, ), @@ -353,92 +203,17 @@ async def on_POST( EventContentFields.MSC2716_NEXT_BATCH_ID ] - # Connect this current batch to the insertion event from the previous batch - batch_event = { - "type": EventTypes.MSC2716_BATCH, - "sender": requester.user.to_string(), - "room_id": room_id, - "content": { - EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to, - EventContentFields.MSC2716_HISTORICAL: True, - }, - # Since the batch event is put at the end of the batch, - # where the newest-in-time event is, copy the origin_server_ts from - # the last event we're inserting - "origin_server_ts": last_event_in_batch["origin_server_ts"], - } - # Add the batch event to the end of the batch (newest-in-time) - events_to_create.append(batch_event) - - # Add an "insertion" event to the start of each batch (next to the oldest-in-time - # event in the batch) so the next batch can be connected to this one. - insertion_event = self._create_insertion_event_dict( - sender=requester.user.to_string(), + # Create and persist all of the historical events as well as insertion + # and batch meta events to make the batch navigable in the DAG. 
+ event_ids, next_batch_id = await self.room_batch_handler.handle_batch_of_events( + events_to_create=events_to_create, room_id=room_id, - # Since the insertion event is put at the start of the batch, - # where the oldest-in-time event is, copy the origin_server_ts from - # the first event we're inserting - origin_server_ts=events_to_create[0]["origin_server_ts"], + batch_id_to_connect_to=batch_id_to_connect_to, + initial_prev_event_ids=prev_event_ids, + inherited_depth=inherited_depth, + auth_event_ids=auth_event_ids, + app_service_requester=requester, ) - # Prepend the insertion event to the start of the batch (oldest-in-time) - events_to_create = [insertion_event] + events_to_create - - event_ids = [] - events_to_persist = [] - for ev in events_to_create: - assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"]) - - event_dict = { - "type": ev["type"], - "origin_server_ts": ev["origin_server_ts"], - "content": ev["content"], - "room_id": room_id, - "sender": ev["sender"], # requester.user.to_string(), - "prev_events": prev_event_ids.copy(), - } - - # Mark all events as historical - event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True - - event, context = await self.event_creation_handler.create_event( - await self._create_requester_for_user_id_from_app_service( - ev["sender"], requester.app_service - ), - event_dict, - prev_event_ids=event_dict.get("prev_events"), - auth_event_ids=auth_event_ids, - historical=True, - depth=inherited_depth, - ) - logger.debug( - "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s, auth_event_ids=%s", - event, - prev_event_ids, - auth_event_ids, - ) - - assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( - event.sender, - ) - - events_to_persist.append((event, context)) - event_id = event.event_id - - event_ids.append(event_id) - prev_event_ids = [event_id] - - # Persist events in reverse-chronological order so they have the - # correct stream_ordering as they are backfilled (which decrements). - # Events are sorted by (topological_ordering, stream_ordering) - # where topological_ordering is just depth. 
- for (event, context) in reversed(events_to_persist): - ev = await self.event_creation_handler.handle_new_client_event( - await self._create_requester_for_user_id_from_app_service( - event["sender"], requester.app_service - ), - event=event, - context=context, - ) insertion_event_id = event_ids[0] batch_event_id = event_ids[-1] @@ -447,9 +222,7 @@ async def on_POST( response_dict = { "state_event_ids": state_event_ids_at_start, "event_ids": historical_event_ids, - "next_batch_id": insertion_event["content"][ - EventContentFields.MSC2716_NEXT_BATCH_ID - ], + "next_batch_id": next_batch_id, "insertion_event_id": insertion_event_id, "batch_event_id": batch_event_id, } diff --git a/synapse/server.py b/synapse/server.py index 0783df41d486..5bc045d615b4 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -97,6 +97,7 @@ RoomCreationHandler, RoomShutdownHandler, ) +from synapse.handlers.room_batch import RoomBatchHandler from synapse.handlers.room_list import RoomListHandler from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler from synapse.handlers.room_member_worker import RoomMemberWorkerHandler @@ -437,6 +438,10 @@ def get_federation_http_client(self) -> MatrixFederationHttpClient: def get_room_creation_handler(self) -> RoomCreationHandler: return RoomCreationHandler(self) + @cache_in_self + def get_room_batch_handler(self) -> RoomBatchHandler: + return RoomBatchHandler(self) + @cache_in_self def get_room_shutdown_handler(self) -> RoomShutdownHandler: return RoomShutdownHandler(self) From b742cb2e4a2257504f27796275d65b6874f43f5b Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 11 Oct 2021 14:48:38 +0100 Subject: [PATCH 088/111] Release script improvements (#10966) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/10966.misc | 1 + scripts-dev/release.py | 36 ++++++++++++++++++++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 changelog.d/10966.misc diff --git a/changelog.d/10966.misc b/changelog.d/10966.misc new file mode 100644 index 000000000000..095b9d56897c --- /dev/null +++ b/changelog.d/10966.misc @@ -0,0 +1 @@ +Make the release script more robust and transparent. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index ab2d860ab8bd..4e1f99fee4e1 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -35,6 +35,19 @@ from packaging import version +def run_until_successful(command, *args, **kwargs): + while True: + completed_process = subprocess.run(command, *args, **kwargs) + exit_code = completed_process.returncode + if exit_code == 0: + # successful, so nothing more to do here. + return completed_process + + print(f"The command {command!r} failed with exit code {exit_code}.") + print("Please try to correct the failure and then re-run.") + click.confirm("Try again?", abort=True) + + @click.group() def cli(): """An interactive script to walk through the parts of creating a release. 
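For context on the retry helper introduced above: `click.confirm("Try again?", abort=True)` raises `click.Abort` when the user declines, so each failing release step can be retried any number of times or abandoned cleanly. A minimal, self-contained sketch of the pattern follows; the failing command is purely illustrative and not taken from the patch:

```python
# Standalone sketch of the retry-until-success pattern used by the release
# script. `ls /nonexistent` is a hypothetical stand-in for a flaky step.
import subprocess

import click


def run_until_successful(command, *args, **kwargs):
    while True:
        completed_process = subprocess.run(command, *args, **kwargs)
        if completed_process.returncode == 0:
            # Successful, so nothing more to do here.
            return completed_process
        print(f"The command {command!r} failed with exit code {completed_process.returncode}.")
        # Answering "n" raises click.Abort, which exits the script cleanly.
        click.confirm("Try again?", abort=True)


if __name__ == "__main__":
    run_until_successful("ls /nonexistent", shell=True)
```

The hunks below show the release script adopting this wrapper for the steps most likely to fail (towncrier and the Debian changelog tooling).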
@@ -197,7 +210,7 @@ def prepare(): f.write(parsed_synapse_ast.dumps()) # Generate changelogs - subprocess.run("python3 -m towncrier", shell=True) + run_until_successful("python3 -m towncrier", shell=True) # Generate debian changelogs if parsed_new_version.pre is not None: @@ -209,11 +222,11 @@ def prepare(): else: debian_version = new_version - subprocess.run( + run_until_successful( f'dch -M -v {debian_version} "New synapse release {debian_version}."', shell=True, ) - subprocess.run('dch -M -r -D stable ""', shell=True) + run_until_successful('dch -M -r -D stable ""', shell=True) # Show the user the changes and ask if they want to edit the change log. repo.git.add("-u") @@ -224,7 +237,7 @@ def prepare(): # Commit the changes. repo.git.add("-u") - repo.git.commit(f"-m {new_version}") + repo.git.commit("-m", new_version) # We give the option to bail here in case the user wants to make sure things # are OK before pushing. @@ -239,6 +252,8 @@ def prepare(): # Otherwise, push and open the changelog in the browser. repo.git.push("-u", repo.remote().name, repo.active_branch.name) + print("Opening the changelog in your browser...") + print("Please ask others to give it a check.") click.launch( f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md" ) @@ -290,7 +305,19 @@ def tag(gh_token: Optional[str]): # If no token was given, we bail here if not gh_token: + print("Launching the GitHub release page in your browser.") + print("Please correct the title and create a draft.") + if current_version.is_prerelease: + print("As this is an RC, remember to mark it as a pre-release!") + print("(by the way, this step can be automated by passing --gh-token,") + print("or one of the GH_TOKEN or GITHUB_TOKEN env vars.)") click.launch(f"https://github.com/matrix-org/synapse/releases/edit/{tag_name}") + + print("Once done, you need to wait for the release assets to build.") + if click.confirm("Launch the release assets actions page?", default=True): + click.launch( + f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}" + ) return # Create a new draft release @@ -305,6 +332,7 @@ def tag(gh_token: Optional[str]): ) # Open the release and the actions where we are building the assets. + print("Launching the release page and the actions page.") click.launch(release.html_url) click.launch( f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}" From 4c838112dcb06d83e6cfd0f14bf95197fb466863 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 11 Oct 2021 16:28:29 +0100 Subject: [PATCH 089/111] Remove unnecessary list comprehension in `synapse_port_db` to fix linting in CI (#11043) --- changelog.d/11043.misc | 1 + scripts/synapse_port_db | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11043.misc diff --git a/changelog.d/11043.misc b/changelog.d/11043.misc new file mode 100644 index 000000000000..c5f127bb4646 --- /dev/null +++ b/changelog.d/11043.misc @@ -0,0 +1 @@ +Remove unnecessary list comprehension from `synapse_port_db` to satisfy code style requirements. 
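The `synapse_port_db` change below swaps a list comprehension inside `max()` for a generator expression. The two are functionally equivalent here; the generator form simply avoids materialising an intermediate list, which is what the code-style check asks for. A small illustrative sketch, with invented table names:

```python
# Both forms compute the same maximum key length; the generator expression
# avoids building a throwaway list first. The table names are hypothetical.
tables = {"events": None, "state_groups": None, "room_memberships": None}

old_style = max([len(t) for t in tables.keys()])  # builds a list, then scans it
new_style = max(len(t) for t in tables.keys())    # feeds max() lazily

assert old_style == new_style == len("room_memberships")
```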
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index a947d9e49e42..349866eb9a82 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -1069,7 +1069,7 @@ class CursesProgress(Progress): self.stdscr.addstr(0, 0, status, curses.A_BOLD) - max_len = max([len(t) for t in self.tables.keys()]) + max_len = max(len(t) for t in self.tables.keys()) left_margin = 5 middle_space = 1 From 3828dd819b972ed4381413c542e9e8cd43041e1b Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 11 Oct 2021 16:29:02 +0100 Subject: [PATCH 090/111] Pass through `SynapseError`s that are raised from experimental `check_event_allowed` callback of the module API (#11042) Co-authored-by: Brendan Abolivier --- changelog.d/11042.bugfix | 1 + synapse/events/third_party_rules.py | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelog.d/11042.bugfix diff --git a/changelog.d/11042.bugfix b/changelog.d/11042.bugfix new file mode 100644 index 000000000000..536c47417d49 --- /dev/null +++ b/changelog.d/11042.bugfix @@ -0,0 +1 @@ +Work around a regression, introduced in Synapse 1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index d94b1bb4d275..976d9fa4468d 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -217,6 +217,15 @@ async def check_event_allowed( for callback in self._check_event_allowed_callbacks: try: res, replacement_data = await callback(event, state_events) + except SynapseError as e: + # FIXME: Being able to throw SynapseErrors is relied upon by + # some modules. PR #10386 accidentally broke this ability. + # That said, we aren't keen on exposing this implementation detail + # to modules and we should one day have a proper way to do what + # is wanted. + # This module callback needs a rework so that hacks such as + # this one are not necessary. + raise e except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue From 5e29d417fc5933e26ba85a40c298d46b09580330 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 11 Oct 2021 16:34:31 +0100 Subject: [PATCH 091/111] Include the requirements for [mypy,lint] in [dev] (#11034) --- README.rst | 2 +- changelog.d/11034.misc | 1 + docs/development/contributing_guide.md | 2 +- setup.py | 23 ++++++++++++++--------- 4 files changed, 17 insertions(+), 11 deletions(-) create mode 100644 changelog.d/11034.misc diff --git a/README.rst b/README.rst index 63deb06eac2b..50de3a49b05f 100644 --- a/README.rst +++ b/README.rst @@ -298,7 +298,7 @@ to install using pip and a virtualenv:: python3 -m venv ./env source ./env/bin/activate - pip install -e ".[all,test]" + pip install -e ".[all,dev]" This will run a process of downloading and installing all the needed dependencies into a virtual env. If any dependencies fail to install, diff --git a/changelog.d/11034.misc b/changelog.d/11034.misc new file mode 100644 index 000000000000..b15fd66ac360 --- /dev/null +++ b/changelog.d/11034.misc @@ -0,0 +1 @@ +When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing. 
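The `third_party_rules` fix in the previous patch restores the ability of a module's `check_event_allowed` callback to veto an event by raising `SynapseError`, instead of having the exception swallowed. A hedged sketch of such a callback: the policy and the size limit are invented for illustration, and only the `(event, state_events)` signature and the `(allowed, replacement_data)` return shape come from the callback invocation shown in that patch:

```python
# Illustrative third-party rules callback. The 10,000-character limit is
# hypothetical; the point is that a raised SynapseError now propagates to
# the sender (see the re-raise added in synapse/events/third_party_rules.py).
from synapse.api.errors import SynapseError


async def check_event_allowed(event, state_events):
    body = event.content.get("body", "")
    if len(body) > 10_000:
        # Reject the event: this error now reaches the client.
        raise SynapseError(400, "Message too long for this room")
    # Allow the event unchanged (no replacement data).
    return True, None
```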
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 580a4f7f9854..3bf08a72bb21 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -50,7 +50,7 @@ setup a *virtualenv*, as follows: cd path/where/you/have/cloned/the/repository python3 -m venv ./env source ./env/bin/activate -pip install -e ".[all,lint,mypy,test]" +pip install -e ".[all,dev]" pip install tox ``` diff --git a/setup.py b/setup.py index f8b4487bc19b..220084a49d46 100755 --- a/setup.py +++ b/setup.py @@ -103,15 +103,6 @@ def exec_file(path_segments): "flake8", ] -CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [ - # The following are used by the release script - "click==7.1.2", - "redbaron==0.9.2", - "GitPython==3.1.14", - "commonmark==0.9.1", - "pygithub==1.55", -] - CONDITIONAL_REQUIREMENTS["mypy"] = [ "mypy==0.910", "mypy-zope==0.3.2", @@ -130,6 +121,20 @@ def exec_file(path_segments): # parameterized_class decorator was introduced in parameterized 0.7.0 CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"] +CONDITIONAL_REQUIREMENTS["dev"] = ( + CONDITIONAL_REQUIREMENTS["lint"] + + CONDITIONAL_REQUIREMENTS["mypy"] + + CONDITIONAL_REQUIREMENTS["test"] + + [ + # The following are used by the release script + "click==7.1.2", + "redbaron==0.9.2", + "GitPython==3.1.14", + "commonmark==0.9.1", + "pygithub==1.55", + ] +) + setup( name="matrix-synapse", version=version, From e0f11ae4a5688a0521f20f36e440c87cfccfd69a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 11 Oct 2021 17:42:10 +0100 Subject: [PATCH 092/111] disallow-untyped-defs for synapse.push (#11023) --- changelog.d/11023.misc | 1 + mypy.ini | 3 +++ synapse/push/__init__.py | 2 +- synapse/push/bulk_push_rule_evaluator.py | 20 ++++++++++++++++---- synapse/push/clientformat.py | 4 +++- synapse/push/httppusher.py | 4 ++-- synapse/storage/databases/main/push_rule.py | 4 ++-- 7 files changed, 28 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11023.misc diff --git a/changelog.d/11023.misc b/changelog.d/11023.misc new file mode 100644 index 000000000000..ecc0467529be --- /dev/null +++ b/changelog.d/11023.misc @@ -0,0 +1 @@ +Add additional type hints for `synapse.push`. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index bc2b59ff5622..a7019e2bd498 100644 --- a/mypy.ini +++ b/mypy.ini @@ -96,6 +96,9 @@ files = [mypy-synapse.handlers.*] disallow_untyped_defs = True +[mypy-synapse.push.*] +disallow_untyped_defs = True + [mypy-synapse.rest.*] disallow_untyped_defs = True diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 2c23afe8e3ab..820f6f3f7ec0 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -94,7 +94,7 @@ def on_new_notifications(self, max_token: RoomStreamToken) -> None: self._start_processing() @abc.abstractmethod - def _start_processing(self): + def _start_processing(self) -> None: """Start processing push notifications.""" raise NotImplementedError() diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c337e530d3cf..0622a37ae8fd 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -290,6 +290,12 @@ def _condition_checker( return True +MemberMap = Dict[str, Tuple[str, str]] +Rule = Dict[str, dict] +RulesByUser = Dict[str, List[Rule]] +StateGroup = Union[object, int] + + @attr.s(slots=True) class RulesForRoomData: """The data stored in the cache by `RulesForRoom`. 
@@ -299,16 +305,16 @@ class RulesForRoomData:
     """
 
     # event_id -> (user_id, state)
-    member_map = attr.ib(type=Dict[str, Tuple[str, str]], factory=dict)
+    member_map = attr.ib(type=MemberMap, factory=dict)
     # user_id -> rules
-    rules_by_user = attr.ib(type=Dict[str, List[Dict[str, dict]]], factory=dict)
+    rules_by_user = attr.ib(type=RulesByUser, factory=dict)
 
     # The last state group we updated the caches for. If the state_group of
     # a new event comes along, we know that we can just return the cached
     # result.
     # On invalidation of the rules themselves (if the user changes them),
     # we invalidate everything and set state_group to `object()`
-    state_group = attr.ib(type=Union[object, int], factory=object)
+    state_group = attr.ib(type=StateGroup, factory=object)
 
     # A sequence number to keep track of when we're allowed to update the
     # cache. We bump the sequence number when we invalidate the cache. If
@@ -532,7 +538,13 @@ async def _update_rules_with_member_event_ids(
 
         self.update_cache(sequence, members, ret_rules_by_user, state_group)
 
-    def update_cache(self, sequence, members, rules_by_user, state_group) -> None:
+    def update_cache(
+        self,
+        sequence: int,
+        members: MemberMap,
+        rules_by_user: RulesByUser,
+        state_group: StateGroup,
+    ) -> None:
         if sequence == self.data.sequence:
             self.data.member_map.update(members)
             self.data.rules_by_user = rules_by_user
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index 1fc9716a3422..c5708cd8885b 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -19,7 +19,9 @@
 from synapse.types import UserID
 
 
-def format_push_rules_for_user(user: UserID, ruleslist) -> Dict[str, Dict[str, list]]:
+def format_push_rules_for_user(
+    user: UserID, ruleslist: List
+) -> Dict[str, Dict[str, list]]:
     """Converts a list of raw rules and an enabled map into nested dictionaries
     to match the Matrix client-server format for push rules"""
 
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index eac65572b2d8..dbf4ad7f97ee 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -403,10 +403,10 @@ async def dispatch_push(
         rejected = resp["rejected"]
         return rejected
 
-    async def _send_badge(self, badge):
+    async def _send_badge(self, badge: int) -> None:
         """
         Args:
-            badge (int): number of unread messages
+            badge: number of unread messages
         """
         logger.debug("Sending updated badge count %d to %s", badge, self.name)
         d = {
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index b81e33964ac7..fc720f59478b 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -14,7 +14,7 @@
 # limitations under the License.
import abc import logging -from typing import List, Tuple, Union +from typing import Dict, List, Tuple, Union from synapse.api.errors import NotFoundError, StoreError from synapse.push.baserules import list_with_base_rules @@ -139,7 +139,7 @@ async def get_push_rules_for_user(self, user_id): return _load_rules(rows, enabled_map, use_new_defaults) @cached(max_entries=5000) - async def get_push_rules_enabled_for_user(self, user_id): + async def get_push_rules_enabled_for_user(self, user_id) -> Dict[str, bool]: results = await self.db_pool.simple_select_list( table="push_rules_enable", keyvalues={"user_name": user_id}, From 8c5255b6643f0f1465a5ac9bf28dcea3b4f00405 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 10:47:15 +0100 Subject: [PATCH 093/111] 1.45.0rc1 --- CHANGES.md | 78 +++++++++++++++++++++++++++++++++++++++ changelog.d/10822.feature | 1 - changelog.d/10877.feature | 1 - changelog.d/10888.misc | 1 - changelog.d/10892.misc | 1 - changelog.d/10894.feature | 1 - changelog.d/10895.misc | 1 - changelog.d/10902.misc | 1 - changelog.d/10903.misc | 1 - changelog.d/10910.feature | 1 - changelog.d/10915.misc | 1 - changelog.d/10916.misc | 1 - changelog.d/10922.bugfix | 1 - changelog.d/10924.bugfix | 1 - changelog.d/10926.misc | 1 - changelog.d/10927.bugfix | 1 - changelog.d/10934.misc | 1 - changelog.d/10935.misc | 1 - changelog.d/10936.misc | 1 - changelog.d/10939.misc | 1 - changelog.d/10940.misc | 1 - changelog.d/10945.misc | 1 - changelog.d/10947.bugfix | 1 - changelog.d/10954.feature | 1 - changelog.d/10956.bugfix | 1 - changelog.d/10958.misc | 1 - changelog.d/10959.misc | 1 - changelog.d/10960.bugfix | 1 - changelog.d/10961.misc | 1 - changelog.d/10962.bugfix | 1 - changelog.d/10963.misc | 1 - changelog.d/10966.misc | 1 - changelog.d/10971.doc | 1 - changelog.d/10973.doc | 1 - changelog.d/10974.misc | 1 - changelog.d/10981.bugfix | 1 - changelog.d/10982.bugfix | 1 - changelog.d/10983.misc | 1 - changelog.d/10985.misc | 1 - changelog.d/10986.misc | 1 - changelog.d/10987.misc | 1 - changelog.d/10988.misc | 1 - changelog.d/10990.doc | 1 - changelog.d/10991.doc | 1 - changelog.d/10992.misc | 1 - changelog.d/10993.misc | 1 - changelog.d/10994.misc | 1 - changelog.d/10995.bugfix | 1 - changelog.d/11002.bugfix | 1 - changelog.d/11003.bugfix | 1 - changelog.d/11004.misc | 1 - changelog.d/11005.misc | 1 - changelog.d/11006.misc | 1 - changelog.d/11010.misc | 1 - changelog.d/11011.misc | 1 - changelog.d/11017.misc | 1 - changelog.d/11019.misc | 1 - changelog.d/11021.misc | 1 - changelog.d/11023.misc | 1 - changelog.d/11028.feature | 1 - changelog.d/11034.misc | 1 - changelog.d/11042.bugfix | 1 - changelog.d/11043.misc | 1 - changelog.d/9655.feature | 1 - debian/changelog | 7 +++- synapse/__init__.py | 2 +- 66 files changed, 84 insertions(+), 66 deletions(-) delete mode 100644 changelog.d/10822.feature delete mode 100644 changelog.d/10877.feature delete mode 100644 changelog.d/10888.misc delete mode 100644 changelog.d/10892.misc delete mode 100644 changelog.d/10894.feature delete mode 100644 changelog.d/10895.misc delete mode 100644 changelog.d/10902.misc delete mode 100644 changelog.d/10903.misc delete mode 100644 changelog.d/10910.feature delete mode 100644 changelog.d/10915.misc delete mode 100644 changelog.d/10916.misc delete mode 100644 changelog.d/10922.bugfix delete mode 100644 changelog.d/10924.bugfix delete mode 100644 changelog.d/10926.misc delete mode 100644 changelog.d/10927.bugfix delete mode 100644 changelog.d/10934.misc delete mode 100644 
changelog.d/10935.misc delete mode 100644 changelog.d/10936.misc delete mode 100644 changelog.d/10939.misc delete mode 100644 changelog.d/10940.misc delete mode 100644 changelog.d/10945.misc delete mode 100644 changelog.d/10947.bugfix delete mode 100644 changelog.d/10954.feature delete mode 100644 changelog.d/10956.bugfix delete mode 100644 changelog.d/10958.misc delete mode 100644 changelog.d/10959.misc delete mode 100644 changelog.d/10960.bugfix delete mode 100644 changelog.d/10961.misc delete mode 100644 changelog.d/10962.bugfix delete mode 100644 changelog.d/10963.misc delete mode 100644 changelog.d/10966.misc delete mode 100644 changelog.d/10971.doc delete mode 100644 changelog.d/10973.doc delete mode 100644 changelog.d/10974.misc delete mode 100644 changelog.d/10981.bugfix delete mode 100644 changelog.d/10982.bugfix delete mode 100644 changelog.d/10983.misc delete mode 100644 changelog.d/10985.misc delete mode 100644 changelog.d/10986.misc delete mode 100644 changelog.d/10987.misc delete mode 100644 changelog.d/10988.misc delete mode 100644 changelog.d/10990.doc delete mode 100644 changelog.d/10991.doc delete mode 100644 changelog.d/10992.misc delete mode 100644 changelog.d/10993.misc delete mode 100644 changelog.d/10994.misc delete mode 100644 changelog.d/10995.bugfix delete mode 100644 changelog.d/11002.bugfix delete mode 100644 changelog.d/11003.bugfix delete mode 100644 changelog.d/11004.misc delete mode 100644 changelog.d/11005.misc delete mode 100644 changelog.d/11006.misc delete mode 100644 changelog.d/11010.misc delete mode 100644 changelog.d/11011.misc delete mode 100644 changelog.d/11017.misc delete mode 100644 changelog.d/11019.misc delete mode 100644 changelog.d/11021.misc delete mode 100644 changelog.d/11023.misc delete mode 100644 changelog.d/11028.feature delete mode 100644 changelog.d/11034.misc delete mode 100644 changelog.d/11042.bugfix delete mode 100644 changelog.d/11043.misc delete mode 100644 changelog.d/9655.feature diff --git a/CHANGES.md b/CHANGES.md index 3f048ba881ae..9b3999896710 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,81 @@ +Synapse 1.45.0rc1 (2021-10-12) +============================== + +Features +-------- + +- Add [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069) support to `/account/whoami`. ([\#9655](https://github.com/matrix-org/synapse/issues/9655)) +- Support autodiscovery of oEmbed previews. ([\#10822](https://github.com/matrix-org/synapse/issues/10822)) +- Ensure `(room_id, next_batch_id)` is unique across [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms to avoid cross-talk/conflicts between batches. ([\#10877](https://github.com/matrix-org/synapse/issues/10877)) +- Add a `user_may_send_3pid_invite` spam checker callback for modules to allow or deny 3PID invites. ([\#10894](https://github.com/matrix-org/synapse/issues/10894)) +- Add a spam checker callback to allow or deny room joins. ([\#10910](https://github.com/matrix-org/synapse/issues/10910)) +- Include an `update_synapse_database` script in the distribution. Contributed by @Fizzadar at Beeper. ([\#10954](https://github.com/matrix-org/synapse/issues/10954)) +- Include exception information in JSON logging output. Contributed by @Fizzadar at Beeper. ([\#11028](https://github.com/matrix-org/synapse/issues/11028)) + + +Bugfixes +-------- + +- Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki. 
([\#10922](https://github.com/matrix-org/synapse/issues/10922))
+- Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory. ([\#10924](https://github.com/matrix-org/synapse/issues/10924))
+- Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8/9 could be applied to earlier room versions in some situations. ([\#10927](https://github.com/matrix-org/synapse/issues/10927))
+- Fix a long-standing bug wherein deactivated users still count towards the MAU limit. ([\#10947](https://github.com/matrix-org/synapse/issues/10947))
+- Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. ([\#10956](https://github.com/matrix-org/synapse/issues/10956))
+- Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and disabled users. ([\#10960](https://github.com/matrix-org/synapse/issues/10960))
+- Fix the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches from the room creator with an unknown batch ID error in existing room versions. ([\#10962](https://github.com/matrix-org/synapse/issues/10962))
+- Fix a bug that could leak local users' per-room nicknames and avatars when the user directory is rebuilt. ([\#10981](https://github.com/matrix-org/synapse/issues/10981))
+- Fix a long-standing bug where the remainder of a batch of user directory changes would be silently dropped if the server left a room early in the batch. ([\#10982](https://github.com/matrix-org/synapse/issues/10982))
+- Correct a bugfix introduced in Synapse v1.44.0 that wouldn't catch every error if the connection breaks before a response could be written to it. ([\#10995](https://github.com/matrix-org/synapse/issues/10995))
+- Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see you in the user_directory. ([\#11002](https://github.com/matrix-org/synapse/issues/11002))
+- Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public. ([\#11003](https://github.com/matrix-org/synapse/issues/11003))
+- Work around a regression, introduced in Synapse 1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. ([\#11042](https://github.com/matrix-org/synapse/issues/11042))
+
+
+Improved Documentation
+----------------------
+
+- Change wording ("reference homeserver") in Synapse repository documentation. Contributed by @maxkratz. ([\#10971](https://github.com/matrix-org/synapse/issues/10971))
+- Fix a dead URL in development documentation (SAML) and change wording from "Riot" to "Element". Contributed by @maxkratz. ([\#10973](https://github.com/matrix-org/synapse/issues/10973))
+- Add additional content to the Welcome and Overview page of the documentation. ([\#10990](https://github.com/matrix-org/synapse/issues/10990))
+- Update links to MSCs in documentation. Contributed by @dklimpel. ([\#10991](https://github.com/matrix-org/synapse/issues/10991))
+
+
+Internal Changes
+----------------
+
+- Improve type hinting in `synapse.util`. ([\#10888](https://github.com/matrix-org/synapse/issues/10888))
+- Add further type hints to `synapse.storage.util`.
([\#10892](https://github.com/matrix-org/synapse/issues/10892)) +- Fix type hints to be compatible with an upcoming change to Twisted. ([\#10895](https://github.com/matrix-org/synapse/issues/10895)) +- Update utility code to handle C implementations of frozendict. ([\#10902](https://github.com/matrix-org/synapse/issues/10902)) +- Drop old functionality which maintained database compatibility with Synapse versions before 1.31. ([\#10903](https://github.com/matrix-org/synapse/issues/10903)) +- Clean-up configuration helper classes for the `ServerConfig` class. ([\#10915](https://github.com/matrix-org/synapse/issues/10915)) +- Use direct references to config flags. ([\#10916](https://github.com/matrix-org/synapse/issues/10916), [\#10959](https://github.com/matrix-org/synapse/issues/10959), [\#10985](https://github.com/matrix-org/synapse/issues/10985)) +- Clean up some of the federation event authentication code for clarity. ([\#10926](https://github.com/matrix-org/synapse/issues/10926), [\#10940](https://github.com/matrix-org/synapse/issues/10940), [\#10986](https://github.com/matrix-org/synapse/issues/10986), [\#10987](https://github.com/matrix-org/synapse/issues/10987), [\#10988](https://github.com/matrix-org/synapse/issues/10988), [\#11010](https://github.com/matrix-org/synapse/issues/11010), [\#11011](https://github.com/matrix-org/synapse/issues/11011)) +- Refactor various parts of the codebase to use `RoomVersion` objects instead of room version identifier strings. ([\#10934](https://github.com/matrix-org/synapse/issues/10934)) +- Refactor user directory tests in preparation for upcoming changes. ([\#10935](https://github.com/matrix-org/synapse/issues/10935)) +- Include the event id in the logcontext when handling PDUs received over federation. ([\#10936](https://github.com/matrix-org/synapse/issues/10936)) +- Fix logged errors in unit tests. ([\#10939](https://github.com/matrix-org/synapse/issues/10939)) +- Fix a broken test to ensure that consent configuration works during registration. ([\#10945](https://github.com/matrix-org/synapse/issues/10945)) +- Add type hints to filtering classes. ([\#10958](https://github.com/matrix-org/synapse/issues/10958)) +- Add type-hint to `HomeserverTestcase.setup_test_homeserver`. ([\#10961](https://github.com/matrix-org/synapse/issues/10961)) +- Fix the test utility function `create_room_as` so that `is_public=True` will explicitly set the `visibility` parameter of room creation requests to `public`. Contributed by @AndrewFerr. ([\#10963](https://github.com/matrix-org/synapse/issues/10963)) +- Make the release script more robust and transparent. ([\#10966](https://github.com/matrix-org/synapse/issues/10966)) +- Refactor [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` mega function into smaller handler functions. ([\#10974](https://github.com/matrix-org/synapse/issues/10974)) +- Log stack traces when a missing opentracing span is detected. ([\#10983](https://github.com/matrix-org/synapse/issues/10983)) +- Update GHA config to run tests against Python 3.10 and PostgreSQL 14. ([\#10992](https://github.com/matrix-org/synapse/issues/10992)) +- Fix a long-standing bug where `ReadWriteLock`s could drop logging contexts on exit. ([\#10993](https://github.com/matrix-org/synapse/issues/10993)) +- Add a `CODEOWNERS` file to automatically request reviews from the `@matrix-org/synapse-core` team on new pull requests. 
([\#10994](https://github.com/matrix-org/synapse/issues/10994)) +- Add further type hints to `synapse.state`. ([\#11004](https://github.com/matrix-org/synapse/issues/11004)) +- Remove the deprecated `BaseHandler` object. ([\#11005](https://github.com/matrix-org/synapse/issues/11005)) +- Bump mypy version for CI to 0.910, and pull in new type stubs for dependencies. ([\#11006](https://github.com/matrix-org/synapse/issues/11006)) +- Fix CI to run the unit tests without optional deps. ([\#11017](https://github.com/matrix-org/synapse/issues/11017)) +- Ensure that cache config tests do not share state. ([\#11019](https://github.com/matrix-org/synapse/issues/11019)) +- Add additional type hints to `synapse.server_notices`. ([\#11021](https://github.com/matrix-org/synapse/issues/11021)) +- Add additional type hints for `synapse.push`. ([\#11023](https://github.com/matrix-org/synapse/issues/11023)) +- When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing. ([\#11034](https://github.com/matrix-org/synapse/issues/11034)) +- Remove unnecessary list comprehension from `synapse_port_db` to satisfy code style requirements. ([\#11043](https://github.com/matrix-org/synapse/issues/11043)) + + Synapse 1.44.0 (2021-10-05) =========================== diff --git a/changelog.d/10822.feature b/changelog.d/10822.feature deleted file mode 100644 index 72566e31ec9e..000000000000 --- a/changelog.d/10822.feature +++ /dev/null @@ -1 +0,0 @@ -Support autodiscovery of oEmbed previews. diff --git a/changelog.d/10877.feature b/changelog.d/10877.feature deleted file mode 100644 index 06a246c108a7..000000000000 --- a/changelog.d/10877.feature +++ /dev/null @@ -1 +0,0 @@ -Ensure `(room_id, next_batch_id)` is unique across [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms to avoid cross-talk/conflicts between batches. diff --git a/changelog.d/10888.misc b/changelog.d/10888.misc deleted file mode 100644 index d9c991788125..000000000000 --- a/changelog.d/10888.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hinting in `synapse.util`. \ No newline at end of file diff --git a/changelog.d/10892.misc b/changelog.d/10892.misc deleted file mode 100644 index c8c471159b19..000000000000 --- a/changelog.d/10892.misc +++ /dev/null @@ -1 +0,0 @@ -Add further type hints to `synapse.storage.util`. diff --git a/changelog.d/10894.feature b/changelog.d/10894.feature deleted file mode 100644 index a4f968bed100..000000000000 --- a/changelog.d/10894.feature +++ /dev/null @@ -1 +0,0 @@ -Add a `user_may_send_3pid_invite` spam checker callback for modules to allow or deny 3PID invites. diff --git a/changelog.d/10895.misc b/changelog.d/10895.misc deleted file mode 100644 index d1c822498016..000000000000 --- a/changelog.d/10895.misc +++ /dev/null @@ -1 +0,0 @@ -Fix type hints to be compatible with an upcoming change to Twisted. \ No newline at end of file diff --git a/changelog.d/10902.misc b/changelog.d/10902.misc deleted file mode 100644 index 2cd79887f6f7..000000000000 --- a/changelog.d/10902.misc +++ /dev/null @@ -1 +0,0 @@ -Update utility code to handle C implementations of frozendict. \ No newline at end of file diff --git a/changelog.d/10903.misc b/changelog.d/10903.misc deleted file mode 100644 index 2716ccb08c4f..000000000000 --- a/changelog.d/10903.misc +++ /dev/null @@ -1 +0,0 @@ -Drop old functionality which maintained database compatibility with Synapse versions before 1.31. 
diff --git a/changelog.d/10910.feature b/changelog.d/10910.feature deleted file mode 100644 index aee139f8b6f1..000000000000 --- a/changelog.d/10910.feature +++ /dev/null @@ -1 +0,0 @@ -Add a spam checker callback to allow or deny room joins. diff --git a/changelog.d/10915.misc b/changelog.d/10915.misc deleted file mode 100644 index 1ce2910ffa66..000000000000 --- a/changelog.d/10915.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up configuration helper classes for the `ServerConfig` class. diff --git a/changelog.d/10916.misc b/changelog.d/10916.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10916.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10922.bugfix b/changelog.d/10922.bugfix deleted file mode 100644 index b7315514e0e7..000000000000 --- a/changelog.d/10922.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki. diff --git a/changelog.d/10924.bugfix b/changelog.d/10924.bugfix deleted file mode 100644 index c73a51e32fe2..000000000000 --- a/changelog.d/10924.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory. diff --git a/changelog.d/10926.misc b/changelog.d/10926.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10926.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10927.bugfix b/changelog.d/10927.bugfix deleted file mode 100644 index fd24288c5499..000000000000 --- a/changelog.d/10927.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8/9 could be applied to earlier room versions in some situations. diff --git a/changelog.d/10934.misc b/changelog.d/10934.misc deleted file mode 100644 index 56c640ec9e91..000000000000 --- a/changelog.d/10934.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor various parts of the codebase to use `RoomVersion` objects instead of room version identifier strings. diff --git a/changelog.d/10935.misc b/changelog.d/10935.misc deleted file mode 100644 index 80529c04cae2..000000000000 --- a/changelog.d/10935.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor user directory tests in preparation for upcoming changes. diff --git a/changelog.d/10936.misc b/changelog.d/10936.misc deleted file mode 100644 index 9d1d6e5b02ef..000000000000 --- a/changelog.d/10936.misc +++ /dev/null @@ -1 +0,0 @@ -Include the event id in the logcontext when handling PDUs received over federation. diff --git a/changelog.d/10939.misc b/changelog.d/10939.misc deleted file mode 100644 index a7cecf8a5b61..000000000000 --- a/changelog.d/10939.misc +++ /dev/null @@ -1 +0,0 @@ -Fix logged errors in unit tests. diff --git a/changelog.d/10940.misc b/changelog.d/10940.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10940.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10945.misc b/changelog.d/10945.misc deleted file mode 100644 index 7cf1f02ad612..000000000000 --- a/changelog.d/10945.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a broken test to ensure that consent configuration works during registration. 
diff --git a/changelog.d/10947.bugfix b/changelog.d/10947.bugfix deleted file mode 100644 index 40c70d3ece9f..000000000000 --- a/changelog.d/10947.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes a long-standing bug wherin deactivated users still count towards the mau limit. \ No newline at end of file diff --git a/changelog.d/10954.feature b/changelog.d/10954.feature deleted file mode 100644 index 94dfa7175c31..000000000000 --- a/changelog.d/10954.feature +++ /dev/null @@ -1 +0,0 @@ -Include an `update_synapse_database` script in the distribution. Contributed by @Fizzadar at Beeper. diff --git a/changelog.d/10956.bugfix b/changelog.d/10956.bugfix deleted file mode 100644 index 13b8e5983b73..000000000000 --- a/changelog.d/10956.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. diff --git a/changelog.d/10958.misc b/changelog.d/10958.misc deleted file mode 100644 index 409ecc35cbce..000000000000 --- a/changelog.d/10958.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to filtering classes. diff --git a/changelog.d/10959.misc b/changelog.d/10959.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10959.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10960.bugfix b/changelog.d/10960.bugfix deleted file mode 100644 index b4f1c228ea0e..000000000000 --- a/changelog.d/10960.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and disabled users. \ No newline at end of file diff --git a/changelog.d/10961.misc b/changelog.d/10961.misc deleted file mode 100644 index 0e35813488dc..000000000000 --- a/changelog.d/10961.misc +++ /dev/null @@ -1 +0,0 @@ -Add type-hint to `HomeserverTestcase.setup_test_homeserver`. \ No newline at end of file diff --git a/changelog.d/10962.bugfix b/changelog.d/10962.bugfix deleted file mode 100644 index 9b0760d7315f..000000000000 --- a/changelog.d/10962.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches with unknown batch ID error in existing room versions from the room creator. diff --git a/changelog.d/10963.misc b/changelog.d/10963.misc deleted file mode 100644 index daf40155de56..000000000000 --- a/changelog.d/10963.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the test utility function `create_room_as` so that `is_public=True` will explicitly set the `visibility` parameter of room creation requests to `public`. Contributed by @AndrewFerr. diff --git a/changelog.d/10966.misc b/changelog.d/10966.misc deleted file mode 100644 index 095b9d56897c..000000000000 --- a/changelog.d/10966.misc +++ /dev/null @@ -1 +0,0 @@ -Make the release script more robust and transparent. diff --git a/changelog.d/10971.doc b/changelog.d/10971.doc deleted file mode 100644 index cc6cfe416454..000000000000 --- a/changelog.d/10971.doc +++ /dev/null @@ -1 +0,0 @@ -Change wording ("reference homeserver") in Synapse repository documentation. Contributed by @maxkratz. diff --git a/changelog.d/10973.doc b/changelog.d/10973.doc deleted file mode 100644 index d7429a9da6db..000000000000 --- a/changelog.d/10973.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a dead URL in development documentation (SAML) and change wording from "Riot" to "Element". Contributed by @maxkratz. 
diff --git a/changelog.d/10974.misc b/changelog.d/10974.misc deleted file mode 100644 index 8695b378aabb..000000000000 --- a/changelog.d/10974.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` mega function into smaller handler functions. diff --git a/changelog.d/10981.bugfix b/changelog.d/10981.bugfix deleted file mode 100644 index d7bf66034882..000000000000 --- a/changelog.d/10981.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug that could leak local users' per-room nicknames and avatars when the user directory is rebuilt. \ No newline at end of file diff --git a/changelog.d/10982.bugfix b/changelog.d/10982.bugfix deleted file mode 100644 index 5c9e15eeaa42..000000000000 --- a/changelog.d/10982.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the remainder of a batch of user directory changes would be silently dropped if the server left a room early in the batch. \ No newline at end of file diff --git a/changelog.d/10983.misc b/changelog.d/10983.misc deleted file mode 100644 index 235899d14f4a..000000000000 --- a/changelog.d/10983.misc +++ /dev/null @@ -1 +0,0 @@ -Log stack traces when a missing opentracing span is detected. diff --git a/changelog.d/10985.misc b/changelog.d/10985.misc deleted file mode 100644 index 586a0b3a9670..000000000000 --- a/changelog.d/10985.misc +++ /dev/null @@ -1 +0,0 @@ -Use direct references to config flags. diff --git a/changelog.d/10986.misc b/changelog.d/10986.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10986.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10987.misc b/changelog.d/10987.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10987.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10988.misc b/changelog.d/10988.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/10988.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/10990.doc b/changelog.d/10990.doc deleted file mode 100644 index 51290d620098..000000000000 --- a/changelog.d/10990.doc +++ /dev/null @@ -1 +0,0 @@ -Add additional content to the Welcome and Overview page of the documentation. diff --git a/changelog.d/10991.doc b/changelog.d/10991.doc deleted file mode 100644 index 2f9bb24ca726..000000000000 --- a/changelog.d/10991.doc +++ /dev/null @@ -1 +0,0 @@ -Update links to MSCs in documentation. Contributed by @dklimpel. \ No newline at end of file diff --git a/changelog.d/10992.misc b/changelog.d/10992.misc deleted file mode 100644 index 60432a559c80..000000000000 --- a/changelog.d/10992.misc +++ /dev/null @@ -1 +0,0 @@ -Update GHA config to run tests against Python 3.10 and PostgreSQL 14. diff --git a/changelog.d/10993.misc b/changelog.d/10993.misc deleted file mode 100644 index 23c73dbac5c1..000000000000 --- a/changelog.d/10993.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where `ReadWriteLock`s could drop logging contexts on exit. diff --git a/changelog.d/10994.misc b/changelog.d/10994.misc deleted file mode 100644 index 0a8538b01e18..000000000000 --- a/changelog.d/10994.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `CODEOWNERS` file to automatically request reviews from the `@matrix-org/synapse-core` team on new pull requests. 
diff --git a/changelog.d/10995.bugfix b/changelog.d/10995.bugfix deleted file mode 100644 index 3eef96f3db72..000000000000 --- a/changelog.d/10995.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correct a bugfix introduced in Synapse v1.44.0 that wouldn't catch every error of the connection breaks before a response could be written to it. diff --git a/changelog.d/11002.bugfix b/changelog.d/11002.bugfix deleted file mode 100644 index cf894a6314b4..000000000000 --- a/changelog.d/11002.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see you in the user_directory. diff --git a/changelog.d/11003.bugfix b/changelog.d/11003.bugfix deleted file mode 100644 index 0786f1b886ac..000000000000 --- a/changelog.d/11003.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public. \ No newline at end of file diff --git a/changelog.d/11004.misc b/changelog.d/11004.misc deleted file mode 100644 index 821033710a3a..000000000000 --- a/changelog.d/11004.misc +++ /dev/null @@ -1 +0,0 @@ -Add further type hints to `synapse.state`. \ No newline at end of file diff --git a/changelog.d/11005.misc b/changelog.d/11005.misc deleted file mode 100644 index a893591971a3..000000000000 --- a/changelog.d/11005.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the deprecated `BaseHandler` object. diff --git a/changelog.d/11006.misc b/changelog.d/11006.misc deleted file mode 100644 index 7b4abae76a63..000000000000 --- a/changelog.d/11006.misc +++ /dev/null @@ -1 +0,0 @@ -Bump mypy version for CI to 0.910, and pull in new type stubs for dependencies. \ No newline at end of file diff --git a/changelog.d/11010.misc b/changelog.d/11010.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/11010.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/11011.misc b/changelog.d/11011.misc deleted file mode 100644 index 9a765435dbe4..000000000000 --- a/changelog.d/11011.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some of the federation event authentication code for clarity. diff --git a/changelog.d/11017.misc b/changelog.d/11017.misc deleted file mode 100644 index f05530ac94c7..000000000000 --- a/changelog.d/11017.misc +++ /dev/null @@ -1 +0,0 @@ -Fix CI to run the unit tests without optional deps. diff --git a/changelog.d/11019.misc b/changelog.d/11019.misc deleted file mode 100644 index aae5ee62b2e8..000000000000 --- a/changelog.d/11019.misc +++ /dev/null @@ -1 +0,0 @@ -Ensure that cache config tests do not share state. diff --git a/changelog.d/11021.misc b/changelog.d/11021.misc deleted file mode 100644 index 8ac1bfcf226b..000000000000 --- a/changelog.d/11021.misc +++ /dev/null @@ -1 +0,0 @@ -Add additional type hints to `synapse.server_notices`. \ No newline at end of file diff --git a/changelog.d/11023.misc b/changelog.d/11023.misc deleted file mode 100644 index ecc0467529be..000000000000 --- a/changelog.d/11023.misc +++ /dev/null @@ -1 +0,0 @@ -Add additional type hints for `synapse.push`. \ No newline at end of file diff --git a/changelog.d/11028.feature b/changelog.d/11028.feature deleted file mode 100644 index 48798356b7d1..000000000000 --- a/changelog.d/11028.feature +++ /dev/null @@ -1 +0,0 @@ -Include exception information in JSON logging output. Contributed by @Fizzadar at Beeper. 
diff --git a/changelog.d/11034.misc b/changelog.d/11034.misc deleted file mode 100644 index b15fd66ac360..000000000000 --- a/changelog.d/11034.misc +++ /dev/null @@ -1 +0,0 @@ -When installing the optional developer dependencies, also include the dependencies needed for type-checking and unit testing. diff --git a/changelog.d/11042.bugfix b/changelog.d/11042.bugfix deleted file mode 100644 index 536c47417d49..000000000000 --- a/changelog.d/11042.bugfix +++ /dev/null @@ -1 +0,0 @@ -Work around a regression, introduced in Synapse 1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. diff --git a/changelog.d/11043.misc b/changelog.d/11043.misc deleted file mode 100644 index c5f127bb4646..000000000000 --- a/changelog.d/11043.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary list comprehension from `synapse_port_db` to satisfy code style requirements. diff --git a/changelog.d/9655.feature b/changelog.d/9655.feature deleted file mode 100644 index 70cac230d848..000000000000 --- a/changelog.d/9655.feature +++ /dev/null @@ -1 +0,0 @@ -Add [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069) support to `/account/whoami`. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 8e80c78ee7a0..0d5db739e7e8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,12 @@ -matrix-synapse-py3 (1.44.0~rc2+nmu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.45.0~rc1) stable; urgency=medium [ Nick @ Beeper ] * Include an `update_synapse_database` script in the distribution. - -- root Mon, 04 Oct 2021 13:29:26 +0000 + [ Synapse Packaging team ] + * New synapse release 1.45.0~rc1. + + -- Synapse Packaging team Tue, 12 Oct 2021 10:46:27 +0100 matrix-synapse-py3 (1.44.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index b8979c365ee7..6b109ccffa9f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.44.0" +__version__ = "1.45.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 60af28c5dd803ac4ad1aa216574cac33b6daed6a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 10:55:39 +0100 Subject: [PATCH 094/111] Fixup changelog --- CHANGES.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9b3999896710..592486321703 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -18,17 +18,17 @@ Bugfixes - Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki. ([\#10922](https://github.com/matrix-org/synapse/issues/10922)) - Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory. ([\#10924](https://github.com/matrix-org/synapse/issues/10924)) -- Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8/9 could be applied to earlier room versions in some situations. ([\#10927](https://github.com/matrix-org/synapse/issues/10927)) -- Fixes a long-standing bug wherin deactivated users still count towards the mau limit. ([\#10947](https://github.com/matrix-org/synapse/issues/10947)) +- Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8 and 9 could be applied to earlier room versions in some situations. 
([\#10927](https://github.com/matrix-org/synapse/issues/10927)) +- Fixes a long-standing bug wherein deactivated users still count towards the mau limit. ([\#10947](https://github.com/matrix-org/synapse/issues/10947)) - Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. ([\#10956](https://github.com/matrix-org/synapse/issues/10956)) -- Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and disabled users. ([\#10960](https://github.com/matrix-org/synapse/issues/10960)) +- Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and deactivated users. ([\#10960](https://github.com/matrix-org/synapse/issues/10960)) - Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches with unknown batch ID error in existing room versions from the room creator. ([\#10962](https://github.com/matrix-org/synapse/issues/10962)) - Fix a bug that could leak local users' per-room nicknames and avatars when the user directory is rebuilt. ([\#10981](https://github.com/matrix-org/synapse/issues/10981)) - Fix a long-standing bug where the remainder of a batch of user directory changes would be silently dropped if the server left a room early in the batch. ([\#10982](https://github.com/matrix-org/synapse/issues/10982)) -- Correct a bugfix introduced in Synapse v1.44.0 that wouldn't catch every error of the connection breaks before a response could be written to it. ([\#10995](https://github.com/matrix-org/synapse/issues/10995)) -- Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see you in the user_directory. ([\#11002](https://github.com/matrix-org/synapse/issues/11002)) +- Correct a bugfix introduced in Synapse v1.44.0 that would catch the wrong error if a connection is lost before a response could be written to it. ([\#10995](https://github.com/matrix-org/synapse/issues/10995)) +- Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see you in the user directory. ([\#11002](https://github.com/matrix-org/synapse/issues/11002)) - Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public. ([\#11003](https://github.com/matrix-org/synapse/issues/11003)) -- Work around a regression, introduced in Synapse 1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. ([\#11042](https://github.com/matrix-org/synapse/issues/11042)) +- Work around a regression, introduced in Synapse v1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. ([\#11042](https://github.com/matrix-org/synapse/issues/11042)) Improved Documentation @@ -47,7 +47,7 @@ Internal Changes - Add further type hints to `synapse.storage.util`. ([\#10892](https://github.com/matrix-org/synapse/issues/10892)) - Fix type hints to be compatible with an upcoming change to Twisted. ([\#10895](https://github.com/matrix-org/synapse/issues/10895)) - Update utility code to handle C implementations of frozendict. 
([\#10902](https://github.com/matrix-org/synapse/issues/10902)) -- Drop old functionality which maintained database compatibility with Synapse versions before 1.31. ([\#10903](https://github.com/matrix-org/synapse/issues/10903)) +- Drop old functionality which maintained database compatibility with Synapse versions before v1.31. ([\#10903](https://github.com/matrix-org/synapse/issues/10903)) - Clean-up configuration helper classes for the `ServerConfig` class. ([\#10915](https://github.com/matrix-org/synapse/issues/10915)) - Use direct references to config flags. ([\#10916](https://github.com/matrix-org/synapse/issues/10916), [\#10959](https://github.com/matrix-org/synapse/issues/10959), [\#10985](https://github.com/matrix-org/synapse/issues/10985)) - Clean up some of the federation event authentication code for clarity. ([\#10926](https://github.com/matrix-org/synapse/issues/10926), [\#10940](https://github.com/matrix-org/synapse/issues/10940), [\#10986](https://github.com/matrix-org/synapse/issues/10986), [\#10987](https://github.com/matrix-org/synapse/issues/10987), [\#10988](https://github.com/matrix-org/synapse/issues/10988), [\#11010](https://github.com/matrix-org/synapse/issues/11010), [\#11011](https://github.com/matrix-org/synapse/issues/11011)) From b01e953291d9de5a5d81718c686b7c1ed83146be Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 10:58:26 +0100 Subject: [PATCH 095/111] Add warning about known issues --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 592486321703..068e4e183ee0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.45.0rc1 (2021-10-12) ============================== +**Note:** We are aware of [a performance issue](https://github.com/matrix-org/synapse/issues/11049) introduced in Synapse v1.44.0, as well as [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. While this release candidate doesn't fix either of those issues, a second release candidate is expected to come out in a few days to address them. + Features -------- From 9e13cd98af154c7a67220cd13e5153025e94dc5b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 11:23:51 +0100 Subject: [PATCH 096/111] Update upgrade notes --- docs/upgrade.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/upgrade.md b/docs/upgrade.md index a8221372df50..18ecb2678ee6 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,15 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.45.0 + +## Changes required to media storage provider modules when reading from the Synapse configuration object + +Media storage provider modules that read from the Synapse configuration object (i.e. that +read the value of `hs.config.[...]`) now need to specify the configuration section they're +reading from. This means that if a module reads the value of e.g. `hs.config.media_store_path`, +it needs to replace it with `hs.config.media.media_store_path`. 
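+
+For example, a module that previously read the media store path directly off
+the config object (an illustrative sketch; `ExampleStorageProvider` is a
+made-up name, not a real module):
+
+```python
+class ExampleStorageProvider:
+    # `ExampleStorageProvider` is illustrative only.
+    def __init__(self, hs, config):
+        # Before Synapse v1.45.0: read from the top-level config object.
+        self.store_path = hs.config.media_store_path
+```
+
+would now read the same value through the `media` configuration section:
+
+```python
+class ExampleStorageProvider:
+    def __init__(self, hs, config):
+        # From Synapse v1.45.0: config options are grouped by section.
+        self.store_path = hs.config.media.media_store_path
+```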
+ # Upgrading to v1.44.0 ## The URL preview cache is no longer mirrored to storage providers From f6b62bdc4d1c86f78dd08052582c0e8a878534eb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 11:36:27 +0100 Subject: [PATCH 097/111] Add a link to the upgrade notes --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 068e4e183ee0..54f79fb55ff3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,8 @@ Synapse 1.45.0rc1 (2021-10-12) **Note:** We are aware of [a performance issue](https://github.com/matrix-org/synapse/issues/11049) introduced in Synapse v1.44.0, as well as [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. While this release candidate doesn't fix either of those issues, a second release candidate is expected to come out in a few days to address them. +Media storage provider modules that read from Synapse's configuration need changes as of this version; see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1450) for more informations. + Features -------- From 8afa48f7f6b7cb243df80feeaaa9fc651c6dc3aa Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 11:38:33 +0100 Subject: [PATCH 098/111] Typo --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 54f79fb55ff3..a7986ae82a71 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,7 +3,7 @@ Synapse 1.45.0rc1 (2021-10-12) **Note:** We are aware of [a performance issue](https://github.com/matrix-org/synapse/issues/11049) introduced in Synapse v1.44.0, as well as [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. While this release candidate doesn't fix either of those issues, a second release candidate is expected to come out in a few days to address them. -Media storage provider modules that read from Synapse's configuration need changes as of this version; see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1450) for more informations. +Media storage provider modules that read from Synapse's configuration need changes as of this version; see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1450) for more information. Features -------- From a5871f53ed6725464d09306f6ee115410c286e43 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 12 Oct 2021 11:43:13 +0100 Subject: [PATCH 099/111] Fixup changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index a7986ae82a71..5acc8b537e05 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,7 +10,6 @@ Features - Add [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069) support to `/account/whoami`. ([\#9655](https://github.com/matrix-org/synapse/issues/9655)) - Support autodiscovery of oEmbed previews. ([\#10822](https://github.com/matrix-org/synapse/issues/10822)) -- Ensure `(room_id, next_batch_id)` is unique across [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms to avoid cross-talk/conflicts between batches. ([\#10877](https://github.com/matrix-org/synapse/issues/10877)) - Add a `user_may_send_3pid_invite` spam checker callback for modules to allow or deny 3PID invites.
([\#10894](https://github.com/matrix-org/synapse/issues/10894)) - Add a spam checker callback to allow or deny room joins. ([\#10910](https://github.com/matrix-org/synapse/issues/10910)) - Include an `update_synapse_database` script in the distribution. Contributed by @Fizzadar at Beeper. ([\#10954](https://github.com/matrix-org/synapse/issues/10954)) @@ -23,7 +22,7 @@ Bugfixes - Fix a minor bug in the response to `/_matrix/client/r0/voip/turnServer`. Contributed by @lukaslihotzki. ([\#10922](https://github.com/matrix-org/synapse/issues/10922)) - Fix a bug where empty `yyyy-mm-dd/` directories would be left behind in the media store's `url_cache_thumbnails/` directory. ([\#10924](https://github.com/matrix-org/synapse/issues/10924)) - Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8 and 9 could be applied to earlier room versions in some situations. ([\#10927](https://github.com/matrix-org/synapse/issues/10927)) -- Fixes a long-standing bug wherein deactivated users still count towards the mau limit. ([\#10947](https://github.com/matrix-org/synapse/issues/10947)) +- Fix a long-standing bug wherein deactivated users still count towards the monthly active users limit. ([\#10947](https://github.com/matrix-org/synapse/issues/10947)) - Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state. ([\#10956](https://github.com/matrix-org/synapse/issues/10956)) - Fix a long-standing bug where rebuilding the user directory wouldn't exclude support and deactivated users. ([\#10960](https://github.com/matrix-org/synapse/issues/10960)) - Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint rejecting subsequent batches with unknown batch ID error in existing room versions from the room creator. ([\#10962](https://github.com/matrix-org/synapse/issues/10962)) @@ -33,6 +32,7 @@ Bugfixes - Fix a long-standing bug where local users' per-room nicknames/avatars were visible to anyone who could see you in the user directory. ([\#11002](https://github.com/matrix-org/synapse/issues/11002)) - Fix a long-standing bug where a user's per-room nickname/avatar would overwrite their profile in the user directory when a room was made public. ([\#11003](https://github.com/matrix-org/synapse/issues/11003)) - Work around a regression, introduced in Synapse v1.39.0, that caused `SynapseError`s raised by the experimental third-party rules module callback `check_event_allowed` to be ignored. ([\#11042](https://github.com/matrix-org/synapse/issues/11042)) +- Fix a bug in [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) insertion events in rooms that could cause cross-talk/conflicts between batches. ([\#10877](https://github.com/matrix-org/synapse/issues/10877)) Improved Documentation From 5c35074d859077f5ade846c450d19ea9dceb62f0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Oct 2021 08:55:33 -0400 Subject: [PATCH 100/111] Reset global cache state before cache tests. (#11036) This reverts #11019 and structures the code a bit more like it was before #10985. The global cache state must be reset before running the tests since other test cases might have configured caching (and thus touched the global state). 
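To illustrate the failure mode this patch addresses, here is a minimal, self-contained sketch (not Synapse's actual implementation; `_CACHES` stands in for the module-level registry in `synapse.config.cache`) of how module-level state leaks between tests:

```python
# Because `_CACHES` lives at module scope, it survives across TestCase
# instances: a test can observe caches registered by whichever tests
# happened to run before it, so outcomes depend on test order unless
# each test clears the registry in setUp/tearDown.
_CACHES = {}


def add_resizable_cache(cache_name, cache_resize_callback):
    # Register a resize callback in the global registry (simplified).
    _CACHES[cache_name] = cache_resize_callback


class CacheConfig:
    def reset(self):
        # Restore pristine global state; run before and after each test.
        _CACHES.clear()
```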
--- changelog.d/11036.misc | 1 + tests/config/test_cache.py | 24 +++++++++++------------- 2 files changed, 12 insertions(+), 13 deletions(-) create mode 100644 changelog.d/11036.misc diff --git a/changelog.d/11036.misc b/changelog.d/11036.misc new file mode 100644 index 000000000000..aae5ee62b2e8 --- /dev/null +++ b/changelog.d/11036.misc @@ -0,0 +1 @@ +Ensure that cache config tests do not share state. diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py index 79d417568d7c..4bb82e810e0c 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py @@ -12,25 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import patch - from synapse.config.cache import CacheConfig, add_resizable_cache from synapse.util.caches.lrucache import LruCache from tests.unittest import TestCase -# Patch the global _CACHES so that each test runs against its own state. -@patch("synapse.config.cache._CACHES", new_callable=dict) class CacheConfigTests(TestCase): def setUp(self): - # Reset caches before each test + # Reset caches before each test since there's global state involved. self.config = CacheConfig() + self.config.reset() def tearDown(self): + # Also reset the caches after each test to leave state pristine. self.config.reset() - def test_individual_caches_from_environ(self, _caches): + def test_individual_caches_from_environ(self): """ Individual cache factors will be loaded from the environment. """ @@ -43,7 +41,7 @@ def test_individual_caches_from_environ(self, _caches): self.assertEqual(dict(self.config.cache_factors), {"something_or_other": 2.0}) - def test_config_overrides_environ(self, _caches): + def test_config_overrides_environ(self): """ Individual cache factors defined in the environment will take precedence over those in the config. 
@@ -60,7 +58,7 @@ def test_config_overrides_environ(self, _caches): {"foo": 1.0, "bar": 3.0, "something_or_other": 2.0}, ) - def test_individual_instantiated_before_config_load(self, _caches): + def test_individual_instantiated_before_config_load(self): """ If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized once the config @@ -76,7 +74,7 @@ def test_individual_instantiated_before_config_load(self, _caches): self.assertEqual(cache.max_size, 300) - def test_individual_instantiated_after_config_load(self, _caches): + def test_individual_instantiated_after_config_load(self): """ If a cache is instantiated after the config is read, it will be immediately resized to the correct size given the per_cache_factor if @@ -89,7 +87,7 @@ def test_individual_instantiated_after_config_load(self, _caches): add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 200) - def test_global_instantiated_before_config_load(self, _caches): + def test_global_instantiated_before_config_load(self): """ If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized to the new @@ -104,7 +102,7 @@ def test_global_instantiated_before_config_load(self, _caches): self.assertEqual(cache.max_size, 400) - def test_global_instantiated_after_config_load(self, _caches): + def test_global_instantiated_after_config_load(self): """ If a cache is instantiated after the config is read, it will be immediately resized to the correct size given the global factor if there @@ -117,7 +115,7 @@ def test_global_instantiated_after_config_load(self, _caches): add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 150) - def test_cache_with_asterisk_in_name(self, _caches): + def test_cache_with_asterisk_in_name(self): """Some caches have asterisks in their name, test that they are set correctly.""" config = { @@ -143,7 +141,7 @@ def test_cache_with_asterisk_in_name(self, _caches): add_resizable_cache("*cache_c*", cache_resize_callback=cache_c.set_cache_factor) self.assertEqual(cache_c.max_size, 200) - def test_apply_cache_factor_from_config(self, _caches): + def test_apply_cache_factor_from_config(self): """Caches can disable applying cache factor updates, mainly used by event cache size. """ From 333d6f4e843c49002623417e6aa22da0521c4742 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 Oct 2021 14:27:09 +0100 Subject: [PATCH 101/111] Fix race in `MultiWriterIdGenerator` (#11045) The race allowed the current position to advance too far when stream IDs are still being persisted. This happened when it received a new stream ID from a remote write between a new stream ID being allocated and it being added to the set of unpersisted stream IDs. Fixes #9424. --- changelog.d/11045.bugfix | 1 + synapse/storage/util/id_generators.py | 82 ++++++++++++++++++++++----- 2 files changed, 68 insertions(+), 15 deletions(-) create mode 100644 changelog.d/11045.bugfix diff --git a/changelog.d/11045.bugfix b/changelog.d/11045.bugfix new file mode 100644 index 000000000000..d712dc946a56 --- /dev/null +++ b/changelog.d/11045.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when using multiple event persister workers where events were not correctly sent down `/sync` due to a race. 
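The gist of the race, as a simplified standalone model (this is not the real `MultiWriterIdGenerator`; the class, names, and stream ID values are illustrative):

```python
# Simplified model of the race. A writer allocates stream ID 6 from the
# shared sequence, but before it records 6 as "unfinished", a position
# update for stream ID 7 arrives from another writer. Without extra
# bookkeeping, the local current position can jump to 7, skipping 6.

class IdTracker:
    def __init__(self):
        self.current_position = 5
        self.unfinished = set()  # allocated and recorded, not yet finished
        self.in_flight = []      # max position seen when each fetch began

    def begin_fetch(self):
        # The fix: before asking the sequence for new IDs, note the current
        # maximum, so advances past it are blocked until the fetch lands.
        self.in_flight.append(self.current_position)

    def finish_fetch(self, new_ids):
        self.unfinished.update(new_ids)
        self.in_flight.pop()

    def on_remote_advance(self, new_id):
        # Only safe to advance if no local IDs are unfinished or in flight.
        if not self.unfinished and not self.in_flight:
            self.current_position = max(self.current_position, new_id)


tracker = IdTracker()
tracker.begin_fetch()                 # the sequence is about to hand us ID 6
tracker.on_remote_advance(7)          # a remote update races in...
assert tracker.current_position == 5  # ...but can no longer skip past 6
tracker.finish_fetch([6])
```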
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 852bd79fee85..670811611fcf 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -36,7 +36,7 @@ ) import attr -from sortedcontainers import SortedSet +from sortedcontainers import SortedList, SortedSet from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import ( @@ -265,6 +265,15 @@ def __init__( # should be less than the minimum of this set (if not empty). self._unfinished_ids: SortedSet[int] = SortedSet() + # We also need to track when we've requested some new stream IDs but + # they haven't yet been added to the `_unfinished_ids` set. Every time + # we request a new stream ID we add the current max stream ID to the + # list, and remove it once we've added the newly allocated IDs to the + # `_unfinished_ids` set. This means that we *may* be allocated stream + # IDs above those in the list, and so we can't advance the local current + # position beyond the minimum stream ID in this list. + self._in_flight_fetches: SortedList[int] = SortedList() + # Set of local IDs that we've processed that are larger than the current # position, due to there being smaller unpersisted IDs. self._finished_ids: Set[int] = set() @@ -290,6 +299,9 @@ def __init__( ) self._known_persisted_positions: List[int] = [] + # The maximum stream ID that we have seen been allocated across any writer. + self._max_seen_allocated_stream_id = 1 + self._sequence_gen = PostgresSequenceGenerator(sequence_name) # We check that the table and sequence haven't diverged. @@ -305,6 +317,10 @@ def __init__( # This goes and fills out the above state from the database. self._load_current_ids(db_conn, tables) + self._max_seen_allocated_stream_id = max( + self._current_positions.values(), default=1 + ) + def _load_current_ids( self, db_conn: LoggingDatabaseConnection, @@ -411,10 +427,32 @@ def _load_current_ids( cur.close() def _load_next_id_txn(self, txn: Cursor) -> int: - return self._sequence_gen.get_next_id_txn(txn) + stream_ids = self._load_next_mult_id_txn(txn, 1) + return stream_ids[0] def _load_next_mult_id_txn(self, txn: Cursor, n: int) -> List[int]: - return self._sequence_gen.get_next_mult_txn(txn, n) + # We need to track that we've requested some more stream IDs, and what + # the current max allocated stream ID is. This is to prevent a race + # where we've been allocated stream IDs but they have not yet been added + # to the `_unfinished_ids` set, allowing the current position to advance + # past them. 
+ with self._lock: + current_max = self._max_seen_allocated_stream_id + self._in_flight_fetches.add(current_max) + + try: + stream_ids = self._sequence_gen.get_next_mult_txn(txn, n) + + with self._lock: + self._unfinished_ids.update(stream_ids) + self._max_seen_allocated_stream_id = max( + self._max_seen_allocated_stream_id, self._unfinished_ids[-1] + ) + finally: + with self._lock: + self._in_flight_fetches.remove(current_max) + + return stream_ids def get_next(self) -> AsyncContextManager[int]: """ @@ -463,9 +501,6 @@ def get_next_txn(self, txn: LoggingTransaction) -> int: next_id = self._load_next_id_txn(txn) - with self._lock: - self._unfinished_ids.add(next_id) - txn.call_after(self._mark_id_as_finished, next_id) txn.call_on_exception(self._mark_id_as_finished, next_id) @@ -497,15 +532,27 @@ def _mark_id_as_finished(self, next_id: int) -> None: new_cur: Optional[int] = None - if self._unfinished_ids: + if self._unfinished_ids or self._in_flight_fetches: # If there are unfinished IDs then the new position will be the - # largest finished ID less than the minimum unfinished ID. + # largest finished ID strictly less than the minimum unfinished + # ID. + + # The minimum unfinished ID needs to take account of both + # `_unfinished_ids` and `_in_flight_fetches`. + if self._unfinished_ids and self._in_flight_fetches: + # `_in_flight_fetches` stores the maximum safe stream ID, so + # we add one to make it equivalent to the minimum unsafe ID. + min_unfinished = min( + self._unfinished_ids[0], self._in_flight_fetches[0] + 1 + ) + elif self._in_flight_fetches: + min_unfinished = self._in_flight_fetches[0] + 1 + else: + min_unfinished = self._unfinished_ids[0] finished = set() - - min_unfinshed = self._unfinished_ids[0] for s in self._finished_ids: - if s < min_unfinshed: + if s < min_unfinished: if new_cur is None or new_cur < s: new_cur = s else: @@ -575,6 +622,10 @@ def advance(self, instance_name: str, new_id: int) -> None: new_id, self._current_positions.get(instance_name, 0) ) + self._max_seen_allocated_stream_id = max( + self._max_seen_allocated_stream_id, new_id + ) + self._add_persisted_position(new_id) def get_persisted_upto_position(self) -> int: @@ -605,7 +656,11 @@ def _add_persisted_position(self, new_id: int) -> None: # to report a recent position when asked, rather than a potentially old # one (if this instance hasn't written anything for a while). our_current_position = self._current_positions.get(self._instance_name) - if our_current_position and not self._unfinished_ids: + if ( + our_current_position + and not self._unfinished_ids + and not self._in_flight_fetches + ): self._current_positions[self._instance_name] = max( our_current_position, new_id ) @@ -697,9 +752,6 @@ async def __aenter__(self) -> Union[int, List[int]]: db_autocommit=True, ) - with self.id_gen._lock: - self.id_gen._unfinished_ids.update(self.stream_ids) - if self.multiple_ids is None: return self.stream_ids[0] * self.id_gen._return_factor else: From 1db9282dfa3976497d4208c1d84dd1796e529332 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Oct 2021 13:15:42 -0400 Subject: [PATCH 102/111] Fix formatting string when oEmbed errors occur. 
(#11061) --- changelog.d/11061.bugfix | 1 + synapse/rest/media/v1/oembed.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11061.bugfix diff --git a/changelog.d/11061.bugfix b/changelog.d/11061.bugfix new file mode 100644 index 000000000000..26fb64379375 --- /dev/null +++ b/changelog.d/11061.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.44.0 when logging errors during oEmbed processing. diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py index 6d7e1f9064af..78b1603f19f5 100644 --- a/synapse/rest/media/v1/oembed.py +++ b/synapse/rest/media/v1/oembed.py @@ -191,7 +191,7 @@ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult: except Exception as e: # Trap any exception and let the code follow as usual. - logger.warning(f"Error parsing oEmbed metadata from {url}: {e:r}") + logger.warning("Error parsing oEmbed metadata from %s: %r", url, e) open_graph_response = {} cache_age = None From b83e82255630f2ba7ea959b68e5a82b4d938f000 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 13 Oct 2021 10:38:22 +0100 Subject: [PATCH 103/111] Stop user directory from failing if it encounters users not in the `users` table. (#11053) The following scenarios would halt the user directory updater: - user joins room - user leaves room - user present in room which switches from private to public, or vice versa, for two classes of users: - appservice senders - users missing from the `users` table. If this happened, the user directory would be stuck, unable to make forward progress. Exclude both classes of users from the user directory, so that we ignore them. Co-authored-by: Eric Eastwood Co-authored-by: reivilibre Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> Co-authored-by: Brendan Abolivier --- changelog.d/10825.misc | 1 + changelog.d/10970.misc | 1 + changelog.d/10996.misc | 1 + changelog.d/11053.bugfix | 2 + synapse/logging/opentracing.py | 8 + synapse/replication/http/_base.py | 154 +++--- synapse/storage/databases/main/client_ips.py | 13 +- .../storage/databases/main/user_directory.py | 24 +- synapse/storage/state.py | 172 +++++- tests/handlers/test_user_directory.py | 65 ++- tests/storage/test_client_ips.py | 43 ++ tests/storage/test_state.py | 513 +++++++++++++++++- tests/storage/test_user_directory.py | 17 +- 13 files changed, 921 insertions(+), 93 deletions(-) create mode 100644 changelog.d/10825.misc create mode 100644 changelog.d/10970.misc create mode 100644 changelog.d/10996.misc create mode 100644 changelog.d/11053.bugfix diff --git a/changelog.d/10825.misc b/changelog.d/10825.misc new file mode 100644 index 000000000000..f9786164d7ec --- /dev/null +++ b/changelog.d/10825.misc @@ -0,0 +1 @@ +Add an 'approximate difference' method to `StateFilter`. diff --git a/changelog.d/10970.misc b/changelog.d/10970.misc new file mode 100644 index 000000000000..bb75ea79a657 --- /dev/null +++ b/changelog.d/10970.misc @@ -0,0 +1 @@ +Fix inconsistent behavior of `get_last_client_by_ip` when reporting data that has not been stored in the database yet. diff --git a/changelog.d/10996.misc b/changelog.d/10996.misc new file mode 100644 index 000000000000..c830d7ec2cc7 --- /dev/null +++ b/changelog.d/10996.misc @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.21.0 that causes opentracing and Prometheus metrics for replication requests to be measured incorrectly.
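The resulting exclusion logic, condensed into a synchronous sketch (the real method is the async `should_include_local_user_in_dir` shown in the diff below; `store` here is a stand-in for the user directory storage class):

```python
# Condensed sketch of the defensive checks. Each class of "uncontactable"
# user is excluded rather than allowed to crash the directory updater.

class StoreError(Exception):
    """Stands in for synapse.api.errors.StoreError ("no such user")."""


def should_include_local_user_in_dir(store, user: str) -> bool:
    # `store` is a stand-in object, not the real storage class.
    # Appservice senders and users in an appservice's user namespace are
    # opted out of the directory: they aren't usually DM-able.
    if store.get_app_service_by_user_id(user) is not None:
        return False
    if store.get_if_app_services_interested_in_user(user):
        return False
    # Support users are hidden from the directory.
    if store.is_support_user(user):
        return False
    # Key fix: a user missing from the `users` table raises StoreError
    # when we look up their deactivation status; treat that as "exclude"
    # instead of letting it halt the updater.
    try:
        if store.get_user_deactivated_status(user):
            return False
    except StoreError:
        return False
    return True
```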
diff --git a/changelog.d/11053.bugfix b/changelog.d/11053.bugfix new file mode 100644 index 000000000000..a59cfac93157 --- /dev/null +++ b/changelog.d/11053.bugfix @@ -0,0 +1,2 @@ +Fix a bug introduced in Synapse 1.45.0rc1 where the user directory would stop updating if it processed an event from a +user not in the `users` table. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 5276c4bfcce8..20d23a426064 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -807,6 +807,14 @@ def err_back(result): result.addCallbacks(call_back, err_back) else: + if inspect.isawaitable(result): + logger.error( + "@trace may not have wrapped %s correctly! " + "The function is not async but returned a %s.", + func.__qualname__, + type(result).__name__, + ) + scope.__exit__(None, None, None) return result diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index f1b78d09f9a2..e047ec74d85f 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -182,85 +182,87 @@ def make_client(cls, hs): ) @trace(opname="outgoing_replication_request") - @outgoing_gauge.track_inprogress() async def send_request(*, instance_name="master", **kwargs): - if instance_name == local_instance_name: - raise Exception("Trying to send HTTP request to self") - if instance_name == "master": - host = master_host - port = master_port - elif instance_name in instance_map: - host = instance_map[instance_name].host - port = instance_map[instance_name].port - else: - raise Exception( - "Instance %r not in 'instance_map' config" % (instance_name,) + with outgoing_gauge.track_inprogress(): + if instance_name == local_instance_name: + raise Exception("Trying to send HTTP request to self") + if instance_name == "master": + host = master_host + port = master_port + elif instance_name in instance_map: + host = instance_map[instance_name].host + port = instance_map[instance_name].port + else: + raise Exception( + "Instance %r not in 'instance_map' config" % (instance_name,) + ) + + data = await cls._serialize_payload(**kwargs) + + url_args = [ + urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS + ] + + if cls.CACHE: + txn_id = random_string(10) + url_args.append(txn_id) + + if cls.METHOD == "POST": + request_func = client.post_json_get_json + elif cls.METHOD == "PUT": + request_func = client.put_json + elif cls.METHOD == "GET": + request_func = client.get_json + else: + # We have already asserted in the constructor that a + # compatible was picked, but lets be paranoid. + raise Exception( + "Unknown METHOD on %s replication endpoint" % (cls.NAME,) + ) + + uri = "http://%s:%s/_synapse/replication/%s/%s" % ( + host, + port, + cls.NAME, + "/".join(url_args), ) - data = await cls._serialize_payload(**kwargs) - - url_args = [ - urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS - ] - - if cls.CACHE: - txn_id = random_string(10) - url_args.append(txn_id) - - if cls.METHOD == "POST": - request_func = client.post_json_get_json - elif cls.METHOD == "PUT": - request_func = client.put_json - elif cls.METHOD == "GET": - request_func = client.get_json - else: - # We have already asserted in the constructor that a - # compatible was picked, but lets be paranoid. 
- raise Exception( - "Unknown METHOD on %s replication endpoint" % (cls.NAME,) - ) - - uri = "http://%s:%s/_synapse/replication/%s/%s" % ( - host, - port, - cls.NAME, - "/".join(url_args), - ) - - try: - # We keep retrying the same request for timeouts. This is so that we - # have a good idea that the request has either succeeded or failed on - # the master, and so whether we should clean up or not. - while True: - headers: Dict[bytes, List[bytes]] = {} - # Add an authorization header, if configured. - if replication_secret: - headers[b"Authorization"] = [b"Bearer " + replication_secret] - opentracing.inject_header_dict(headers, check_destination=False) - try: - result = await request_func(uri, data, headers=headers) - break - except RequestTimedOutError: - if not cls.RETRY_ON_TIMEOUT: - raise - - logger.warning("%s request timed out; retrying", cls.NAME) - - # If we timed out we probably don't need to worry about backing - # off too much, but lets just wait a little anyway. - await clock.sleep(1) - except HttpResponseException as e: - # We convert to SynapseError as we know that it was a SynapseError - # on the main process that we should send to the client. (And - # importantly, not stack traces everywhere) - _outgoing_request_counter.labels(cls.NAME, e.code).inc() - raise e.to_synapse_error() - except Exception as e: - _outgoing_request_counter.labels(cls.NAME, "ERR").inc() - raise SynapseError(502, "Failed to talk to main process") from e - - _outgoing_request_counter.labels(cls.NAME, 200).inc() - return result + try: + # We keep retrying the same request for timeouts. This is so that we + # have a good idea that the request has either succeeded or failed + # on the master, and so whether we should clean up or not. + while True: + headers: Dict[bytes, List[bytes]] = {} + # Add an authorization header, if configured. + if replication_secret: + headers[b"Authorization"] = [ + b"Bearer " + replication_secret + ] + opentracing.inject_header_dict(headers, check_destination=False) + try: + result = await request_func(uri, data, headers=headers) + break + except RequestTimedOutError: + if not cls.RETRY_ON_TIMEOUT: + raise + + logger.warning("%s request timed out; retrying", cls.NAME) + + # If we timed out we probably don't need to worry about backing + # off too much, but lets just wait a little anyway. + await clock.sleep(1) + except HttpResponseException as e: + # We convert to SynapseError as we know that it was a SynapseError + # on the main process that we should send to the client. (And + # importantly, not stack traces everywhere) + _outgoing_request_counter.labels(cls.NAME, e.code).inc() + raise e.to_synapse_error() + except Exception as e: + _outgoing_request_counter.labels(cls.NAME, "ERR").inc() + raise SynapseError(502, "Failed to talk to main process") from e + + _outgoing_request_counter.labels(cls.NAME, 200).inc() + return result return send_request diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index c77acc7c84c5..6c1ef0904973 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -538,15 +538,20 @@ async def get_last_client_ip_by_device( """ ret = await super().get_last_client_ip_by_device(user_id, device_id) - # Update what is retrieved from the database with data which is pending insertion. + # Update what is retrieved from the database with data which is pending + # insertion, as if it has already been stored in the database. 
for key in self._batch_row_update: - uid, access_token, ip = key + uid, _access_token, ip = key if uid == user_id: user_agent, did, last_seen = self._batch_row_update[key] + + if did is None: + # These updates don't make it to the `devices` table + continue + if not device_id or did == device_id: - ret[(user_id, device_id)] = { + ret[(user_id, did)] = { "user_id": user_id, - "access_token": access_token, "ip": ip, "user_agent": user_agent, "device_id": did, diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 5c713a732ee9..e98a45b6af60 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -26,6 +26,8 @@ cast, ) +from synapse.api.errors import StoreError + if TYPE_CHECKING: from synapse.server import HomeServer @@ -383,7 +385,19 @@ async def should_include_local_user_in_dir(self, user: str) -> bool: """Certain classes of local user are omitted from the user directory. Is this user one of them? """ - # App service users aren't usually contactable, so exclude them. + # We're opting to exclude the appservice sender (user defined by the + # `sender_localpart` in the appservice registration) even though + # technically it could be DM-able. In the future, this could potentially + # be configurable per-appservice whether the appservice sender can be + # contacted. + if self.get_app_service_by_user_id(user) is not None: + return False + + # We're opting to exclude appservice users (anyone matching the user + # namespace regex in the appservice registration) even though technically + # they could be DM-able. In the future, this could potentially + # be configurable per-appservice whether the appservice users can be + # contacted. if self.get_if_app_services_interested_in_user(user): # TODO we might want to make this configurable for each app service return False @@ -393,8 +407,14 @@ async def should_include_local_user_in_dir(self, user: str) -> bool: return False # Deactivated users aren't contactable, so should not appear in the user directory. - if await self.get_user_deactivated_status(user): + try: + if await self.get_user_deactivated_status(user): + return False + except StoreError: + # No such user in the users table. No need to do this when calling + # is_support_user---that returns False if the user is missing. return False + return True async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool: diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 5e86befde430..b5ba1560d139 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -15,9 +15,11 @@ from typing import ( TYPE_CHECKING, Awaitable, + Collection, Dict, Iterable, List, + Mapping, Optional, Set, Tuple, @@ -29,7 +31,7 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.types import MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateKey, StateMap if TYPE_CHECKING: from typing import FrozenSet # noqa: used within quoted type hint; flake8 sad @@ -134,6 +136,23 @@ def from_lazy_load_member_list(members: Iterable[str]) -> "StateFilter": include_others=True, ) + @staticmethod + def freeze(types: Mapping[str, Optional[Collection[str]]], include_others: bool): + """ + Returns a (frozen) StateFilter with the same contents as the parameters + specified here, which can be made of mutable types. 
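`freeze` gives callers a way to build a `StateFilter` from ordinary mutable dicts and sets, converting them into the `frozendict`/`frozenset` representation the class requires. Assuming this patch is applied, usage might look like:

```python
from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter

# Mutable spec: all membership events, plus the (single) create event.
spec = {EventTypes.Member: None, EventTypes.Create: {""}}

# Stored internally as a frozendict of str -> Optional[FrozenSet[str]].
members_and_create = StateFilter.freeze(spec, include_others=False)
```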
+ """ + types_with_frozen_values: Dict[str, Optional[FrozenSet[str]]] = {} + for state_types, state_keys in types.items(): + if state_keys is not None: + types_with_frozen_values[state_types] = frozenset(state_keys) + else: + types_with_frozen_values[state_types] = None + + return StateFilter( + frozendict(types_with_frozen_values), include_others=include_others + ) + def return_expanded(self) -> "StateFilter": """Creates a new StateFilter where type wild cards have been removed (except for memberships). The returned filter is a superset of the @@ -356,6 +375,157 @@ def get_member_split(self) -> Tuple["StateFilter", "StateFilter"]: return member_filter, non_member_filter + def _decompose_into_four_parts( + self, + ) -> Tuple[Tuple[bool, Set[str]], Tuple[Set[str], Set[StateKey]]]: + """ + Decomposes this state filter into 4 constituent parts, which can be + thought of as this: + all? - minus_wildcards + plus_wildcards + plus_state_keys + + where + * all represents ALL state + * minus_wildcards represents entire state types to remove + * plus_wildcards represents entire state types to add + * plus_state_keys represents individual state keys to add + + See `recompose_from_four_parts` for the other direction of this + correspondence. + """ + is_all = self.include_others + excluded_types: Set[str] = {t for t in self.types if is_all} + wildcard_types: Set[str] = {t for t, s in self.types.items() if s is None} + concrete_keys: Set[StateKey] = set(self.concrete_types()) + + return (is_all, excluded_types), (wildcard_types, concrete_keys) + + @staticmethod + def _recompose_from_four_parts( + all_part: bool, + minus_wildcards: Set[str], + plus_wildcards: Set[str], + plus_state_keys: Set[StateKey], + ) -> "StateFilter": + """ + Recomposes a state filter from 4 parts. + + See `decompose_into_four_parts` (the other direction of this + correspondence) for descriptions on each of the parts. + """ + + # {state type -> set of state keys OR None for wildcard} + # (The same structure as that of a StateFilter.) + new_types: Dict[str, Optional[Set[str]]] = {} + + # if we start with all, insert the excluded statetypes as empty sets + # to prevent them from being included + if all_part: + new_types.update({state_type: set() for state_type in minus_wildcards}) + + # insert the plus wildcards + new_types.update({state_type: None for state_type in plus_wildcards}) + + # insert the specific state keys + for state_type, state_key in plus_state_keys: + if state_type in new_types: + entry = new_types[state_type] + if entry is not None: + entry.add(state_key) + elif not all_part: + # don't insert if the entire type is already included by + # include_others as this would actually shrink the state allowed + # by this filter. + new_types[state_type] = {state_key} + + return StateFilter.freeze(new_types, include_others=all_part) + + def approx_difference(self, other: "StateFilter") -> "StateFilter": + """ + Returns a state filter which represents `self - other`. + + This is useful for determining what state remains to be pulled out of the + database if we want the state included by `self` but already have the state + included by `other`. + + The returned state filter + - MUST include all state events that are included by this filter (`self`) + unless they are included by `other`; + - MUST NOT include state events not included by this filter (`self`); and + - MAY be an over-approximation: the returned state filter + MAY additionally include some state events from `other`. 
+ + This implementation attempts to return the narrowest such state filter. + In the case that `self` contains wildcards for state types where + `other` contains specific state keys, an approximation must be made: + the returned state filter keeps the wildcard, as state filters are not + able to express 'all state keys except some given examples'. + e.g. + StateFilter(m.room.member -> None (wildcard)) + minus + StateFilter(m.room.member -> {'@wombat:example.org'}) + is approximated as + StateFilter(m.room.member -> None (wildcard)) + """ + + # We first transform self and other into an alternative representation: + # - whether or not they include all events to begin with ('all') + # - if so, which event types are excluded? ('excludes') + # - which entire event types to include ('wildcards') + # - which concrete state keys to include ('concrete state keys') + (self_all, self_excludes), ( + self_wildcards, + self_concrete_keys, + ) = self._decompose_into_four_parts() + (other_all, other_excludes), ( + other_wildcards, + other_concrete_keys, + ) = other._decompose_into_four_parts() + + # Start with an estimate of the difference based on self + new_all = self_all + # Wildcards from the other can be added to the exclusion filter + new_excludes = self_excludes | other_wildcards + # We remove wildcards that appeared as wildcards in the other + new_wildcards = self_wildcards - other_wildcards + # We filter out the concrete state keys that appear in the other + # as wildcards or concrete state keys. + new_concrete_keys = { + (state_type, state_key) + for (state_type, state_key) in self_concrete_keys + if state_type not in other_wildcards + } - other_concrete_keys + + if other_all: + if self_all: + # If self starts with all, then we add as wildcards any + # types which appear in the other's exclusion filter (but + # aren't in the self exclusion filter). This is as the other + # filter will return everything BUT the types in its exclusion, so + # we need to add those excluded types that also match the self + # filter as wildcard types in the new filter. + new_wildcards |= other_excludes.difference(self_excludes) + + # If other is an `include_others` then the difference isn't. + new_all = False + # (We have no need for excludes when we don't start with all, as there + # is nothing to exclude.) + new_excludes = set() + + # We also filter out all state types that aren't in the exclusion + # list of the other. + new_wildcards &= other_excludes + new_concrete_keys = { + (state_type, state_key) + for (state_type, state_key) in new_concrete_keys + if state_type in other_excludes + } + + # Transform our newly-constructed state filter from the alternative + # representation back into the normal StateFilter representation. + return StateFilter._recompose_from_four_parts( + new_all, new_excludes, new_wildcards, new_concrete_keys + ) + class StateGroupStorage: """High level interface to fetching state for event.""" diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index db65253773c6..0120b4688b93 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -63,7 +63,9 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hostname="test", id="1234", namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, - sender="@as:test", + # Note: this user does not match the regex above, so that tests + # can distinguish the sender from the AS user. 
+ sender="@as_main:test", ) mock_load_appservices = Mock(return_value=[self.appservice]) @@ -122,7 +124,7 @@ def test_normal_user_pair(self) -> None: {(alice, bob, private), (bob, alice, private)}, ) - # The next three tests (test_population_excludes_*) all setup + # The next four tests (test_excludes_*) all setup # - A normal user included in the user dir # - A public and private room created by that user # - A user excluded from the room dir, belonging to both rooms @@ -179,6 +181,34 @@ def test_excludes_appservices_user(self) -> None: ) self._check_only_one_user_in_directory(user, public) + def test_excludes_appservice_sender(self) -> None: + user = self.register_user("user", "pass") + token = self.login(user, "pass") + room = self.helper.create_room_as(user, is_public=True, tok=token) + self.helper.join(room, self.appservice.sender, tok=self.appservice.token) + self._check_only_one_user_in_directory(user, room) + + def test_user_not_in_users_table(self) -> None: + """Unclear how it happens, but on matrix.org we've seen join events + for users who aren't in the users table. Test that we don't fall over + when processing such a user. + """ + user1 = self.register_user("user1", "pass") + token1 = self.login(user1, "pass") + room = self.helper.create_room_as(user1, is_public=True, tok=token1) + + # Inject a join event for a user who doesn't exist + self.get_success(inject_member_event(self.hs, room, "@not-a-user:test", "join")) + + # Another new user registers and joins the room + user2 = self.register_user("user2", "pass") + token2 = self.login(user2, "pass") + self.helper.join(room, user2, tok=token2) + + # The dodgy event should not have stopped us from processing user2's join. + in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms()) + self.assertEqual(set(in_public), {(user1, room), (user2, room)}) + def _create_rooms_and_inject_memberships( self, creator: str, token: str, joiner: str ) -> Tuple[str, str]: @@ -230,7 +260,7 @@ def test_handle_local_profile_change_with_support_user(self) -> None: ) ) profile = self.get_success(self.store.get_user_in_directory(support_user_id)) - self.assertTrue(profile is None) + self.assertIsNone(profile) display_name = "display_name" profile_info = ProfileInfo(avatar_url="avatar_url", display_name=display_name) @@ -264,7 +294,7 @@ def test_handle_local_profile_change_with_deactivated_user(self) -> None: # profile is not in directory profile = self.get_success(self.store.get_user_in_directory(r_user_id)) - self.assertTrue(profile is None) + self.assertIsNone(profile) # update profile after deactivation self.get_success( @@ -273,7 +303,7 @@ def test_handle_local_profile_change_with_deactivated_user(self) -> None: # profile is furthermore not in directory profile = self.get_success(self.store.get_user_in_directory(r_user_id)) - self.assertTrue(profile is None) + self.assertIsNone(profile) def test_handle_local_profile_change_with_appservice_user(self) -> None: # create user @@ -283,7 +313,7 @@ def test_handle_local_profile_change_with_appservice_user(self) -> None: # profile is not in directory profile = self.get_success(self.store.get_user_in_directory(as_user_id)) - self.assertTrue(profile is None) + self.assertIsNone(profile) # update profile profile_info = ProfileInfo(avatar_url="avatar_url", display_name="4L1c3") @@ -293,7 +323,28 @@ def test_handle_local_profile_change_with_appservice_user(self) -> None: # profile is still not in directory profile = self.get_success(self.store.get_user_in_directory(as_user_id)) - 
self.assertTrue(profile is None) + self.assertIsNone(profile) + + def test_handle_local_profile_change_with_appservice_sender(self) -> None: + # profile is not in directory + profile = self.get_success( + self.store.get_user_in_directory(self.appservice.sender) + ) + self.assertIsNone(profile) + + # update profile + profile_info = ProfileInfo(avatar_url="avatar_url", display_name="4L1c3") + self.get_success( + self.handler.handle_local_profile_change( + self.appservice.sender, profile_info + ) + ) + + # profile is still not in directory + profile = self.get_success( + self.store.get_user_in_directory(self.appservice.sender) + ) + self.assertIsNone(profile) def test_handle_user_deactivated_support_user(self) -> None: s_user_id = "@support:test" diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index dada4f98c934..0e4013ebeaa7 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -146,6 +146,49 @@ def test_insert_new_client_ip_none_device_id(self): ], ) + @parameterized.expand([(False,), (True,)]) + def test_get_last_client_ip_by_device(self, after_persisting: bool): + """Test `get_last_client_ip_by_device` for persisted and unpersisted data""" + self.reactor.advance(12345678) + + user_id = "@user:id" + device_id = "MY_DEVICE" + + # Insert a user IP + self.get_success( + self.store.store_device( + user_id, + device_id, + "display name", + ) + ) + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", device_id + ) + ) + + if after_persisting: + # Trigger the storage loop + self.reactor.advance(10) + + result = self.get_success( + self.store.get_last_client_ip_by_device(user_id, device_id) + ) + + self.assertEqual( + result, + { + (user_id, device_id): { + "user_id": user_id, + "device_id": device_id, + "ip": "ip", + "user_agent": "user_agent", + "last_seen": 12345678000, + }, + }, + ) + @parameterized.expand([(False,), (True,)]) def test_get_user_ip_and_agents(self, after_persisting: bool): """Test `get_user_ip_and_agents` for persisted and unpersisted data""" diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 32060f2abd63..70d52b088c81 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -21,7 +21,7 @@ from synapse.storage.state import StateFilter from synapse.types import RoomID, UserID -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, TestCase logger = logging.getLogger(__name__) @@ -105,7 +105,6 @@ def test_get_state_groups(self): self.assertEqual({ev.event_id for ev in state_list}, {e1.event_id, e2.event_id}) def test_get_state_for_event(self): - # this defaults to a linear DAG as each new injection defaults to whatever # forward extremities are currently in the DB for this room. 
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {}) @@ -483,3 +482,513 @@ def test_get_state_for_event(self): self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) + + +class StateFilterDifferenceTestCase(TestCase): + def assert_difference( + self, minuend: StateFilter, subtrahend: StateFilter, expected: StateFilter + ): + self.assertEqual( + minuend.approx_difference(subtrahend), + expected, + f"StateFilter difference not correct:\n\n\t{minuend!r}\nminus\n\t{subtrahend!r}\nwas\n\t{minuend.approx_difference(subtrahend)}\nexpected\n\t{expected}", + ) + + def test_state_filter_difference_no_include_other_minus_no_include_other(self): + """ + Tests the StateFilter.approx_difference method + where, in a.approx_difference(b), both a and b do not have the + include_others flag set. + """ + # (wildcard on state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.Create: None}, + include_others=False, + ), + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.CanonicalAlias: None}, + include_others=False, + ), + StateFilter.freeze({EventTypes.Create: None}, include_others=False), + ) + + # (wildcard on state keys) - (specific state keys) + # This one is an over-approximation because we can't represent + # 'all state keys except a few named examples' + self.assert_difference( + StateFilter.freeze({EventTypes.Member: None}, include_others=False), + StateFilter.freeze( + {EventTypes.Member: {"@wombat:spqr"}}, + include_others=False, + ), + StateFilter.freeze({EventTypes.Member: None}, include_others=False), + ) + + # (wildcard on state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=False, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + ) + + # (specific state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + StateFilter.freeze( + {EventTypes.CanonicalAlias: {""}}, + include_others=False, + ), + ) + + # (specific state keys) - (specific state keys) + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr"}, + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + ) + + # (specific state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + ) + + def test_state_filter_difference_include_other_minus_no_include_other(self): + """ + Tests the StateFilter.approx_difference method + where, in a.approx_difference(b), only a has the include_others flag set. 
+ """ + # (wildcard on state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.Create: None}, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.CanonicalAlias: None}, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Create: None, + EventTypes.Member: set(), + EventTypes.CanonicalAlias: set(), + }, + include_others=True, + ), + ) + + # (wildcard on state keys) - (specific state keys) + # This one is an over-approximation because we can't represent + # 'all state keys except a few named examples' + # This also shows that the resultant state filter is normalised. + self.assert_difference( + StateFilter.freeze({EventTypes.Member: None}, include_others=True), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr"}, + EventTypes.Create: {""}, + }, + include_others=False, + ), + StateFilter(types=frozendict(), include_others=True), + ) + + # (wildcard on state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=False, + ), + StateFilter( + types=frozendict(), + include_others=True, + ), + ) + + # (specific state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.CanonicalAlias: {""}, + EventTypes.Member: set(), + }, + include_others=True, + ), + ) + + # (specific state keys) - (specific state keys) + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr"}, + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + ) + + # (specific state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + ) + + def test_state_filter_difference_include_other_minus_include_other(self): + """ + Tests the StateFilter.approx_difference method + where, in a.approx_difference(b), both a and b have the include_others + flag set. 
+ """ + # (wildcard on state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.Create: None}, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.CanonicalAlias: None}, + include_others=True, + ), + StateFilter(types=frozendict(), include_others=False), + ) + + # (wildcard on state keys) - (specific state keys) + # This one is an over-approximation because we can't represent + # 'all state keys except a few named examples' + self.assert_difference( + StateFilter.freeze({EventTypes.Member: None}, include_others=True), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.CanonicalAlias: None}, + include_others=False, + ), + ) + + # (wildcard on state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + ) + + # (specific state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=True, + ), + StateFilter( + types=frozendict(), + include_others=False, + ), + ) + + # (specific state keys) - (specific state keys) + # This one is an over-approximation because we can't represent + # 'all state keys except a few named examples' + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + EventTypes.Create: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr"}, + EventTypes.Create: set(), + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@spqr:spqr"}, + EventTypes.Create: {""}, + }, + include_others=False, + ), + ) + + # (specific state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + }, + include_others=False, + ), + ) + + def test_state_filter_difference_no_include_other_minus_include_other(self): + """ + Tests the StateFilter.approx_difference method + where, in a.approx_difference(b), only b has the include_others flag set. 
+ """ + # (wildcard on state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.Create: None}, + include_others=False, + ), + StateFilter.freeze( + {EventTypes.Member: None, EventTypes.CanonicalAlias: None}, + include_others=True, + ), + StateFilter(types=frozendict(), include_others=False), + ) + + # (wildcard on state keys) - (specific state keys) + # This one is an over-approximation because we can't represent + # 'all state keys except a few named examples' + self.assert_difference( + StateFilter.freeze({EventTypes.Member: None}, include_others=False), + StateFilter.freeze( + {EventTypes.Member: {"@wombat:spqr"}}, + include_others=True, + ), + StateFilter.freeze({EventTypes.Member: None}, include_others=False), + ) + + # (wildcard on state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=True, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + ) + + # (specific state keys) - (wildcard on state keys): + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=True, + ), + StateFilter( + types=frozendict(), + include_others=False, + ), + ) + + # (specific state keys) - (specific state keys) + # This one is an over-approximation because we can't represent + # 'all state keys except a few named examples' + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr"}, + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@spqr:spqr"}, + }, + include_others=False, + ), + ) + + # (specific state keys) - (no state keys) + self.assert_difference( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + EventTypes.CanonicalAlias: {""}, + }, + include_others=False, + ), + StateFilter.freeze( + { + EventTypes.Member: set(), + }, + include_others=True, + ), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}, + }, + include_others=False, + ), + ) + + def test_state_filter_difference_simple_cases(self): + """ + Tests some very simple cases of the StateFilter approx_difference, + that are not explicitly tested by the more in-depth tests. 
+ """ + + self.assert_difference(StateFilter.all(), StateFilter.all(), StateFilter.none()) + + self.assert_difference( + StateFilter.all(), + StateFilter.none(), + StateFilter.all(), + ) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 9f483ad681c6..be3ed64f5eae 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -256,7 +256,7 @@ def test_initial(self) -> None: users = self.get_success(self.user_dir_helper.get_users_in_user_directory()) self.assertEqual(users, {u1, u2, u3}) - # The next three tests (test_population_excludes_*) all set up + # The next four tests (test_population_excludes_*) all set up # - A normal user included in the user dir # - A public and private room created by that user # - A user excluded from the room dir, belonging to both rooms @@ -364,6 +364,21 @@ def test_population_excludes_appservice_user(self) -> None: # Check the AS user is not in the directory. self._check_room_sharing_tables(user, public, private) + def test_population_excludes_appservice_sender(self) -> None: + user = self.register_user("user", "pass") + token = self.login(user, "pass") + + # Join the AS sender to rooms owned by the normal user. + public, private = self._create_rooms_and_inject_memberships( + user, token, self.appservice.sender + ) + + # Rebuild the directory. + self._purge_and_rebuild_user_dir() + + # Check the AS sender is not in the directory. + self._check_room_sharing_tables(user, public, private) + def test_population_conceals_private_nickname(self) -> None: # Make a private room, and set a nickname within user = self.register_user("aaaa", "pass") From 99a4e5222d82cfcf8686b71d8fa94c8a81dfcbd6 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 14 Oct 2021 10:59:27 +0100 Subject: [PATCH 104/111] 1.45.0rc2 --- CHANGES.md | 21 +++++++++++++++++++++ changelog.d/10825.misc | 1 - changelog.d/10970.misc | 1 - changelog.d/10996.misc | 1 - changelog.d/11036.misc | 1 - changelog.d/11045.bugfix | 1 - changelog.d/11053.bugfix | 2 -- changelog.d/11061.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 10 files changed, 28 insertions(+), 9 deletions(-) delete mode 100644 changelog.d/10825.misc delete mode 100644 changelog.d/10970.misc delete mode 100644 changelog.d/10996.misc delete mode 100644 changelog.d/11036.misc delete mode 100644 changelog.d/11045.bugfix delete mode 100644 changelog.d/11053.bugfix delete mode 100644 changelog.d/11061.bugfix diff --git a/CHANGES.md b/CHANGES.md index 5acc8b537e05..0d6983088022 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.45.0rc2 (2021-10-14) +============================== + +Bugfixes +-------- + +- Fix a long-standing bug when using multiple event persister workers where events were not correctly sent down `/sync` due to a race. ([\#11045](https://github.com/matrix-org/synapse/issues/11045)) +- Fix a bug introduced in Synapse 1.45.0rc1 where the user directory would stop updating if it processed an event from a + user not in the `users` table. ([\#11053](https://github.com/matrix-org/synapse/issues/11053)) +- Fix a bug introduced in Synapse v1.44.0 when logging errors during oEmbed processing. ([\#11061](https://github.com/matrix-org/synapse/issues/11061)) + + +Internal Changes +---------------- + +- Add an 'approximate difference' method to `StateFilter`. 
([\#10825](https://github.com/matrix-org/synapse/issues/10825)) +- Fix inconsistent behavior of `get_last_client_by_ip` when reporting data that has not been stored in the database yet. ([\#10970](https://github.com/matrix-org/synapse/issues/10970)) +- Fix a bug introduced in Synapse 1.21.0 that causes opentracing and Prometheus metrics for replication requests to be measured incorrectly. ([\#10996](https://github.com/matrix-org/synapse/issues/10996)) +- Ensure that cache config tests do not share state. ([\#11036](https://github.com/matrix-org/synapse/issues/11036)) + + Synapse 1.45.0rc1 (2021-10-12) ============================== diff --git a/changelog.d/10825.misc b/changelog.d/10825.misc deleted file mode 100644 index f9786164d7ec..000000000000 --- a/changelog.d/10825.misc +++ /dev/null @@ -1 +0,0 @@ -Add an 'approximate difference' method to `StateFilter`. diff --git a/changelog.d/10970.misc b/changelog.d/10970.misc deleted file mode 100644 index bb75ea79a657..000000000000 --- a/changelog.d/10970.misc +++ /dev/null @@ -1 +0,0 @@ -Fix inconsistent behavior of `get_last_client_by_ip` when reporting data that has not been stored in the database yet. diff --git a/changelog.d/10996.misc b/changelog.d/10996.misc deleted file mode 100644 index c830d7ec2cc7..000000000000 --- a/changelog.d/10996.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.21.0 that causes opentracing and Prometheus metrics for replication requests to be measured incorrectly. diff --git a/changelog.d/11036.misc b/changelog.d/11036.misc deleted file mode 100644 index aae5ee62b2e8..000000000000 --- a/changelog.d/11036.misc +++ /dev/null @@ -1 +0,0 @@ -Ensure that cache config tests do not share state. diff --git a/changelog.d/11045.bugfix b/changelog.d/11045.bugfix deleted file mode 100644 index d712dc946a56..000000000000 --- a/changelog.d/11045.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when using multiple event persister workers where events were not correctly sent down `/sync` due to a race. diff --git a/changelog.d/11053.bugfix b/changelog.d/11053.bugfix deleted file mode 100644 index a59cfac93157..000000000000 --- a/changelog.d/11053.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug introduced in Synapse 1.45.0rc1 where the user directory would stop updating if it processed an event from a -user not in the `users` table. diff --git a/changelog.d/11061.bugfix b/changelog.d/11061.bugfix deleted file mode 100644 index 26fb64379375..000000000000 --- a/changelog.d/11061.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.44.0 when logging errors during oEmbed processing. diff --git a/debian/changelog b/debian/changelog index 0d5db739e7e8..f930befbe73a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.45.0~rc2) stable; urgency=medium + + * New synapse release 1.45.0~rc2. 
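The release-candidate commit above is largely mechanical: the `changelog.d` fragments are folded into `CHANGES.md` and deleted, a stanza is added to `debian/changelog`, and `__version__` is bumped. Assuming the towncrier setup Synapse used at the time drives the first of those steps, the fragment folding is roughly:

```shell
# Fold changelog.d/*.bugfix, *.misc, etc. into CHANGES.md and delete them.
# The exact invocation depends on the towncrier version the project pins.
towncrier build --version 1.45.0rc2 --yes
```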
+ + -- Synapse Packaging team Thu, 14 Oct 2021 10:58:24 +0100 + matrix-synapse-py3 (1.45.0~rc1) stable; urgency=medium [ Nick @ Beeper ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 6b109ccffa9f..ab90093e7081 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.45.0rc1" +__version__ = "1.45.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 87c3a6dcc08e158d44f36b96bbc9311a9edb99e1 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 14 Oct 2021 11:03:35 +0100 Subject: [PATCH 105/111] Refer to the bugs mentioned in 1.45.0rc1 note --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 0d6983088022..7ac7e794dc8c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.45.0rc2 (2021-10-14) ============================== +**Note:** This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) the user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. However, the [performance issue](https://github.com/matrix-org/synapse/issues/11049) mentioned in that release is yet to be resolved. + Bugfixes -------- From 4d761d24badb5a05abe9adeb46515553ca007b9d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 14 Oct 2021 11:18:40 +0100 Subject: [PATCH 106/111] mentioned -> which appeared --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 7ac7e794dc8c..60f51005e813 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.45.0rc2 (2021-10-14) ============================== -**Note:** This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) the user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. However, the [performance issue](https://github.com/matrix-org/synapse/issues/11049) mentioned in that release is yet to be resolved. +**Note:** This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) the user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. However, the [performance issue](https://github.com/matrix-org/synapse/issues/11049) which appeared in that release is yet to be resolved. Bugfixes -------- From b1c1a34f4680f89e5de506444155081c380dae97 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 14 Oct 2021 11:20:02 +0100 Subject: [PATCH 107/111] it appeared in 1.44, not 45rc1 --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 60f51005e813..c8b078a2f431 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.45.0rc2 (2021-10-14) ============================== -**Note:** This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) the user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. However, the [performance issue](https://github.com/matrix-org/synapse/issues/11049) which appeared in that release is yet to be resolved. +**Note:** This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) the user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. 
However, the [performance issue](https://github.com/matrix-org/synapse/issues/11049) which appeared in v1.44.0 is yet to be resolved. Bugfixes -------- From 191396f4baf467023b9913f83a1e3a696aa0e3a0 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Tue, 19 Oct 2021 11:16:52 +0100 Subject: [PATCH 108/111] Reword changelog regarding a suspected regression (#11117) Signed-off-by: Dan Callahan --- CHANGES.md | 17 ++++++++++++++--- changelog.d/11117.doc | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11117.doc diff --git a/CHANGES.md b/CHANGES.md index c8b078a2f431..6a4d5f5c97e6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,14 @@ Synapse 1.45.0rc2 (2021-10-14) ============================== -**Note:** This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) the user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. However, the [performance issue](https://github.com/matrix-org/synapse/issues/11049) which appeared in v1.44.0 is yet to be resolved. +This release candidate [fixes](https://github.com/matrix-org/synapse/issues/11053) a user directory [bug](https://github.com/matrix-org/synapse/issues/11025) present in 1.45.0rc1. + +Known Issues +------------ + +- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of v1.44.0 remains unresolved. + + We have not been able to identify a probable cause. Affected users report that setting up a federation sender worker appears to alleviate symptoms of the regression. Bugfixes -------- @@ -24,9 +31,13 @@ Internal Changes Synapse 1.45.0rc1 (2021-10-12) ============================== -**Note:** We are aware of [a performance issue](https://github.com/matrix-org/synapse/issues/11049) introduced in Synapse v1.44.0, as well as [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. While this release candidate doesn't fix either of those issues, a second release candidate is expected to come out in a few days to address them. +**Note:** Media storage providers module that read from Synapse's configuration need changes as of this version, see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1450) for more information. + +Known Issues +------------ -Media storage providers module that read from Synapse's configuration need changes as of this version, see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1450) for more information. +- We are investigating [a performance issue](https://github.com/matrix-org/synapse/issues/11049) which was reported after the release of v1.44.0. +- We are aware of [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. A second release candidate is expected which will resolve this. Features -------- diff --git a/changelog.d/11117.doc b/changelog.d/11117.doc new file mode 100644 index 000000000000..b4809fd6ea41 --- /dev/null +++ b/changelog.d/11117.doc @@ -0,0 +1 @@ +Reword changelog to clarify concerns about a suspected performance regression in 1.44. 
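For the federation sender workaround mentioned in the known-issues text, the worker setup of this era was a small YAML file plus disabling federation sending on the main process. A sketch along the lines of the workers documentation (the paths and worker name are illustrative):

```yaml
# federation_sender.yaml -- started alongside the main process with
#   python -m synapse.app.federation_sender -c homeserver.yaml -c federation_sender.yaml
worker_app: synapse.app.federation_sender
worker_name: federation_sender1

# Point at the main process's replication listener.
worker_replication_host: 127.0.0.1
worker_replication_port: 9093

worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
```

together with `send_federation: false` in the main `homeserver.yaml`.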
From 8b1185347a3bfa4530500fe274ffc9006ec027ea Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 19 Oct 2021 11:19:55 +0100 Subject: [PATCH 109/111] 1.45.0 --- CHANGES.md | 11 +++++++++++ changelog.d/11117.doc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 18 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/11117.doc diff --git a/CHANGES.md b/CHANGES.md index 6a4d5f5c97e6..7d1b804e8be4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.45.0 (2021-10-19) +=========================== + +No functional changes since Synapse 1.45.0rc2. + +Improved Documentation +---------------------- + +- Reword changelog to clarify concerns about a suspected performance regression in 1.44. ([\#11117](https://github.com/matrix-org/synapse/issues/11117)) + + Synapse 1.45.0rc2 (2021-10-14) ============================== diff --git a/changelog.d/11117.doc b/changelog.d/11117.doc deleted file mode 100644 index b4809fd6ea41..000000000000 --- a/changelog.d/11117.doc +++ /dev/null @@ -1 +0,0 @@ -Reword changelog to clarify concerns about a suspected performance regression in 1.44. diff --git a/debian/changelog b/debian/changelog index f930befbe73a..5fefb2f2ac05 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.45.0) stable; urgency=medium + + * New synapse release 1.45.0. + + -- Synapse Packaging team Tue, 19 Oct 2021 11:18:53 +0100 + matrix-synapse-py3 (1.45.0~rc2) stable; urgency=medium * New synapse release 1.45.0~rc2. diff --git a/synapse/__init__.py b/synapse/__init__.py index ab90093e7081..97452f34fe1e 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.45.0rc2" +__version__ = "1.45.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From a21f8c4b41adc323adf4a8ac517ff591856a74fb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 19 Oct 2021 11:21:21 +0100 Subject: [PATCH 110/111] Duplicate known issues under 1.45 release --- CHANGES.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 7d1b804e8be4..e36b1acf8c50 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,13 @@ Synapse 1.45.0 (2021-10-19) No functional changes since Synapse 1.45.0rc2. +Known Issues +------------ + +- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of v1.44.0 remains unresolved. + + We have not been able to identify a probable cause. Affected users report that setting up a federation sender worker appears to alleviate symptoms of the regression. + Improved Documentation ---------------------- From 95813ff43cc08f2064049050a67e027751d9b091 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 19 Oct 2021 11:30:16 +0100 Subject: [PATCH 111/111] Be less inconsistent about v1.2.3 versus 1.2.3 --- CHANGES.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e36b1acf8c50..435387d7b010 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,14 +6,14 @@ No functional changes since Synapse 1.45.0rc2. Known Issues ------------ -- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of v1.44.0 remains unresolved. 
+- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of 1.44.0 remains unresolved. We have not been able to identify a probable cause. Affected users report that setting up a federation sender worker appears to alleviate symptoms of the regression. Improved Documentation ---------------------- -- Reword changelog to clarify concerns about a suspected performance regression in 1.44. ([\#11117](https://github.com/matrix-org/synapse/issues/11117)) +- Reword changelog to clarify concerns about a suspected performance regression in 1.44.0. ([\#11117](https://github.com/matrix-org/synapse/issues/11117)) Synapse 1.45.0rc2 (2021-10-14) @@ -24,7 +24,7 @@ This release candidate [fixes](https://github.com/matrix-org/synapse/issues/1105 Known Issues ------------ -- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of v1.44.0 remains unresolved. +- A suspected [performance regression](https://github.com/matrix-org/synapse/issues/11049) which was first reported after the release of 1.44.0 remains unresolved. We have not been able to identify a probable cause. Affected users report that setting up a federation sender worker appears to alleviate symptoms of the regression. @@ -34,7 +34,7 @@ Bugfixes - Fix a long-standing bug when using multiple event persister workers where events were not correctly sent down `/sync` due to a race. ([\#11045](https://github.com/matrix-org/synapse/issues/11045)) - Fix a bug introduced in Synapse 1.45.0rc1 where the user directory would stop updating if it processed an event from a user not in the `users` table. ([\#11053](https://github.com/matrix-org/synapse/issues/11053)) -- Fix a bug introduced in Synapse v1.44.0 when logging errors during oEmbed processing. ([\#11061](https://github.com/matrix-org/synapse/issues/11061)) +- Fix a bug introduced in Synapse 1.44.0 when logging errors during oEmbed processing. ([\#11061](https://github.com/matrix-org/synapse/issues/11061)) Internal Changes @@ -54,7 +54,7 @@ Synapse 1.45.0rc1 (2021-10-12) Known Issues ------------ -- We are investigating [a performance issue](https://github.com/matrix-org/synapse/issues/11049) which was reported after the release of v1.44.0. +- We are investigating [a performance issue](https://github.com/matrix-org/synapse/issues/11049) which was reported after the release of 1.44.0. - We are aware of [a bug](https://github.com/matrix-org/synapse/issues/11025) with the user directory when using application services. A second release candidate is expected which will resolve this. Features