From 83f1ccfcaba76785ab4bd91e3177724e2dbb85ed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Jul 2021 12:28:00 +0100 Subject: [PATCH 01/61] Fix dropping locks on shut down --- synapse/storage/databases/main/lock.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 774861074c8a..3d1dff660bd9 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -78,7 +78,11 @@ async def _on_shutdown(self) -> None: """Called when the server is shutting down""" logger.info("Dropping held locks due to shutdown") - for (lock_name, lock_key), token in self._live_tokens.items(): + # We need to take a copy of the tokens dict as dropping the locks will + # cause the dictionary to change. + tokens = dict(self._live_tokens) + + for (lock_name, lock_key), token in tokens.items(): await self._drop_lock(lock_name, lock_key, token) logger.info("Dropped locks due to shutdown") From 794371b1bf800353a7a4496dc6aeefb30c50831e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Jul 2021 12:28:40 +0100 Subject: [PATCH 02/61] Revert "Fix dropping locks on shut down" This reverts commit 83f1ccfcaba76785ab4bd91e3177724e2dbb85ed. --- synapse/storage/databases/main/lock.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 3d1dff660bd9..774861074c8a 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -78,11 +78,7 @@ async def _on_shutdown(self) -> None: """Called when the server is shutting down""" logger.info("Dropping held locks due to shutdown") - # We need to take a copy of the tokens dict as dropping the locks will - # cause the dictionary to change. - tokens = dict(self._live_tokens) - - for (lock_name, lock_key), token in tokens.items(): + for (lock_name, lock_key), token in self._live_tokens.items(): await self._drop_lock(lock_name, lock_key, token) logger.info("Dropped locks due to shutdown") From 69226c1ab4e88d1f104ad8aaa13fb9dd0ff5dbb2 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Tue, 20 Jul 2021 12:59:23 +0100 Subject: [PATCH 03/61] MSC3244 room capabilities implementation (#10283) --- changelog.d/10283.feature | 1 + synapse/api/room_versions.py | 38 ++++++++++++++- synapse/config/experimental.py | 3 ++ synapse/rest/client/v2_alpha/capabilities.py | 8 +++- .../rest/client/v2_alpha/test_capabilities.py | 46 +++++++++++++++++++ 5 files changed, 93 insertions(+), 3 deletions(-) create mode 100644 changelog.d/10283.feature diff --git a/changelog.d/10283.feature b/changelog.d/10283.feature new file mode 100644 index 000000000000..99d633dbfb86 --- /dev/null +++ b/changelog.d/10283.feature @@ -0,0 +1 @@ +Initial support for MSC3244, Room version capabilities over the /capabilities API. \ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index a20abc5a655f..8dd33dcb83e4 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict +from typing import Callable, Dict, Optional import attr @@ -208,5 +208,39 @@ class RoomVersions: RoomVersions.MSC3083, RoomVersions.V7, ) - # Note that we do not include MSC2043 here unless it is enabled in the config. 
+} + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RoomVersionCapability: + """An object which describes the unique attributes of a room version.""" + + identifier: str # the identifier for this capability + preferred_version: Optional[RoomVersion] + support_check_lambda: Callable[[RoomVersion], bool] + + +MSC3244_CAPABILITIES = { + cap.identifier: { + "preferred": cap.preferred_version.identifier + if cap.preferred_version is not None + else None, + "support": [ + v.identifier + for v in KNOWN_ROOM_VERSIONS.values() + if cap.support_check_lambda(v) + ], + } + for cap in ( + RoomVersionCapability( + "knock", + RoomVersions.V7, + lambda room_version: room_version.msc2403_knocking, + ), + RoomVersionCapability( + "restricted", + None, + lambda room_version: room_version.msc3083_join_rules, + ), + ) } diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index e25ccba9ace0..040c4504d8a6 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -32,3 +32,6 @@ def read_config(self, config: JsonDict, **kwargs): # MSC2716 (backfill existing history) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) + + # MSC3244 (room version capabilities) + self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False) diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index 6a240214845d..88e3aac7974b 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -14,7 +14,7 @@ import logging from typing import TYPE_CHECKING, Tuple -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES from synapse.http.servlet import RestServlet from synapse.http.site import SynapseRequest from synapse.types import JsonDict @@ -55,6 +55,12 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "m.change_password": {"enabled": change_password}, } } + + if self.config.experimental.msc3244_enabled: + response["capabilities"]["m.room_versions"][ + "org.matrix.msc3244.room_capabilities" + ] = MSC3244_CAPABILITIES + return 200, response diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index 874052c61ca2..f80f48a45577 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -102,3 +102,49 @@ def test_get_change_password_capabilities_password_disabled(self): self.assertEqual(channel.code, 200) self.assertFalse(capabilities["m.change_password"]["enabled"]) + + def test_get_does_not_include_msc3244_fields_by_default(self): + localpart = "user" + password = "pass" + user = self.register_user(localpart, password) + access_token = self.get_success( + self.auth_handler.get_access_token_for_user_id( + user, device_id=None, valid_until_ms=None + ) + ) + + channel = self.make_request("GET", self.url, access_token=access_token) + capabilities = channel.json_body["capabilities"] + + self.assertEqual(channel.code, 200) + self.assertNotIn( + "org.matrix.msc3244.room_capabilities", capabilities["m.room_versions"] + ) + + @override_config({"experimental_features": {"msc3244_enabled": True}}) + def test_get_does_include_msc3244_fields_when_enabled(self): + localpart = "user" + password = "pass" + user = self.register_user(localpart, password) + access_token = self.get_success( + self.auth_handler.get_access_token_for_user_id( + 
user, device_id=None, valid_until_ms=None
+            )
+        )
+
+        channel = self.make_request("GET", self.url, access_token=access_token)
+        capabilities = channel.json_body["capabilities"]
+
+        self.assertEqual(channel.code, 200)
+        for details in capabilities["m.room_versions"][
+            "org.matrix.msc3244.room_capabilities"
+        ].values():
+            if details["preferred"] is not None:
+                self.assertTrue(
+                    details["preferred"] in KNOWN_ROOM_VERSIONS,
+                    str(details["preferred"]),
+                )
+
+            self.assertGreater(len(details["support"]), 0)
+            for room_version in details["support"]:
+                self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, str(room_version))

From 2d89c66b8811aa4968aefea3572f174fa00cc3c2 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 21 Jul 2021 05:29:57 -0500
Subject: [PATCH 04/61] Switch to `chunk` events so we can auth via power_levels (MSC2716) (#10432)

Previously, we were using `content.chunk_id` to connect one chunk to
another. But those events can come from any `sender`, so we can't tell who
should be allowed to send historical events: we only want the application
service to do it, yet the events carry the sender of a real historical
message rather than the application service user ID. Other federated
homeservers likewise have no way to tell which senders are an application
service on the originating homeserver. So we want to auth all of the
MSC2716 events via power_levels, and have them be sent by the application
service with proper PL levels in the room.
---
 changelog.d/10432.misc         |  1 +
 synapse/api/constants.py       |  6 ++++--
 synapse/rest/client/v1/room.py | 17 +++++++++++++----
 3 files changed, 18 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/10432.misc

diff --git a/changelog.d/10432.misc b/changelog.d/10432.misc
new file mode 100644
index 000000000000..3a8cdf0ae0e9
--- /dev/null
+++ b/changelog.d/10432.misc
@@ -0,0 +1 @@
+Connect historical chunks together with chunk events instead of a content field (MSC2716).
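For readers new to MSC2716, here is a rough sketch of the "chunk" event this
patch introduces. The event type and content field names match the constants
added in synapse/api/constants.py below; the sender, room ID, chunk ID and
timestamp are invented for illustration:

# Illustrative only: an MSC2716 "chunk" event as the endpoint patched below
# assembles it. It is sent by the application service user, not by the
# sender of a historical message.
chunk_event = {
    "type": "org.matrix.msc2716.chunk",        # EventTypes.MSC2716_CHUNK
    "sender": "@appservice:example.org",       # invented application service user
    "room_id": "!history:example.org",         # invented room ID
    "content": {
        # The chunk ID advertised by the previous insertion event's
        # "org.matrix.msc2716.next_chunk_id" field.
        "org.matrix.msc2716.chunk_id": "some-opaque-chunk-id",  # invented value
    },
    # Copied from the newest-in-time event in the chunk.
    "origin_server_ts": 1626864000000,
}

Because the chunk is now an ordinary event sent by the application service,
it can be authed via the room's power levels like any other event, which is
the point of the change.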
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 8363c2bb0f5f..4caafc0ac921 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -120,6 +120,7 @@ class EventTypes: SpaceParent = "m.space.parent" MSC2716_INSERTION = "org.matrix.msc2716.insertion" + MSC2716_CHUNK = "org.matrix.msc2716.chunk" MSC2716_MARKER = "org.matrix.msc2716.marker" @@ -190,9 +191,10 @@ class EventContentFields: # Used on normal messages to indicate they were historically imported after the fact MSC2716_HISTORICAL = "org.matrix.msc2716.historical" - # For "insertion" events + # For "insertion" events to indicate what the next chunk ID should be in + # order to connect to it MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id" - # Used on normal message events to indicate where the chunk connects to + # Used on "chunk" events to indicate which insertion event it connects to MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" # For "marker" events MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 31a1193cd3c0..c95c5ae23460 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -553,9 +553,18 @@ async def on_POST(self, request, room_id): ] # Connect this current chunk to the insertion event from the previous chunk - last_event_in_chunk["content"][ - EventContentFields.MSC2716_CHUNK_ID - ] = chunk_id_to_connect_to + chunk_event = { + "type": EventTypes.MSC2716_CHUNK, + "sender": requester.user.to_string(), + "room_id": room_id, + "content": {EventContentFields.MSC2716_CHUNK_ID: chunk_id_to_connect_to}, + # Since the chunk event is put at the end of the chunk, + # where the newest-in-time event is, copy the origin_server_ts from + # the last event we're inserting + "origin_server_ts": last_event_in_chunk["origin_server_ts"], + } + # Add the chunk event to the end of the chunk (newest-in-time) + events_to_create.append(chunk_event) # Add an "insertion" event to the start of each chunk (next to the oldest-in-time # event in the chunk) so the next chunk can be connected to this one. @@ -567,7 +576,7 @@ async def on_POST(self, request, room_id): # the first event we're inserting origin_server_ts=events_to_create[0]["origin_server_ts"], ) - # Prepend the insertion event to the start of the chunk + # Prepend the insertion event to the start of the chunk (oldest-in-time) events_to_create = [insertion_event] + events_to_create event_ids = [] From 5db118626bebb9ce3913758282787d47cd8f375e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Jul 2021 09:47:56 -0400 Subject: [PATCH 05/61] Add a return type to parse_string. (#10438) And set the required attribute in a few places which will error if a parameter is not provided. 
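As a sketch of what the new overloads buy at call sites, mypy can now narrow
the return type of `parse_string` from how it is called. The call sites below
are invented for illustration (assuming a `request` is in scope), but they
mirror the ones updated in this patch:

# Illustrative only:
filter_id = parse_string(request, "filter")                # typed as Optional[str]
direction = parse_string(request, "dir", default="f")      # typed as str
from_token = parse_string(request, "from", required=True)  # typed as str; raises a
                                                           # 400 SynapseError if absent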
--- changelog.d/10438.misc | 1 + synapse/http/servlet.py | 38 ++++++++++++++++- synapse/rest/admin/users.py | 4 +- synapse/rest/client/v1/room.py | 8 ++-- synapse/rest/client/v2_alpha/keys.py | 2 +- synapse/rest/client/v2_alpha/relations.py | 42 +++++++++++-------- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/consent/consent_resource.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 10 ++--- synapse/storage/databases/main/__init__.py | 2 +- synapse/storage/databases/main/room.py | 2 +- synapse/storage/databases/main/stats.py | 2 +- synapse/streams/config.py | 16 +++---- 13 files changed, 86 insertions(+), 45 deletions(-) create mode 100644 changelog.d/10438.misc diff --git a/changelog.d/10438.misc b/changelog.d/10438.misc new file mode 100644 index 000000000000..a55757849903 --- /dev/null +++ b/changelog.d/10438.misc @@ -0,0 +1 @@ +Improve servlet type hints. diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 04560fb58977..cf45b6623b23 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -172,6 +172,42 @@ def parse_bytes_from_args( return default +@overload +def parse_string( + request: Request, + name: str, + default: str, + *, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> str: + ... + + +@overload +def parse_string( + request: Request, + name: str, + *, + required: Literal[True], + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> str: + ... + + +@overload +def parse_string( + request: Request, + name: str, + *, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> Optional[str]: + ... + + def parse_string( request: Request, name: str, @@ -179,7 +215,7 @@ def parse_string( required: bool = False, allowed_values: Optional[Iterable[str]] = None, encoding: str = "ascii", -): +) -> Optional[str]: """ Parse a string parameter from the request query string. 
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 589e47fa473c..6736536172b5 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -90,8 +90,8 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: errcode=Codes.INVALID_PARAM, ) - user_id = parse_string(request, "user_id", default=None) - name = parse_string(request, "name", default=None) + user_id = parse_string(request, "user_id") + name = parse_string(request, "name") guests = parse_boolean(request, "guests", default=True) deactivated = parse_boolean(request, "deactivated", default=False) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index c95c5ae23460..5d309a534c00 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -413,7 +413,7 @@ async def on_POST(self, request, room_id): assert_params_in_dict(body, ["state_events_at_start", "events"]) prev_events_from_query = parse_strings_from_args(request.args, "prev_event") - chunk_id_from_query = parse_string(request, "chunk_id", default=None) + chunk_id_from_query = parse_string(request, "chunk_id") if prev_events_from_query is None: raise SynapseError( @@ -735,7 +735,7 @@ def __init__(self, hs): self.auth = hs.get_auth() async def on_GET(self, request): - server = parse_string(request, "server", default=None) + server = parse_string(request, "server") try: await self.auth.get_user_by_req(request, allow_guest=True) @@ -755,7 +755,7 @@ async def on_GET(self, request): raise e limit = parse_integer(request, "limit", 0) - since_token = parse_string(request, "since", None) + since_token = parse_string(request, "since") if limit == 0: # zero is a special value which corresponds to no limit. @@ -789,7 +789,7 @@ async def on_GET(self, request): async def on_POST(self, request): await self.auth.get_user_by_req(request, allow_guest=True) - server = parse_string(request, "server", default=None) + server = parse_string(request, "server") content = parse_json_object_from_request(request) limit: Optional[int] = int(content.get("limit", 100)) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 33cf8de18633..d0d9d30d40a3 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -194,7 +194,7 @@ def __init__(self, hs): async def on_GET(self, request): requester = await self.auth.get_user_by_req(request, allow_guest=True) - from_token_string = parse_string(request, "from") + from_token_string = parse_string(request, "from", required=True) set_tag("from", from_token_string) # We want to enforce they do pass us one, but we ignore it and return diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index c7da6759dbf8..0821cd285fd3 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -158,19 +158,21 @@ async def on_GET( event = await self.event_handler.get_event(requester.user, room_id, parent_id) limit = parse_integer(request, "limit", default=5) - from_token = parse_string(request, "from") - to_token = parse_string(request, "to") + from_token_str = parse_string(request, "from") + to_token_str = parse_string(request, "to") if event.internal_metadata.is_redacted(): # If the event is redacted, return an empty list of relations pagination_chunk = PaginationChunk(chunk=[]) else: # Return the relations - if from_token: - from_token = RelationPaginationToken.from_string(from_token) + from_token = None + if 
from_token_str: + from_token = RelationPaginationToken.from_string(from_token_str) - if to_token: - to_token = RelationPaginationToken.from_string(to_token) + to_token = None + if to_token_str: + to_token = RelationPaginationToken.from_string(to_token_str) pagination_chunk = await self.store.get_relations_for_event( event_id=parent_id, @@ -256,19 +258,21 @@ async def on_GET( raise SynapseError(400, "Relation type must be 'annotation'") limit = parse_integer(request, "limit", default=5) - from_token = parse_string(request, "from") - to_token = parse_string(request, "to") + from_token_str = parse_string(request, "from") + to_token_str = parse_string(request, "to") if event.internal_metadata.is_redacted(): # If the event is redacted, return an empty list of relations pagination_chunk = PaginationChunk(chunk=[]) else: # Return the relations - if from_token: - from_token = AggregationPaginationToken.from_string(from_token) + from_token = None + if from_token_str: + from_token = AggregationPaginationToken.from_string(from_token_str) - if to_token: - to_token = AggregationPaginationToken.from_string(to_token) + to_token = None + if to_token_str: + to_token = AggregationPaginationToken.from_string(to_token_str) pagination_chunk = await self.store.get_aggregation_groups_for_event( event_id=parent_id, @@ -336,14 +340,16 @@ async def on_GET(self, request, room_id, parent_id, relation_type, event_type, k raise SynapseError(400, "Relation type must be 'annotation'") limit = parse_integer(request, "limit", default=5) - from_token = parse_string(request, "from") - to_token = parse_string(request, "to") + from_token_str = parse_string(request, "from") + to_token_str = parse_string(request, "to") - if from_token: - from_token = RelationPaginationToken.from_string(from_token) + from_token = None + if from_token_str: + from_token = RelationPaginationToken.from_string(from_token_str) - if to_token: - to_token = RelationPaginationToken.from_string(to_token) + to_token = None + if to_token_str: + to_token = RelationPaginationToken.from_string(to_token_str) result = await self.store.get_relations_for_event( event_id=parent_id, diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index ecbbcf3851be..7bb4e6b8aad6 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -112,7 +112,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: default="online", allowed_values=self.ALLOWED_PRESENCE, ) - filter_id = parse_string(request, "filter", default=None) + filter_id = parse_string(request, "filter") full_state = parse_boolean(request, "full_state", default=False) logger.debug( diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 4282e2b228b1..11f73208321b 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -112,7 +112,7 @@ async def _async_render_GET(self, request): request (twisted.web.http.Request): """ version = parse_string(request, "v", default=self._default_consent_version) - username = parse_string(request, "u", required=False, default="") + username = parse_string(request, "u", default="") userhmac = None has_consented = False public_version = username == "" diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 8e7fead3a286..172212ee3a6b 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ 
-186,15 +186,11 @@ async def _async_render_OPTIONS(self, request: Request) -> None: respond_with_json(request, 200, {}, send_cors=True) async def _async_render_GET(self, request: SynapseRequest) -> None: - # This will always be set by the time Twisted calls us. - assert request.args is not None - # XXX: if get_user_by_req fails, what should we do in an async render? requester = await self.auth.get_user_by_req(request) - url = parse_string(request, "url") - if b"ts" in request.args: - ts = parse_integer(request, "ts") - else: + url = parse_string(request, "url", required=True) + ts = parse_integer(request, "ts") + if ts is None: ts = self.clock.time_msec() # XXX: we could move this into _do_preview if we wanted. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index a3fddea042af..bacfbce4afde 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -249,7 +249,7 @@ async def get_users_paginate( name: Optional[str] = None, guests: bool = True, deactivated: bool = False, - order_by: UserSortOrder = UserSortOrder.USER_ID.value, + order_by: str = UserSortOrder.USER_ID.value, direction: str = "f", ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users from diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 6ddafe543412..443e5f331545 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -363,7 +363,7 @@ async def get_rooms_paginate( self, start: int, limit: int, - order_by: RoomSortOrder, + order_by: str, reverse_order: bool, search_term: Optional[str], ) -> Tuple[List[Dict[str, Any]], int]: diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 59d67c255b6d..0f9aa54ca988 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -647,7 +647,7 @@ async def get_users_media_usage_paginate( limit: int, from_ts: Optional[int] = None, until_ts: Optional[int] = None, - order_by: Optional[UserSortOrder] = UserSortOrder.USER_ID.value, + order_by: Optional[str] = UserSortOrder.USER_ID.value, direction: Optional[str] = "f", search_term: Optional[str] = None, ) -> Tuple[List[JsonDict], Dict[str, int]]: diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 13d300588bbc..cf4005984bb6 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -47,20 +47,22 @@ async def from_request( ) -> "PaginationConfig": direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) - from_tok = parse_string(request, "from") - to_tok = parse_string(request, "to") + from_tok_str = parse_string(request, "from") + to_tok_str = parse_string(request, "to") try: - if from_tok == "END": + from_tok = None + if from_tok_str == "END": from_tok = None # For backwards compat. 
- elif from_tok: - from_tok = await StreamToken.from_string(store, from_tok) + elif from_tok_str: + from_tok = await StreamToken.from_string(store, from_tok_str) except Exception: raise SynapseError(400, "'from' parameter is invalid") try: - if to_tok: - to_tok = await StreamToken.from_string(store, to_tok) + to_tok = None + if to_tok_str: + to_tok = await StreamToken.from_string(store, to_tok_str) except Exception: raise SynapseError(400, "'to' parameter is invalid") From d15e72e511724f2e4729b31808d410c1b1ad9041 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Jul 2021 13:29:54 -0400 Subject: [PATCH 06/61] Update the notification email subject when invited to a space. (#10426) --- changelog.d/10426.feature | 1 + synapse/config/emailconfig.py | 4 +++- synapse/push/mailer.py | 18 +++++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10426.feature diff --git a/changelog.d/10426.feature b/changelog.d/10426.feature new file mode 100644 index 000000000000..9cca6dc456df --- /dev/null +++ b/changelog.d/10426.feature @@ -0,0 +1 @@ +Email notifications now state whether an invitation is to a room or a space. diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index bcecbfec0322..8d8f166e9bfb 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -39,12 +39,13 @@ "messages_from_person_and_others": "[%(app)s] You have messages on %(app)s from %(person)s and others...", "invite_from_person": "[%(app)s] %(person)s has invited you to chat on %(app)s...", "invite_from_person_to_room": "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s...", + "invite_from_person_to_space": "[%(app)s] %(person)s has invited you to join the %(space)s space on %(app)s...", "password_reset": "[%(server_name)s] Password reset", "email_validation": "[%(server_name)s] Validate your email", } -@attr.s +@attr.s(slots=True, frozen=True) class EmailSubjectConfig: message_from_person_in_room = attr.ib(type=str) message_from_person = attr.ib(type=str) @@ -54,6 +55,7 @@ class EmailSubjectConfig: messages_from_person_and_others = attr.ib(type=str) invite_from_person = attr.ib(type=str) invite_from_person_to_room = attr.ib(type=str) + invite_from_person_to_space = attr.ib(type=str) password_reset = attr.ib(type=str) email_validation = attr.ib(type=str) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 7be5fe1e9ba1..941fb238b798 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -19,7 +19,7 @@ import bleach import jinja2 -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, Membership, RoomTypes from synapse.api.errors import StoreError from synapse.config.emailconfig import EmailSubjectConfig from synapse.events import EventBase @@ -600,6 +600,22 @@ async def _make_summary_text_single_room( "app": self.app_name, } + # If the room is a space, it gets a slightly different topic. 
+ create_event_id = room_state_ids.get(("m.room.create", "")) + if create_event_id: + create_event = await self.store.get_event( + create_event_id, allow_none=True + ) + if ( + create_event + and create_event.content.get("room_type") == RoomTypes.SPACE + ): + return self.email_subjects.invite_from_person_to_space % { + "person": inviter_name, + "space": room_name, + "app": self.app_name, + } + return self.email_subjects.invite_from_person_to_room % { "person": inviter_name, "room": room_name, From 5b68816de9e9861f5113e99cc4c8f0779829db6b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Jul 2021 13:48:06 -0400 Subject: [PATCH 07/61] Fix the hierarchy of OpenID providers in the docs. (#10445) --- changelog.d/10445.doc | 1 + docs/openid.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10445.doc diff --git a/changelog.d/10445.doc b/changelog.d/10445.doc new file mode 100644 index 000000000000..4c023ded7ca6 --- /dev/null +++ b/changelog.d/10445.doc @@ -0,0 +1 @@ +Fix hierarchy of providers on the OpenID page. diff --git a/docs/openid.md b/docs/openid.md index cfaafc50150f..f685fd551acc 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -410,7 +410,7 @@ oidc_providers: display_name_template: "{{ user.name }}" ``` -## Apple +### Apple Configuring "Sign in with Apple" (SiWA) requires an Apple Developer account. From 590cc4e888f072f7f0788da1f93d80c7bc86be4a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 21 Jul 2021 14:12:22 -0400 Subject: [PATCH 08/61] Add type hints to additional servlet functions (#10437) Improves type hints for: * parse_{boolean,integer} * parse_{boolean,integer}_from_args * parse_json_{value,object}_from_request And fixes any incorrect calls that resulted from unknown types. --- changelog.d/10437.misc | 1 + synapse/federation/transport/server.py | 13 +- synapse/http/servlet.py | 220 ++++++++++++++++++------ synapse/rest/client/v1/room.py | 2 +- synapse/storage/databases/main/stats.py | 2 +- tests/rest/admin/test_media.py | 4 +- 6 files changed, 176 insertions(+), 66 deletions(-) create mode 100644 changelog.d/10437.misc diff --git a/changelog.d/10437.misc b/changelog.d/10437.misc new file mode 100644 index 000000000000..a55757849903 --- /dev/null +++ b/changelog.d/10437.misc @@ -0,0 +1 @@ +Improve servlet type hints. 
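This patch extends the same overload treatment to the integer and boolean
helpers. A rough sketch of the effect at call sites (illustrative only;
`request` and `args` are assumed to be a SynapseRequest and its query args):

# Illustrative only:
limit = parse_integer(request, "limit", default=10)  # typed as int
ts = parse_integer(request, "ts")                    # typed as Optional[int]
include_all = parse_boolean_from_args(
    args, "include_all_networks", default=False
)                                                    # typed as bool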
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 2974d4d0cc1e..5e059d6e09d4 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -984,7 +984,7 @@ async def on_GET( limit = parse_integer_from_args(query, "limit", 0) since_token = parse_string_from_args(query, "since", None) include_all_networks = parse_boolean_from_args( - query, "include_all_networks", False + query, "include_all_networks", default=False ) third_party_instance_id = parse_string_from_args( query, "third_party_instance_id", None @@ -1908,16 +1908,7 @@ async def on_GET( suggested_only = parse_boolean_from_args(query, "suggested_only", default=False) max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space") - exclude_rooms = [] - if b"exclude_rooms" in query: - try: - exclude_rooms = [ - room_id.decode("ascii") for room_id in query[b"exclude_rooms"] - ] - except Exception: - raise SynapseError( - 400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM - ) + exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[]) return 200, await self.handler.federation_space_summary( origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index cf45b6623b23..732a1e6aeb88 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -14,47 +14,86 @@ """ This module contains base REST classes for constructing REST servlets. """ import logging -from typing import Dict, Iterable, List, Optional, overload +from typing import Iterable, List, Mapping, Optional, Sequence, overload from typing_extensions import Literal from twisted.web.server import Request from synapse.api.errors import Codes, SynapseError +from synapse.types import JsonDict from synapse.util import json_decoder logger = logging.getLogger(__name__) -def parse_integer(request, name, default=None, required=False): +@overload +def parse_integer(request: Request, name: str, default: int) -> int: + ... + + +@overload +def parse_integer(request: Request, name: str, *, required: Literal[True]) -> int: + ... + + +@overload +def parse_integer( + request: Request, name: str, default: Optional[int] = None, required: bool = False +) -> Optional[int]: + ... + + +def parse_integer( + request: Request, name: str, default: Optional[int] = None, required: bool = False +) -> Optional[int]: """Parse an integer parameter from the request string Args: request: the twisted HTTP request. - name (bytes/unicode): the name of the query parameter. - default (int|None): value to use if the parameter is absent, defaults - to None. - required (bool): whether to raise a 400 SynapseError if the - parameter is absent, defaults to False. + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the parameter is absent, + defaults to False. Returns: - int|None: An int value or the default. + An int value or the default. Raises: SynapseError: if the parameter is absent and required, or if the parameter is present and not an integer. 
""" - return parse_integer_from_args(request.args, name, default, required) + args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore + return parse_integer_from_args(args, name, default, required) -def parse_integer_from_args(args, name, default=None, required=False): +def parse_integer_from_args( + args: Mapping[bytes, Sequence[bytes]], + name: str, + default: Optional[int] = None, + required: bool = False, +) -> Optional[int]: + """Parse an integer parameter from the request string + + Args: + args: A mapping of request args as bytes to a list of bytes (e.g. request.args). + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the parameter is absent, + defaults to False. + + Returns: + An int value or the default. - if not isinstance(name, bytes): - name = name.encode("ascii") + Raises: + SynapseError: if the parameter is absent and required, or if the + parameter is present and not an integer. + """ + name_bytes = name.encode("ascii") - if name in args: + if name_bytes in args: try: - return int(args[name][0]) + return int(args[name_bytes][0]) except Exception: message = "Query parameter %r must be an integer" % (name,) raise SynapseError(400, message, errcode=Codes.INVALID_PARAM) @@ -66,36 +105,102 @@ def parse_integer_from_args(args, name, default=None, required=False): return default -def parse_boolean(request, name, default=None, required=False): +@overload +def parse_boolean(request: Request, name: str, default: bool) -> bool: + ... + + +@overload +def parse_boolean(request: Request, name: str, *, required: Literal[True]) -> bool: + ... + + +@overload +def parse_boolean( + request: Request, name: str, default: Optional[bool] = None, required: bool = False +) -> Optional[bool]: + ... + + +def parse_boolean( + request: Request, name: str, default: Optional[bool] = None, required: bool = False +) -> Optional[bool]: """Parse a boolean parameter from the request query string Args: request: the twisted HTTP request. - name (bytes/unicode): the name of the query parameter. - default (bool|None): value to use if the parameter is absent, defaults - to None. - required (bool): whether to raise a 400 SynapseError if the - parameter is absent, defaults to False. + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the parameter is absent, + defaults to False. Returns: - bool|None: A bool value or the default. + A bool value or the default. Raises: SynapseError: if the parameter is absent and required, or if the parameter is present and not one of "true" or "false". """ + args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore + return parse_boolean_from_args(args, name, default, required) - return parse_boolean_from_args(request.args, name, default, required) +@overload +def parse_boolean_from_args( + args: Mapping[bytes, Sequence[bytes]], + name: str, + default: bool, +) -> bool: + ... -def parse_boolean_from_args(args, name, default=None, required=False): - if not isinstance(name, bytes): - name = name.encode("ascii") +@overload +def parse_boolean_from_args( + args: Mapping[bytes, Sequence[bytes]], + name: str, + *, + required: Literal[True], +) -> bool: + ... 
+ - if name in args: +@overload +def parse_boolean_from_args( + args: Mapping[bytes, Sequence[bytes]], + name: str, + default: Optional[bool] = None, + required: bool = False, +) -> Optional[bool]: + ... + + +def parse_boolean_from_args( + args: Mapping[bytes, Sequence[bytes]], + name: str, + default: Optional[bool] = None, + required: bool = False, +) -> Optional[bool]: + """Parse a boolean parameter from the request query string + + Args: + args: A mapping of request args as bytes to a list of bytes (e.g. request.args). + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the parameter is absent, + defaults to False. + + Returns: + A bool value or the default. + + Raises: + SynapseError: if the parameter is absent and required, or if the + parameter is present and not one of "true" or "false". + """ + name_bytes = name.encode("ascii") + + if name_bytes in args: try: - return {b"true": True, b"false": False}[args[name][0]] + return {b"true": True, b"false": False}[args[name_bytes][0]] except Exception: message = ( "Boolean query parameter %r must be one of ['true', 'false']" @@ -111,7 +216,7 @@ def parse_boolean_from_args(args, name, default=None, required=False): @overload def parse_bytes_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[bytes] = None, ) -> Optional[bytes]: @@ -120,7 +225,7 @@ def parse_bytes_from_args( @overload def parse_bytes_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Literal[None] = None, *, @@ -131,7 +236,7 @@ def parse_bytes_from_args( @overload def parse_bytes_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[bytes] = None, required: bool = False, @@ -140,7 +245,7 @@ def parse_bytes_from_args( def parse_bytes_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[bytes] = None, required: bool = False, @@ -241,7 +346,7 @@ def parse_string( parameter is present, must be one of a list of allowed values and is not one of those allowed values. """ - args: Dict[bytes, List[bytes]] = request.args # type: ignore + args: Mapping[bytes, Sequence[bytes]] = request.args # type: ignore return parse_string_from_args( args, name, @@ -275,9 +380,8 @@ def _parse_string_value( @overload def parse_strings_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[List[str]] = None, *, allowed_values: Optional[Iterable[str]] = None, encoding: str = "ascii", @@ -287,9 +391,20 @@ def parse_strings_from_args( @overload def parse_strings_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], + name: str, + default: List[str], + *, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> List[str]: + ... 
+ + +@overload +def parse_strings_from_args( + args: Mapping[bytes, Sequence[bytes]], name: str, - default: Optional[List[str]] = None, *, required: Literal[True], allowed_values: Optional[Iterable[str]] = None, @@ -300,7 +415,7 @@ def parse_strings_from_args( @overload def parse_strings_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[List[str]] = None, *, @@ -312,7 +427,7 @@ def parse_strings_from_args( def parse_strings_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[List[str]] = None, required: bool = False, @@ -361,7 +476,7 @@ def parse_strings_from_args( @overload def parse_string_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[str] = None, *, @@ -373,7 +488,7 @@ def parse_string_from_args( @overload def parse_string_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[str] = None, *, @@ -386,7 +501,7 @@ def parse_string_from_args( @overload def parse_string_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[str] = None, required: bool = False, @@ -397,7 +512,7 @@ def parse_string_from_args( def parse_string_from_args( - args: Dict[bytes, List[bytes]], + args: Mapping[bytes, Sequence[bytes]], name: str, default: Optional[str] = None, required: bool = False, @@ -445,13 +560,14 @@ def parse_string_from_args( return strings[0] -def parse_json_value_from_request(request, allow_empty_body=False): +def parse_json_value_from_request( + request: Request, allow_empty_body: bool = False +) -> Optional[JsonDict]: """Parse a JSON value from the body of a twisted HTTP request. Args: request: the twisted HTTP request. - allow_empty_body (bool): if True, an empty body will be accepted and - turned into None + allow_empty_body: if True, an empty body will be accepted and turned into None Returns: The JSON value. @@ -460,7 +576,7 @@ def parse_json_value_from_request(request, allow_empty_body=False): SynapseError if the request body couldn't be decoded as JSON. """ try: - content_bytes = request.content.read() + content_bytes = request.content.read() # type: ignore except Exception: raise SynapseError(400, "Error reading JSON content.") @@ -476,13 +592,15 @@ def parse_json_value_from_request(request, allow_empty_body=False): return content -def parse_json_object_from_request(request, allow_empty_body=False): +def parse_json_object_from_request( + request: Request, allow_empty_body: bool = False +) -> JsonDict: """Parse a JSON object from the body of a twisted HTTP request. Args: request: the twisted HTTP request. - allow_empty_body (bool): if True, an empty body will be accepted and - turned into an empty dict. + allow_empty_body: if True, an empty body will be accepted and turned into + an empty dict. Raises: SynapseError if the request body couldn't be decoded as JSON or @@ -493,14 +611,14 @@ def parse_json_object_from_request(request, allow_empty_body=False): if allow_empty_body and content is None: return {} - if type(content) != dict: + if not isinstance(content, dict): message = "Content must be a JSON object." 
raise SynapseError(400, message, errcode=Codes.BAD_JSON) return content -def assert_params_in_dict(body, required): +def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None: absent = [] for k in required: if k not in body: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 5d309a534c00..25ba52c624b6 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -754,7 +754,7 @@ async def on_GET(self, request): if server: raise e - limit = parse_integer(request, "limit", 0) + limit: Optional[int] = parse_integer(request, "limit", 0) since_token = parse_string(request, "since") if limit == 0: diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 0f9aa54ca988..889e0d362539 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -650,7 +650,7 @@ async def get_users_media_usage_paginate( order_by: Optional[str] = UserSortOrder.USER_ID.value, direction: Optional[str] = "f", search_term: Optional[str] = None, - ) -> Tuple[List[JsonDict], Dict[str, int]]: + ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users and their uploaded local media (size and number). This will return a json list of users and the total number of users matching the filter criteria. diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 6fee0f95b6cb..7198fd293f52 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -261,7 +261,7 @@ def test_missing_parameter(self): self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) self.assertEqual( - "Missing integer query parameter b'before_ts'", channel.json_body["error"] + "Missing integer query parameter 'before_ts'", channel.json_body["error"] ) def test_invalid_parameter(self): @@ -303,7 +303,7 @@ def test_invalid_parameter(self): self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) self.assertEqual( - "Boolean query parameter b'keep_profiles' must be one of ['true', 'false']", + "Boolean query parameter 'keep_profiles' must be one of ['true', 'false']", channel.json_body["error"], ) From 8ae0bdca753d2f51b32bc712b66c26f331ec728c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 21 Jul 2021 21:25:28 +0100 Subject: [PATCH 09/61] Drop xenial-support hacks (#10429) --- changelog.d/10429.misc | 1 + debian/build_virtualenv | 4 +--- debian/changelog | 6 ++++++ debian/compat | 2 +- debian/control | 5 +---- debian/rules | 4 +--- docker/Dockerfile-dhvirtualenv | 18 +++++++++++------- 7 files changed, 22 insertions(+), 18 deletions(-) create mode 100644 changelog.d/10429.misc diff --git a/changelog.d/10429.misc b/changelog.d/10429.misc new file mode 100644 index 000000000000..ccb2217f64fd --- /dev/null +++ b/changelog.d/10429.misc @@ -0,0 +1 @@ +Drop backwards-compatibility code that was required to support Ubuntu Xenial. diff --git a/debian/build_virtualenv b/debian/build_virtualenv index 21caad90cc58..68c86599536c 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -33,13 +33,11 @@ esac # Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather # than the 2/3 compatible `virtualenv`. 
-# Pin pip to 20.3.4 to fix breakage in 21.0 on py3.5 (xenial)
-
 dh_virtualenv \
     --install-suffix "matrix-synapse" \
     --builtin-venv \
     --python "$SNAKE" \
-    --upgrade-pip-to="20.3.4" \
+    --upgrade-pip \
     --preinstall="lxml" \
     --preinstall="mock" \
     --extra-pip-arg="--no-cache-dir" \
diff --git a/debian/changelog b/debian/changelog
index 4d214c23b651..55f7ee003c84 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.39.0ubuntu1) UNRELEASED; urgency=medium
+
+  * Drop backwards-compatibility code that was required to support Ubuntu Xenial.
+
+ -- Richard van der Hoff  Tue, 20 Jul 2021 00:10:03 +0100
+
 matrix-synapse-py3 (1.39.0~rc1) stable; urgency=medium

   * New synapse release 1.39.0rc1.
diff --git a/debian/compat b/debian/compat
index ec635144f600..f599e28b8ab0 100644
--- a/debian/compat
+++ b/debian/compat
@@ -1 +1 @@
-9
+10
diff --git a/debian/control b/debian/control
index 8167a901a4f1..763fabd6f6ae 100644
--- a/debian/control
+++ b/debian/control
@@ -3,11 +3,8 @@ Section: contrib/python
 Priority: extra
 Maintainer: Synapse Packaging team
 # keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
-# TODO: Remove the dependency on dh-systemd after dropping support for Ubuntu xenial
-# On all other supported releases, it's merely a transitional package which
-# does nothing but depends on debhelper (> 9.20160709)
 Build-Depends:
- debhelper (>= 9.20160709) | dh-systemd,
+ debhelper (>= 10),
  dh-virtualenv (>= 1.1),
  libsystemd-dev,
  libpq-dev,
diff --git a/debian/rules b/debian/rules
index c744060a57ae..b9d490adc94c 100755
--- a/debian/rules
+++ b/debian/rules
@@ -51,7 +51,5 @@ override_dh_shlibdeps:
 override_dh_virtualenv:
 	./debian/build_virtualenv

-# We are restricted to compat level 9 (because xenial), so have to
-# enable the systemd bits manually.
 %:
-	dh $@ --with python-virtualenv --with systemd
+	dh $@ --with python-virtualenv
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 0d74630370c4..017be8555ea9 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -15,6 +15,15 @@ ARG distro=""
 ###
 ### Stage 0: build a dh-virtualenv
 ###
+
+# This is only really needed on bionic and focal, since other distributions we
+# care about have a recent version of dh-virtualenv by default. Unfortunately,
+# it looks like focal is going to be with us for a while.
+#
+# (focal doesn't have a dh-virtualenv package at all. There is a PPA at
+# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
+# it's not obviously easier to use that than to build our own.)
+
 FROM ${distro} as builder

 RUN apt-get update -qq -o Acquire::Languages=none
@@ -27,7 +36,7 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
         wget

 # fetch and unpack the package
-# TODO: Upgrade to 1.2.2 once xenial is dropped
+# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
 RUN mkdir /dh-virtualenv
 RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
 RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz
@@ -59,8 +68,6 @@ ENV LANG C.UTF-8
 #
 # NB: keep this list in sync with the list of build-deps in debian/control
 # TODO: it would be nice to do that automatically.
-# TODO: Remove the dh-systemd stanza after dropping support for Ubuntu xenial
-# it's a transitional package on all other, more recent releases
 RUN apt-get update -qq -o Acquire::Languages=none \
   && env DEBIAN_FRONTEND=noninteractive apt-get install \
          -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
@@ -76,10 +83,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
         python3-venv \
         sqlite3 \
         libpq-dev \
-        xmlsec1 \
-    && ( env DEBIAN_FRONTEND=noninteractive apt-get install \
-       -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
-       dh-systemd || true )
+        xmlsec1

 COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /

From f1347bcfdcf7e0ff54a81cd05618af8882e4a757 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 22 Jul 2021 11:10:30 +0100
Subject: [PATCH 10/61] Fix the tests-done Github Actions job (#10444)

---
 .github/workflows/tests.yml | 19 ++++++++++++++++++-
 changelog.d/10444.misc      |  1 +
 2 files changed, 19 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/10444.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index cef4439477b8..9759163290e9 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -347,7 +347,12 @@ jobs:
   # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
   tests-done:
+    if: ${{ always() }}
     needs:
+      - lint
+      - lint-crlf
+      - lint-newsfile
+      - lint-sdist
       - trial
       - trial-olddeps
       - sytest
@@ -355,4 +360,16 @@ jobs:
       - complement
     runs-on: ubuntu-latest
     steps:
-      - run: "true"
\ No newline at end of file
+      - name: Set build result
+        env:
+          NEEDS_CONTEXT: ${{ toJSON(needs) }}
+        # the `jq` incantation dumps out a series of "<job> <result>" lines
+        run: |
+          set -o pipefail
+          jq -r 'to_entries[] | [.key,.value.result] | join(" ")' \
+            <<< $NEEDS_CONTEXT |
+            while read job result; do
+              if [ "$result" != "success" ]; then
+                echo "::set-failed ::Job $job returned $result"
+              fi
+            done
diff --git a/changelog.d/10444.misc b/changelog.d/10444.misc
new file mode 100644
index 000000000000..c012e89f4bbb
--- /dev/null
+++ b/changelog.d/10444.misc
@@ -0,0 +1 @@
+Update the `tests-done` Github Actions status.

From 5e2df47f72ab3270853da6019aba1aa4d4b2cc56 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 22 Jul 2021 11:35:06 +0100
Subject: [PATCH 11/61] Cancel redundant GHA workflows (#10451)

---
 .github/workflows/release-artifacts.yml | 4 ++++
 .github/workflows/tests.yml             | 4 ++++
 changelog.d/10451.misc                  | 1 +
 3 files changed, 9 insertions(+)
 create mode 100644 changelog.d/10451.misc

diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 325c1f7d3940..0beb418a07c3 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -12,6 +12,10 @@ on:
   # we do the full build on tags.
tags: ["v*"] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + permissions: contents: write diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 9759163290e9..4e61824ee5bc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,6 +5,10 @@ on: branches: ["develop", "release-*"] pull_request: +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: lint: runs-on: ubuntu-latest diff --git a/changelog.d/10451.misc b/changelog.d/10451.misc new file mode 100644 index 000000000000..e38f4b476dde --- /dev/null +++ b/changelog.d/10451.misc @@ -0,0 +1 @@ +Cancel redundant GHA workflows when a new commit is pushed. From d518b05a8667943bd0aa9ab1edc91eec0a8283fe Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 22 Jul 2021 05:58:24 -0500 Subject: [PATCH 12/61] Move dev/ docs to development/ (#10453) --- CONTRIBUTING.md | 2 +- changelog.d/10453.doc | 1 + docs/SUMMARY.md | 6 +++--- docs/{dev => development}/cas.md | 0 docs/{dev => development}/git.md | 6 +++--- docs/{dev => development/img}/git/branches.jpg | Bin docs/{dev => development/img}/git/clean.png | Bin docs/{dev => development/img}/git/squash.png | Bin docs/{dev => development}/saml.md | 0 9 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/10453.doc rename docs/{dev => development}/cas.md (100%) rename docs/{dev => development}/git.md (97%) rename docs/{dev => development/img}/git/branches.jpg (100%) rename docs/{dev => development/img}/git/clean.png (100%) rename docs/{dev => development/img}/git/squash.png (100%) rename docs/{dev => development}/saml.md (100%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a4e6688042e3..80ef6aa235ae 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -392,7 +392,7 @@ By now, you know the drill! # Notes for maintainers on merging PRs etc There are some notes for those with commit access to the project on how we -manage git [here](docs/dev/git.md). +manage git [here](docs/development/git.md). # Conclusion diff --git a/changelog.d/10453.doc b/changelog.d/10453.doc new file mode 100644 index 000000000000..5d4db9bca29c --- /dev/null +++ b/changelog.d/10453.doc @@ -0,0 +1 @@ +Consolidate development documentation to `docs/development/`. 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index db4ef1a44e86..f1bde91420fc 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -67,7 +67,7 @@
 # Development
   - [Contributing Guide](development/contributing_guide.md)
   - [Code Style](code_style.md)
-  - [Git Usage](dev/git.md)
+  - [Git Usage](development/git.md)
   - [Testing]()
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
@@ -77,8 +77,8 @@
   - [TCP Replication](tcp_replication.md)
   - [Internal Documentation](development/internal_documentation/README.md)
   - [Single Sign-On]()
-    - [SAML](dev/saml.md)
-    - [CAS](dev/cas.md)
+    - [SAML](development/saml.md)
+    - [CAS](development/cas.md)
   - [State Resolution]()
     - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
   - [Media Repository](media_repository.md)
diff --git a/docs/dev/cas.md b/docs/development/cas.md
similarity index 100%
rename from docs/dev/cas.md
rename to docs/development/cas.md
diff --git a/docs/dev/git.md b/docs/development/git.md
similarity index 97%
rename from docs/dev/git.md
rename to docs/development/git.md
index 87950f07b21b..9b1ed54b65ac 100644
--- a/docs/dev/git.md
+++ b/docs/development/git.md
@@ -9,7 +9,7 @@ commits each of which contains a single change building on what came
 before.

 Here, by way of an arbitrary example, is the top of `git log --graph
 b2dba0607`:

-<img src="git/clean.png" alt="clean git graph">
+<img src="img/git/clean.png" alt="clean git graph">

 Note how the commit comment explains clearly what is changing and why. Also
 note the *absence* of merge commits, as well as the absence of commits called
@@ -61,7 +61,7 @@ Ok, so that's what we'd like to achieve. How do we achieve it?

 The TL;DR is: when you come to merge a pull request, you *probably* want to
 “squash and merge”:

-![squash and merge](git/squash.png).
+![squash and merge](img/git/squash.png).

 (This applies whether you are merging your own PR, or that of another
 contributor.)
@@ -105,7 +105,7 @@ complicated. Here's how we do it.

 Let's start with a picture:

-![branching model](git/branches.jpg)
+![branching model](img/git/branches.jpg)

 It looks complicated, but it's really not. There's one basic rule: *anyone* is
 free to merge from *any* more-stable branch to *any* less-stable branch at
diff --git a/docs/dev/git/branches.jpg b/docs/development/img/git/branches.jpg
similarity index 100%
rename from docs/dev/git/branches.jpg
rename to docs/development/img/git/branches.jpg
diff --git a/docs/dev/git/clean.png b/docs/development/img/git/clean.png
similarity index 100%
rename from docs/dev/git/clean.png
rename to docs/development/img/git/clean.png
diff --git a/docs/dev/git/squash.png b/docs/development/img/git/squash.png
similarity index 100%
rename from docs/dev/git/squash.png
rename to docs/development/img/git/squash.png
diff --git a/docs/dev/saml.md b/docs/development/saml.md
similarity index 100%
rename from docs/dev/saml.md
rename to docs/development/saml.md

From d8324b8238a31b8d749b1dfe507c3bed3bcc6e17 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 22 Jul 2021 12:00:16 +0100
Subject: [PATCH 13/61] Fix a handful of type annotations.
(#10446) * switch from `types.CoroutineType` to `typing.Coroutine` these should be identical semantically, and since `defer.ensureDeferred` is defined to take a `typing.Coroutine`, this will keep mypy happy * Fix some annotations on inlineCallbacks functions * changelog --- changelog.d/10446.misc | 1 + synapse/http/federation/matrix_federation_agent.py | 4 ++-- synapse/logging/context.py | 4 ++-- synapse/module_api/__init__.py | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/10446.misc diff --git a/changelog.d/10446.misc b/changelog.d/10446.misc new file mode 100644 index 000000000000..a5a0ca80eb4d --- /dev/null +++ b/changelog.d/10446.misc @@ -0,0 +1 @@ +Update type annotations to work with forthcoming Twisted 21.7.0 release. diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 950770201a79..c16b7f10e645 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -27,7 +27,7 @@ ) from twisted.web.client import URI, Agent, HTTPConnectionPool from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer +from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResponse from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http.client import BlacklistingAgentWrapper @@ -116,7 +116,7 @@ def request( uri: bytes, headers: Optional[Headers] = None, bodyProducer: Optional[IBodyProducer] = None, - ) -> Generator[defer.Deferred, Any, defer.Deferred]: + ) -> Generator[defer.Deferred, Any, IResponse]: """ Args: method: HTTP method: GET/POST/etc diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 18ac50780285..02e5ddd2ef2a 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -25,7 +25,7 @@ import inspect import logging import threading -import types +import typing import warnings from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union @@ -745,7 +745,7 @@ def run_in_background(f, *args, **kwargs) -> defer.Deferred: # by synchronous exceptions, so let's turn them into Failures. return defer.fail() - if isinstance(res, types.CoroutineType): + if isinstance(res, typing.Coroutine): res = defer.ensureDeferred(res) # At this point we should have a Deferred, if not then f was a synchronous diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 1259fc2d906a..473812b8e295 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -484,7 +484,7 @@ async def complete_sso_login_async( @defer.inlineCallbacks def get_state_events_in_room( self, room_id: str, types: Iterable[Tuple[str, Optional[str]]] - ) -> Generator[defer.Deferred, Any, defer.Deferred]: + ) -> Generator[defer.Deferred, Any, Iterable[EventBase]]: """Gets current state events for the given room. (This is exposed for compatibility with the old SpamCheckerApi. We should From 38b346a504cd4155b1986d50ebcff2199e1690be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Jul 2021 12:39:50 +0100 Subject: [PATCH 14/61] Replace `or_ignore` in `simple_insert` with `simple_upsert` (#10442) Now that we have `simple_upsert`, it should be used in preference to trying to insert and looking for an exception. The main benefit is that ERROR messages don't get written to the postgres logs.
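As a sketch of the call-site migration this implies (modelled on the `devices.py` hunk below; illustrative rather than a verbatim excerpt):

```python
# Before: insert, swallowing unique-constraint violations via `or_ignore`
# (this is what produced the spurious ERROR lines in the postgres logs).
inserted = await self.db_pool.simple_insert(
    "devices",
    values={"user_id": user_id, "device_id": device_id, "hidden": False},
    or_ignore=True,
)

# After: upsert keyed on the unique columns. `insertion_values` only apply
# when the row is newly created, so an existing row is left untouched and
# no conflict error is ever raised.
inserted = await self.db_pool.simple_upsert(
    "devices",
    keyvalues={"user_id": user_id, "device_id": device_id},
    values={},
    insertion_values={"hidden": False},
)
```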
We also tidy up the return value of `simple_upsert`, rather than having a tri-state of inserted/not-inserted/unknown. --- changelog.d/10442.misc | 1 + synapse/storage/database.py | 51 ++++++-------- synapse/storage/databases/main/devices.py | 9 ++- .../databases/main/monthly_active_users.py | 8 +-- .../storage/databases/main/transactions.py | 8 ++- .../storage/databases/main/user_directory.py | 66 ++++--------------- 6 files changed, 44 insertions(+), 99 deletions(-) create mode 100644 changelog.d/10442.misc diff --git a/changelog.d/10442.misc b/changelog.d/10442.misc new file mode 100644 index 000000000000..b8d412d73289 --- /dev/null +++ b/changelog.d/10442.misc @@ -0,0 +1 @@ +Replace usage of `or_ignore` in `simple_insert` with `simple_upsert` usage, to stop spamming postgres logs with spurious ERROR messages. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index ccf9ac51ef8c..4d4643619f68 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -832,31 +832,16 @@ async def simple_insert( self, table: str, values: Dict[str, Any], - or_ignore: bool = False, desc: str = "simple_insert", - ) -> bool: + ) -> None: """Executes an INSERT query on the named table. Args: table: string giving the table name values: dict of new column names and values for them - or_ignore: bool stating whether an exception should be raised - when a conflicting row already exists. If True, False will be - returned by the function instead desc: description of the transaction, for logging and metrics - - Returns: - Whether the row was inserted or not. Only useful when `or_ignore` is True """ - try: - await self.runInteraction(desc, self.simple_insert_txn, table, values) - except self.engine.module.IntegrityError: - # We have to do or_ignore flag at this layer, since we can't reuse - # a cursor after we receive an error from the db. - if not or_ignore: - raise - return False - return True + await self.runInteraction(desc, self.simple_insert_txn, table, values) @staticmethod def simple_insert_txn( @@ -930,7 +915,7 @@ async def simple_upsert( insertion_values: Optional[Dict[str, Any]] = None, desc: str = "simple_upsert", lock: bool = True, - ) -> Optional[bool]: + ) -> bool: """ `lock` should generally be set to True (the default), but can be set @@ -951,8 +936,8 @@ async def simple_upsert( desc: description of the transaction, for logging and metrics lock: True to lock the table when doing the upsert. Returns: - Native upserts always return None. Emulated upserts return True if a - new entry was created, False if an existing one was updated. + Returns True if a row was inserted or updated (i.e. if `values` is + not empty then this always returns True) """ insertion_values = insertion_values or {} @@ -995,7 +980,7 @@ def simple_upsert_txn( values: Dict[str, Any], insertion_values: Optional[Dict[str, Any]] = None, lock: bool = True, - ) -> Optional[bool]: + ) -> bool: """ Pick the UPSERT method which works best on the platform. Either the native one (Pg9.5+, recent SQLites), or fall back to an emulated method. @@ -1008,16 +993,15 @@ def simple_upsert_txn( insertion_values: additional key/values to use only when inserting lock: True to lock the table when doing the upsert. Returns: - Native upserts always return None. Emulated upserts return True if a - new entry was created, False if an existing one was updated. + Returns True if a row was inserted or updated (i.e.
if `values` is + not empty then this always returns True) """ insertion_values = insertion_values or {} if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: - self.simple_upsert_txn_native_upsert( + return self.simple_upsert_txn_native_upsert( txn, table, keyvalues, values, insertion_values=insertion_values ) - return None else: return self.simple_upsert_txn_emulated( txn, @@ -1045,8 +1029,8 @@ def simple_upsert_txn_emulated( insertion_values: additional key/values to use only when inserting lock: True to lock the table when doing the upsert. Returns: - Returns True if a new entry was created, False if an existing - one was updated. + Returns True if a row was inserted or updated (i.e. if `values` is + not empty then this always returns True) """ insertion_values = insertion_values or {} @@ -1086,8 +1070,7 @@ def _getwhere(key): txn.execute(sql, sqlargs) if txn.rowcount > 0: - # successfully updated at least one row. - return False + return True # We didn't find any existing rows, so insert a new one allvalues: Dict[str, Any] = {} @@ -1111,15 +1094,19 @@ def simple_upsert_txn_native_upsert( keyvalues: Dict[str, Any], values: Dict[str, Any], insertion_values: Optional[Dict[str, Any]] = None, - ) -> None: + ) -> bool: """ - Use the native UPSERT functionality in recent PostgreSQL versions. + Use the native UPSERT functionality in PostgreSQL. Args: table: The table to upsert into keyvalues: The unique key tables and their new values values: The nonunique columns and their new values insertion_values: additional key/values to use only when inserting + + Returns: + Returns True if a row was inserted or updated (i.e. if `values` is + not empty then this always returns True) """ allvalues: Dict[str, Any] = {} allvalues.update(keyvalues) @@ -1140,6 +1127,8 @@ def simple_upsert_txn_native_upsert( ) txn.execute(sql, list(allvalues.values())) + return bool(txn.rowcount) + async def simple_upsert_many( self, table: str, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 18f07d96dcd0..3816a0ca5382 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1078,16 +1078,18 @@ async def store_device( return False try: - inserted = await self.db_pool.simple_insert( + inserted = await self.db_pool.simple_upsert( "devices", - values={ + keyvalues={ "user_id": user_id, "device_id": device_id, + }, + values={}, + insertion_values={ "display_name": initial_device_display_name, "hidden": False, }, desc="store_device", - or_ignore=True, ) if not inserted: # if the device already exists, check if it's a real device, or @@ -1099,6 +1101,7 @@ async def store_device( ) if hidden: raise StoreError(400, "The device ID is in use", Codes.FORBIDDEN) + self.device_id_exists_cache.set(key, True) return inserted except StoreError: diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index fe25638289cd..d213b2670364 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -297,17 +297,13 @@ def upsert_monthly_active_user_txn(self, txn, user_id): Args: txn (cursor): user_id (str): user to add/update - - Returns: - bool: True if a new entry was created, False if an - existing one was updated. 
""" # Am consciously deciding to lock the table on the basis that is ought # never be a big table and alternative approaches (batching multiple # upserts into a single txn) introduced a lot of extra complexity. # See https://github.com/matrix-org/synapse/issues/3854 for more - is_insert = self.db_pool.simple_upsert_txn( + self.db_pool.simple_upsert_txn( txn, table="monthly_active_users", keyvalues={"user_id": user_id}, @@ -322,8 +318,6 @@ def upsert_monthly_active_user_txn(self, txn, user_id): txn, self.user_last_seen_monthly_active, (user_id,) ) - return is_insert - async def populate_monthly_active_users(self, user_id): """Checks on the state of monthly active user limits and optionally add the user to the monthly active tables diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index d211c423b2ce..7728d5f1023c 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -134,16 +134,18 @@ async def set_received_txn_response( response_dict: The response, to be encoded into JSON. """ - await self.db_pool.simple_insert( + await self.db_pool.simple_upsert( table="received_transactions", - values={ + keyvalues={ "transaction_id": transaction_id, "origin": origin, + }, + values={}, + insertion_values={ "response_code": code, "response_json": db_binary_type(encode_canonical_json(response_dict)), "ts": self._clock.time_msec(), }, - or_ignore=True, desc="set_received_txn_response", ) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index a6bfb4902a1c..9d28d69ac7d1 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -377,7 +377,7 @@ async def update_profile_in_user_dir( avatar_url = None def _update_profile_in_user_dir_txn(txn): - new_entry = self.db_pool.simple_upsert_txn( + self.db_pool.simple_upsert_txn( txn, table="user_directory", keyvalues={"user_id": user_id}, @@ -388,8 +388,7 @@ def _update_profile_in_user_dir_txn(txn): if isinstance(self.database_engine, PostgresEngine): # We weight the localpart most highly, then display name and finally # server name - if self.database_engine.can_native_upsert: - sql = """ + sql = """ INSERT INTO user_directory_search(user_id, vector) VALUES (?, setweight(to_tsvector('simple', ?), 'A') @@ -397,58 +396,15 @@ def _update_profile_in_user_dir_txn(txn): || setweight(to_tsvector('simple', COALESCE(?, '')), 'B') ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector """ - txn.execute( - sql, - ( - user_id, - get_localpart_from_id(user_id), - get_domain_from_id(user_id), - display_name, - ), - ) - else: - # TODO: Remove this code after we've bumped the minimum version - # of postgres to always support upserts, so we can get rid of - # `new_entry` usage - if new_entry is True: - sql = """ - INSERT INTO user_directory_search(user_id, vector) - VALUES (?, - setweight(to_tsvector('simple', ?), 'A') - || setweight(to_tsvector('simple', ?), 'D') - || setweight(to_tsvector('simple', COALESCE(?, '')), 'B') - ) - """ - txn.execute( - sql, - ( - user_id, - get_localpart_from_id(user_id), - get_domain_from_id(user_id), - display_name, - ), - ) - elif new_entry is False: - sql = """ - UPDATE user_directory_search - SET vector = setweight(to_tsvector('simple', ?), 'A') - || setweight(to_tsvector('simple', ?), 'D') - || setweight(to_tsvector('simple', COALESCE(?, '')), 'B') - WHERE user_id = ? 
- """ - txn.execute( - sql, - ( - get_localpart_from_id(user_id), - get_domain_from_id(user_id), - display_name, - user_id, - ), - ) - else: - raise RuntimeError( - "upsert returned None when 'can_native_upsert' is False" - ) + txn.execute( + sql, + ( + user_id, + get_localpart_from_id(user_id), + get_domain_from_id(user_id), + display_name, + ), + ) elif isinstance(self.database_engine, Sqlite3Engine): value = "%s %s" % (user_id, display_name) if display_name else user_id self.db_pool.simple_upsert_txn( From 89c4ca81bb597159e456449c548ba3f166843ddc Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 22 Jul 2021 16:05:16 +0200 Subject: [PATCH 15/61] Add `creation_ts` to list users admin API (#10448) Signed-off-by: Dirk Klimpel dirk@klimpel.org --- changelog.d/10448.feature | 1 + docs/admin_api/user_admin_api.md | 10 +++-- synapse/rest/admin/users.py | 2 + synapse/storage/databases/main/__init__.py | 19 ++++----- synapse/storage/databases/main/stats.py | 2 + tests/rest/admin/test_user.py | 45 +++++++++++++--------- 6 files changed, 46 insertions(+), 33 deletions(-) create mode 100644 changelog.d/10448.feature diff --git a/changelog.d/10448.feature b/changelog.d/10448.feature new file mode 100644 index 000000000000..f6579e0ca85b --- /dev/null +++ b/changelog.d/10448.feature @@ -0,0 +1 @@ +Add `creation_ts` to list users admin API. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 4a65d0c3bc96..160899754ede 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -144,7 +144,8 @@ A response body like the following is returned: "deactivated": 0, "shadow_banned": 0, "displayname": "", - "avatar_url": null + "avatar_url": null, + "creation_ts": 1560432668000 }, { "name": "", "is_guest": 0, @@ -153,7 +154,8 @@ A response body like the following is returned: "deactivated": 0, "shadow_banned": 0, "displayname": "", - "avatar_url": "" + "avatar_url": "", + "creation_ts": 1561550621000 } ], "next_token": "100", @@ -197,11 +199,12 @@ The following parameters should be set in the URL: - `shadow_banned` - Users are ordered by `shadow_banned` status. - `displayname` - Users are ordered alphabetically by `displayname`. - `avatar_url` - Users are ordered alphabetically by avatar URL. + - `creation_ts` - Users are ordered by when the users was created in ms. - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards. Setting this value to `b` will reverse the above sort order. Defaults to `f`. -Caution. The database only has indexes on the columns `name` and `created_ts`. +Caution. The database only has indexes on the columns `name` and `creation_ts`. This means that if a different sort order is used (`is_guest`, `admin`, `user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`), this can cause a large load on the database, especially for large environments. @@ -222,6 +225,7 @@ The following fields are returned in the JSON response body: - `shadow_banned` - bool - Status if that user has been marked as shadow banned. - `displayname` - string - The user's display name if they have set one. - `avatar_url` - string - The user's avatar URL if they have set one. + - `creation_ts` - integer - The user's creation timestamp in ms. - `next_token`: string representing a positive integer - Indication for pagination. See above. - `total` - integer - Total number of media. 
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 6736536172b5..eef76ab18a94 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -62,6 +62,7 @@ class UsersRestServletV2(RestServlet): The parameter `name` can be used to filter by user id or display name. The parameter `guests` can be used to exclude guest users. The parameter `deactivated` can be used to include deactivated users. + The parameter `order_by` can be used to order the result. """ def __init__(self, hs: "HomeServer"): @@ -108,6 +109,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: UserSortOrder.USER_TYPE.value, UserSortOrder.AVATAR_URL.value, UserSortOrder.SHADOW_BANNED.value, + UserSortOrder.CREATION_TS.value, ), ) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index bacfbce4afde..8d9f07111db5 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -297,27 +297,22 @@ def get_users_paginate_txn(txn): where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" - sql_base = """ + sql_base = f""" FROM users as u LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ? - {} - """.format( - where_clause - ) + {where_clause} + """ sql = "SELECT COUNT(*) as total_users " + sql_base txn.execute(sql, args) count = txn.fetchone()[0] - sql = """ - SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url + sql = f""" + SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, + displayname, avatar_url, creation_ts * 1000 as creation_ts {sql_base} ORDER BY {order_by_column} {order}, u.name ASC LIMIT ? OFFSET ? - """.format( - sql_base=sql_base, - order_by_column=order_by_column, - order=order, - ) + """ args += [limit, start] txn.execute(sql, args) users = self.db_pool.cursor_to_dict(txn) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 889e0d362539..42edbcc05784 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -75,6 +75,7 @@ class UserSortOrder(Enum): USER_TYPE = ordered alphabetically by `user_type` AVATAR_URL = ordered alphabetically by `avatar_url` SHADOW_BANNED = ordered by `shadow_banned` + CREATION_TS = ordered by `creation_ts` """ MEDIA_LENGTH = "media_length" @@ -88,6 +89,7 @@ class UserSortOrder(Enum): USER_TYPE = "user_type" AVATAR_URL = "avatar_url" SHADOW_BANNED = "shadow_banned" + CREATION_TS = "creation_ts" class StatsStore(StateDeltasStore): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 4fccce34fdd5..42f50c092101 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -473,7 +473,7 @@ def test_no_auth(self): """ channel = self.make_request("GET", self.url, b"{}") - self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) def test_requester_is_no_admin(self): @@ -485,7 +485,7 @@ def test_requester_is_no_admin(self): channel = self.make_request("GET", self.url, access_token=other_user_token) - self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_all_users(self): @@ -497,11 +497,11 @@ def 
test_all_users(self): channel = self.make_request( "GET", self.url + "?deactivated=true", - b"{}", + {}, access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(3, len(channel.json_body["users"])) self.assertEqual(3, channel.json_body["total"]) @@ -532,7 +532,7 @@ def _search_test( ) channel = self.make_request( "GET", - url.encode("ascii"), + url, access_token=self.admin_user_tok, ) self.assertEqual(expected_http_code, channel.code, msg=channel.json_body) @@ -598,7 +598,7 @@ def test_invalid_parameter(self): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # negative from @@ -608,7 +608,7 @@ def test_invalid_parameter(self): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) # invalid guests @@ -618,7 +618,7 @@ def test_invalid_parameter(self): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # invalid deactivated @@ -628,7 +628,7 @@ def test_invalid_parameter(self): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) # unkown order_by @@ -648,7 +648,7 @@ def test_invalid_parameter(self): access_token=self.admin_user_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) def test_limit(self): @@ -666,7 +666,7 @@ def test_limit(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 5) self.assertEqual(channel.json_body["next_token"], "5") @@ -687,7 +687,7 @@ def test_from(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 15) self.assertNotIn("next_token", channel.json_body) @@ -708,7 +708,7 @@ def test_limit_and_from(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(channel.json_body["next_token"], "15") self.assertEqual(len(channel.json_body["users"]), 10) @@ -731,7 +731,7 @@ def test_next_token(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, 
msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), number_users) self.assertNotIn("next_token", channel.json_body) @@ -744,7 +744,7 @@ def test_next_token(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), number_users) self.assertNotIn("next_token", channel.json_body) @@ -757,7 +757,7 @@ def test_next_token(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 19) self.assertEqual(channel.json_body["next_token"], "19") @@ -771,7 +771,7 @@ def test_next_token(self): access_token=self.admin_user_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_users) self.assertEqual(len(channel.json_body["users"]), 1) self.assertNotIn("next_token", channel.json_body) @@ -781,7 +781,10 @@ def test_order_by(self): Testing order list with parameter `order_by` """ + # make sure that the users do not have the same timestamps + self.reactor.advance(10) user1 = self.register_user("user1", "pass1", admin=False, displayname="Name Z") + self.reactor.advance(10) user2 = self.register_user("user2", "pass2", admin=False, displayname="Name Y") # Modify user @@ -841,6 +844,11 @@ def test_order_by(self): self._order_test([self.admin_user, user2, user1], "avatar_url", "f") self._order_test([user1, user2, self.admin_user], "avatar_url", "b") + # order by creation_ts + self._order_test([self.admin_user, user1, user2], "creation_ts") + self._order_test([self.admin_user, user1, user2], "creation_ts", "f") + self._order_test([user2, user1, self.admin_user], "creation_ts", "b") + def _order_test( self, expected_user_list: List[str], @@ -863,7 +871,7 @@ def _order_test( url += "dir=%s" % (dir,) channel = self.make_request( "GET", - url.encode("ascii"), + url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) @@ -887,6 +895,7 @@ def _check_fields(self, content: JsonDict): self.assertIn("shadow_banned", u) self.assertIn("displayname", u) self.assertIn("avatar_url", u) + self.assertIn("creation_ts", u) def _create_users(self, number_users: int): """ From cd5fcd2731182aa48aa7c0a44c7e547681021296 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 22 Jul 2021 15:19:30 -0500 Subject: [PATCH 16/61] Disable msc2716 until Complement update is merged (#10463) --- changelog.d/10463.misc | 1 + scripts-dev/complement.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10463.misc diff --git a/changelog.d/10463.misc b/changelog.d/10463.misc new file mode 100644 index 000000000000..d7b4d2222e10 --- /dev/null +++ b/changelog.d/10463.misc @@ -0,0 +1 @@ +Disable `msc2716` Complement tests until Complement updates are merged. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index aca32edc176e..4df224be6719 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -65,4 +65,4 @@ if [[ -n "$1" ]]; then fi # Run the tests! 
-go test -v -tags synapse_blacklist,msc2946,msc3083,msc2716,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests +go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests From f22252d4f9383ebb9134d6592d74da83d537f79a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 26 Jul 2021 11:36:01 +0100 Subject: [PATCH 17/61] Enable docker image caching for the deb build (#10431) --- .github/workflows/release-artifacts.yml | 39 ++++++++++++++++++++++--- changelog.d/10431.misc | 1 + scripts-dev/build_debian_packages | 38 ++++++++++++++++++------ 3 files changed, 65 insertions(+), 13 deletions(-) create mode 100644 changelog.d/10431.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 0beb418a07c3..eb294f161939 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -48,12 +48,43 @@ jobs: distro: ${{ fromJson(needs.get-distros.outputs.distros) }} steps: - - uses: actions/checkout@v2 + - name: Checkout + uses: actions/checkout@v2 with: path: src + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + install: true + + - name: Set up docker layer caching + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + - name: Set up python + uses: actions/setup-python@v2 + + - name: Build the packages + # see https://github.com/docker/build-push-action/issues/252 + # for the cache magic here + run: | + ./src/scripts-dev/build_debian_packages \ + --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \ + --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \ + --docker-build-arg=--progress=plain \ + --docker-build-arg=--load \ + "${{ matrix.distro }}" + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + + - name: Upload debs as artifacts + uses: actions/upload-artifact@v2 with: name: debs path: debs/* diff --git a/changelog.d/10431.misc b/changelog.d/10431.misc new file mode 100644 index 000000000000..34b9b49da680 --- /dev/null +++ b/changelog.d/10431.misc @@ -0,0 +1 @@ +Use a docker image cache for the prerequisites for the debian package build.
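Before the script diff below, a minimal sketch of the argument-threading pattern it introduces (a hypothetical standalone function; the real script keeps this logic inside its `Builder` class):

```python
import subprocess
from typing import Optional, Sequence


def build_dist_image(dist: str, docker_build_args: Optional[Sequence[str]] = None) -> None:
    # Splice any passthrough flags (e.g. the --cache-from/--cache-to options
    # supplied by the workflow above) into the base `docker build` command,
    # keeping the build context ("docker") as the final argument.
    cmd = (
        "docker",
        "build",
        "--build-arg",
        "distro=" + dist,
        "-f",
        "docker/Dockerfile-dhvirtualenv",
    ) + tuple(docker_build_args or ()) + ("docker",)
    subprocess.check_call(cmd)
```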
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index e25c5bb2650d..0ed1c679fd94 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -17,6 +17,7 @@ import subprocess import sys import threading from concurrent.futures import ThreadPoolExecutor +from typing import Optional, Sequence DISTS = ( "debian:buster", @@ -39,8 +40,11 @@ projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) class Builder(object): - def __init__(self, redirect_stdout=False): + def __init__( + self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None + ): self.redirect_stdout = redirect_stdout + self._docker_build_args = tuple(docker_build_args or ()) self.active_containers = set() self._lock = threading.Lock() self._failed = False @@ -79,8 +83,8 @@ class Builder(object): stdout = None # first build a docker image for the build environment - subprocess.check_call( - [ + build_args = ( + ( "docker", "build", "--tag", @@ -89,8 +93,13 @@ class Builder(object): "distro=" + dist, "-f", "docker/Dockerfile-dhvirtualenv", - "docker", - ], + ) + + self._docker_build_args + + ("docker",) + ) + + subprocess.check_call( + build_args, stdout=stdout, stderr=subprocess.STDOUT, cwd=projdir, @@ -147,9 +156,7 @@ class Builder(object): self.active_containers.remove(c) -def run_builds(dists, jobs=1, skip_tests=False): - builder = Builder(redirect_stdout=(jobs > 1)) - +def run_builds(builder, dists, jobs=1, skip_tests=False): def sig(signum, _frame): print("Caught SIGINT") builder.kill_containers() @@ -180,6 +187,11 @@ if __name__ == "__main__": action="store_true", help="skip running tests after building", ) + parser.add_argument( + "--docker-build-arg", + action="append", + help="specify an argument to pass to docker build", + ) parser.add_argument( "--show-dists-json", action="store_true", @@ -195,4 +207,12 @@ if __name__ == "__main__": if args.show_dists_json: print(json.dumps(DISTS)) else: - run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check) + builder = Builder( + redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg + ) + run_builds( + builder, + dists=args.dist, + jobs=args.jobs, + skip_tests=args.no_check, + ) From 4fb92d93eae3c3519a9a47ea7fdef2d492014f2f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 26 Jul 2021 11:53:09 -0400 Subject: [PATCH 18/61] Add type hints to synapse.federation.transport.client. (#10408) --- changelog.d/10408.misc | 1 + synapse/federation/transport/client.py | 499 +++++++++++++++---------- 2 files changed, 299 insertions(+), 201 deletions(-) create mode 100644 changelog.d/10408.misc diff --git a/changelog.d/10408.misc b/changelog.d/10408.misc new file mode 100644 index 000000000000..abccd210a924 --- /dev/null +++ b/changelog.d/10408.misc @@ -0,0 +1 @@ +Add type hints to `synapse.federation.transport.client` module. 
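The diff below repeats one mechanical conversion many times; here is a self-contained sketch of the pattern (stub names, not Synapse's real classes):

```python
import asyncio


class StubHttpClient:
    """Stand-in for the federation HTTP client (illustration only)."""

    async def get_json(self, destination: str, path: str) -> dict:
        return {"destination": destination, "path": path}


class ExampleTransportClient:
    def __init__(self) -> None:
        self.client = StubHttpClient()

    # Before the patch, methods returned the bare awaitable from the HTTP
    # client with loose or missing annotations:
    #
    #     def get_thing(self, destination, thing_id):
    #         return self.client.get_json(destination, "/thing/%s" % thing_id)
    #
    # After, the method is `async def` with concrete types and awaits the
    # result, so the declared return type matches what callers receive:
    async def get_thing(self, destination: str, thing_id: str) -> dict:
        return await self.client.get_json(destination, "/thing/%s" % thing_id)


print(asyncio.run(ExampleTransportClient().get_thing("example.com", "abc")))
```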
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 98b1bf77fdea..e73bdb52b33c 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -15,7 +15,7 @@ import logging import urllib -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Union import attr import ijson @@ -29,6 +29,7 @@ FEDERATION_V2_PREFIX, ) from synapse.events import EventBase, make_event_from_dict +from synapse.federation.units import Transaction from synapse.http.matrixfederationclient import ByteParser from synapse.logging.utils import log_function from synapse.types import JsonDict @@ -49,23 +50,25 @@ def __init__(self, hs): self.client = hs.get_federation_http_client() @log_function - def get_room_state_ids(self, destination, room_id, event_id): + async def get_room_state_ids( + self, destination: str, room_id: str, event_id: str + ) -> JsonDict: """Requests all state for a given room from the given server at the given event. Returns the state's event_id's Args: - destination (str): The host name of the remote homeserver we want + destination: The host name of the remote homeserver we want to get the state from. - context (str): The name of the context we want the state of - event_id (str): The event we want the context at. + context: The name of the context we want the state of + event_id: The event we want the context at. Returns: - Awaitable: Results in a dict received from the remote homeserver. + Results in a dict received from the remote homeserver. """ logger.debug("get_room_state_ids dest=%s, room=%s", destination, room_id) path = _create_v1_path("/state_ids/%s", room_id) - return self.client.get_json( + return await self.client.get_json( destination, path=path, args={"event_id": event_id}, @@ -73,39 +76,43 @@ def get_room_state_ids(self, destination, room_id, event_id): ) @log_function - def get_event(self, destination, event_id, timeout=None): + async def get_event( + self, destination: str, event_id: str, timeout: Optional[int] = None + ) -> JsonDict: """Requests the pdu with give id and origin from the given server. Args: - destination (str): The host name of the remote homeserver we want + destination: The host name of the remote homeserver we want to get the state from. - event_id (str): The id of the event being requested. - timeout (int): How long to try (in ms) the destination for before + event_id: The id of the event being requested. + timeout: How long to try (in ms) the destination for before giving up. None indicates no timeout. Returns: - Awaitable: Results in a dict received from the remote homeserver. + Results in a dict received from the remote homeserver. """ logger.debug("get_pdu dest=%s, event_id=%s", destination, event_id) path = _create_v1_path("/event/%s", event_id) - return self.client.get_json( + return await self.client.get_json( destination, path=path, timeout=timeout, try_trailing_slash_on_400=True ) @log_function - def backfill(self, destination, room_id, event_tuples, limit): + async def backfill( + self, destination: str, room_id: str, event_tuples: Iterable[str], limit: int + ) -> Optional[JsonDict]: """Requests `limit` previous PDUs in a given context before list of PDUs. Args: - dest (str) - room_id (str) - event_tuples (list) - limit (int) + destination + room_id + event_tuples + limit Returns: - Awaitable: Results in a dict received from the remote homeserver. 
+ Results in a dict received from the remote homeserver. """ logger.debug( "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s", @@ -117,18 +124,22 @@ def backfill(self, destination, room_id, event_tuples, limit): if not event_tuples: # TODO: raise? - return + return None path = _create_v1_path("/backfill/%s", room_id) args = {"v": event_tuples, "limit": [str(limit)]} - return self.client.get_json( + return await self.client.get_json( destination, path=path, args=args, try_trailing_slash_on_400=True ) @log_function - async def send_transaction(self, transaction, json_data_callback=None): + async def send_transaction( + self, + transaction: Transaction, + json_data_callback: Optional[Callable[[], JsonDict]] = None, + ) -> JsonDict: """Sends the given Transaction to its destination Args: @@ -149,21 +160,21 @@ async def send_transaction(self, transaction, json_data_callback=None): """ logger.debug( "send_data dest=%s, txid=%s", - transaction.destination, - transaction.transaction_id, + transaction.destination, # type: ignore + transaction.transaction_id, # type: ignore ) - if transaction.destination == self.server_name: + if transaction.destination == self.server_name: # type: ignore raise RuntimeError("Transport layer cannot send to itself!") # FIXME: This is only used by the tests. The actual json sent is # generated by the json_data_callback. json_data = transaction.get_dict() - path = _create_v1_path("/send/%s", transaction.transaction_id) + path = _create_v1_path("/send/%s", transaction.transaction_id) # type: ignore - response = await self.client.put_json( - transaction.destination, + return await self.client.put_json( + transaction.destination, # type: ignore path=path, data=json_data, json_data_callback=json_data_callback, @@ -172,8 +183,6 @@ async def send_transaction(self, transaction, json_data_callback=None): try_trailing_slash_on_400=True, ) - return response - @log_function async def make_query( self, destination, query_type, args, retry_on_dns_fail, ignore_backoff=False @@ -193,8 +202,13 @@ async def make_query( @log_function async def make_membership_event( - self, destination, room_id, user_id, membership, params - ): + self, + destination: str, + room_id: str, + user_id: str, + membership: str, + params: Optional[Mapping[str, Union[str, Iterable[str]]]], + ) -> JsonDict: """Asks a remote server to build and sign us a membership event Note that this does not append any events to any graphs. 
@@ -240,7 +254,7 @@ async def make_membership_event( ignore_backoff = True retry_on_dns_fail = True - content = await self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args=params, @@ -249,20 +263,18 @@ async def make_membership_event( ignore_backoff=ignore_backoff, ) - return content - @log_function async def send_join_v1( self, - room_version, - destination, - room_id, - event_id, - content, + room_version: RoomVersion, + destination: str, + room_id: str, + event_id: str, + content: JsonDict, ) -> "SendJoinResponse": path = _create_v1_path("/send_join/%s/%s", room_id, event_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=content, @@ -270,15 +282,18 @@ async def send_join_v1( max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) - return response - @log_function async def send_join_v2( - self, room_version, destination, room_id, event_id, content + self, + room_version: RoomVersion, + destination: str, + room_id: str, + event_id: str, + content: JsonDict, ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=content, @@ -286,13 +301,13 @@ async def send_join_v2( max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) - return response - @log_function - async def send_leave_v1(self, destination, room_id, event_id, content): + async def send_leave_v1( + self, destination: str, room_id: str, event_id: str, content: JsonDict + ) -> Tuple[int, JsonDict]: path = _create_v1_path("/send_leave/%s/%s", room_id, event_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=content, @@ -303,13 +318,13 @@ async def send_leave_v1(self, destination, room_id, event_id, content): ignore_backoff=True, ) - return response - @log_function - async def send_leave_v2(self, destination, room_id, event_id, content): + async def send_leave_v2( + self, destination: str, room_id: str, event_id: str, content: JsonDict + ) -> JsonDict: path = _create_v2_path("/send_leave/%s/%s", room_id, event_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=content, @@ -320,8 +335,6 @@ async def send_leave_v2(self, destination, room_id, event_id, content): ignore_backoff=True, ) - return response - @log_function async def send_knock_v1( self, @@ -357,25 +370,25 @@ async def send_knock_v1( ) @log_function - async def send_invite_v1(self, destination, room_id, event_id, content): + async def send_invite_v1( + self, destination: str, room_id: str, event_id: str, content: JsonDict + ) -> Tuple[int, JsonDict]: path = _create_v1_path("/invite/%s/%s", room_id, event_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=content, ignore_backoff=True ) - return response - @log_function - async def send_invite_v2(self, destination, room_id, event_id, content): + async def send_invite_v2( + self, destination: str, room_id: str, event_id: str, content: JsonDict + ) -> JsonDict: path = _create_v2_path("/invite/%s/%s", room_id, event_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=content, ignore_backoff=True ) - return response - @log_function async def get_public_rooms( self, @@ -385,7 +398,7 @@ async def 
get_public_rooms( search_filter: Optional[Dict] = None, include_all_networks: bool = False, third_party_instance_id: Optional[str] = None, - ): + ) -> JsonDict: """Get the list of public rooms from a remote homeserver See synapse.federation.federation_client.FederationClient.get_public_rooms for @@ -450,25 +463,27 @@ async def get_public_rooms( return response @log_function - async def exchange_third_party_invite(self, destination, room_id, event_dict): + async def exchange_third_party_invite( + self, destination: str, room_id: str, event_dict: JsonDict + ) -> JsonDict: path = _create_v1_path("/exchange_third_party_invite/%s", room_id) - response = await self.client.put_json( + return await self.client.put_json( destination=destination, path=path, data=event_dict ) - return response - @log_function - async def get_event_auth(self, destination, room_id, event_id): + async def get_event_auth( + self, destination: str, room_id: str, event_id: str + ) -> JsonDict: path = _create_v1_path("/event_auth/%s/%s", room_id, event_id) - content = await self.client.get_json(destination=destination, path=path) - - return content + return await self.client.get_json(destination=destination, path=path) @log_function - async def query_client_keys(self, destination, query_content, timeout): + async def query_client_keys( + self, destination: str, query_content: JsonDict, timeout: int + ) -> JsonDict: """Query the device keys for a list of user ids hosted on a remote server. @@ -496,20 +511,21 @@ async def query_client_keys(self, destination, query_content, timeout): } Args: - destination(str): The server to query. - query_content(dict): The user ids to query. + destination: The server to query. + query_content: The user ids to query. Returns: A dict containing device and cross-signing keys. """ path = _create_v1_path("/user/keys/query") - content = await self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - return content @log_function - async def query_user_devices(self, destination, user_id, timeout): + async def query_user_devices( + self, destination: str, user_id: str, timeout: int + ) -> JsonDict: """Query the devices for a user id hosted on a remote server. Response: @@ -535,20 +551,21 @@ async def query_user_devices(self, destination, user_id, timeout): } Args: - destination(str): The server to query. - query_content(dict): The user ids to query. + destination: The server to query. + query_content: The user ids to query. Returns: A dict containing device and cross-signing keys. """ path = _create_v1_path("/user/devices/%s", user_id) - content = await self.client.get_json( + return await self.client.get_json( destination=destination, path=path, timeout=timeout ) - return content @log_function - async def claim_client_keys(self, destination, query_content, timeout): + async def claim_client_keys( + self, destination: str, query_content: JsonDict, timeout: int + ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. Request: @@ -572,33 +589,32 @@ async def claim_client_keys(self, destination, query_content, timeout): } Args: - destination(str): The server to query. - query_content(dict): The user ids to query. + destination: The server to query. + query_content: The user ids to query. Returns: A dict containing the one-time keys. 
""" path = _create_v1_path("/user/keys/claim") - content = await self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - return content @log_function async def get_missing_events( self, - destination, - room_id, - earliest_events, - latest_events, - limit, - min_depth, - timeout, - ): + destination: str, + room_id: str, + earliest_events: Iterable[str], + latest_events: Iterable[str], + limit: int, + min_depth: int, + timeout: int, + ) -> JsonDict: path = _create_v1_path("/get_missing_events/%s", room_id) - content = await self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data={ @@ -610,14 +626,14 @@ async def get_missing_events( timeout=timeout, ) - return content - @log_function - def get_group_profile(self, destination, group_id, requester_user_id): + async def get_group_profile( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get a group profile""" path = _create_v1_path("/groups/%s/profile", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -625,14 +641,16 @@ def get_group_profile(self, destination, group_id, requester_user_id): ) @log_function - def update_group_profile(self, destination, group_id, requester_user_id, content): + async def update_group_profile( + self, destination: str, group_id: str, requester_user_id: str, content: JsonDict + ) -> JsonDict: """Update a remote group profile Args: - destination (str) - group_id (str) - requester_user_id (str) - content (dict): The new profile of the group + destination + group_id + requester_user_id + content: The new profile of the group """ path = _create_v1_path("/groups/%s/profile", group_id) @@ -645,11 +663,13 @@ def update_group_profile(self, destination, group_id, requester_user_id, content ) @log_function - def get_group_summary(self, destination, group_id, requester_user_id): + async def get_group_summary( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get a group summary""" path = _create_v1_path("/groups/%s/summary", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -657,24 +677,31 @@ def get_group_summary(self, destination, group_id, requester_user_id): ) @log_function - def get_rooms_in_group(self, destination, group_id, requester_user_id): + async def get_rooms_in_group( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get all rooms in a group""" path = _create_v1_path("/groups/%s/rooms", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) - def add_room_to_group( - self, destination, group_id, requester_user_id, room_id, content - ): + async def add_room_to_group( + self, + destination: str, + group_id: str, + requester_user_id: str, + room_id: str, + content: JsonDict, + ) -> JsonDict: """Add a room to a group""" path = _create_v1_path("/groups/%s/room/%s", group_id, room_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -682,15 +709,21 @@ def add_room_to_group( ignore_backoff=True, ) - def update_room_in_group( - self, 
destination, group_id, requester_user_id, room_id, config_key, content - ): + async def update_room_in_group( + self, + destination: str, + group_id: str, + requester_user_id: str, + room_id: str, + config_key: str, + content: JsonDict, + ) -> JsonDict: """Update room in group""" path = _create_v1_path( "/groups/%s/room/%s/config/%s", group_id, room_id, config_key ) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -698,11 +731,13 @@ def update_room_in_group( ignore_backoff=True, ) - def remove_room_from_group(self, destination, group_id, requester_user_id, room_id): + async def remove_room_from_group( + self, destination: str, group_id: str, requester_user_id: str, room_id: str + ) -> JsonDict: """Remove a room from a group""" path = _create_v1_path("/groups/%s/room/%s", group_id, room_id) - return self.client.delete_json( + return await self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -710,11 +745,13 @@ def remove_room_from_group(self, destination, group_id, requester_user_id, room_ ) @log_function - def get_users_in_group(self, destination, group_id, requester_user_id): + async def get_users_in_group( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get users in a group""" path = _create_v1_path("/groups/%s/users", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -722,11 +759,13 @@ def get_users_in_group(self, destination, group_id, requester_user_id): ) @log_function - def get_invited_users_in_group(self, destination, group_id, requester_user_id): + async def get_invited_users_in_group( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get users that have been invited to a group""" path = _create_v1_path("/groups/%s/invited_users", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -734,16 +773,20 @@ def get_invited_users_in_group(self, destination, group_id, requester_user_id): ) @log_function - def accept_group_invite(self, destination, group_id, user_id, content): + async def accept_group_invite( + self, destination: str, group_id: str, user_id: str, content: JsonDict + ) -> JsonDict: """Accept a group invite""" path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function - def join_group(self, destination, group_id, user_id, content): + def join_group( + self, destination: str, group_id: str, user_id: str, content: JsonDict + ) -> JsonDict: """Attempts to join a group""" path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id) @@ -752,13 +795,18 @@ def join_group(self, destination, group_id, user_id, content): ) @log_function - def invite_to_group( - self, destination, group_id, user_id, requester_user_id, content - ): + async def invite_to_group( + self, + destination: str, + group_id: str, + user_id: str, + requester_user_id: str, + content: JsonDict, + ) -> JsonDict: """Invite a user to a group""" path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( 
destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -767,25 +815,32 @@ def invite_to_group( ) @log_function - def invite_to_group_notification(self, destination, group_id, user_id, content): + async def invite_to_group_notification( + self, destination: str, group_id: str, user_id: str, content: JsonDict + ) -> JsonDict: """Sent by group server to inform a user's server that they have been invited. """ path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function - def remove_user_from_group( - self, destination, group_id, requester_user_id, user_id, content - ): + async def remove_user_from_group( + self, + destination: str, + group_id: str, + requester_user_id: str, + user_id: str, + content: JsonDict, + ) -> JsonDict: """Remove a user from a group""" path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -794,35 +849,43 @@ def remove_user_from_group( ) @log_function - def remove_user_from_group_notification( - self, destination, group_id, user_id, content - ): + async def remove_user_from_group_notification( + self, destination: str, group_id: str, user_id: str, content: JsonDict + ) -> JsonDict: """Sent by group server to inform a user's server that they have been kicked from the group. """ path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function - def renew_group_attestation(self, destination, group_id, user_id, content): + async def renew_group_attestation( + self, destination: str, group_id: str, user_id: str, content: JsonDict + ) -> JsonDict: """Sent by either a group server or a user's server to periodically update the attestations """ path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, data=content, ignore_backoff=True ) @log_function - def update_group_summary_room( - self, destination, group_id, user_id, room_id, category_id, content - ): + async def update_group_summary_room( + self, + destination: str, + group_id: str, + user_id: str, + room_id: str, + category_id: str, + content: JsonDict, + ) -> JsonDict: """Update a room entry in a group summary""" if category_id: path = _create_v1_path( @@ -834,7 +897,7 @@ def update_group_summary_room( else: path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": user_id}, @@ -843,9 +906,14 @@ def update_group_summary_room( ) @log_function - def delete_group_summary_room( - self, destination, group_id, user_id, room_id, category_id - ): + async def delete_group_summary_room( + self, + destination: str, + group_id: str, + user_id: str, + room_id: str, + category_id: str, + ) -> JsonDict: """Delete a room entry in a group summary""" if category_id: path = _create_v1_path( @@ -857,7 +925,7 @@ def delete_group_summary_room( else: path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id) - return 
self.client.delete_json( + return await self.client.delete_json( destination=destination, path=path, args={"requester_user_id": user_id}, @@ -865,11 +933,13 @@ def delete_group_summary_room( ) @log_function - def get_group_categories(self, destination, group_id, requester_user_id): + async def get_group_categories( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get all categories in a group""" path = _create_v1_path("/groups/%s/categories", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -877,11 +947,13 @@ def get_group_categories(self, destination, group_id, requester_user_id): ) @log_function - def get_group_category(self, destination, group_id, requester_user_id, category_id): + async def get_group_category( + self, destination: str, group_id: str, requester_user_id: str, category_id: str + ) -> JsonDict: """Get category info in a group""" path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -889,13 +961,18 @@ def get_group_category(self, destination, group_id, requester_user_id, category_ ) @log_function - def update_group_category( - self, destination, group_id, requester_user_id, category_id, content - ): + async def update_group_category( + self, + destination: str, + group_id: str, + requester_user_id: str, + category_id: str, + content: JsonDict, + ) -> JsonDict: """Update a category in a group""" path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -904,13 +981,13 @@ def update_group_category( ) @log_function - def delete_group_category( - self, destination, group_id, requester_user_id, category_id - ): + async def delete_group_category( + self, destination: str, group_id: str, requester_user_id: str, category_id: str + ) -> JsonDict: """Delete a category in a group""" path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) - return self.client.delete_json( + return await self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -918,11 +995,13 @@ def delete_group_category( ) @log_function - def get_group_roles(self, destination, group_id, requester_user_id): + async def get_group_roles( + self, destination: str, group_id: str, requester_user_id: str + ) -> JsonDict: """Get all roles in a group""" path = _create_v1_path("/groups/%s/roles", group_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -930,11 +1009,13 @@ def get_group_roles(self, destination, group_id, requester_user_id): ) @log_function - def get_group_role(self, destination, group_id, requester_user_id, role_id): + async def get_group_role( + self, destination: str, group_id: str, requester_user_id: str, role_id: str + ) -> JsonDict: """Get a roles info""" path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) - return self.client.get_json( + return await self.client.get_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -942,13 +1023,18 @@ def get_group_role(self, destination, group_id, 
requester_user_id, role_id): ) @log_function - def update_group_role( - self, destination, group_id, requester_user_id, role_id, content - ): + async def update_group_role( + self, + destination: str, + group_id: str, + requester_user_id: str, + role_id: str, + content: JsonDict, + ) -> JsonDict: """Update a role in a group""" path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -957,11 +1043,13 @@ def update_group_role( ) @log_function - def delete_group_role(self, destination, group_id, requester_user_id, role_id): + async def delete_group_role( + self, destination: str, group_id: str, requester_user_id: str, role_id: str + ) -> JsonDict: """Delete a role in a group""" path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) - return self.client.delete_json( + return await self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -969,9 +1057,15 @@ def delete_group_role(self, destination, group_id, requester_user_id, role_id): ) @log_function - def update_group_summary_user( - self, destination, group_id, requester_user_id, user_id, role_id, content - ): + async def update_group_summary_user( + self, + destination: str, + group_id: str, + requester_user_id: str, + user_id: str, + role_id: str, + content: JsonDict, + ) -> JsonDict: """Update a users entry in a group""" if role_id: path = _create_v1_path( @@ -980,7 +1074,7 @@ def update_group_summary_user( else: path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id) - return self.client.post_json( + return await self.client.post_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -989,11 +1083,13 @@ def update_group_summary_user( ) @log_function - def set_group_join_policy(self, destination, group_id, requester_user_id, content): + async def set_group_join_policy( + self, destination: str, group_id: str, requester_user_id: str, content: JsonDict + ) -> JsonDict: """Sets the join policy for a group""" path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id) - return self.client.put_json( + return await self.client.put_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, @@ -1002,9 +1098,14 @@ def set_group_join_policy(self, destination, group_id, requester_user_id, conten ) @log_function - def delete_group_summary_user( - self, destination, group_id, requester_user_id, user_id, role_id - ): + async def delete_group_summary_user( + self, + destination: str, + group_id: str, + requester_user_id: str, + user_id: str, + role_id: str, + ) -> JsonDict: """Delete a users entry in a group""" if role_id: path = _create_v1_path( @@ -1013,33 +1114,35 @@ def delete_group_summary_user( else: path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id) - return self.client.delete_json( + return await self.client.delete_json( destination=destination, path=path, args={"requester_user_id": requester_user_id}, ignore_backoff=True, ) - def bulk_get_publicised_groups(self, destination, user_ids): + async def bulk_get_publicised_groups( + self, destination: str, user_ids: Iterable[str] + ) -> JsonDict: """Get the groups a list of users are publicising""" path = _create_v1_path("/get_groups_publicised") content = {"user_ids": user_ids} - return self.client.post_json( + return await self.client.post_json( 
destination=destination, path=path, data=content, ignore_backoff=True ) - def get_room_complexity(self, destination, room_id): + async def get_room_complexity(self, destination: str, room_id: str) -> JsonDict: """ Args: - destination (str): The remote server - room_id (str): The room ID to ask about. + destination: The remote server + room_id: The room ID to ask about. """ path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id) - return self.client.get_json(destination=destination, path=path) + return await self.client.get_json(destination=destination, path=path) async def get_space_summary( self, @@ -1075,14 +1178,14 @@ async def get_space_summary( ) -def _create_path(federation_prefix, path, *args): +def _create_path(federation_prefix: str, path: str, *args: str) -> str: """ Ensures that all args are url encoded. """ return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) -def _create_v1_path(path, *args): +def _create_v1_path(path: str, *args: str) -> str: """Creates a path against V1 federation API from the path template and args. Ensures that all args are url encoded. @@ -1091,16 +1194,13 @@ def _create_v1_path(path, *args): _create_v1_path("/event/%s", event_id) Args: - path (str): String template for the path - args: ([str]): Args to insert into path. Each arg will be url encoded - - Returns: - str + path: String template for the path + args: Args to insert into path. Each arg will be url encoded """ return _create_path(FEDERATION_V1_PREFIX, path, *args) -def _create_v2_path(path, *args): +def _create_v2_path(path: str, *args: str) -> str: """Creates a path against V2 federation API from the path template and args. Ensures that all args are url encoded. @@ -1109,11 +1209,8 @@ def _create_v2_path(path, *args): _create_v2_path("/event/%s", event_id) Args: - path (str): String template for the path - args: ([str]): Args to insert into path. Each arg will be url encoded - - Returns: - str + path: String template for the path + args: Args to insert into path. Each arg will be url encoded """ return _create_path(FEDERATION_V2_PREFIX, path, *args) From 228decfce1a71651d64c359d1cf28e10d0a69fc8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 26 Jul 2021 12:17:00 -0400 Subject: [PATCH 19/61] Update the MSC3083 support to verify if joins are from an authorized server. (#10254) --- changelog.d/10254.feature | 1 + synapse/api/errors.py | 3 + synapse/api/room_versions.py | 2 +- synapse/event_auth.py | 77 ++++++++--- synapse/federation/federation_base.py | 28 ++++ synapse/federation/federation_client.py | 61 +++++++-- synapse/federation/federation_server.py | 41 +++++- synapse/federation/transport/client.py | 30 +++- synapse/handlers/event_auth.py | 85 +++++++++++- synapse/handlers/federation.py | 54 ++++++-- synapse/handlers/room_member.py | 175 ++++++++++++++++++++++-- synapse/state/__init__.py | 12 +- synapse/state/v1.py | 40 ++++-- synapse/state/v2.py | 11 +- tests/state/test_v2.py | 6 +- tests/storage/test_redaction.py | 6 +- tests/test_event_auth.py | 98 +++++++++++-- 17 files changed, 632 insertions(+), 98 deletions(-) create mode 100644 changelog.d/10254.feature diff --git a/changelog.d/10254.feature b/changelog.d/10254.feature new file mode 100644 index 000000000000..df8bb51167f7 --- /dev/null +++ b/changelog.d/10254.feature @@ -0,0 +1 @@ +Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. 
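Before the code changes, it helps to fix the shape being validated: a join event into a restricted room, sent for a user who is not already invited or joined, must name a user on a server that is already resident in the room via a "join_authorised_via_users_server" key in the event content, and the event must carry a signature from that user's server in addition to the sender's own. A minimal, self-contained sketch of that rule (the domain helper is a stand-in for synapse.types.get_domain_from_id; this illustrates the check, it is not the code from the diffs below):

    def get_domain_from_id(user_id: str) -> str:
        # Stand-in: "@creator:example.com" -> "example.com"
        return user_id.split(":", 1)[1]

    def is_authorised_restricted_join(content: dict, signatures: dict) -> bool:
        # A sender who is not already joined or invited must name an authoriser.
        authorising_user = content.get("join_authorised_via_users_server")
        if authorising_user is None:
            return False
        # Event signatures are keyed by server name; the authorising user's
        # server must have countersigned, alongside the sender's own server.
        return get_domain_from_id(authorising_user) in signatures

    content = {
        "membership": "join",
        "join_authorised_via_users_server": "@creator:example.com",
    }
    signatures = {
        "joiner.example.org": {"ed25519:1": "..."},  # sender's server
        "example.com": {"ed25519:1": "..."},         # authorising server
    }
    assert is_authorised_restricted_join(content, signatures)

The full checks below additionally require the authorising user to be joined to the room with enough power to invite, and they route the /send_join for such an event back to that same resident server so it can countersign.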
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 054ab14ab6e0..dc662bca8353 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -75,6 +75,9 @@ class Codes: INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" BAD_ALIAS = "M_BAD_ALIAS" + # For restricted join rules. + UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN" + UNABLE_TO_GRANT_JOIN = "M_UNABLE_TO_GRANT_JOIN" class CodeMessageException(RuntimeError): diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 8dd33dcb83e4..697319e52dc5 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -168,7 +168,7 @@ class RoomVersions: msc2403_knocking=False, ) MSC3083 = RoomVersion( - "org.matrix.msc3083", + "org.matrix.msc3083.v2", RoomDisposition.UNSTABLE, EventFormatVersions.V3, StateResolutionVersions.V2, diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 137dff251370..cc92d3547792 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -106,6 +106,18 @@ def check( if not event.signatures.get(event_id_domain): raise AuthError(403, "Event not signed by sending server") + is_invite_via_allow_rule = ( + event.type == EventTypes.Member + and event.membership == Membership.JOIN + and "join_authorised_via_users_server" in event.content + ) + if is_invite_via_allow_rule: + authoriser_domain = get_domain_from_id( + event.content["join_authorised_via_users_server"] + ) + if not event.signatures.get(authoriser_domain): + raise AuthError(403, "Event not signed by authorising server") + # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules # # 1. If type is m.room.create: @@ -177,7 +189,7 @@ def check( # https://github.com/vector-im/vector-web/issues/1208 hopefully if event.type == EventTypes.ThirdPartyInvite: user_level = get_user_power_level(event.user_id, auth_events) - invite_level = _get_named_level(auth_events, "invite", 0) + invite_level = get_named_level(auth_events, "invite", 0) if user_level < invite_level: raise AuthError(403, "You don't have permission to invite users") @@ -285,8 +297,8 @@ def _is_membership_change_allowed( user_level = get_user_power_level(event.user_id, auth_events) target_level = get_user_power_level(target_user_id, auth_events) - # FIXME (erikj): What should we do here as the default? - ban_level = _get_named_level(auth_events, "ban", 50) + invite_level = get_named_level(auth_events, "invite", 0) + ban_level = get_named_level(auth_events, "ban", 50) logger.debug( "_is_membership_change_allowed: %s", @@ -336,8 +348,6 @@ def _is_membership_change_allowed( elif target_in_room: # the target is already in the room. raise AuthError(403, "%s is already in the room." % target_user_id) else: - invite_level = _get_named_level(auth_events, "invite", 0) - if user_level < invite_level: raise AuthError(403, "You don't have permission to invite users") elif Membership.JOIN == membership: @@ -345,16 +355,41 @@ def _is_membership_change_allowed( # * They are not banned. # * They are accepting a previously sent invitation. # * They are already joined (it's a NOOP). - # * The room is public or restricted. + # * The room is public. + # * The room is restricted and the user meets the allows rules. 
if event.user_id != target_user_id: raise AuthError(403, "Cannot force another user to join.") elif target_banned: raise AuthError(403, "You are banned from this room") - elif join_rule == JoinRules.PUBLIC or ( + elif join_rule == JoinRules.PUBLIC: + pass + elif ( room_version.msc3083_join_rules and join_rule == JoinRules.MSC3083_RESTRICTED ): - pass + # This is the same as public, but the event must contain a reference + # to the server who authorised the join. If the event does not contain + # the proper content it is rejected. + # + # Note that if the caller is in the room or invited, then they do + # not need to meet the allow rules. + if not caller_in_room and not caller_invited: + authorising_user = event.content.get("join_authorised_via_users_server") + + if authorising_user is None: + raise AuthError(403, "Join event is missing authorising user.") + + # The authorising user must be in the room. + key = (EventTypes.Member, authorising_user) + member_event = auth_events.get(key) + _check_joined_room(member_event, authorising_user, event.room_id) + + authorising_user_level = get_user_power_level( + authorising_user, auth_events + ) + if authorising_user_level < invite_level: + raise AuthError(403, "Join event authorised by invalid server.") + elif join_rule == JoinRules.INVITE or ( room_version.msc2403_knocking and join_rule == JoinRules.KNOCK ): @@ -369,7 +404,7 @@ def _is_membership_change_allowed( if target_banned and user_level < ban_level: raise AuthError(403, "You cannot unban user %s." % (target_user_id,)) elif target_user_id != event.user_id: - kick_level = _get_named_level(auth_events, "kick", 50) + kick_level = get_named_level(auth_events, "kick", 50) if user_level < kick_level or user_level <= target_level: raise AuthError(403, "You cannot kick user %s." % target_user_id) @@ -445,7 +480,7 @@ def get_send_level( def _can_send_event(event: EventBase, auth_events: StateMap[EventBase]) -> bool: - power_levels_event = _get_power_level_event(auth_events) + power_levels_event = get_power_level_event(auth_events) send_level = get_send_level(event.type, event.get("state_key"), power_levels_event) user_level = get_user_power_level(event.user_id, auth_events) @@ -485,7 +520,7 @@ def check_redaction( """ user_level = get_user_power_level(event.user_id, auth_events) - redact_level = _get_named_level(auth_events, "redact", 50) + redact_level = get_named_level(auth_events, "redact", 50) if user_level >= redact_level: return False @@ -600,7 +635,7 @@ def _check_power_levels( ) -def _get_power_level_event(auth_events: StateMap[EventBase]) -> Optional[EventBase]: +def get_power_level_event(auth_events: StateMap[EventBase]) -> Optional[EventBase]: return auth_events.get((EventTypes.PowerLevels, "")) @@ -616,7 +651,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int: Returns: the user's power level in this room. 
""" - power_level_event = _get_power_level_event(auth_events) + power_level_event = get_power_level_event(auth_events) if power_level_event: level = power_level_event.content.get("users", {}).get(user_id) if not level: @@ -640,8 +675,8 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int: return 0 -def _get_named_level(auth_events: StateMap[EventBase], name: str, default: int) -> int: - power_level_event = _get_power_level_event(auth_events) +def get_named_level(auth_events: StateMap[EventBase], name: str, default: int) -> int: + power_level_event = get_power_level_event(auth_events) if not power_level_event: return default @@ -728,7 +763,9 @@ def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]: return public_keys -def auth_types_for_event(event: Union[EventBase, EventBuilder]) -> Set[Tuple[str, str]]: +def auth_types_for_event( + room_version: RoomVersion, event: Union[EventBase, EventBuilder] +) -> Set[Tuple[str, str]]: """Given an event, return a list of (EventType, StateKey) that may be needed to auth the event. The returned list may be a superset of what would actually be required depending on the full state of the room. @@ -760,4 +797,12 @@ def auth_types_for_event(event: Union[EventBase, EventBuilder]) -> Set[Tuple[str ) auth_types.add(key) + if room_version.msc3083_join_rules and membership == Membership.JOIN: + if "join_authorised_via_users_server" in event.content: + key = ( + EventTypes.Member, + event.content["join_authorised_via_users_server"], + ) + auth_types.add(key) + return auth_types diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 2bfe6a3d3739..024e440ff401 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -178,6 +178,34 @@ async def _check_sigs_on_pdu( ) raise SynapseError(403, errmsg, Codes.FORBIDDEN) + # If this is a join event for a restricted room it may have been authorised + # via a different server from the sending server. Check those signatures. + if ( + room_version.msc3083_join_rules + and pdu.type == EventTypes.Member + and pdu.membership == Membership.JOIN + and "join_authorised_via_users_server" in pdu.content + ): + authorising_server = get_domain_from_id( + pdu.content["join_authorised_via_users_server"] + ) + try: + await keyring.verify_event_for_server( + authorising_server, + pdu, + pdu.origin_server_ts if room_version.enforce_key_validity else 0, + ) + except Exception as e: + errmsg = ( + "event id %s: unable to verify signature for authorising server %s: %s" + % ( + pdu.event_id, + authorising_server, + e, + ) + ) + raise SynapseError(403, errmsg, Codes.FORBIDDEN) + def _is_invite_via_3pid(event: EventBase) -> bool: return ( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c767d30627a6..dbadf102f2d7 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -19,7 +19,6 @@ import logging from typing import ( TYPE_CHECKING, - Any, Awaitable, Callable, Collection, @@ -79,7 +78,15 @@ class InvalidResponseError(RuntimeError): we couldn't parse """ - pass + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SendJoinResult: + # The event to persist. + event: EventBase + # A string giving the server the event was sent to. 
+ origin: str + state: List[EventBase] + auth_chain: List[EventBase] class FederationClient(FederationBase): @@ -677,7 +684,7 @@ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: async def send_join( self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion - ) -> Dict[str, Any]: + ) -> SendJoinResult: """Sends a join event to one of a list of homeservers. Doing so will cause the remote server to add the event to the graph, @@ -691,18 +698,38 @@ async def send_join( did the make_join) Returns: - a dict with members ``origin`` (a string - giving the server the event was sent to, ``state`` (?) and - ``auth_chain``. + The result of the send join request. Raises: SynapseError: if the chosen remote server returns a 300/400 code, or no servers successfully handle the request. """ - async def send_request(destination) -> Dict[str, Any]: + async def send_request(destination) -> SendJoinResult: response = await self._do_send_join(room_version, destination, pdu) + # If an event was returned (and expected to be returned): + # + # * Ensure it has the same event ID (note that the event ID is a hash + # of the event fields for versions which support MSC3083). + # * Ensure the signatures are good. + # + # Otherwise, fallback to the provided event. + if room_version.msc3083_join_rules and response.event: + event = response.event + + valid_pdu = await self._check_sigs_and_hash_and_fetch_one( + pdu=event, + origin=destination, + outlier=True, + room_version=room_version, + ) + + if valid_pdu is None or event.event_id != pdu.event_id: + raise InvalidResponseError("Returned an invalid join event") + else: + event = pdu + state = response.state auth_chain = response.auth_events @@ -784,11 +811,21 @@ async def _execute(pdu: EventBase) -> None: % (auth_chain_create_events,) ) - return { - "state": signed_state, - "auth_chain": signed_auth, - "origin": destination, - } + return SendJoinResult( + event=event, + state=signed_state, + auth_chain=signed_auth, + origin=destination, + ) + + if room_version.msc3083_join_rules: + # If the join is being authorised via allow rules, we need to send + # the /send_join back to the same server that was originally used + # with /make_join. 
+ if "join_authorised_via_users_server" in pdu.content: + destinations = [ + get_domain_from_id(pdu.content["join_authorised_via_users_server"]) + ] return await self._try_destination_list("send_join", destinations, send_request) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 29619aeeb86e..2892a11d7d8e 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -45,6 +45,7 @@ UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion +from synapse.crypto.event_signing import compute_event_signature from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_base import FederationBase, event_from_pdu_json @@ -64,7 +65,7 @@ ReplicationGetQueryRestServlet, ) from synapse.storage.databases.main.lock import Lock -from synapse.types import JsonDict +from synapse.types import JsonDict, get_domain_from_id from synapse.util import glob_to_regex, json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache @@ -586,7 +587,7 @@ async def on_invite_request( async def on_send_join_request( self, origin: str, content: JsonDict, room_id: str ) -> Dict[str, Any]: - context = await self._on_send_membership_event( + event, context = await self._on_send_membership_event( origin, content, Membership.JOIN, room_id ) @@ -597,6 +598,7 @@ async def on_send_join_request( time_now = self._clock.time_msec() return { + "org.matrix.msc3083.v2.event": event.get_pdu_json(), "state": [p.get_pdu_json(time_now) for p in state.values()], "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain], } @@ -681,7 +683,7 @@ async def on_send_knock_request( Returns: The stripped room state. """ - event_context = await self._on_send_membership_event( + _, context = await self._on_send_membership_event( origin, content, Membership.KNOCK, room_id ) @@ -690,14 +692,14 @@ async def on_send_knock_request( # related to the room while the knock request is pending. stripped_room_state = ( await self.store.get_stripped_room_state_from_event_context( - event_context, self._room_prejoin_state_types + context, self._room_prejoin_state_types ) ) return {"knock_state_events": stripped_room_state} async def _on_send_membership_event( self, origin: str, content: JsonDict, membership_type: str, room_id: str - ) -> EventContext: + ) -> Tuple[EventBase, EventContext]: """Handle an on_send_{join,leave,knock} request Does some preliminary validation before passing the request on to the @@ -712,7 +714,7 @@ async def _on_send_membership_event( in the event Returns: - The context of the event after inserting it into the room graph. + The event and context of the event after inserting it into the room graph. Raises: SynapseError if there is a problem with the request, including things like @@ -748,6 +750,33 @@ async def _on_send_membership_event( logger.debug("_on_send_membership_event: pdu sigs: %s", event.signatures) + # Sign the event since we're vouching on behalf of the remote server that + # the event is valid to be sent into the room. Currently this is only done + # if the user is being joined via restricted join rules. + if ( + room_version.msc3083_join_rules + and event.membership == Membership.JOIN + and "join_authorised_via_users_server" in event.content + ): + # We can only authorise our own users. 
+ authorising_server = get_domain_from_id( + event.content["join_authorised_via_users_server"] + ) + if authorising_server != self.server_name: + raise SynapseError( + 400, + f"Cannot authorise request from resident server: {authorising_server}", + ) + + event.signatures.update( + compute_event_signature( + room_version, + event.get_pdu_json(), + self.hs.hostname, + self.hs.signing_key, + ) + ) + event = await self._check_sigs_and_hash(room_version, event) return await self.handler.on_send_membership_event(origin, event) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index e73bdb52b33c..6a8d3ad4fe6d 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -1219,8 +1219,26 @@ def _create_v2_path(path: str, *args: str) -> str: class SendJoinResponse: """The parsed response of a `/send_join` request.""" + # The list of auth events from the /send_join response. auth_events: List[EventBase] + # The list of state from the /send_join response. state: List[EventBase] + # The raw join event from the /send_join response. + event_dict: JsonDict + # The parsed join event from the /send_join response. This will be None if + # "event" is not included in the response. + event: Optional[EventBase] = None + + +@ijson.coroutine +def _event_parser(event_dict: JsonDict): + """Helper function for use with `ijson.kvitems_coro` to parse key-value pairs + to add them to a given dictionary. + """ + + while True: + key, value = yield + event_dict[key] = value @ijson.coroutine @@ -1246,7 +1264,8 @@ class SendJoinParser(ByteParser[SendJoinResponse]): CONTENT_TYPE = "application/json" def __init__(self, room_version: RoomVersion, v1_api: bool): - self._response = SendJoinResponse([], []) + self._response = SendJoinResponse([], [], {}) + self._room_version = room_version # The V1 API has the shape of `[200, {...}]`, which we handle by # prefixing with `item.*`. @@ -1260,12 +1279,21 @@ def __init__(self, room_version: RoomVersion, v1_api: bool): _event_list_parser(room_version, self._response.auth_events), prefix + "auth_chain.item", ) + self._coro_event = ijson.kvitems_coro( + _event_parser(self._response.event_dict), + prefix + "org.matrix.msc3083.v2.event", + ) def write(self, data: bytes) -> int: self._coro_state.send(data) self._coro_auth.send(data) + self._coro_event.send(data) return len(data) def finish(self) -> SendJoinResponse: + if self._response.event_dict: + self._response.event = make_event_from_dict( + self._response.event_dict, self._room_version + ) return self._response diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 41dbdfd0a1b6..53fac1f8a3a4 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging from typing import TYPE_CHECKING, Collection, List, Optional, Union from synapse import event_auth @@ -20,16 +21,18 @@ Membership, RestrictedJoinRuleTypes, ) -from synapse.api.errors import AuthError +from synapse.api.errors import AuthError, Codes, SynapseError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase from synapse.events.builder import EventBuilder -from synapse.types import StateMap +from synapse.types import StateMap, get_domain_from_id from synapse.util.metrics import Measure if TYPE_CHECKING: from synapse.server import HomeServer +logger = logging.getLogger(__name__) + class EventAuthHandler: """ @@ -39,6 +42,7 @@ class EventAuthHandler: def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() self._store = hs.get_datastore() + self._server_name = hs.hostname async def check_from_context( self, room_version: str, event, context, do_sig_check=True @@ -81,15 +85,76 @@ def compute_auth_events( # introduce undesirable "state reset" behaviour. # # All of which sounds a bit tricky so we don't bother for now. - auth_ids = [] - for etype, state_key in event_auth.auth_types_for_event(event): + for etype, state_key in event_auth.auth_types_for_event( + event.room_version, event + ): auth_ev_id = current_state_ids.get((etype, state_key)) if auth_ev_id: auth_ids.append(auth_ev_id) return auth_ids + async def get_user_which_could_invite( + self, room_id: str, current_state_ids: StateMap[str] + ) -> str: + """ + Searches the room state for a local user who has the power level necessary + to invite other users. + + Args: + room_id: The room ID under search. + current_state_ids: The current state of the room. + + Returns: + The MXID of the user which could issue an invite. + + Raises: + SynapseError if no appropriate user is found. + """ + power_level_event_id = current_state_ids.get((EventTypes.PowerLevels, "")) + invite_level = 0 + users_default_level = 0 + if power_level_event_id: + power_level_event = await self._store.get_event(power_level_event_id) + invite_level = power_level_event.content.get("invite", invite_level) + users_default_level = power_level_event.content.get( + "users_default", users_default_level + ) + users = power_level_event.content.get("users", {}) + else: + users = {} + + # Find the user with the highest power level. + users_in_room = await self._store.get_users_in_room(room_id) + # Only interested in local users. + local_users_in_room = [ + u for u in users_in_room if get_domain_from_id(u) == self._server_name + ] + chosen_user = max( + local_users_in_room, + key=lambda user: users.get(user, users_default_level), + default=None, + ) + + # Return the chosen if they can issue invites. + user_power_level = users.get(chosen_user, users_default_level) + if chosen_user and user_power_level >= invite_level: + logger.debug( + "Found a user who can issue invites %s with power level %d >= invite level %d", + chosen_user, + user_power_level, + invite_level, + ) + return chosen_user + + # No user was found. + raise SynapseError( + 400, + "Unable to find a user which could issue an invite", + Codes.UNABLE_TO_GRANT_JOIN, + ) + async def check_host_in_room(self, room_id: str, host: str) -> bool: with Measure(self._clock, "check_host_in_room"): return await self._store.is_host_joined(room_id, host) @@ -134,6 +199,18 @@ async def check_restricted_join_rules( # in any of them. 
allowed_rooms = await self.get_rooms_that_allow_join(state_ids) if not await self.is_user_in_rooms(allowed_rooms, user_id): + + # If this is a remote request, the user might be in an allowed room + # that we do not know about. + if get_domain_from_id(user_id) != self._server_name: + for room_id in allowed_rooms: + if not await self._store.is_host_joined(room_id, self._server_name): + raise SynapseError( + 400, + f"Unable to check if {user_id} is in allowed rooms.", + Codes.UNABLE_AUTHORISE_JOIN, + ) + raise AuthError( 403, "You do not belong to any of the required rooms to join this room.", diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57287199091d..aba095d2e120 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1494,9 +1494,10 @@ async def do_invite_join( host_list, event, room_version_obj ) - origin = ret["origin"] - state = ret["state"] - auth_chain = ret["auth_chain"] + event = ret.event + origin = ret.origin + state = ret.state + auth_chain = ret.auth_chain auth_chain.sort(key=lambda e: e.depth) logger.debug("do_invite_join auth_chain: %s", auth_chain) @@ -1676,7 +1677,7 @@ async def on_make_join_request( # checking the room version will check that we've actually heard of the room # (and return a 404 otherwise) - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) # now check that we are *still* in the room is_in_room = await self._event_auth_handler.check_host_in_room( @@ -1691,8 +1692,38 @@ async def on_make_join_request( event_content = {"membership": Membership.JOIN} + # If the current room is using restricted join rules, additional information + # may need to be included in the event content in order to efficiently + # validate the event. + # + # Note that this requires the /send_join request to come back to the + # same server. + if room_version.msc3083_join_rules: + state_ids = await self.store.get_current_state_ids(room_id) + if await self._event_auth_handler.has_restricted_join_rules( + state_ids, room_version + ): + prev_member_event_id = state_ids.get((EventTypes.Member, user_id), None) + # If the user is invited or joined to the room already, then + # no additional info is needed. + include_auth_user_id = True + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + include_auth_user_id = prev_member_event.membership not in ( + Membership.JOIN, + Membership.INVITE, + ) + + if include_auth_user_id: + event_content[ + "join_authorised_via_users_server" + ] = await self._event_auth_handler.get_user_which_could_invite( + room_id, + state_ids, + ) + builder = self.event_builder_factory.new( - room_version, + room_version.identifier, { "type": EventTypes.Member, "content": event_content, @@ -1710,10 +1741,13 @@ async def on_make_join_request( logger.warning("Failed to create join to %s because %s", room_id, e) raise + # Ensure the user can even join the room. + await self._check_join_restrictions(context, event) + # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_join_request` await self._event_auth_handler.check_from_context( - room_version, event, context, do_sig_check=False + room_version.identifier, event, context, do_sig_check=False ) return event @@ -1958,7 +1992,7 @@ async def on_make_knock_request( @log_function async def on_send_membership_event( self, origin: str, event: EventBase - ) -> EventContext: + ) -> Tuple[EventBase, EventContext]: """ We have received a join/leave/knock event for a room via send_join/leave/knock. @@ -1981,7 +2015,7 @@ async def on_send_membership_event( event: The member event that has been signed by the remote homeserver. Returns: - The context of the event after inserting it into the room graph. + The event and context of the event after inserting it into the room graph. Raises: SynapseError if the event is not accepted into the room @@ -2037,7 +2071,7 @@ async def on_send_membership_event( # all looks good, we can persist the event. await self._run_push_actions_and_persist_event(event, context) - return context + return event, context async def _check_join_restrictions( self, context: EventContext, event: EventBase @@ -2473,7 +2507,7 @@ async def _check_for_soft_fail( ) # Now check if event pass auth against said current state - auth_types = auth_types_for_event(event) + auth_types = auth_types_for_event(room_version_obj, event) current_state_ids_list = [ e for k, e in current_state_ids.items() if k in auth_types ] diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 11925916094b..65ad3efa6a60 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -16,7 +16,7 @@ import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple from synapse import types from synapse.api.constants import AccountDataTypes, EventTypes, Membership @@ -28,6 +28,7 @@ SynapseError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.types import ( @@ -340,16 +341,10 @@ async def _local_membership_update( if event.membership == Membership.JOIN: newly_joined = True - prev_member_event = None if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) newly_joined = prev_member_event.membership != Membership.JOIN - # Check if the member should be allowed access via membership in a space. - await self.event_auth_handler.check_restricted_join_rules( - prev_state_ids, event.room_version, user_id, prev_member_event - ) - # Only rate-limit if the user actually joined the room, otherwise we'll end # up blocking profile updates. if newly_joined and ratelimit: @@ -701,7 +696,11 @@ async def update_membership_locked( # so don't really fit into the general auth process. raise AuthError(403, "Guest access not allowed") - if not is_host_in_room: + # Check if a remote join should be performed. 
+ remote_join, remote_room_hosts = await self._should_perform_remote_join( + target.to_string(), room_id, remote_room_hosts, content, is_host_in_room + ) + if remote_join: if ratelimit: time_now_s = self.clock.time() ( @@ -826,6 +825,106 @@ async def update_membership_locked( outlier=outlier, ) + async def _should_perform_remote_join( + self, + user_id: str, + room_id: str, + remote_room_hosts: List[str], + content: JsonDict, + is_host_in_room: bool, + ) -> Tuple[bool, List[str]]: + """ + Check whether the server should do a remote join (as opposed to a local + join) for a user. + + Generally a remote join is used if: + + * The server is not yet in the room. + * The server is in the room, the room has restricted join rules, the user + is not joined or invited to the room, and the server does not have + another user who is capable of issuing invites. + + Args: + user_id: The user joining the room. + room_id: The room being joined. + remote_room_hosts: A list of remote room hosts. + content: The content to use as the event body of the join. This may + be modified. + is_host_in_room: True if the host is in the room. + + Returns: + A tuple of: + True if a remote join should be performed. False if the join can be + done locally. + + A list of remote room hosts to use. This is an empty list if a + local join is to be done. + """ + # If the host isn't in the room, pass through the prospective hosts. + if not is_host_in_room: + return True, remote_room_hosts + + # If the host is in the room, but not one of the authorised hosts + # for restricted join rules, a remote join must be used. + room_version = await self.store.get_room_version(room_id) + current_state_ids = await self.store.get_current_state_ids(room_id) + + # If restricted join rules are not being used, a local join can always + # be used. + if not await self.event_auth_handler.has_restricted_join_rules( + current_state_ids, room_version + ): + return False, [] + + # If the user is invited to the room or already joined, the join + # event can always be issued locally. + prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None) + prev_member_event = None + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + if prev_member_event.membership in ( + Membership.JOIN, + Membership.INVITE, + ): + return False, [] + + # If the local host has a user who can issue invites, then a local + # join can be done. + # + # If not, generate a new list of remote hosts based on which + # can issue invites. + event_map = await self.store.get_events(current_state_ids.values()) + current_state = { + state_key: event_map[event_id] + for state_key, event_id in current_state_ids.items() + } + allowed_servers = get_servers_from_users( + get_users_which_can_issue_invite(current_state) + ) + + # If the local server is not one of allowed servers, then a remote + # join must be done. Return the list of prospective servers based on + # which can issue invites. + if self.hs.hostname not in allowed_servers: + return True, list(allowed_servers) + + # Ensure the member should be allowed access via membership in a room. + await self.event_auth_handler.check_restricted_join_rules( + current_state_ids, room_version, user_id, prev_member_event + ) + + # If this is going to be a local join, additional information must + # be included in the event content in order to efficiently validate + # the event. 
+ content[ + "join_authorised_via_users_server" + ] = await self.event_auth_handler.get_user_which_could_invite( + room_id, + current_state_ids, + ) + + return False, [] + async def transfer_room_state_on_room_upgrade( self, old_room_id: str, room_id: str ) -> None: @@ -1514,3 +1613,63 @@ async def forget(self, user: UserID, room_id: str) -> None: if membership: await self.store.forget(user_id, room_id) + + +def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]: + """ + Return the list of users which can issue invites. + + This is done by exploring the joined users and comparing their power levels + to the necessyar power level to issue an invite. + + Args: + auth_events: state in force at this point in the room + + Returns: + The users which can issue invites. + """ + invite_level = get_named_level(auth_events, "invite", 0) + users_default_level = get_named_level(auth_events, "users_default", 0) + power_level_event = get_power_level_event(auth_events) + + # Custom power-levels for users. + if power_level_event: + users = power_level_event.content.get("users", {}) + else: + users = {} + + result = [] + + # Check which members are able to invite by ensuring they're joined and have + # the necessary power level. + for (event_type, state_key), event in auth_events.items(): + if event_type != EventTypes.Member: + continue + + if event.membership != Membership.JOIN: + continue + + # Check if the user has a custom power level. + if users.get(state_key, users_default_level) >= invite_level: + result.append(state_key) + + return result + + +def get_servers_from_users(users: List[str]) -> Set[str]: + """ + Resolve a list of users into their servers. + + Args: + users: A list of users. + + Returns: + A set of servers. + """ + servers = set() + for user in users: + try: + servers.add(get_domain_from_id(user)) + except SynapseError: + pass + return servers diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 6223daf5228d..2e15471435ea 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -636,16 +636,20 @@ async def resolve_events_with_store( """ try: with Measure(self.clock, "state._resolve_events") as m: - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + if room_version_obj.state_res == StateResolutionVersions.V1: return await v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events + room_id, + room_version_obj, + state_sets, + event_map, + state_res_store.get_events, ) else: return await v2.resolve_events_with_store( self.clock, room_id, - room_version, + room_version_obj, state_sets, event_map, state_res_store, diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 267193cedf73..92336d7cc8be 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -29,7 +29,7 @@ from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap @@ -41,6 +41,7 @@ async def resolve_events_with_store( room_id: str, + room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_map_factory: Callable[[Iterable[str]], Awaitable[Dict[str, EventBase]]], @@ -104,7 +105,7 @@ async def resolve_events_with_store( # 
get the ids of the auth events which allow us to authenticate the # conflicted state, picking only from the unconflicting state. auth_events = _create_auth_events_from_maps( - unconflicted_state, conflicted_state, state_map + room_version, unconflicted_state, conflicted_state, state_map ) new_needed_events = set(auth_events.values()) @@ -132,7 +133,7 @@ async def resolve_events_with_store( state_map.update(state_map_new) return _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map + room_version, unconflicted_state, conflicted_state, auth_events, state_map ) @@ -187,6 +188,7 @@ def _seperate( def _create_auth_events_from_maps( + room_version: RoomVersion, unconflicted_state: StateMap[str], conflicted_state: StateMap[Set[str]], state_map: Dict[str, EventBase], @@ -194,6 +196,7 @@ def _create_auth_events_from_maps( """ Args: + room_version: The room version. unconflicted_state: The unconflicted state map. conflicted_state: The conflicted state map. state_map: @@ -205,7 +208,9 @@ def _create_auth_events_from_maps( for event_ids in conflicted_state.values(): for event_id in event_ids: if event_id in state_map: - keys = event_auth.auth_types_for_event(state_map[event_id]) + keys = event_auth.auth_types_for_event( + room_version, state_map[event_id] + ) for key in keys: if key not in auth_events: auth_event_id = unconflicted_state.get(key, None) @@ -215,6 +220,7 @@ def _create_auth_events_from_maps( def _resolve_with_state( + room_version: RoomVersion, unconflicted_state_ids: MutableStateMap[str], conflicted_state_ids: StateMap[Set[str]], auth_event_ids: StateMap[str], @@ -235,7 +241,9 @@ def _resolve_with_state( } try: - resolved_state = _resolve_state_events(conflicted_state, auth_events) + resolved_state = _resolve_state_events( + room_version, conflicted_state, auth_events + ) except Exception: logger.exception("Failed to resolve state") raise @@ -248,7 +256,9 @@ def _resolve_with_state( def _resolve_state_events( - conflicted_state: StateMap[List[EventBase]], auth_events: MutableStateMap[EventBase] + room_version: RoomVersion, + conflicted_state: StateMap[List[EventBase]], + auth_events: MutableStateMap[EventBase], ) -> StateMap[EventBase]: """This is where we actually decide which of the conflicted state to use. 
@@ -263,21 +273,27 @@ def _resolve_state_events( if POWER_KEY in conflicted_state: events = conflicted_state[POWER_KEY] logger.debug("Resolving conflicted power levels %r", events) - resolved_state[POWER_KEY] = _resolve_auth_events(events, auth_events) + resolved_state[POWER_KEY] = _resolve_auth_events( + room_version, events, auth_events + ) auth_events.update(resolved_state) for key, events in conflicted_state.items(): if key[0] == EventTypes.JoinRules: logger.debug("Resolving conflicted join rules %r", events) - resolved_state[key] = _resolve_auth_events(events, auth_events) + resolved_state[key] = _resolve_auth_events( + room_version, events, auth_events + ) auth_events.update(resolved_state) for key, events in conflicted_state.items(): if key[0] == EventTypes.Member: logger.debug("Resolving conflicted member lists %r", events) - resolved_state[key] = _resolve_auth_events(events, auth_events) + resolved_state[key] = _resolve_auth_events( + room_version, events, auth_events + ) auth_events.update(resolved_state) @@ -290,12 +306,14 @@ def _resolve_state_events( def _resolve_auth_events( - events: List[EventBase], auth_events: StateMap[EventBase] + room_version: RoomVersion, events: List[EventBase], auth_events: StateMap[EventBase] ) -> EventBase: reverse = list(reversed(_ordered_events(events))) auth_keys = { - key for event in events for key in event_auth.auth_types_for_event(event) + key + for event in events + for key in event_auth.auth_types_for_event(room_version, event) } new_auth_events = {} diff --git a/synapse/state/v2.py b/synapse/state/v2.py index e66e6571c8d9..7b1e8361def4 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -36,7 +36,7 @@ from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap from synapse.util import Clock @@ -53,7 +53,7 @@ async def resolve_events_with_store( clock: Clock, room_id: str, - room_version: str, + room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_res_store: "synapse.state.StateResolutionStore", @@ -497,7 +497,7 @@ def _get_power_order(event_id): async def _iterative_auth_checks( clock: Clock, room_id: str, - room_version: str, + room_version: RoomVersion, event_ids: List[str], base_state: StateMap[str], event_map: Dict[str, EventBase], @@ -519,7 +519,6 @@ async def _iterative_auth_checks( Returns the final updated state """ resolved_state = dict(base_state) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] for idx, event_id in enumerate(event_ids, start=1): event = event_map[event_id] @@ -538,7 +537,7 @@ async def _iterative_auth_checks( if ev.rejected_reason is None: auth_events[(ev.type, ev.state_key)] = ev - for key in event_auth.auth_types_for_event(event): + for key in event_auth.auth_types_for_event(room_version, event): if key in resolved_state: ev_id = resolved_state[key] ev = await _get_event(room_id, ev_id, event_map, state_res_store) @@ -548,7 +547,7 @@ async def _iterative_auth_checks( try: event_auth.check( - room_version_obj, + room_version, event, auth_events, do_sig_check=False, diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 43fc79ca746d..8370a2719518 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -484,7 +484,7 @@ def do_check(self, events, edges, 
expected_state_ids): state_d = resolve_events_with_store( FakeClock(), ROOM_ID, - RoomVersions.V2.identifier, + RoomVersions.V2, [state_at_event[n] for n in prev_events], event_map=event_map, state_res_store=TestStateResolutionStore(event_map), @@ -496,7 +496,7 @@ def do_check(self, events, edges, expected_state_ids): if fake_event.state_key is not None: state_after[(fake_event.type, fake_event.state_key)] = event_id - auth_types = set(auth_types_for_event(fake_event)) + auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) auth_events = [] for key in auth_types: @@ -633,7 +633,7 @@ def test_event_map_none(self): state_d = resolve_events_with_store( FakeClock(), ROOM_ID, - RoomVersions.V2.identifier, + RoomVersions.V2, [self.state_at_bob, self.state_at_charlie], event_map=None, state_res_store=TestStateResolutionStore(self.event_map), diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index dbacce4380f2..8c95a0a2fb1f 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional +from typing import List, Optional from canonicaljson import json @@ -234,8 +234,8 @@ def __init__(self, base_builder, event_id): async def build( self, - prev_event_ids, - auth_event_ids, + prev_event_ids: List[str], + auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ): built_event = await self._base_builder.build( diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index f73306ecc4e7..e5550aec4d2a 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -351,7 +351,11 @@ def test_join_rules_msc3083_restricted(self): """ Test joining a restricted room from MSC3083. - This is pretty much the same test as public. + This is similar to the public test, but has some additional checks on + signatures. + + The checks which care about signatures fake them by simply adding an + object of the proper form, not generating valid signatures. """ creator = "@creator:example.com" pleb = "@joiner:example.com" @@ -359,6 +363,7 @@ def test_join_rules_msc3083_restricted(self): auth_events = { ("m.room.create", ""): _create_event(creator), ("m.room.member", creator): _join_event(creator), + ("m.room.power_levels", ""): _power_levels_event(creator, {"invite": 0}), ("m.room.join_rules", ""): _join_rules_event(creator, "restricted"), } @@ -371,19 +376,81 @@ def test_join_rules_msc3083_restricted(self): do_sig_check=False, ) - # Check join. + # A properly formatted join event should work. + authorised_join_event = _join_event( + pleb, + additional_content={ + "join_authorised_via_users_server": "@creator:example.com" + }, + ) event_auth.check( RoomVersions.MSC3083, - _join_event(pleb), + authorised_join_event, auth_events, do_sig_check=False, ) - # A user cannot be force-joined to a room. + # A join issued by a specific user works (i.e. the power level checks + # are done properly). 
+ pl_auth_events = auth_events.copy() + pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( + creator, {"invite": 100, "users": {"@inviter:foo.test": 150}} + ) + pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event( + "@inviter:foo.test" + ) + event_auth.check( + RoomVersions.MSC3083, + _join_event( + pleb, + additional_content={ + "join_authorised_via_users_server": "@inviter:foo.test" + }, + ), + pl_auth_events, + do_sig_check=False, + ) + + # A join which is missing an authorised server is rejected. with self.assertRaises(AuthError): event_auth.check( RoomVersions.MSC3083, - _member_event(pleb, "join", sender=creator), + _join_event(pleb), + auth_events, + do_sig_check=False, + ) + + # An join authorised by a user who is not in the room is rejected. + pl_auth_events = auth_events.copy() + pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( + creator, {"invite": 100, "users": {"@other:example.com": 150}} + ) + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.MSC3083, + _join_event( + pleb, + additional_content={ + "join_authorised_via_users_server": "@other:example.com" + }, + ), + auth_events, + do_sig_check=False, + ) + + # A user cannot be force-joined to a room. (This uses an event which + # *would* be valid, but is sent be a different user.) + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.MSC3083, + _member_event( + pleb, + "join", + sender=creator, + additional_content={ + "join_authorised_via_users_server": "@inviter:foo.test" + }, + ), auth_events, do_sig_check=False, ) @@ -393,7 +460,7 @@ def test_join_rules_msc3083_restricted(self): with self.assertRaises(AuthError): event_auth.check( RoomVersions.MSC3083, - _join_event(pleb), + authorised_join_event, auth_events, do_sig_check=False, ) @@ -402,12 +469,13 @@ def test_join_rules_msc3083_restricted(self): auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") event_auth.check( RoomVersions.MSC3083, - _join_event(pleb), + authorised_join_event, auth_events, do_sig_check=False, ) - # A user can send a join if they're in the room. + # A user can send a join if they're in the room. (This doesn't need to + # be authorised since the user is already joined.) auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") event_auth.check( RoomVersions.MSC3083, @@ -416,7 +484,8 @@ def test_join_rules_msc3083_restricted(self): do_sig_check=False, ) - # A user can accept an invite. + # A user can accept an invite. (This doesn't need to be authorised since + # the user was invited.) 
auth_events[("m.room.member", pleb)] = _member_event( pleb, "invite", sender=creator ) @@ -446,7 +515,10 @@ def _create_event(user_id: str) -> EventBase: def _member_event( - user_id: str, membership: str, sender: Optional[str] = None + user_id: str, + membership: str, + sender: Optional[str] = None, + additional_content: Optional[dict] = None, ) -> EventBase: return make_event_from_dict( { @@ -455,14 +527,14 @@ def _member_event( "type": "m.room.member", "sender": sender or user_id, "state_key": user_id, - "content": {"membership": membership}, + "content": {"membership": membership, **(additional_content or {})}, "prev_events": [], } ) -def _join_event(user_id: str) -> EventBase: - return _member_event(user_id, "join") +def _join_event(user_id: str, additional_content: Optional[dict] = None) -> EventBase: + return _member_event(user_id, "join", additional_content=additional_content) def _power_levels_event(sender: str, content: JsonDict) -> EventBase: From b7186c6e8ddacc328ae2c155162b36291a3c2b79 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 26 Jul 2021 12:49:53 -0400 Subject: [PATCH 20/61] Add type hints to state handler. (#10482) --- changelog.d/10482.misc | 1 + synapse/state/__init__.py | 26 ++++++++++++++---------- synapse/storage/databases/state/store.py | 17 ++++++++++------ synapse/storage/state.py | 4 ++-- 4 files changed, 29 insertions(+), 19 deletions(-) create mode 100644 changelog.d/10482.misc diff --git a/changelog.d/10482.misc b/changelog.d/10482.misc new file mode 100644 index 000000000000..4e9e2126e173 --- /dev/null +++ b/changelog.d/10482.misc @@ -0,0 +1 @@ +Additional type hints in the state handler. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 2e15471435ea..463ce58dae45 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -16,6 +16,7 @@ import logging from collections import defaultdict, namedtuple from typing import ( + TYPE_CHECKING, Any, Awaitable, Callable, @@ -52,6 +53,10 @@ from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import Measure, measure_func +if TYPE_CHECKING: + from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore + logger = logging.getLogger(__name__) metrics_logger = logging.getLogger("synapse.state.metrics") @@ -74,7 +79,7 @@ POWER_KEY = (EventTypes.PowerLevels, "") -def _gen_state_id(): +def _gen_state_id() -> str: global _NEXT_STATE_ID s = "X%d" % (_NEXT_STATE_ID,) _NEXT_STATE_ID += 1 @@ -109,7 +114,7 @@ def __init__( # `state_id` is either a state_group (and so an int) or a string. This # ensures we don't accidentally persist a state_id as a stateg_group if state_group: - self.state_id = state_group + self.state_id: Union[str, int] = state_group else: self.state_id = _gen_state_id() @@ -122,7 +127,7 @@ class StateHandler: where necessary """ - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.store = hs.get_datastore() self.state_store = hs.get_storage().state @@ -507,7 +512,7 @@ class StateResolutionHandler: be storage-independent. 
""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.resolve_linearizer = Linearizer(name="state_resolve_lock") @@ -657,13 +662,15 @@ async def resolve_events_with_store( finally: self._record_state_res_metrics(room_id, m.get_resource_usage()) - def _record_state_res_metrics(self, room_id: str, rusage: ContextResourceUsage): + def _record_state_res_metrics( + self, room_id: str, rusage: ContextResourceUsage + ) -> None: room_metrics = self._state_res_metrics[room_id] room_metrics.cpu_time += rusage.ru_utime + rusage.ru_stime room_metrics.db_time += rusage.db_txn_duration_sec room_metrics.db_events += rusage.evt_db_fetch_count - def _report_metrics(self): + def _report_metrics(self) -> None: if not self._state_res_metrics: # no state res has happened since the last iteration: don't bother logging. return @@ -773,16 +780,13 @@ def _make_state_cache_entry( ) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class StateResolutionStore: """Interface that allows state resolution algorithms to access the database in well defined way. - - Args: - store (DataStore) """ - store = attr.ib() + store: "DataStore" def get_events( self, event_ids: Iterable[str], allow_rejected: bool = False diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index e38461adbc72..f839c0c24f1c 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -372,18 +372,23 @@ def _insert_into_cache( ) async def store_state_group( - self, event_id, room_id, prev_group, delta_ids, current_state_ids + self, + event_id: str, + room_id: str, + prev_group: Optional[int], + delta_ids: Optional[StateMap[str]], + current_state_ids: StateMap[str], ) -> int: """Store a new set of state, returning a newly assigned state group. Args: - event_id (str): The event ID for which the state was calculated - room_id (str) - prev_group (int|None): A previous state group for the room, optional. - delta_ids (dict|None): The delta between state at `prev_group` and + event_id: The event ID for which the state was calculated + room_id + prev_group: A previous state group for the room, optional. + delta_ids: The delta between state at `prev_group` and `current_state_ids`, if `prev_group` was given. Same format as `current_state_ids`. - current_state_ids (dict): The state to store. Map of (type, state_key) + current_state_ids: The state to store. Map of (type, state_key) to event_id. Returns: diff --git a/synapse/storage/state.py b/synapse/storage/state.py index f8fbba9d381e..e5400d681a65 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -570,8 +570,8 @@ async def store_state_group( event_id: str, room_id: str, prev_group: Optional[int], - delta_ids: Optional[dict], - current_state_ids: dict, + delta_ids: Optional[StateMap[str]], + current_state_ids: StateMap[str], ) -> int: """Store a new set of state, returning a newly assigned state group. From b3a757eb3b11151b1fac7833d6be239c9084f725 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 26 Jul 2021 23:28:20 -0600 Subject: [PATCH 21/61] Support MSC2033: Device ID on whoami (#9918) * Fix no-access-token bug in deactivation tests * Support MSC2033: Device ID on whoami * Test for appservices too MSC: https://github.com/matrix-org/matrix-doc/pull/2033 The MSC has passed FCP, which means stable endpoints can be used. 
--- changelog.d/9918.feature | 1 + synapse/rest/client/v2_alpha/account.py | 9 ++++- tests/rest/client/v2_alpha/test_account.py | 43 +++++++++++++++++++++- 3 files changed, 51 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9918.feature diff --git a/changelog.d/9918.feature b/changelog.d/9918.feature new file mode 100644 index 000000000000..98f0a5089321 --- /dev/null +++ b/changelog.d/9918.feature @@ -0,0 +1 @@ +Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 085561d3e9d8..fb5ad2906eaa 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -884,7 +884,14 @@ def __init__(self, hs): async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) - return 200, {"user_id": requester.user.to_string()} + response = {"user_id": requester.user.to_string()} + + # Appservices and similar accounts do not have device IDs + # that we can report on, so exclude them for compliance. + if requester.device_id is not None: + response["device_id"] = requester.device_id + + return 200, response def register_servlets(hs, http_server): diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 4ef19145d1b9..317a2287e376 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -24,6 +24,7 @@ import synapse.rest.admin from synapse.api.constants import LoginType, Membership from synapse.api.errors import Codes, HttpResponseException +from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import account, register from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource @@ -397,7 +398,7 @@ def test_deactivate_account(self): self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id))) # Check that this access token has been invalidated. 
- channel = self.make_request("GET", "account/whoami")
+ channel = self.make_request("GET", "account/whoami", access_token=tok)
 self.assertEqual(channel.code, 401)

 def test_pending_invites(self):
@@ -458,6 +459,46 @@ def deactivate(self, user_id, tok):
 self.assertEqual(channel.code, 200)


+class WhoamiTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ account.register_servlets,
+ register.register_servlets,
+ ]
+
+ def test_GET_whoami(self):
+ device_id = "wouldgohere"
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test", device_id=device_id)
+
+ whoami = self.whoami(tok)
+ self.assertEqual(whoami, {"user_id": user_id, "device_id": device_id})
+
+ def test_GET_whoami_appservices(self):
+ user_id = "@as:test"
+ as_token = "i_am_an_app_service"
+
+ appservice = ApplicationService(
+ as_token,
+ self.hs.config.server_name,
+ id="1234",
+ namespaces={"users": [{"regex": user_id, "exclusive": True}]},
+ sender=user_id,
+ )
+ self.hs.get_datastore().services_cache.append(appservice)
+
+ whoami = self.whoami(as_token)
+ self.assertEqual(whoami, {"user_id": user_id})
+ self.assertNotIn("device_id", whoami)
+
+ def whoami(self, tok):
+ channel = self.make_request("GET", "account/whoami", {}, access_token=tok)
+ self.assertEqual(channel.code, 200)
+ return channel.json_body
+
+
 class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
 servlets = [

From 2476d5373cde3a881b6f8f3ccc5d19707e9f600d Mon Sep 17 00:00:00 2001
From: Denis Kasak
Date: Tue, 27 Jul 2021 11:45:10 +0000
Subject: [PATCH 22/61] Mitigate media repo XSSs on IE11. (#10468)

IE11 doesn't support Content-Security-Policy but it has support for a
non-standard X-Content-Security-Policy header, which only supports the
sandbox directive. This prevents script execution, so it at least offers
some protection against media repo-based attacks.

Signed-off-by: Denis Kasak
---
 changelog.d/10468.misc | 1 +
 synapse/rest/media/v1/download_resource.py | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/10468.misc

diff --git a/changelog.d/10468.misc b/changelog.d/10468.misc
new file mode 100644
index 000000000000..b9854bb4c16c
--- /dev/null
+++ b/changelog.d/10468.misc
@@ -0,0 +1 @@
+Mitigate media repo XSS attacks on IE11 via the non-standard X-Content-Security-Policy header.

diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py
index cd2468f9c59a..d6d938953e44 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/v1/download_resource.py
@@ -49,6 +49,8 @@ async def _async_render_GET(self, request: Request) -> None:
 b" media-src 'self';"
 b" object-src 'self';",
 )
+ # Limited non-standard form of CSP for IE11
+ request.setHeader(b"X-Content-Security-Policy", b"sandbox;")
 request.setHeader(
 b"Referrer-Policy",
 b"no-referrer",

From 13944678c3c696418bfde3463bda4cedc8d289c2 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 27 Jul 2021 08:08:51 -0400
Subject: [PATCH 23/61] Use new go test running syntax for complement. (#10488)

Updates CI and the helper script to ensure all tests are run (in
parallel).
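Concretely, the only functional change (visible in the diff below) is the package pattern passed to `go test`:

```sh
# Old invocation: runs only the tests in the top-level ./tests package.
go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests
# New invocation: the ./tests/... wildcard recurses into every subpackage,
# and Go runs the per-package test binaries in parallel by default.
go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests/...
```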
--- .github/workflows/tests.yml | 2 +- changelog.d/10488.misc | 1 + scripts-dev/complement.sh | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10488.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4e61824ee5bc..0a62c62d02c3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -344,7 +344,7 @@ jobs: working-directory: complement/dockerfiles # Run Complement - - run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests + - run: go test -v -tags synapse_blacklist,msc2403,msc2946,msc3083 ./tests/... env: COMPLEMENT_BASE_IMAGE: complement-synapse:latest working-directory: complement diff --git a/changelog.d/10488.misc b/changelog.d/10488.misc new file mode 100644 index 000000000000..a55502c16359 --- /dev/null +++ b/changelog.d/10488.misc @@ -0,0 +1 @@ +Update syntax used to run complement tests. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 4df224be6719..cba015d942ca 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -65,4 +65,4 @@ if [[ -n "$1" ]]; then fi # Run the tests! -go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests +go test -v -tags synapse_blacklist,msc2946,msc3083,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... From e16eab29d671504144f4185d4738e5bfd7a3a2c6 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Tue, 27 Jul 2021 14:32:05 +0100 Subject: [PATCH 24/61] Add a PeriodicallyFlushingMemoryHandler to prevent logging silence (#10407) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/10407.feature | 1 + docs/sample_log_config.yaml | 5 ++- synapse/config/logger.py | 5 ++- synapse/logging/handlers.py | 88 +++++++++++++++++++++++++++++++++++++ 4 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10407.feature create mode 100644 synapse/logging/handlers.py diff --git a/changelog.d/10407.feature b/changelog.d/10407.feature new file mode 100644 index 000000000000..db277d9ecde0 --- /dev/null +++ b/changelog.d/10407.feature @@ -0,0 +1 @@ +Add a buffered logging handler which periodically flushes itself. diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 669e60008113..b088c834057d 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -28,7 +28,7 @@ handlers: # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. buffer: - class: logging.handlers.MemoryHandler + class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler target: file # The capacity is the number of log lines that are buffered before # being written to disk. Increasing this will lead to better @@ -36,6 +36,9 @@ handlers: # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well + # The period of time, in seconds, between forced flushes. + # Messages will not be delayed for longer than this time. + period: 5 # A handler that writes logs to stderr. Unused by default, but can be used # instead of "buffer" and "file" in the logger handlers. diff --git a/synapse/config/logger.py b/synapse/config/logger.py index ad4e6e61c3bf..dcd3ed1dac12 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -71,7 +71,7 @@ # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. 
buffer:
- class: logging.handlers.MemoryHandler
+ class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
 target: file
 # The capacity is the number of log lines that are buffered before
 # being written to disk. Increasing this will lead to better
@@ -79,6 +79,9 @@
 # be written to disk.
 capacity: 10
 flushLevel: 30 # Flush for WARNING logs as well
+ # The period of time, in seconds, between forced flushes.
+ # Messages will not be delayed for longer than this time.
+ period: 5

 # A handler that writes logs to stderr. Unused by default, but can be used
 # instead of "buffer" and "file" in the logger handlers.
diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py
new file mode 100644
index 000000000000..a6c212f300fd
--- /dev/null
+++ b/synapse/logging/handlers.py
@@ -0,0 +1,88 @@
+import logging
+import time
+from logging import Handler, LogRecord
+from logging.handlers import MemoryHandler
+from threading import Thread
+from typing import Optional
+
+from twisted.internet.interfaces import IReactorCore
+
+
+class PeriodicallyFlushingMemoryHandler(MemoryHandler):
+ """
+ This is a subclass of MemoryHandler that additionally spawns a background
+ thread to periodically flush the buffer.
+
+ This prevents messages from being buffered for too long.
+
+ Additionally, all messages will be immediately flushed if the reactor has
+ not yet been started.
+ """
+
+ def __init__(
+ self,
+ capacity: int,
+ flushLevel: int = logging.ERROR,
+ target: Optional[Handler] = None,
+ flushOnClose: bool = True,
+ period: float = 5.0,
+ reactor: Optional[IReactorCore] = None,
+ ) -> None:
+ """
+ period: the period between automatic flushes
+
+ reactor: if specified, a custom reactor to use. If not specified,
+ defaults to the globally-installed reactor.
+ Log entries will be flushed immediately until this reactor has
+ started.
+ """
+ super().__init__(capacity, flushLevel, target, flushOnClose)
+
+ self._flush_period: float = period
+ self._active: bool = True
+ self._reactor_started = False
+
+ self._flushing_thread: Thread = Thread(
+ name="PeriodicallyFlushingMemoryHandler flushing thread",
+ target=self._flush_periodically,
+ )
+ self._flushing_thread.start()
+
+ def on_reactor_running():
+ self._reactor_started = True
+
+ reactor_to_use: IReactorCore
+ if reactor is None:
+ from twisted.internet import reactor as global_reactor
+
+ reactor_to_use = global_reactor # type: ignore[assignment]
+ else:
+ reactor_to_use = reactor
+
+ # call our hook when the reactor starts up
+ reactor_to_use.callWhenRunning(on_reactor_running)
+
+ def shouldFlush(self, record: LogRecord) -> bool:
+ """
+ Before reactor start-up, log everything immediately.
+ Otherwise, fall back to original behaviour of waiting for the buffer to fill.
+ """
+
+ if self._reactor_started:
+ return super().shouldFlush(record)
+ else:
+ return True
+
+ def _flush_periodically(self):
+ """
+ Whilst this handler is active, flush the handler periodically.
+ """ + + while self._active: + # flush is thread-safe; it acquires and releases the lock internally + self.flush() + time.sleep(self._flush_period) + + def close(self) -> None: + self._active = False + super().close() From 076deade028613da56391758305d645edeab40e5 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 27 Jul 2021 18:31:06 +0200 Subject: [PATCH 25/61] allow specifying https:// proxy (#10411) --- changelog.d/10411.feature | 1 + synapse/http/proxyagent.py | 184 +++++++++------- tests/http/test_proxyagent.py | 398 +++++++++++++++++++++++++++++----- 3 files changed, 450 insertions(+), 133 deletions(-) create mode 100644 changelog.d/10411.feature diff --git a/changelog.d/10411.feature b/changelog.d/10411.feature new file mode 100644 index 000000000000..ef0ab84b17b1 --- /dev/null +++ b/changelog.d/10411.feature @@ -0,0 +1 @@ +Add support for https connections to a proxy server. Contributed by @Bubu and @dklimpel. \ No newline at end of file diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index f7193e60bdbc..19e987f11877 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -14,21 +14,32 @@ import base64 import logging import re -from typing import Optional, Tuple -from urllib.request import getproxies_environment, proxy_bypass_environment +from typing import Any, Dict, Optional, Tuple +from urllib.parse import urlparse +from urllib.request import ( # type: ignore[attr-defined] + getproxies_environment, + proxy_bypass_environment, +) import attr from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS +from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint from twisted.python.failure import Failure -from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase +from twisted.web.client import ( + URI, + BrowserLikePolicyForHTTPS, + HTTPConnectionPool, + _AgentBase, +) from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent, IPolicyForHTTPS +from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint +from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -63,35 +74,38 @@ class ProxyAgent(_AgentBase): reactor might have some blacklisting applied (i.e. for DNS queries), but we need unblocked access to the proxy. - contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the + contextFactory: A factory for TLS contexts, to control the verification parameters of OpenSSL. The default is to use a `BrowserLikePolicyForHTTPS`, so unless you have special requirements you can leave this as-is. - connectTimeout (Optional[float]): The amount of time that this Agent will wait + connectTimeout: The amount of time that this Agent will wait for the peer to accept a connection, in seconds. If 'None', HostnameEndpoint's default (30s) will be used. - This is used for connections to both proxies and destination servers. - bindAddress (bytes): The local address for client sockets to bind to. + bindAddress: The local address for client sockets to bind to. - pool (HTTPConnectionPool|None): connection pool to be used. If None, a + pool: connection pool to be used. If None, a non-persistent pool instance will be created. 
- use_proxy (bool): Whether proxy settings should be discovered and used + use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. + + Raises: + ValueError if use_proxy is set and the environment variables + contain an invalid proxy specification. """ def __init__( self, - reactor, - proxy_reactor=None, + reactor: IReactorCore, + proxy_reactor: Optional[ISynapseReactor] = None, contextFactory: Optional[IPolicyForHTTPS] = None, - connectTimeout=None, - bindAddress=None, - pool=None, - use_proxy=False, + connectTimeout: Optional[float] = None, + bindAddress: Optional[bytes] = None, + pool: Optional[HTTPConnectionPool] = None, + use_proxy: bool = False, ): contextFactory = contextFactory or BrowserLikePolicyForHTTPS() @@ -102,7 +116,7 @@ def __init__( else: self.proxy_reactor = proxy_reactor - self._endpoint_kwargs = {} + self._endpoint_kwargs: Dict[str, Any] = {} if connectTimeout is not None: self._endpoint_kwargs["timeout"] = connectTimeout if bindAddress is not None: @@ -117,16 +131,12 @@ def __init__( https_proxy = proxies["https"].encode() if "https" in proxies else None no_proxy = proxies["no"] if "no" in proxies else None - # Parse credentials from http and https proxy connection string if present - self.http_proxy_creds, http_proxy = parse_username_password(http_proxy) - self.https_proxy_creds, https_proxy = parse_username_password(https_proxy) - - self.http_proxy_endpoint = _http_proxy_endpoint( - http_proxy, self.proxy_reactor, **self._endpoint_kwargs + self.http_proxy_endpoint, self.http_proxy_creds = _http_proxy_endpoint( + http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs ) - self.https_proxy_endpoint = _http_proxy_endpoint( - https_proxy, self.proxy_reactor, **self._endpoint_kwargs + self.https_proxy_endpoint, self.https_proxy_creds = _http_proxy_endpoint( + https_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs ) self.no_proxy = no_proxy @@ -134,7 +144,13 @@ def __init__( self._policy_for_https = contextFactory self._reactor = reactor - def request(self, method, uri, headers=None, bodyProducer=None): + def request( + self, + method: bytes, + uri: bytes, + headers: Optional[Headers] = None, + bodyProducer: Optional[IBodyProducer] = None, + ) -> defer.Deferred: """ Issue a request to the server indicated by the given uri. @@ -146,16 +162,15 @@ def request(self, method, uri, headers=None, bodyProducer=None): See also: twisted.web.iweb.IAgent.request Args: - method (bytes): The request method to use, such as `GET`, `POST`, etc + method: The request method to use, such as `GET`, `POST`, etc - uri (bytes): The location of the resource to request. + uri: The location of the resource to request. - headers (Headers|None): Extra headers to send with the request + headers: Extra headers to send with the request - bodyProducer (IBodyProducer|None): An object which can generate bytes to - make up the body of this request (for example, the properly encoded - contents of a file for a file upload). Or, None if the request is to - have no body. + bodyProducer: An object which can generate bytes to make up the body of + this request (for example, the properly encoded contents of a file for + a file upload). Or, None if the request is to have no body. 
Returns: Deferred[IResponse]: completes when the header of the response has @@ -253,70 +268,89 @@ def request(self, method, uri, headers=None, bodyProducer=None): ) -def _http_proxy_endpoint(proxy: Optional[bytes], reactor, **kwargs): +def _http_proxy_endpoint( + proxy: Optional[bytes], + reactor: IReactorCore, + tls_options_factory: IPolicyForHTTPS, + **kwargs, +) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: """Parses an http proxy setting and returns an endpoint for the proxy Args: - proxy: the proxy setting in the form: [:@][:] - Note that compared to other apps, this function currently lacks support - for specifying a protocol schema (i.e. protocol://...). + proxy: the proxy setting in the form: [scheme://][:@][:] + This currently supports http:// and https:// proxies. + A hostname without scheme is assumed to be http. reactor: reactor to be used to connect to the proxy + tls_options_factory: the TLS options to use when connecting through a https proxy + kwargs: other args to be passed to HostnameEndpoint Returns: - interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy, - or None + a tuple of + endpoint to use to connect to the proxy, or None + ProxyCredentials or if no credentials were found, or None + + Raise: + ValueError if proxy has no hostname or unsupported scheme. """ if proxy is None: - return None + return None, None - # Parse the connection string - host, port = parse_host_port(proxy, default_port=1080) - return HostnameEndpoint(reactor, host, port, **kwargs) + # Note: urlsplit/urlparse cannot be used here as that does not work (for Python + # 3.9+) on scheme-less proxies, e.g. host:port. + scheme, host, port, credentials = parse_proxy(proxy) + proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs) -def parse_username_password(proxy: bytes) -> Tuple[Optional[ProxyCredentials], bytes]: - """ - Parses the username and password from a proxy declaration e.g - username:password@hostname:port. + if scheme == b"https": + tls_options = tls_options_factory.creatorForNetloc(host, port) + proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint) - Args: - proxy: The proxy connection string. + return proxy_endpoint, credentials - Returns - An instance of ProxyCredentials and the proxy connection string with any credentials - stripped, i.e u:p@host:port -> host:port. If no credentials were found, the - ProxyCredentials instance is replaced with None. - """ - if proxy and b"@" in proxy: - # We use rsplit here as the password could contain an @ character - credentials, proxy_without_credentials = proxy.rsplit(b"@", 1) - return ProxyCredentials(credentials), proxy_without_credentials - return None, proxy +def parse_proxy( + proxy: bytes, default_scheme: bytes = b"http", default_port: int = 1080 +) -> Tuple[bytes, bytes, int, Optional[ProxyCredentials]]: + """ + Parse a proxy connection string. + Given a HTTP proxy URL, breaks it down into components and checks that it + has a hostname (otherwise it is not useful to us when trying to find a + proxy) and asserts that the URL has a scheme we support. -def parse_host_port(hostport: bytes, default_port: int = None) -> Tuple[bytes, int]: - """ - Parse the hostname and port from a proxy connection byte string. Args: - hostport: The proxy connection string. Must be in the form 'host[:port]'. - default_port: The default port to return if one is not found in `hostport`. + proxy: The proxy connection string. Must be in the form '[scheme://][:@]host[:port]'. 
+ default_scheme: The default scheme to return if one is not found in `proxy`. Defaults to http + default_port: The default port to return if one is not found in `proxy`. Defaults to 1080 Returns: - A tuple containing the hostname and port. Uses `default_port` if one was not found. + A tuple containing the scheme, hostname, port and ProxyCredentials. + If no credentials were found, the ProxyCredentials instance is replaced with None. + + Raise: + ValueError if proxy has no hostname or unsupported scheme. """ - if b":" in hostport: - host, port = hostport.rsplit(b":", 1) - try: - port = int(port) - return host, port - except ValueError: - # the thing after the : wasn't a valid port; presumably this is an - # IPv6 address. - pass + # First check if we have a scheme present + # Note: urlsplit/urlparse cannot be used (for Python # 3.9+) on scheme-less proxies, e.g. host:port. + if b"://" not in proxy: + proxy = b"".join([default_scheme, b"://", proxy]) + + url = urlparse(proxy) + + if not url.hostname: + raise ValueError("Proxy URL did not contain a hostname! Please specify one.") + + if url.scheme not in (b"http", b"https"): + raise ValueError( + f"Unknown proxy scheme {url.scheme!s}; only 'http' and 'https' is supported." + ) + + credentials = None + if url.username and url.password: + credentials = ProxyCredentials(b"".join([url.username, b":", url.password])) - return hostport, default_port + return url.scheme, url.hostname, url.port or default_port, credentials diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 437113929a13..e5865c161d5e 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -14,19 +14,22 @@ import base64 import logging import os -from typing import Optional +from typing import Iterable, Optional from unittest.mock import patch import treq from netaddr import IPSet +from parameterized import parameterized from twisted.internet import interfaces # noqa: F401 +from twisted.internet.endpoints import HostnameEndpoint, _WrapperEndpoint +from twisted.internet.interfaces import IProtocol, IProtocolFactory from twisted.internet.protocol import Factory -from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel from synapse.http.client import BlacklistingReactorWrapper -from synapse.http.proxyagent import ProxyAgent +from synapse.http.proxyagent import ProxyAgent, ProxyCredentials, parse_proxy from tests.http import TestServerTLSConnectionFactory, get_test_https_policy from tests.server import FakeTransport, ThreadedMemoryReactorClock @@ -37,33 +40,208 @@ HTTPFactory = Factory.forProtocol(HTTPChannel) +class ProxyParserTests(TestCase): + """ + Values for test + [ + proxy_string, + expected_scheme, + expected_hostname, + expected_port, + expected_credentials, + ] + """ + + @parameterized.expand( + [ + # host + [b"localhost", b"http", b"localhost", 1080, None], + [b"localhost:9988", b"http", b"localhost", 9988, None], + # host+scheme + [b"https://localhost", b"https", b"localhost", 1080, None], + [b"https://localhost:1234", b"https", b"localhost", 1234, None], + # ipv4 + [b"1.2.3.4", b"http", b"1.2.3.4", 1080, None], + [b"1.2.3.4:9988", b"http", b"1.2.3.4", 9988, None], + # ipv4+scheme + [b"https://1.2.3.4", b"https", b"1.2.3.4", 1080, None], + [b"https://1.2.3.4:9988", b"https", b"1.2.3.4", 9988, None], + # ipv6 - without brackets is broken + # [ + # b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + # b"http", + # 
b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + # 1080, + # None, + # ], + # [ + # b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + # b"http", + # b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + # 1080, + # None, + # ], + # [b"::1", b"http", b"::1", 1080, None], + # [b"::ffff:0.0.0.0", b"http", b"::ffff:0.0.0.0", 1080, None], + # ipv6 - with brackets + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:effe]", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 1080, + None, + ], + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:1234]", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 1080, + None, + ], + [b"[::1]", b"http", b"::1", 1080, None], + [b"[::ffff:0.0.0.0]", b"http", b"::ffff:0.0.0.0", 1080, None], + # ipv6+port + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:effe]:9988", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 9988, + None, + ], + [ + b"[2001:0db8:85a3:0000:0000:8a2e:0370:1234]:9988", + b"http", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 9988, + None, + ], + [b"[::1]:9988", b"http", b"::1", 9988, None], + [b"[::ffff:0.0.0.0]:9988", b"http", b"::ffff:0.0.0.0", 9988, None], + # ipv6+scheme + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:effe]", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 1080, + None, + ], + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:1234]", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 1080, + None, + ], + [b"https://[::1]", b"https", b"::1", 1080, None], + [b"https://[::ffff:0.0.0.0]", b"https", b"::ffff:0.0.0.0", 1080, None], + # ipv6+scheme+port + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:effe]:9988", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:effe", + 9988, + None, + ], + [ + b"https://[2001:0db8:85a3:0000:0000:8a2e:0370:1234]:9988", + b"https", + b"2001:0db8:85a3:0000:0000:8a2e:0370:1234", + 9988, + None, + ], + [b"https://[::1]:9988", b"https", b"::1", 9988, None], + # with credentials + [ + b"https://user:pass@1.2.3.4:9988", + b"https", + b"1.2.3.4", + 9988, + b"user:pass", + ], + [b"user:pass@1.2.3.4:9988", b"http", b"1.2.3.4", 9988, b"user:pass"], + [ + b"https://user:pass@proxy.local:9988", + b"https", + b"proxy.local", + 9988, + b"user:pass", + ], + [ + b"user:pass@proxy.local:9988", + b"http", + b"proxy.local", + 9988, + b"user:pass", + ], + ] + ) + def test_parse_proxy( + self, + proxy_string: bytes, + expected_scheme: bytes, + expected_hostname: bytes, + expected_port: int, + expected_credentials: Optional[bytes], + ): + """ + Tests that a given proxy URL will be broken into the components. + Args: + proxy_string: The proxy connection string. + expected_scheme: Expected value of proxy scheme. + expected_hostname: Expected value of proxy hostname. + expected_port: Expected value of proxy port. + expected_credentials: Expected value of credentials. 
+ Must be in form ':' or None
+ """
+ proxy_cred = None
+ if expected_credentials:
+ proxy_cred = ProxyCredentials(expected_credentials)
+ self.assertEqual(
+ (
+ expected_scheme,
+ expected_hostname,
+ expected_port,
+ proxy_cred,
+ ),
+ parse_proxy(proxy_string),
+ )
+
+
 class MatrixFederationAgentTests(TestCase):
 def setUp(self):
 self.reactor = ThreadedMemoryReactorClock()

 def _make_connection(
- self, client_factory, server_factory, ssl=False, expected_sni=None
- ):
+ self,
+ client_factory: IProtocolFactory,
+ server_factory: IProtocolFactory,
+ ssl: bool = False,
+ expected_sni: Optional[bytes] = None,
+ tls_sanlist: Optional[Iterable[bytes]] = None,
+ ) -> IProtocol:
 """Builds a test server, and completes the outgoing client connection

 Args:
- client_factory (interfaces.IProtocolFactory): the the factory that the
+ client_factory: the factory that the
 application is trying to use to make the outbound connection. We will
 invoke it to build the client Protocol

- server_factory (interfaces.IProtocolFactory): a factory to build the
+ server_factory: a factory to build the
 server-side protocol

- ssl (bool): If true, we will expect an ssl connection and wrap
+ ssl: If true, we will expect an ssl connection and wrap
 server_factory with a TLSMemoryBIOFactory

- expected_sni (bytes|None): the expected SNI value
+ expected_sni: the expected SNI value
+
+ tls_sanlist: list of SAN entries for the TLS cert presented by the server.
+ Defaults to [b'DNS:test.com']

 Returns:
- IProtocol: the server Protocol returned by server_factory
+ the server Protocol returned by server_factory
 """
 if ssl:
- server_factory = _wrap_server_factory_for_tls(server_factory)
+ server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist)

 server_protocol = server_factory.buildProtocol(None)

@@ -98,22 +276,28 @@ def _make_connection(
 self.assertEqual(
 server_name,
 expected_sni,
- "Expected SNI %s but got %s" % (expected_sni, server_name),
+ f"Expected SNI {expected_sni!s} but got {server_name!s}",
 )

 return http_protocol

- def _test_request_direct_connection(self, agent, scheme, hostname, path):
+ def _test_request_direct_connection(
+ self,
+ agent: ProxyAgent,
+ scheme: bytes,
+ hostname: bytes,
+ path: bytes,
+ ):
 """Runs a test case for a direct connection not going through a proxy.

 Args:
- agent (ProxyAgent): the proxy agent being tested
+ agent: the proxy agent being tested

- scheme (bytes): expected to be either "http" or "https"
+ scheme: expected to be either "http" or "https"

- hostname (bytes): the hostname to connect to in the test
+ hostname: the hostname to connect to in the test

- path (bytes): the path to connect to in the test
+ path: the path to connect to in the test
 """
 is_https = scheme == b"https"

@@ -208,7 +392,7 @@ def test_http_request_via_proxy(self):
 """
 Tests that requests can be made through a proxy.
 """
- self._do_http_request_via_proxy(auth_credentials=None)
+ self._do_http_request_via_proxy(ssl=False, auth_credentials=None)

 @patch.dict(
 os.environ,
@@ -218,12 +402,28 @@ def test_http_request_via_proxy_with_auth(self):
 """
 Tests that authenticated requests can be made through a proxy.
""" - self._do_http_request_via_proxy(auth_credentials="bob:pinkponies") + self._do_http_request_via_proxy(ssl=False, auth_credentials=b"bob:pinkponies") + + @patch.dict( + os.environ, {"http_proxy": "https://proxy.com:8888", "no_proxy": "unused.com"} + ) + def test_http_request_via_https_proxy(self): + self._do_http_request_via_proxy(ssl=True, auth_credentials=None) + + @patch.dict( + os.environ, + { + "http_proxy": "https://bob:pinkponies@proxy.com:8888", + "no_proxy": "unused.com", + }, + ) + def test_http_request_via_https_proxy_with_auth(self): + self._do_http_request_via_proxy(ssl=True, auth_credentials=b"bob:pinkponies") @patch.dict(os.environ, {"https_proxy": "proxy.com", "no_proxy": "unused.com"}) def test_https_request_via_proxy(self): """Tests that TLS-encrypted requests can be made through a proxy""" - self._do_https_request_via_proxy(auth_credentials=None) + self._do_https_request_via_proxy(ssl=False, auth_credentials=None) @patch.dict( os.environ, @@ -231,16 +431,40 @@ def test_https_request_via_proxy(self): ) def test_https_request_via_proxy_with_auth(self): """Tests that authenticated, TLS-encrypted requests can be made through a proxy""" - self._do_https_request_via_proxy(auth_credentials="bob:pinkponies") + self._do_https_request_via_proxy(ssl=False, auth_credentials=b"bob:pinkponies") + + @patch.dict( + os.environ, {"https_proxy": "https://proxy.com", "no_proxy": "unused.com"} + ) + def test_https_request_via_https_proxy(self): + """Tests that TLS-encrypted requests can be made through a proxy""" + self._do_https_request_via_proxy(ssl=True, auth_credentials=None) + + @patch.dict( + os.environ, + {"https_proxy": "https://bob:pinkponies@proxy.com", "no_proxy": "unused.com"}, + ) + def test_https_request_via_https_proxy_with_auth(self): + """Tests that authenticated, TLS-encrypted requests can be made through a proxy""" + self._do_https_request_via_proxy(ssl=True, auth_credentials=b"bob:pinkponies") def _do_http_request_via_proxy( self, - auth_credentials: Optional[str] = None, + ssl: bool = False, + auth_credentials: Optional[bytes] = None, ): + """Send a http request via an agent and check that it is correctly received at + the proxy. The proxy can use either http or https. + Args: + ssl: True if we expect the request to connect via https to proxy + auth_credentials: credentials to authenticate at proxy """ - Tests that requests can be made through a proxy. 
- """ - agent = ProxyAgent(self.reactor, use_proxy=True) + if ssl: + agent = ProxyAgent( + self.reactor, use_proxy=True, contextFactory=get_test_https_policy() + ) + else: + agent = ProxyAgent(self.reactor, use_proxy=True) self.reactor.lookups["proxy.com"] = "1.2.3.5" d = agent.request(b"GET", b"http://test.com") @@ -254,7 +478,11 @@ def _do_http_request_via_proxy( # make a test server, and wire up the client http_server = self._make_connection( - client_factory, _get_test_protocol_factory() + client_factory, + _get_test_protocol_factory(), + ssl=ssl, + tls_sanlist=[b"DNS:proxy.com"] if ssl else None, + expected_sni=b"proxy.com" if ssl else None, ) # the FakeTransport is async, so we need to pump the reactor @@ -272,7 +500,7 @@ def _do_http_request_via_proxy( if auth_credentials is not None: # Compute the correct header value for Proxy-Authorization - encoded_credentials = base64.b64encode(b"bob:pinkponies") + encoded_credentials = base64.b64encode(auth_credentials) expected_header_value = b"Basic " + encoded_credentials # Validate the header's value @@ -295,8 +523,15 @@ def _do_http_request_via_proxy( def _do_https_request_via_proxy( self, - auth_credentials: Optional[str] = None, + ssl: bool = False, + auth_credentials: Optional[bytes] = None, ): + """Send a https request via an agent and check that it is correctly received at + the proxy and client. The proxy can use either http or https. + Args: + ssl: True if we expect the request to connect via https to proxy + auth_credentials: credentials to authenticate at proxy + """ agent = ProxyAgent( self.reactor, contextFactory=get_test_https_policy(), @@ -313,18 +548,15 @@ def _do_https_request_via_proxy( self.assertEqual(host, "1.2.3.5") self.assertEqual(port, 1080) - # make a test HTTP server, and wire up the client + # make a test server to act as the proxy, and wire up the client proxy_server = self._make_connection( - client_factory, _get_test_protocol_factory() + client_factory, + _get_test_protocol_factory(), + ssl=ssl, + tls_sanlist=[b"DNS:proxy.com"] if ssl else None, + expected_sni=b"proxy.com" if ssl else None, ) - - # fish the transports back out so that we can do the old switcheroo - s2c_transport = proxy_server.transport - client_protocol = s2c_transport.other - c2s_transport = client_protocol.transport - - # the FakeTransport is async, so we need to pump the reactor - self.reactor.advance(0) + assert isinstance(proxy_server, HTTPChannel) # now there should be a pending CONNECT request self.assertEqual(len(proxy_server.requests), 1) @@ -340,7 +572,7 @@ def _do_https_request_via_proxy( if auth_credentials is not None: # Compute the correct header value for Proxy-Authorization - encoded_credentials = base64.b64encode(b"bob:pinkponies") + encoded_credentials = base64.b64encode(auth_credentials) expected_header_value = b"Basic " + encoded_credentials # Validate the header's value @@ -352,31 +584,49 @@ def _do_https_request_via_proxy( # tell the proxy server not to close the connection proxy_server.persistent = True - # this just stops the http Request trying to do a chunked response - # request.setHeader(b"Content-Length", b"0") request.finish() - # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel - ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory()) - ssl_protocol = ssl_factory.buildProtocol(None) - http_server = ssl_protocol.wrappedProtocol + # now we make another test server to act as the upstream HTTP server. 
+ server_ssl_protocol = _wrap_server_factory_for_tls( + _get_test_protocol_factory() + ).buildProtocol(None) - ssl_protocol.makeConnection( - FakeTransport(client_protocol, self.reactor, ssl_protocol) - ) - c2s_transport.other = ssl_protocol + # Tell the HTTP server to send outgoing traffic back via the proxy's transport. + proxy_server_transport = proxy_server.transport + server_ssl_protocol.makeConnection(proxy_server_transport) + + # ... and replace the protocol on the proxy's transport with the + # TLSMemoryBIOProtocol for the test server, so that incoming traffic + # to the proxy gets sent over to the HTTP(s) server. + # + # This needs a bit of gut-wrenching, which is different depending on whether + # the proxy is using TLS or not. + # + # (an alternative, possibly more elegant, approach would be to use a custom + # Protocol to implement the proxy, which starts out by forwarding to an + # HTTPChannel (to implement the CONNECT command) and can then be switched + # into a mode where it forwards its traffic to another Protocol.) + if ssl: + assert isinstance(proxy_server_transport, TLSMemoryBIOProtocol) + proxy_server_transport.wrappedProtocol = server_ssl_protocol + else: + assert isinstance(proxy_server_transport, FakeTransport) + client_protocol = proxy_server_transport.other + c2s_transport = client_protocol.transport + c2s_transport.other = server_ssl_protocol self.reactor.advance(0) - server_name = ssl_protocol._tlsConnection.get_servername() + server_name = server_ssl_protocol._tlsConnection.get_servername() expected_sni = b"test.com" self.assertEqual( server_name, expected_sni, - "Expected SNI %s but got %s" % (expected_sni, server_name), + f"Expected SNI {expected_sni!s} but got {server_name!s}", ) # now there should be a pending request + http_server = server_ssl_protocol.wrappedProtocol self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] @@ -510,7 +760,7 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self): self.assertEqual( server_name, expected_sni, - "Expected SNI %s but got %s" % (expected_sni, server_name), + f"Expected SNI {expected_sni!s} but got {server_name!s}", ) # now there should be a pending request @@ -529,16 +779,48 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self): body = self.successResultOf(treq.content(resp)) self.assertEqual(body, b"result") + @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) + def test_proxy_with_no_scheme(self): + http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) + self.assertIsInstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) + self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") + self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + + @patch.dict(os.environ, {"http_proxy": "socks://proxy.com:8888"}) + def test_proxy_with_unsupported_scheme(self): + with self.assertRaises(ValueError): + ProxyAgent(self.reactor, use_proxy=True) + + @patch.dict(os.environ, {"http_proxy": "http://proxy.com:8888"}) + def test_proxy_with_http_scheme(self): + http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) + self.assertIsInstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) + self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") + self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + + @patch.dict(os.environ, {"http_proxy": "https://proxy.com:8888"}) + def test_proxy_with_https_scheme(self): + https_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) + 
self.assertIsInstance(https_proxy_agent.http_proxy_endpoint, _WrapperEndpoint) + self.assertEqual( + https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._hostStr, "proxy.com" + ) + self.assertEqual( + https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._port, 8888 + ) + -def _wrap_server_factory_for_tls(factory, sanlist=None): +def _wrap_server_factory_for_tls( + factory: IProtocolFactory, sanlist: Iterable[bytes] = None +) -> IProtocolFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate signed by our test CA, valid for the domains in `sanlist` Args: - factory (interfaces.IProtocolFactory): protocol factory to wrap - sanlist (iterable[bytes]): list of domains the cert should be valid for + factory: protocol factory to wrap + sanlist: list of domains the cert should be valid for Returns: interfaces.IProtocolFactory @@ -552,7 +834,7 @@ def _wrap_server_factory_for_tls(factory, sanlist=None): ) -def _get_test_protocol_factory(): +def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel Returns: @@ -566,6 +848,6 @@ def _get_test_protocol_factory(): return server_factory -def _log_request(request): +def _log_request(request: str): """Implements Factory.log, which is expected by Request.finish""" - logger.info("Completed request %s", request) + logger.info(f"Completed request {request}") From 5b22d5ee033f2c251bb06d2bd9e0e729df89f90f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Jul 2021 18:01:04 +0100 Subject: [PATCH 26/61] Fix `oldest_pdu_in_federation_staging` (#10455) If the staging area was empty we'd report an age of 51 years, which is not true or helpful. --- changelog.d/10455.bugfix | 1 + synapse/storage/databases/main/event_federation.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10455.bugfix diff --git a/changelog.d/10455.bugfix b/changelog.d/10455.bugfix new file mode 100644 index 000000000000..23c74a3c8912 --- /dev/null +++ b/changelog.d/10455.bugfix @@ -0,0 +1 @@ +Fix `synapse_federation_server_oldest_inbound_pdu_in_staging` Prometheus metric to not report a max age of 51 years when the queue is empty. diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index d39368c20ed6..f4a00b0736cf 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1227,12 +1227,15 @@ def _get_stats_for_federation_staging_txn(txn): (count,) = txn.fetchone() txn.execute( - "SELECT coalesce(min(received_ts), 0) FROM federation_inbound_events_staging" + "SELECT min(received_ts) FROM federation_inbound_events_staging" ) (received_ts,) = txn.fetchone() - age = self._clock.time_msec() - received_ts + # If there is nothing in the staging area default it to 0. + age = 0 + if received_ts is not None: + age = self._clock.time_msec() - received_ts return count, age From 8e1febc6a1e909eeb4334d5572956f669ee2d290 Mon Sep 17 00:00:00 2001 From: sri-vidyut Date: Wed, 28 Jul 2021 02:29:42 +0900 Subject: [PATCH 27/61] Support underscores (in addition to hyphens) for charset detection. 
(#10410) --- changelog.d/10410.bugfix | 1 + synapse/rest/media/v1/preview_url_resource.py | 6 ++++-- tests/test_preview.py | 13 +++++++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10410.bugfix diff --git a/changelog.d/10410.bugfix b/changelog.d/10410.bugfix new file mode 100644 index 000000000000..65b418fd35ee --- /dev/null +++ b/changelog.d/10410.bugfix @@ -0,0 +1 @@ +Improve character set detection in URL previews by supporting underscores (in addition to hyphens). Contributed by @srividyut. diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 172212ee3a6b..0f051d4041ef 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -58,9 +58,11 @@ logger = logging.getLogger(__name__) -_charset_match = re.compile(br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9-]+)"?', flags=re.I) +_charset_match = re.compile( + br'<\s*meta[^>]*charset\s*=\s*"?([a-z0-9_-]+)"?', flags=re.I +) _xml_encoding_match = re.compile( - br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9-]+)"', flags=re.I + br'\s*<\s*\?\s*xml[^>]*encoding="([a-z0-9_-]+)"', flags=re.I ) _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I) diff --git a/tests/test_preview.py b/tests/test_preview.py index cac3d81ac14d..48e792b55b52 100644 --- a/tests/test_preview.py +++ b/tests/test_preview.py @@ -325,6 +325,19 @@ def test_meta_charset(self): ) self.assertEqual(encoding, "ascii") + def test_meta_charset_underscores(self): + """A character encoding contains underscore.""" + encoding = get_html_media_encoding( + b""" + + + + + """, + "text/html", + ) + self.assertEqual(encoding, "Shift_JIS") + def test_xml_encoding(self): """A character encoding is found via the meta tag.""" encoding = get_html_media_encoding( From 048968301278aa6ece0a694d7554b7d7d5f7e9ae Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 27 Jul 2021 14:28:23 -0500 Subject: [PATCH 28/61] Document Complement dev usage (#10483) --- CONTRIBUTING.md | 41 +++++++++++++++++++++++++++++++++++++++-- changelog.d/10483.doc | 1 + 2 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10483.doc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 80ef6aa235ae..e7eef23419d5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -155,7 +155,7 @@ source ./env/bin/activate ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder ``` -## Run the unit tests. +## Run the unit tests (Twisted trial). The unit tests run parts of Synapse, including your changes, to see if anything was broken. They are slower than the linters but will typically catch more errors. @@ -186,7 +186,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests ``` -## Run the integration tests. +## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)). The integration tests are a more comprehensive suite of tests. They run a full version of Synapse, including your changes, to check if @@ -203,6 +203,43 @@ $ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v This configuration should generally cover your needs. For more details about other configurations, see [documentation in the SyTest repo](https://github.com/matrix-org/sytest/blob/develop/docker/README.md). +## Run the integration tests ([Complement](https://github.com/matrix-org/complement)). 
+
+[Complement](https://github.com/matrix-org/complement) is a suite of black box tests that can be run on any homeserver implementation. It can also be thought of as end-to-end (e2e) tests.
+
+It's often nice to develop on Synapse and write Complement tests at the same time.
+Here is how to run your local Synapse checkout against your local Complement checkout.
+
+(check out [`complement`](https://github.com/matrix-org/complement) alongside your `synapse` checkout)
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
+```
+
+To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:
+
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory
+```
+
+To run a specific test, you can specify the whole name structure:
+
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh TestBackfillingHistory/parallel/Backfilled_historical_events_resolve_with_proper_state_in_correct_order
+```
+
+
+### Access database for homeserver after Complement test runs.
+
+If you're curious what the database looks like after you run some tests, here are some steps to get you going in Synapse:
+
+ 1. In your Complement test, comment out `defer deployment.Destroy(t)` and replace with `defer time.Sleep(2 * time.Hour)` to keep the homeserver running after the tests complete
+ 1. Start the Complement tests
+ 1. Find the name of the container, `docker ps -f name=complement_` (this will filter for just the Complement-related Docker containers)
+ 1. Access the container replacing the name with what you found in the previous step: `docker exec -it complement_1_hs_with_application_service.hs1_2 /bin/bash`
+ 1. Install sqlite (database driver), `apt-get update && apt-get install -y sqlite3`
+ 1. Then run `sqlite3` and open the database `.open /conf/homeserver.db` (this db path comes from the Synapse homeserver.yaml)
+
+
 # 9. Submit your patch.

 Once you're happy with your patch, it's time to prepare a Pull Request.
diff --git a/changelog.d/10483.doc b/changelog.d/10483.doc
new file mode 100644
index 000000000000..0f699fafddba
--- /dev/null
+++ b/changelog.d/10483.doc
@@ -0,0 +1 @@
+Document how to use Complement while developing a new Synapse feature.
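Once inside the container, the database-access steps above can be condensed; a hedged sketch (the container name varies per run, so substitute the one reported by `docker ps`):

```sh
# Find the homeserver container started by Complement.
docker ps -f name=complement_
# Open a shell in it (replace <container_name> with the name found above).
docker exec -it <container_name> /bin/bash
# Inside the container: install the sqlite3 CLI and list the homeserver's tables.
apt-get update && apt-get install -y sqlite3
sqlite3 /conf/homeserver.db '.tables'
```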
From c3b037795a927ecf58fd3ab099c2a751f05de4d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0imon=20Brandner?= Date: Wed, 28 Jul 2021 10:05:11 +0200 Subject: [PATCH 29/61] Support for MSC2285 (hidden read receipts) (#10413) Implementation of matrix-org/matrix-doc#2285 --- changelog.d/10413.feature | 1 + synapse/api/constants.py | 4 + synapse/config/experimental.py | 3 + synapse/handlers/initial_sync.py | 7 +- synapse/handlers/receipts.py | 58 +++- synapse/replication/tcp/client.py | 5 + synapse/rest/client/v2_alpha/read_marker.py | 14 +- synapse/rest/client/v2_alpha/receipts.py | 22 +- synapse/rest/client/versions.py | 2 + tests/handlers/test_receipts.py | 294 ++++++++++++++++++++ tests/rest/client/v2_alpha/test_sync.py | 97 ++++++- 11 files changed, 495 insertions(+), 12 deletions(-) create mode 100644 changelog.d/10413.feature create mode 100644 tests/handlers/test_receipts.py diff --git a/changelog.d/10413.feature b/changelog.d/10413.feature new file mode 100644 index 000000000000..3964db7e0eae --- /dev/null +++ b/changelog.d/10413.feature @@ -0,0 +1 @@ +Support for [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-doc/pull/2285). Contributed by @SimonBrandner. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 4caafc0ac921..56e7233b9eb8 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -224,3 +224,7 @@ class HistoryVisibility: JOINED = "joined" SHARED = "shared" WORLD_READABLE = "world_readable" + + +class ReadReceiptEventFields: + MSC2285_HIDDEN = "org.matrix.msc2285.hidden" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 040c4504d8a6..4c60ee8c2859 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -33,5 +33,8 @@ def read_config(self, config: JsonDict, **kwargs): # MSC2716 (backfill existing history) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) + # MSC2285 (hidden read receipts) + self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False) + # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", False) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 5d4964076084..e1c544a3c981 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -21,6 +21,7 @@ from synapse.api.errors import SynapseError from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state +from synapse.handlers.receipts import ReceiptEventSource from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.roommember import RoomsForUser from synapse.streams.config import PaginationConfig @@ -134,6 +135,8 @@ async def _snapshot_all_rooms( joined_rooms, to_key=int(now_token.receipt_key), ) + if self.hs.config.experimental.msc2285_enabled: + receipt = ReceiptEventSource.filter_out_hidden(receipt, user_id) tags_by_room = await self.store.get_tags_for_user(user_id) @@ -430,7 +433,9 @@ async def get_receipts(): room_id, to_key=now_token.receipt_key ) if not receipts: - receipts = [] + return [] + if self.hs.config.experimental.msc2285_enabled: + receipts = ReceiptEventSource.filter_out_hidden(receipts, user_id) return receipts presence, receipts, (messages, token) = await make_deferred_yieldable( diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 283483fc2c71..b9085bbccb39 100644 --- a/synapse/handlers/receipts.py +++ 
b/synapse/handlers/receipts.py @@ -14,9 +14,10 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple +from synapse.api.constants import ReadReceiptEventFields from synapse.appservice import ApplicationService from synapse.handlers._base import BaseHandler -from synapse.types import JsonDict, ReadReceipt, get_domain_from_id +from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id if TYPE_CHECKING: from synapse.server import HomeServer @@ -137,7 +138,7 @@ async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool: return True async def received_client_receipt( - self, room_id: str, receipt_type: str, user_id: str, event_id: str + self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool ) -> None: """Called when a client tells us a local user has read up to the given event_id in the room. @@ -147,23 +148,67 @@ async def received_client_receipt( receipt_type=receipt_type, user_id=user_id, event_ids=[event_id], - data={"ts": int(self.clock.time_msec())}, + data={"ts": int(self.clock.time_msec()), "hidden": hidden}, ) is_new = await self._handle_new_receipts([receipt]) if not is_new: return - if self.federation_sender: + if self.federation_sender and not ( + self.hs.config.experimental.msc2285_enabled and hidden + ): await self.federation_sender.send_read_receipt(receipt) class ReceiptEventSource: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() + self.config = hs.config + + @staticmethod + def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]: + visible_events = [] + + # filter out hidden receipts the user shouldn't see + for event in events: + content = event.get("content", {}) + new_event = event.copy() + new_event["content"] = {} + + for event_id in content.keys(): + event_content = content.get(event_id, {}) + m_read = event_content.get("m.read", {}) + + # If m_read is missing copy over the original event_content as there is nothing to process here + if not m_read: + new_event["content"][event_id] = event_content.copy() + continue + + new_users = {} + for rr_user_id, user_rr in m_read.items(): + hidden = user_rr.get("hidden", None) + if hidden is not True or rr_user_id == user_id: + new_users[rr_user_id] = user_rr.copy() + # If hidden has a value replace hidden with the correct prefixed key + if hidden is not None: + new_users[rr_user_id].pop("hidden") + new_users[rr_user_id][ + ReadReceiptEventFields.MSC2285_HIDDEN + ] = hidden + + # Set new users unless empty + if len(new_users.keys()) > 0: + new_event["content"][event_id] = {"m.read": new_users} + + # Append new_event to visible_events unless empty + if len(new_event["content"].keys()) > 0: + visible_events.append(new_event) + + return visible_events async def get_new_events( - self, from_key: int, room_ids: List[str], **kwargs + self, from_key: int, room_ids: List[str], user: UserID, **kwargs ) -> Tuple[List[JsonDict], int]: from_key = int(from_key) to_key = self.get_current_key() @@ -175,6 +220,9 @@ async def get_new_events( room_ids, from_key=from_key, to_key=to_key ) + if self.config.experimental.msc2285_enabled: + events = ReceiptEventSource.filter_out_hidden(events, user.to_string()) + return (events, to_key) async def get_new_events_as( diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 9d4859798bd3..e09b85781489 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -393,6 +393,11 @@ async def _on_new_receipts(self, rows): # we only want 
to send on receipts for our own users
             if not self._is_mine_id(receipt.user_id):
                 continue
+            if (
+                receipt.data.get("hidden", False)
+                and self._hs.config.experimental.msc2285_enabled
+            ):
+                continue
             receipt_info = ReadReceipt(
                 receipt.room_id,
                 receipt.receipt_type,
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index 5988fa47e5e9..027f8b81fa93 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -14,6 +14,8 @@
 
 import logging
 
+from synapse.api.constants import ReadReceiptEventFields
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
 from ._base import client_patterns
@@ -37,14 +39,24 @@ async def on_POST(self, request, room_id):
         await self.presence_handler.bump_presence_active_time(requester.user)
 
         body = parse_json_object_from_request(request)
-
         read_event_id = body.get("m.read", None)
+        hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False)
+
+        if not isinstance(hidden, bool):
+            raise SynapseError(
+                400,
+                "Param %s must be a boolean, if given"
+                % ReadReceiptEventFields.MSC2285_HIDDEN,
+                Codes.BAD_JSON,
+            )
+
         if read_event_id:
             await self.receipts_handler.received_client_receipt(
                 room_id,
                 "m.read",
                 user_id=requester.user.to_string(),
                 event_id=read_event_id,
+                hidden=hidden,
             )
 
         read_marker_event_id = body.get("m.fully_read", None)
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 8cf4aebdbec3..4b98979b477d 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -14,8 +14,9 @@
 
 import logging
 
-from synapse.api.errors import SynapseError
-from synapse.http.servlet import RestServlet
+from synapse.api.constants import ReadReceiptEventFields
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
 
 from ._base import client_patterns
 
@@ -42,10 +43,25 @@ async def on_POST(self, request, room_id, receipt_type, event_id):
         if receipt_type != "m.read":
             raise SynapseError(400, "Receipt type must be 'm.read'")
 
+        body = parse_json_object_from_request(request)
+        hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False)
+
+        if not isinstance(hidden, bool):
+            raise SynapseError(
+                400,
+                "Param %s must be a boolean, if given"
+                % ReadReceiptEventFields.MSC2285_HIDDEN,
+                Codes.BAD_JSON,
+            )
+
         await self.presence_handler.bump_presence_active_time(requester.user)
 
         await self.receipts_handler.received_client_receipt(
-            room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
+            room_id,
+            receipt_type,
+            user_id=requester.user.to_string(),
+            event_id=event_id,
+            hidden=hidden,
         )
 
         return 200, {}
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 4582c274c7ad..fa2e4e9cba48 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -82,6 +82,8 @@ def on_GET(self, request):
                     "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private,
                     # Supports the busy presence state described in MSC3026.
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, + # Supports receiving hidden read receipts as per MSC2285 + "org.matrix.msc2285": self.config.experimental.msc2285_enabled, }, }, ) diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py new file mode 100644 index 000000000000..93a9a084b24b --- /dev/null +++ b/tests/handlers/test_receipts.py @@ -0,0 +1,294 @@ +# Copyright 2021 Šimon Brandner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List + +from synapse.api.constants import ReadReceiptEventFields +from synapse.types import JsonDict + +from tests import unittest + + +class ReceiptsTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.event_source = hs.get_event_sources().sources["receipt"] + + # In the first param of _test_filters_hidden we use "hidden" instead of + # ReadReceiptEventFields.MSC2285_HIDDEN. We do this because we're mocking + # the data from the database which doesn't use the prefix + + def test_filters_out_hidden_receipt(self): + self._test_filters_hidden( + [ + { + "content": { + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@rikj:jki.re": { + "ts": 1436451550453, + "hidden": True, + } + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [], + ) + + def test_does_not_filter_out_our_hidden_receipt(self): + self._test_filters_hidden( + [ + { + "content": { + "$1435641916hfgh4394fHBLK:matrix.org": { + "m.read": { + "@me:server.org": { + "ts": 1436451550453, + "hidden": True, + }, + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$1435641916hfgh4394fHBLK:matrix.org": { + "m.read": { + "@me:server.org": { + "ts": 1436451550453, + ReadReceiptEventFields.MSC2285_HIDDEN: True, + }, + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_filters_out_hidden_receipt_and_ignores_rest(self): + self._test_filters_hidden( + [ + { + "content": { + "$1dgdgrd5641916114394fHBLK:matrix.org": { + "m.read": { + "@rikj:jki.re": { + "ts": 1436451550453, + "hidden": True, + }, + "@user:jki.re": { + "ts": 1436451550453, + }, + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$1dgdgrd5641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_filters_out_event_with_only_hidden_receipts_and_ignores_the_rest(self): + self._test_filters_hidden( + [ + { + "content": { + "$14356419edgd14394fHBLK:matrix.org": { + "m.read": { + "@rikj:jki.re": { + "ts": 1436451550453, + "hidden": True, + }, + } + }, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + 
"$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_handles_missing_content_of_m_read(self): + self._test_filters_hidden( + [ + { + "content": { + "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$14356419ggffg114394fHBLK:matrix.org": {"m.read": {}}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_handles_empty_event(self): + self._test_filters_hidden( + [ + { + "content": { + "$143564gdfg6114394fHBLK:matrix.org": {}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + [ + { + "content": { + "$143564gdfg6114394fHBLK:matrix.org": {}, + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def test_filters_out_receipt_event_with_only_hidden_receipt_and_ignores_rest(self): + self._test_filters_hidden( + [ + { + "content": { + "$14356419edgd14394fHBLK:matrix.org": { + "m.read": { + "@rikj:jki.re": { + "ts": 1436451550453, + "hidden": True, + }, + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + }, + { + "content": { + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + }, + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + }, + ], + [ + { + "content": { + "$1435641916114394fHBLK:matrix.org": { + "m.read": { + "@user:jki.re": { + "ts": 1436451550453, + } + } + } + }, + "room_id": "!jEsUZKDJdhlrceRyVU:example.org", + "type": "m.receipt", + } + ], + ) + + def _test_filters_hidden( + self, events: List[JsonDict], expected_output: List[JsonDict] + ): + """Tests that the _filter_out_hidden returns the expected output""" + filtered_events = self.event_source.filter_out_hidden(events, "@me:server.org") + self.assertEquals(filtered_events, expected_output) diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index cdca3a3e2300..f6ae9ae18110 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -15,9 +15,14 @@ import json import synapse.rest.admin -from synapse.api.constants import EventContentFields, EventTypes, RelationTypes +from synapse.api.constants import ( + EventContentFields, + EventTypes, + ReadReceiptEventFields, + RelationTypes, +) from synapse.rest.client.v1 import login, room -from synapse.rest.client.v2_alpha import knock, read_marker, sync +from synapse.rest.client.v2_alpha import knock, read_marker, receipts, sync from tests import unittest from tests.federation.transport.test_knocking import ( @@ -368,6 +373,76 @@ def test_knock_room_state(self): ) +class ReadReceiptsTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + receipts.register_servlets, + room.register_servlets, + 
sync.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.url = "/sync?since=%s" + self.next_batch = "s0" + + # Register the first user + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + # Create the room + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + # Register the second user + self.user2 = self.register_user("kermit2", "monkey") + self.tok2 = self.login("kermit2", "monkey") + + # Join the second user + self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) + + @override_config({"experimental_features": {"msc2285_enabled": True}}) + def test_hidden_read_receipts(self): + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt to tell the server the first user's message was read + body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") + channel = self.make_request( + "POST", + "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), + body, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that the first user can't see the other user's hidden read receipt + self.assertEqual(self._get_read_receipt(), None) + + def _get_read_receipt(self): + """Syncs and returns the read receipt.""" + + # Checks if event is a read receipt + def is_read_receipt(event): + return event["type"] == "m.receipt" + + # Sync + channel = self.make_request( + "GET", + self.url % self.next_batch, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Store the next batch for the next request. + self.next_batch = channel.json_body["next_batch"] + + # Return the read receipt + ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ + "ephemeral" + ]["events"] + return next(filter(is_read_receipt, ephemeral_events), None) + + class UnreadMessagesTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -375,6 +450,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): read_marker.register_servlets, room.register_servlets, sync.register_servlets, + receipts.register_servlets, ] def prepare(self, reactor, clock, hs): @@ -448,6 +524,23 @@ def test_unread_counts(self): # Check that the unread counter is back to 0. self._check_unread_count(0) + # Check that hidden read receipts don't break unread counts + res = self.helper.send(self.room_id, "hello", tok=self.tok2) + self._check_unread_count(1) + + # Send a read receipt to tell the server we've read the latest event. + body = json.dumps({ReadReceiptEventFields.MSC2285_HIDDEN: True}).encode("utf8") + channel = self.make_request( + "POST", + "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), + body, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the unread counter is back to 0. + self._check_unread_count(0) + # Check that room name changes increase the unread counter. self.helper.send_state( self.room_id, From 752fe0cd9869d25bb3e02a539aba67e98afea514 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 28 Jul 2021 07:03:01 -0400 Subject: [PATCH 30/61] Restricted rooms (MSC3083) should not have their allow key redacted. 
(#10489)

---
 changelog.d/10489.feature  |  1 +
 synapse/events/utils.py    |  2 ++
 tests/events/test_utils.py | 43 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)
 create mode 100644 changelog.d/10489.feature

diff --git a/changelog.d/10489.feature b/changelog.d/10489.feature
new file mode 100644
index 000000000000..df8bb51167f7
--- /dev/null
+++ b/changelog.d/10489.feature
@@ -0,0 +1 @@
+Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events.
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index ec96999e4e77..f4da9e0923c2 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -109,6 +109,8 @@ def add_fields(*fields):
         add_fields("creator")
     elif event_type == EventTypes.JoinRules:
         add_fields("join_rule")
+        if room_version.msc3083_join_rules:
+            add_fields("allow")
     elif event_type == EventTypes.PowerLevels:
         add_fields(
             "users",
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 9274ce4c396d..e2a5fc018cdf 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -301,6 +301,49 @@ def test_redacts(self):
             room_version=RoomVersions.MSC2176,
         )
 
+    def test_join_rules(self):
+        """Join rules events have changed behavior starting with MSC3083."""
+        self.run_test(
+            {
+                "type": "m.room.join_rules",
+                "event_id": "$test:domain",
+                "content": {
+                    "join_rule": "invite",
+                    "allow": [],
+                    "other_key": "stripped",
+                },
+            },
+            {
+                "type": "m.room.join_rules",
+                "event_id": "$test:domain",
+                "content": {"join_rule": "invite"},
+                "signatures": {},
+                "unsigned": {},
+            },
+        )
+
+        # After MSC3083, the allow key is protected from redaction.
+        self.run_test(
+            {
+                "type": "m.room.join_rules",
+                "content": {
+                    "join_rule": "invite",
+                    "allow": [],
+                    "other_key": "stripped",
+                },
+            },
+            {
+                "type": "m.room.join_rules",
+                "content": {
+                    "join_rule": "invite",
+                    "allow": [],
+                },
+                "signatures": {},
+                "unsigned": {},
+            },
+            room_version=RoomVersions.MSC3083,
+        )
+
 
 class SerializeEventTestCase(unittest.TestCase):
     def serialize(self, ev, fields):

From 9643dfde6ac4568682c1cc187fef206debfedbd7 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 28 Jul 2021 12:25:12 +0100
Subject: [PATCH 31/61] improve typing annotations in CachedCall (#10450)

tighten up some of the typing in CachedCall, which is going to be needed
when Twisted 21.7 brings better typing on Deferred.
---
 changelog.d/10450.misc             |  1 +
 synapse/util/caches/cached_call.py | 27 +++++++++++++++++----------
 2 files changed, 18 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/10450.misc

diff --git a/changelog.d/10450.misc b/changelog.d/10450.misc
new file mode 100644
index 000000000000..aa646f0841c7
--- /dev/null
+++ b/changelog.d/10450.misc
@@ -0,0 +1 @@
+ Update type annotations to work with forthcoming Twisted 21.7.0 release.
diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py
index 891bee0b33ae..e58dd91eda7b 100644
--- a/synapse/util/caches/cached_call.py
+++ b/synapse/util/caches/cached_call.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
- +import enum from typing import Awaitable, Callable, Generic, Optional, TypeVar, Union from twisted.internet.defer import Deferred @@ -22,6 +22,10 @@ TV = TypeVar("TV") +class _Sentinel(enum.Enum): + sentinel = object() + + class CachedCall(Generic[TV]): """A wrapper for asynchronous calls whose results should be shared @@ -65,7 +69,7 @@ def __init__(self, f: Callable[[], Awaitable[TV]]): """ self._callable: Optional[Callable[[], Awaitable[TV]]] = f self._deferred: Optional[Deferred] = None - self._result: Union[None, Failure, TV] = None + self._result: Union[_Sentinel, TV, Failure] = _Sentinel.sentinel async def get(self) -> TV: """Kick off the call if necessary, and return the result""" @@ -78,8 +82,9 @@ async def get(self) -> TV: self._callable = None # once the deferred completes, store the result. We cannot simply leave the - # result in the deferred, since if it's a Failure, GCing the deferred - # would then log a critical error about unhandled Failures. + # result in the deferred, since `awaiting` a deferred destroys its result. + # (Also, if it's a Failure, GCing the deferred would log a critical error + # about unhandled Failures) def got_result(r): self._result = r @@ -92,13 +97,15 @@ def got_result(r): # and any eventual exception may not be reported. # we can now await the deferred, and once it completes, return the result. - await make_deferred_yieldable(self._deferred) + if isinstance(self._result, _Sentinel): + await make_deferred_yieldable(self._deferred) + assert not isinstance(self._result, _Sentinel) + + if isinstance(self._result, Failure): + self._result.raiseException() + raise AssertionError("unexpected return from Failure.raiseException") - # I *think* this is the easiest way to correctly raise a Failure without having - # to gut-wrench into the implementation of Deferred. - d = Deferred() - d.callback(self._result) - return await d + return self._result class RetryOnExceptionCachedCall(Generic[TV]): From d9cb658c78bdb676762488d08ba44998307c781a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 28 Jul 2021 13:04:11 +0100 Subject: [PATCH 32/61] Fix up type hints for Twisted 21.7 (#10490) Mostly this involves decorating a few Deferred declarations with extra type hints. We wrap the types in quotes to avoid runtime errors when running against older versions of Twisted that don't have generics on Deferred. --- changelog.d/10490.misc | 1 + synapse/http/client.py | 4 ++-- synapse/replication/tcp/client.py | 2 +- synapse/util/async_helpers.py | 16 ++++++++-------- synapse/util/caches/deferred_cache.py | 15 ++++++++++++--- synapse/util/caches/descriptors.py | 2 +- 6 files changed, 25 insertions(+), 15 deletions(-) create mode 100644 changelog.d/10490.misc diff --git a/changelog.d/10490.misc b/changelog.d/10490.misc new file mode 100644 index 000000000000..630c31adaeb9 --- /dev/null +++ b/changelog.d/10490.misc @@ -0,0 +1 @@ +Fix up type annotations to work with Twisted 21.7. diff --git a/synapse/http/client.py b/synapse/http/client.py index 2ac76b15c2d2..c2ea51ee162b 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -847,7 +847,7 @@ def connectionLost(self, reason: Failure = connectionDone) -> None: def read_body_with_max_size( response: IResponse, stream: ByteWriteable, max_size: Optional[int] -) -> defer.Deferred: +) -> "defer.Deferred[int]": """ Read a HTTP response body to a file-object. Optionally enforcing a maximum file size. 
@@ -862,7 +862,7 @@ def read_body_with_max_size( Returns: A Deferred which resolves to the length of the read body. """ - d = defer.Deferred() + d: "defer.Deferred[int]" = defer.Deferred() # If the Content-Length header gives a size larger than the maximum allowed # size, do not bother downloading the body. diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e09b85781489..3fd281171308 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -285,7 +285,7 @@ async def wait_for_stream_position( # Create a new deferred that times out after N seconds, as we don't want # to wedge here forever. - deferred = Deferred() + deferred: "Deferred[None]" = Deferred() deferred = timeout_deferred( deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor ) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 014db1355b13..912cf85f89be 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -49,6 +49,8 @@ logger = logging.getLogger(__name__) +_T = TypeVar("_T") + class ObservableDeferred: """Wraps a deferred object so that we can add observer deferreds. These @@ -121,7 +123,7 @@ def observe(self) -> defer.Deferred: effect the underlying deferred. """ if not self._result: - d = defer.Deferred() + d: "defer.Deferred[Any]" = defer.Deferred() def remove(r): self._observers.discard(d) @@ -415,7 +417,7 @@ def __init__(self): self.key_to_current_writer: Dict[str, defer.Deferred] = {} async def read(self, key: str) -> ContextManager: - new_defer = defer.Deferred() + new_defer: "defer.Deferred[None]" = defer.Deferred() curr_readers = self.key_to_current_readers.setdefault(key, set()) curr_writer = self.key_to_current_writer.get(key, None) @@ -438,7 +440,7 @@ def _ctx_manager(): return _ctx_manager() async def write(self, key: str) -> ContextManager: - new_defer = defer.Deferred() + new_defer: "defer.Deferred[None]" = defer.Deferred() curr_readers = self.key_to_current_readers.get(key, set()) curr_writer = self.key_to_current_writer.get(key, None) @@ -471,10 +473,8 @@ def _ctx_manager(): def timeout_deferred( - deferred: defer.Deferred, - timeout: float, - reactor: IReactorTime, -) -> defer.Deferred: + deferred: "defer.Deferred[_T]", timeout: float, reactor: IReactorTime +) -> "defer.Deferred[_T]": """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new deferred that wraps and times out the given deferred, correctly handling @@ -497,7 +497,7 @@ def timeout_deferred( Returns: A new Deferred, which will errback with defer.TimeoutError on timeout. """ - new_d = defer.Deferred() + new_d: "defer.Deferred[_T]" = defer.Deferred() timed_out = [False] diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 8c6fafc6770a..b6456392cd92 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -16,7 +16,16 @@ import enum import threading -from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union +from typing import ( + Callable, + Generic, + Iterable, + MutableMapping, + Optional, + TypeVar, + Union, + cast, +) from prometheus_client import Gauge @@ -166,7 +175,7 @@ def get_immediate( def set( self, key: KT, - value: defer.Deferred, + value: "defer.Deferred[VT]", callback: Optional[Callable[[], None]] = None, ) -> defer.Deferred: """Adds a new entry to the cache (or updates an existing one). 
@@ -214,7 +223,7 @@ def set(
         if value.called:
             result = value.result
             if not isinstance(result, failure.Failure):
-                self.cache.set(key, result, callbacks)
+                self.cache.set(key, cast(VT, result), callbacks)
             return value
 
         # otherwise, we'll add an entry to the _pending_deferred_cache for now,
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 1e8e6b1d01c6..1ca31e41ac61 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -413,7 +413,7 @@ def arg_to_cache_key(arg):
             # relevant result for that key.
             deferreds_map = {}
             for arg in missing:
-                deferred = defer.Deferred()
+                deferred: "defer.Deferred[Any]" = defer.Deferred()
                 deferreds_map[arg] = deferred
                 key = arg_to_cache_key(arg)
                 cache.set(key, deferred, callback=invalidate_callback)

From d0b294ad974c05621426369a00be6bf05c4af997 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 28 Jul 2021 10:46:37 -0500
Subject: [PATCH 33/61] Make historical events discoverable from backfill for servers without any scrollback history (MSC2716) (#10245)

* Make historical messages available to federated servers

Part of MSC2716: https://github.com/matrix-org/matrix-doc/pull/2716

Follow-up to https://github.com/matrix-org/synapse/pull/9247

* Debug message not available on federation

* Add base starting insertion point when no chunk ID is provided

* Fix messages from multiple senders in historical chunk

Follow-up to https://github.com/matrix-org/synapse/pull/9247

Part of MSC2716: https://github.com/matrix-org/matrix-doc/pull/2716

---

Previously, Synapse would throw a 403,
`Cannot force another user to join.`,
because we were trying to use `?user_id` from a single virtual user
which did not match with messages from other users in the chunk.

* Remove debug lines

* Messing with selecting insertion event extremities

* Move db schema change to new version

* Add more better comments

* Make a fake requester with just what we need

See https://github.com/matrix-org/synapse/pull/10276#discussion_r660999080

* Store insertion events in table

* Make base insertion event float off on its own

See https://github.com/matrix-org/synapse/pull/10250#issuecomment-875711889

Conflicts:
	synapse/rest/client/v1/room.py

* Validate that the app service can actually control the given user

See https://github.com/matrix-org/synapse/pull/10276#issuecomment-876316455

Conflicts:
	synapse/rest/client/v1/room.py

* Add some better comments on what we're trying to check for

* Continue debugging

* Share validation logic

* Add inserted historical messages to /backfill response

* Remove debug sql queries

* Some marker event implementation trials

* Clean up PR

* Rename insertion_event_id to just event_id

* Add some better sql comments

* More accurate description

* Add changelog

* Make it clear what MSC the change is part of

* Add more detail on which insertion event came through

* Address review and improve sql queries

* Only use event_id as unique constraint

* Fix test case where insertion event is already in the normal DAG

* Remove debug changes

* Switch to chunk events so we can auth via power_levels

Previously, we were using `content.chunk_id` to connect one
chunk to another. But these events can be from any `sender`
and we can't tell who should be able to send historical events.
We know we only want the application service to do it but these
events have the sender of a real historical message, not the
application service user ID as the sender.
Other federated homeservers also have no indicator of which senders are
application services on the originating homeserver.

So we want to auth all of the MSC2716 events via power_levels and have them
be sent by the application service with proper PL levels in the room.

* Switch to chunk events for federation

* Add unstable room version to support new historical PL

* Fix federated events being rejected for no state_groups

Add fix from https://github.com/matrix-org/synapse/pull/10439
until it merges.

* Only connect base insertion event to prev_event_ids

Per discussion with @erikjohnston,
https://matrix.to/#/!UytJQHLQYfvYWsGrGY:jki.re/$12bTUiObDFdHLAYtT7E-BvYRp3k_xv8w0dUQHibasJk?via=jki.re&via=matrix.org

* Make it possible to get the room_version with txn

* Allow but ignore historical events in unsupported room version

See https://github.com/matrix-org/synapse/pull/10245#discussion_r675592489

We can't reject historical events on unsupported room versions because
homeservers without knowledge of MSC2716 or the new room version don't
reject historical events either.

Since we can't rely on the auth check here to stop historical events on
unsupported room versions, I've added some additional checks in the
processing/persisting code (`synapse/storage/databases/main/events.py` ->
`_handle_insertion_event` and `_handle_chunk_event`). I've had to do some
refactoring so there is a method to fetch the room version by `txn`.

* Move to unique index syntax

See https://github.com/matrix-org/synapse/pull/10245#discussion_r675638509

* High-level document how the insertion->chunk lookup works

* Remove create_event fallback for room_versions

See https://github.com/matrix-org/synapse/pull/10245/files#r677641879

* Use updated method name
---
 changelog.d/10245.feature                     |  1 +
 synapse/api/constants.py                      |  3 -
 synapse/api/room_versions.py                  | 27 ++++++
 synapse/event_auth.py                         | 38 ++++++++
 synapse/events/utils.py                       |  3 +
 synapse/handlers/federation.py                |  6 +-
 synapse/handlers/room.py                      |  1 +
 synapse/rest/client/v1/room.py                |  7 +-
 .../databases/main/event_federation.py        | 88 ++++++++++++++++--
 synapse/storage/databases/main/events.py      | 91 +++++++++++++++++++
 synapse/storage/databases/main/state.py       | 50 +++++++---
 .../delta/61/01insertion_event_lookups.sql    | 49 ++++++++++
 12 files changed, 338 insertions(+), 26 deletions(-)
 create mode 100644 changelog.d/10245.feature
 create mode 100644 synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql

diff --git a/changelog.d/10245.feature b/changelog.d/10245.feature
new file mode 100644
index 000000000000..b3c48cc2cc25
--- /dev/null
+++ b/changelog.d/10245.feature
@@ -0,0 +1 @@
+Make historical events discoverable from backfill for servers without any scrollback history (part of MSC2716).
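As a rough illustration of the insertion -> chunk relationship this commit message describes (a sketch, not part of the patch: the event bodies and IDs here are invented, and only the unstable prefixed field names follow the constants used in this series):

    # Hypothetical MSC2716 events, for illustration only.
    # An "insertion" event advertises the ID of the history chunk that may
    # later be attached to it ...
    insertion_event = {
        "type": "org.matrix.msc2716.insertion",
        "sender": "@appservice:example.org",  # needs the new "historical" power level
        "content": {
            "org.matrix.msc2716.next_chunk_id": "chunkABC",  # invented chunk ID
        },
    }

    # ... and a "chunk" event points back at it by reusing that ID, which is
    # the join key for the insertion_events/chunk_events tables added below.
    chunk_event = {
        "type": "org.matrix.msc2716.chunk",
        "sender": "@appservice:example.org",
        "content": {
            "org.matrix.msc2716.chunk_id": "chunkABC",
        },
    }

During backfill, `_get_backfill_events` then walks from a normal event to any insertion event pointing at it (via insertion_event_edges), and from that insertion event's next_chunk_id to the matching chunk start event (via chunk_events), as set up further down in this patch.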
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index a40d6d796134..a986fdb47a6e 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -206,9 +206,6 @@ class EventContentFields: MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" # For "marker" events MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" - MSC2716_MARKER_INSERTION_PREV_EVENTS = ( - "org.matrix.msc2716.marker.insertion_prev_events" - ) class RoomTypes: diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 697319e52dc5..bc678efe4973 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -73,6 +73,9 @@ class RoomVersion: # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending # m.room.membership event with membership 'knock'. msc2403_knocking = attr.ib(type=bool) + # MSC2716: Adds m.room.power_levels -> content.historical field to control + # whether "insertion", "chunk", "marker" events can be sent + msc2716_historical = attr.ib(type=bool) class RoomVersions: @@ -88,6 +91,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V2 = RoomVersion( "2", @@ -101,6 +105,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V3 = RoomVersion( "3", @@ -114,6 +119,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V4 = RoomVersion( "4", @@ -127,6 +133,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V5 = RoomVersion( "5", @@ -140,6 +147,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) V6 = RoomVersion( "6", @@ -153,6 +161,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -166,6 +175,7 @@ class RoomVersions: msc2176_redaction_rules=True, msc3083_join_rules=False, msc2403_knocking=False, + msc2716_historical=False, ) MSC3083 = RoomVersion( "org.matrix.msc3083.v2", @@ -179,6 +189,7 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=True, msc2403_knocking=False, + msc2716_historical=False, ) V7 = RoomVersion( "7", @@ -192,6 +203,21 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=True, + msc2716_historical=False, + ) + MSC2716 = RoomVersion( + "org.matrix.msc2716", + RoomDisposition.STABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2176_redaction_rules=False, + msc3083_join_rules=False, + msc2403_knocking=True, + msc2716_historical=True, ) @@ -207,6 +233,7 @@ class RoomVersions: RoomVersions.MSC2176, RoomVersions.MSC3083, RoomVersions.V7, + RoomVersions.MSC2716, ) } diff --git a/synapse/event_auth.py b/synapse/event_auth.py index cc92d3547792..0fa7ffc99fd4 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -205,6 +205,13 @@ def check( if event.type == EventTypes.Redaction: check_redaction(room_version_obj, event, auth_events) + if ( + event.type == EventTypes.MSC2716_INSERTION + or event.type == EventTypes.MSC2716_CHUNK + or event.type == 
EventTypes.MSC2716_MARKER
+    ):
+        check_historical(room_version_obj, event, auth_events)
+
     logger.debug("Allowing! %s", event)
 
 
@@ -539,6 +546,37 @@ def check_redaction(
             raise AuthError(403, "You don't have permission to redact events")
 
 
+def check_historical(
+    room_version_obj: RoomVersion,
+    event: EventBase,
+    auth_events: StateMap[EventBase],
+) -> None:
+    """Check whether the event sender is allowed to send historical related
+    events like "insertion", "chunk", and "marker".
+
+    Returns:
+        None
+
+    Raises:
+        AuthError if the event sender is not allowed to send historical related events
+        ("insertion", "chunk", and "marker").
+    """
+    # Ignore the auth checks in room versions that do not support historical
+    # events
+    if not room_version_obj.msc2716_historical:
+        return
+
+    user_level = get_user_power_level(event.user_id, auth_events)
+
+    historical_level = get_named_level(auth_events, "historical", 100)
+
+    if user_level < historical_level:
+        raise AuthError(
+            403,
+            'You don\'t have permission to send historical related events ("insertion", "chunk", and "marker")',
+        )
+
+
 def _check_power_levels(
     room_version_obj: RoomVersion,
     event: EventBase,
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index f4da9e0923c2..a0c07f62f44b 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -126,6 +126,9 @@ def add_fields(*fields):
         if room_version.msc2176_redaction_rules:
             add_fields("invite")
 
+        if room_version.msc2716_historical:
+            add_fields("historical")
+
     elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
         add_fields("aliases")
     elif event_type == EventTypes.RoomHistoryVisibility:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index aba095d2e120..8197b60b7673 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2748,9 +2748,11 @@ async def _update_auth_events_and_context_for_auth(
                         event.event_id,
                         e.event_id,
                     )
-                    context = await self.state_handler.compute_event_context(e)
+                    missing_auth_event_context = (
+                        await self.state_handler.compute_event_context(e)
+                    )
                     await self._auth_and_persist_event(
-                        origin, e, context, auth_events=auth
+                        origin, e, missing_auth_event_context, auth_events=auth
                     )
 
                     if e.event_id in event_auth_events:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 370561e54935..b33fe09f77eb 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -951,6 +951,7 @@ async def send(etype: str, content: JsonDict, **kwargs) -> int:
                 "kick": 50,
                 "redact": 50,
                 "invite": 50,
+                "historical": 100,
             }
 
             if config["original_invitees_have_ops"]:
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 25ba52c624b6..502a91758813 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -504,7 +504,6 @@ async def on_POST(self, request, room_id):
 
         events_to_create = body["events"]
 
-        prev_event_ids = prev_events_from_query
         inherited_depth = await self._inherit_depth_from_prev_ids(
             prev_events_from_query
         )
@@ -516,6 +515,10 @@ async def on_POST(self, request, room_id):
         chunk_id_to_connect_to = chunk_id_from_query
         base_insertion_event = None
         if chunk_id_from_query:
+            # All but the first base insertion event should point at a fake
+            # event, which causes the HS to ask for the state at the start of
+            # the chunk later.
+            prev_event_ids = [fake_prev_event_id]
             # TODO: Verify the chunk_id_from_query corresponds to an insertion event
             pass
         # Otherwise, create an insertion event to act as a starting point.
@@ -526,6 +529,8 @@ async def on_POST(self, request, room_id):
         # an insertion event), in which case we just create a new insertion event
         # that can then get pointed to by a "marker" event later.
         else:
+            prev_event_ids = prev_events_from_query
+
             base_insertion_event_dict = self._create_insertion_event_dict(
                 sender=requester.user.to_string(),
                 room_id=room_id,
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index f4a00b0736cf..547e43ab9844 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -936,15 +936,46 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
         # We want to make sure that we do a breadth-first, "depth" ordered
         # search.
 
-        query = (
-            "SELECT depth, prev_event_id FROM event_edges"
-            " INNER JOIN events"
-            " ON prev_event_id = events.event_id"
-            " WHERE event_edges.event_id = ?"
-            " AND event_edges.is_state = ?"
-            " LIMIT ?"
-        )
+        # Look for the prev_event_id connected to the given event_id
+        query = """
+            SELECT depth, prev_event_id FROM event_edges
+            /* Get the depth of the prev_event_id from the events table */
+            INNER JOIN events
+            ON prev_event_id = events.event_id
+            /* Find an event which matches the given event_id */
+            WHERE event_edges.event_id = ?
+            AND event_edges.is_state = ?
+            LIMIT ?
+        """
+
+        # Look for the "insertion" events connected to the given event_id
+        connected_insertion_event_query = """
+            SELECT e.depth, i.event_id FROM insertion_event_edges AS i
+            /* Get the depth of the insertion event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which points via prev_events to the given event_id */
+            WHERE i.insertion_prev_event_id = ?
+            LIMIT ?
+        """
+
+        # Find any chunk connections of a given insertion event
+        chunk_connection_query = """
+            SELECT e.depth, c.event_id FROM insertion_events AS i
+            /* Find the chunk that connects to the given insertion event */
+            INNER JOIN chunk_events AS c
+            ON i.next_chunk_id = c.chunk_id
+            /* Get the depth of the chunk start event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which matches the given event_id */
+            WHERE i.event_id = ?
+            LIMIT ?
+        """
+
+        # In a PriorityQueue, the lowest valued entries are retrieved first.
+        # We're using depth as the priority in the queue.
+        # Depth is lowest at the oldest-in-time message and highest at the
+        # newest-in-time message. We add events to the queue with a negative depth so that
+        # we process the newest-in-time messages first going backwards in time.
         queue = PriorityQueue()
 
         for event_id in event_list:
@@ -970,9 +1001,48 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
 
             event_results.add(event_id)
 
+            # Try and find any potential historical chunks of message history.
+            #
+            # First we look for an insertion event connected to the current
+            # event (by prev_event). If we find any, we need to go and try to
+            # find any chunk events connected to the insertion event (by
+            # chunk_id). If we find any, we'll add them to the queue and
+            # navigate up the DAG like normal in the next iteration of the loop.
+            txn.execute(
+                connected_insertion_event_query, (event_id, limit - len(event_results))
+            )
+            connected_insertion_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: connected_insertion_event_query %s",
+                connected_insertion_event_id_results,
+            )
+            for row in connected_insertion_event_id_results:
+                connected_insertion_event_depth = row[0]
+                connected_insertion_event = row[1]
+                queue.put((-connected_insertion_event_depth, connected_insertion_event))
+
+            # Find any chunk connections for the given insertion event
+            txn.execute(
+                chunk_connection_query,
+                (connected_insertion_event, limit - len(event_results)),
+            )
+            chunk_start_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: chunk_start_event_id_results %s",
+                chunk_start_event_id_results,
+            )
+            for row in chunk_start_event_id_results:
+                if row[1] not in event_results:
+                    queue.put((-row[0], row[1]))
+
+            # Navigate up the DAG by prev_event
             txn.execute(query, (event_id, False, limit - len(event_results)))
+            prev_event_id_results = txn.fetchall()
+            logger.debug(
+                "_get_backfill_events: prev_event_ids %s", prev_event_id_results
+            )
 
-            for row in txn:
+            for row in prev_event_id_results:
                 if row[1] not in event_results:
                     queue.put((-row[0], row[1]))
 
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index a396a201d4fc..86baf397fbda 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1502,6 +1502,9 @@ def _update_metadata_tables_txn(
 
         self._handle_event_relations(txn, event)
 
+        self._handle_insertion_event(txn, event)
+        self._handle_chunk_event(txn, event)
+
         # Store the labels for this event.
         labels = event.content.get(EventContentFields.LABELS)
         if labels:
@@ -1754,6 +1757,94 @@ def _handle_event_relations(self, txn, event):
             if rel_type == RelationTypes.REPLACE:
                 txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))
 
+    def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
+        """Handles keeping track of insertion events and edges/connections.
+        Part of MSC2716.
+
+        Args:
+            txn: The database transaction object
+            event: The event to process
+        """
+
+        if event.type != EventTypes.MSC2716_INSERTION:
+            # Not an insertion event
+            return
+
+        # Skip processing an insertion event if the room version doesn't
+        # support it.
+        room_version = self.store.get_room_version_txn(txn, event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
+        if next_chunk_id is None:
+            # Invalid insertion event without next chunk ID
+            return
+
+        logger.debug(
+            "_handle_insertion_event (next_chunk_id=%s) %s", next_chunk_id, event
+        )
+
+        # Keep track of the insertion event and the chunk ID
+        self.db_pool.simple_insert_txn(
+            txn,
+            table="insertion_events",
+            values={
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "next_chunk_id": next_chunk_id,
+            },
+        )
+
+        # Insert an edge for every prev_event connection
+        for prev_event_id in event.prev_events:
+            self.db_pool.simple_insert_txn(
+                txn,
+                table="insertion_event_edges",
+                values={
+                    "event_id": event.event_id,
+                    "room_id": event.room_id,
+                    "insertion_prev_event_id": prev_event_id,
+                },
+            )
+
+    def _handle_chunk_event(self, txn: LoggingTransaction, event: EventBase):
+        """Handles inserting the chunk edges/connections between the chunk event
+        and an insertion event. Part of MSC2716.
+
+        Args:
+            txn: The database transaction object
+            event: The event to process
+        """
+
+        if event.type != EventTypes.MSC2716_CHUNK:
+            # Not a chunk event
+            return
+
+        # Skip processing a chunk event if the room version doesn't
+        # support it.
+        room_version = self.store.get_room_version_txn(txn, event.room_id)
+        if not room_version.msc2716_historical:
+            return
+
+        chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
+        if chunk_id is None:
+            # Invalid chunk event without a chunk ID
+            return
+
+        logger.debug("_handle_chunk_event chunk_id=%s %s", chunk_id, event)
+
+        # Keep track of the chunk event and the chunk ID
+        self.db_pool.simple_insert_txn(
+            txn,
+            table="chunk_events",
+            values={
+                "event_id": event.event_id,
+                "room_id": event.room_id,
+                "chunk_id": chunk_id,
+            },
+        )
+
     def _handle_redaction(self, txn, redacted_event_id):
         """Handles receiving a redaction and checking whether we need to remove
         any redacted relations from the database.
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index 1757064a686f..8e22da99ae60 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -22,7 +22,7 @@
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
 from synapse.storage.state import StateFilter
@@ -58,15 +58,32 @@ def __init__(self, database: DatabasePool, db_conn, hs):
     async def get_room_version(self, room_id: str) -> RoomVersion:
         """Get the room_version of a given room
-
         Raises:
             NotFoundError: if the room is unknown
+
+            UnsupportedRoomVersionError: if the room uses an unknown room version.
+                Typically this happens if support for the room's version has been
+                removed from Synapse.
+        """
+        return await self.db_pool.runInteraction(
+            "get_room_version_txn",
+            self.get_room_version_txn,
+            room_id,
+        )
 
+    def get_room_version_txn(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> RoomVersion:
+        """Get the room_version of a given room
+        Args:
+            txn: Transaction object
+            room_id: The room_id of the room you are trying to get the version for
+        Raises:
+            NotFoundError: if the room is unknown
             UnsupportedRoomVersionError: if the room uses an unknown room version.
                 Typically this happens if support for the room's version has been
                 removed from Synapse.
         """
-        room_version_id = await self.get_room_version_id(room_id)
+        room_version_id = self.get_room_version_id_txn(txn, room_id)
         v = KNOWN_ROOM_VERSIONS.get(room_version_id)
 
         if not v:
@@ -80,7 +97,20 @@ async def get_room_version(self, room_id: str) -> RoomVersion:
     @cached(max_entries=10000)
     async def get_room_version_id(self, room_id: str) -> str:
         """Get the room_version of a given room
+        Raises:
+            NotFoundError: if the room is unknown
+        """
+        return await self.db_pool.runInteraction(
+            "get_room_version_id_txn",
+            self.get_room_version_id_txn,
+            room_id,
+        )
 
+    def get_room_version_id_txn(self, txn: LoggingTransaction, room_id: str) -> str:
+        """Get the room_version of a given room
+        Args:
+            txn: Transaction object
+            room_id: The room_id of the room you are trying to get the version for
         Raises:
             NotFoundError: if the room is unknown
         """
@@ -88,24 +118,22 @@ async def get_room_version_id(self, room_id: str) -> str:
         # First we try looking up room version from the database, but for old
         # rooms we might not have added the room version to it yet so we fall
         # back to previous behaviour and look in current state events.
-
+        #
         # We really should have an entry in the rooms table for every room we
         # care about, but let's be a bit paranoid (at least while the background
        # update is happening) to avoid breaking existing rooms.
-        version = await self.db_pool.simple_select_one_onecol(
+        room_version = self.db_pool.simple_select_one_onecol_txn(
+            txn,
             table="rooms",
             keyvalues={"room_id": room_id},
             retcol="room_version",
-            desc="get_room_version",
             allow_none=True,
         )
 
-        if version is not None:
-            return version
+        if room_version is None:
+            raise NotFoundError("Could not find room_version for %s" % (room_id,))
 
-        # Retrieve the room's create event
-        create_event = await self.get_create_event_for_room(room_id)
-        return create_event.content.get("room_version", "1")
+        return room_version
 
     async def get_room_predecessor(self, room_id: str) -> Optional[dict]:
         """Get the predecessor of an upgraded room if it exists.
diff --git a/synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql b/synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql
new file mode 100644
index 000000000000..7d7bafc631ab
--- /dev/null
+++ b/synapse/storage/schema/main/delta/61/01insertion_event_lookups.sql
@@ -0,0 +1,49 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a table that keeps track of "insertion" events and
+-- their next_chunk_id's so we can navigate to the next chunk of history.
+CREATE TABLE IF NOT EXISTS insertion_events(
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    next_chunk_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX IF NOT EXISTS insertion_events_event_id ON insertion_events(event_id);
+CREATE INDEX IF NOT EXISTS insertion_events_next_chunk_id ON insertion_events(next_chunk_id);
+
+-- Add a table that keeps track of all of the events we are inserting between.
+-- We use this when navigating the DAG and when we hit an event which matches
+-- `insertion_prev_event_id`, it should backfill from the "insertion" event and
+-- navigate the historical messages from there.
+CREATE TABLE IF NOT EXISTS insertion_event_edges(
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    insertion_prev_event_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS insertion_event_edges_event_id ON insertion_event_edges(event_id);
+CREATE INDEX IF NOT EXISTS insertion_event_edges_insertion_room_id ON insertion_event_edges(room_id);
+CREATE INDEX IF NOT EXISTS insertion_event_edges_insertion_prev_event_id ON insertion_event_edges(insertion_prev_event_id);
+
+-- Add a table that keeps track of how each chunk is labeled. The chunks are
+-- connected together based on an insertion event's `next_chunk_id`.
+CREATE TABLE IF NOT EXISTS chunk_events(
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    chunk_id TEXT NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS chunk_events_event_id ON chunk_events(event_id);
+CREATE INDEX IF NOT EXISTS chunk_events_chunk_id ON chunk_events(chunk_id);

From 858363d0b7e58fd71875b25d183537bb3b5a397f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 28 Jul 2021 20:55:50 +0100
Subject: [PATCH 34/61] Generics for `ObservableDeferred` (#10491)

Now that `Deferred` is a generic class, let's update `ObservableDeferred` to
follow suit.
---
 changelog.d/10491.misc            |  1 +
 synapse/notifier.py               |  5 +++--
 synapse/storage/persist_events.py |  4 +++-
 synapse/util/async_helpers.py     | 14 ++++++++------
 4 files changed, 15 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/10491.misc

diff --git a/changelog.d/10491.misc b/changelog.d/10491.misc
new file mode 100644
index 000000000000..3867cf2682c1
--- /dev/null
+++ b/changelog.d/10491.misc
@@ -0,0 +1 @@
+Improve type annotations for `ObservableDeferred`.
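A quick sketch of the annotation pattern this change enables (illustrative, not part of the patch; the quoted annotation follows the convention from the earlier Twisted 21.7 commits, which avoids runtime errors on older Twisted versions):

    from twisted.internet import defer

    from synapse.util.async_helpers import ObservableDeferred

    # The wrapped deferred is parameterised ...
    d: "defer.Deferred[str]" = defer.Deferred()
    od: ObservableDeferred[str] = ObservableDeferred(d)

    # ... so observe() is now typed as returning a "defer.Deferred[str]",
    # and get_result() as returning Union[str, Failure].
    observer = od.observe()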
diff --git a/synapse/notifier.py b/synapse/notifier.py index c5fbebc17d75..bbe337949ac5 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -111,8 +111,9 @@ def __init__( self.last_notified_token = current_token self.last_notified_ms = time_now_ms - with PreserveLoggingContext(): - self.notify_deferred = ObservableDeferred(defer.Deferred()) + self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred( + defer.Deferred() + ) def notify( self, diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index a39877f0d5b2..0e8270746d78 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -170,7 +170,9 @@ async def add_to_queue( end_item = queue[-1] else: # need to make a new queue item - deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True) + deferred: ObservableDeferred[_PersistResult] = ObservableDeferred( + defer.Deferred(), consumeErrors=True + ) end_item = _EventPersistQueueItem( events_and_contexts=[], diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 912cf85f89be..a3b65aee27b5 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -23,6 +23,7 @@ Awaitable, Callable, Dict, + Generic, Hashable, Iterable, List, @@ -39,6 +40,7 @@ from twisted.internet.defer import CancelledError from twisted.internet.interfaces import IReactorTime from twisted.python import failure +from twisted.python.failure import Failure from synapse.logging.context import ( PreserveLoggingContext, @@ -52,7 +54,7 @@ _T = TypeVar("_T") -class ObservableDeferred: +class ObservableDeferred(Generic[_T]): """Wraps a deferred object so that we can add observer deferreds. These observer deferreds do not affect the callback chain of the original deferred. @@ -70,7 +72,7 @@ class ObservableDeferred: __slots__ = ["_deferred", "_observers", "_result"] - def __init__(self, deferred: defer.Deferred, consumeErrors: bool = False): + def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) object.__setattr__(self, "_result", None) object.__setattr__(self, "_observers", set()) @@ -115,7 +117,7 @@ def errback(f): deferred.addCallbacks(callback, errback) - def observe(self) -> defer.Deferred: + def observe(self) -> "defer.Deferred[_T]": """Observe the underlying deferred. This returns a brand new deferred that is resolved when the underlying @@ -123,7 +125,7 @@ def observe(self) -> defer.Deferred: effect the underlying deferred. 
""" if not self._result: - d: "defer.Deferred[Any]" = defer.Deferred() + d: "defer.Deferred[_T]" = defer.Deferred() def remove(r): self._observers.discard(d) @@ -137,7 +139,7 @@ def remove(r): success, res = self._result return defer.succeed(res) if success else defer.fail(res) - def observers(self) -> List[defer.Deferred]: + def observers(self) -> "List[defer.Deferred[_T]]": return self._observers def has_called(self) -> bool: @@ -146,7 +148,7 @@ def has_called(self) -> bool: def has_succeeded(self) -> bool: return self._result is not None and self._result[0] is True - def get_result(self) -> Any: + def get_result(self) -> Union[_T, Failure]: return self._result[1] def __getattr__(self, name: str) -> Any: From db6e7f15eaee81be54b960d040102900f20e3f74 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 29 Jul 2021 03:46:51 -0500 Subject: [PATCH 35/61] Fix backfilled events being rejected for no `state_groups` (#10439) Reproducible on a federated homeserver when there is a membership auth event as a floating outlier. Then when we try to backfill one of that persons messages, it has missing membership auth to fetch which caused us to mistakenly replace the `context` for the message with that of the floating membership `outlier` event. Since `outliers` have no `state` or `state_group`, the error bubbles up when we continue down the persisting route: `sqlite3.IntegrityError: NOT NULL constraint failed: event_to_state_groups.state_group` Call stack: ``` backfill _auth_and_persist_event _check_event_auth _update_auth_events_and_context_for_auth ``` --- changelog.d/10439.bugfix | 1 + tests/handlers/test_federation.py | 131 ++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) create mode 100644 changelog.d/10439.bugfix diff --git a/changelog.d/10439.bugfix b/changelog.d/10439.bugfix new file mode 100644 index 000000000000..74e5a25126d9 --- /dev/null +++ b/changelog.d/10439.bugfix @@ -0,0 +1 @@ +Fix events with floating outlier state being rejected over federation. diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index ba8cf44f4626..4140fcefc2c4 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import List from unittest import TestCase from synapse.api.constants import EventTypes @@ -22,6 +23,7 @@ from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.util.stringutils import random_string from tests import unittest @@ -39,6 +41,8 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastore() + self.state_store = hs.get_storage().state + self._event_auth_handler = hs.get_event_auth_handler() return hs def test_exchange_revoked_invite(self): @@ -190,6 +194,133 @@ def test_rejected_state_event_state(self): self.assertEqual(sg, sg2) + def test_backfill_floating_outlier_membership_auth(self): + """ + As the local homeserver, check that we can properly process a federated + event from the OTHER_SERVER with auth_events that include a floating + membership event from the OTHER_SERVER. + + Regression test, see #10439. 
+ """ + OTHER_SERVER = "otherserver" + OTHER_USER = "@otheruser:" + OTHER_SERVER + + # create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as( + room_creator=user_id, + is_public=True, + tok=tok, + extra_content={ + "preset": "public_chat", + }, + ) + room_version = self.get_success(self.store.get_room_version(room_id)) + + prev_event_ids = self.get_success(self.store.get_prev_events_for_room(room_id)) + ( + most_recent_prev_event_id, + most_recent_prev_event_depth, + ) = self.get_success(self.store.get_max_depth_of(prev_event_ids)) + # mapping from (type, state_key) -> state_event_id + prev_state_map = self.get_success( + self.state_store.get_state_ids_for_event(most_recent_prev_event_id) + ) + # List of state event ID's + prev_state_ids = list(prev_state_map.values()) + auth_event_ids = prev_state_ids + auth_events = list( + self.get_success(self.store.get_events(auth_event_ids)).values() + ) + + # build a floating outlier member state event + fake_prev_event_id = "$" + random_string(43) + member_event_dict = { + "type": EventTypes.Member, + "content": { + "membership": "join", + }, + "state_key": OTHER_USER, + "room_id": room_id, + "sender": OTHER_USER, + "depth": most_recent_prev_event_depth, + "prev_events": [fake_prev_event_id], + "origin_server_ts": self.clock.time_msec(), + "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, + } + builder = self.hs.get_event_builder_factory().for_room_version( + room_version, member_event_dict + ) + member_event = self.get_success( + builder.build( + prev_event_ids=member_event_dict["prev_events"], + auth_event_ids=self._event_auth_handler.compute_auth_events( + builder, + prev_state_map, + for_verification=False, + ), + depth=member_event_dict["depth"], + ) + ) + # Override the signature added from "test" homeserver that we created the event with + member_event.signatures = member_event_dict["signatures"] + + # Add the new member_event to the StateMap + prev_state_map[ + (member_event.type, member_event.state_key) + ] = member_event.event_id + auth_events.append(member_event) + + # build and send an event authed based on the member event + message_event_dict = { + "type": EventTypes.Message, + "content": {}, + "room_id": room_id, + "sender": OTHER_USER, + "depth": most_recent_prev_event_depth, + "prev_events": prev_event_ids.copy(), + "origin_server_ts": self.clock.time_msec(), + "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, + } + builder = self.hs.get_event_builder_factory().for_room_version( + room_version, message_event_dict + ) + message_event = self.get_success( + builder.build( + prev_event_ids=message_event_dict["prev_events"], + auth_event_ids=self._event_auth_handler.compute_auth_events( + builder, + prev_state_map, + for_verification=False, + ), + depth=message_event_dict["depth"], + ) + ) + # Override the signature added from "test" homeserver that we created the event with + message_event.signatures = message_event_dict["signatures"] + + # Stub the /event_auth response from the OTHER_SERVER + async def get_event_auth( + destination: str, room_id: str, event_id: str + ) -> List[EventBase]: + return auth_events + + self.handler.federation_client.get_event_auth = get_event_auth + + with LoggingContext("receive_pdu"): + # Fake the OTHER_SERVER federating the message event over to our local homeserver + d = run_in_background( + self.handler.on_receive_pdu, OTHER_SERVER, message_event + ) + 
+        self.get_success(d)
+
+        # Now try and get the events on our local homeserver
+        stored_event = self.get_success(
+            self.store.get_event(message_event.event_id, allow_none=True)
+        )
+        self.assertTrue(stored_event is not None)
+
     @unittest.override_config(
         {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}}
     )

From 3a541a7daa3191f0d91cb33d76778d450107640c Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 29 Jul 2021 07:50:14 -0400
Subject: [PATCH 36/61] Improve failover logic for MSC3083 restricted rooms.
 (#10447)

If the federation client receives an M_UNABLE_TO_AUTHORISE_JOIN or
M_UNABLE_TO_GRANT_JOIN response it will attempt another server before
giving up completely.
---
 changelog.d/10447.feature               |  1 +
 synapse/federation/federation_client.py | 43 ++++++++++++++++++++++---
 2 files changed, 40 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/10447.feature

diff --git a/changelog.d/10447.feature b/changelog.d/10447.feature
new file mode 100644
index 000000000000..df8bb51167f7
--- /dev/null
+++ b/changelog.d/10447.feature
@@ -0,0 +1 @@
+Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index dbadf102f2d7..b7a10da15a89 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -22,6 +22,7 @@
     Awaitable,
     Callable,
     Collection,
+    Container,
     Dict,
     Iterable,
     List,
@@ -513,6 +514,7 @@ async def _try_destination_list(
         description: str,
         destinations: Iterable[str],
         callback: Callable[[str], Awaitable[T]],
+        failover_errcodes: Optional[Container[str]] = None,
         failover_on_unknown_endpoint: bool = False,
     ) -> T:
         """Try an operation on a series of servers, until it succeeds
@@ -533,6 +535,9 @@ async def _try_destination_list(
                 next server tried. Normally the stacktrace is logged but this is
                 suppressed if the exception is an InvalidResponseError.

+            failover_errcodes: Error codes (specific to this endpoint) which should
+                cause a failover when received as part of an HTTP 400 error.
+
             failover_on_unknown_endpoint: if True, we will try other servers if it looks
                 like a server doesn't support the endpoint. This is typically useful
                 if the endpoint in question is new or experimental.
@@ -544,6 +549,9 @@ async def _try_destination_list(
             SynapseError if the chosen remote server returns a 300/400 code, or
             no servers were reachable.
         """
+        if failover_errcodes is None:
+            failover_errcodes = ()
+
         for destination in destinations:
             if destination == self.server_name:
                 continue
@@ -558,11 +566,17 @@ async def _try_destination_list(
                 synapse_error = e.to_synapse_error()
                 failover = False

-                # Failover on an internal server error, or if the destination
-                # doesn't implemented the endpoint for some reason.
+                # Failover should occur:
+                #
+                # * On internal server errors.
+                # * If the destination responds that it cannot complete the request.
+                # * If the destination doesn't implement the endpoint for some reason.
                 if 500 <= e.code < 600:
                     failover = True

+                elif e.code == 400 and synapse_error.errcode in failover_errcodes:
+                    failover = True
+
                 elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
                     e, synapse_error
                 ):
@@ -678,8 +692,20 @@ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:

             return destination, ev, room_version

+        # MSC3083 defines additional error codes for room joins.
Unfortunately + # we do not yet know the room version, assume these will only be returned + # by valid room versions. + failover_errcodes = ( + (Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN) + if membership == Membership.JOIN + else None + ) + return await self._try_destination_list( - "make_" + membership, destinations, send_request + "make_" + membership, + destinations, + send_request, + failover_errcodes=failover_errcodes, ) async def send_join( @@ -818,7 +844,14 @@ async def _execute(pdu: EventBase) -> None: origin=destination, ) + # MSC3083 defines additional error codes for room joins. + failover_errcodes = None if room_version.msc3083_join_rules: + failover_errcodes = ( + Codes.UNABLE_AUTHORISE_JOIN, + Codes.UNABLE_TO_GRANT_JOIN, + ) + # If the join is being authorised via allow rules, we need to send # the /send_join back to the same server that was originally used # with /make_join. @@ -827,7 +860,9 @@ async def _execute(pdu: EventBase) -> None: get_domain_from_id(pdu.content["join_authorised_via_users_server"]) ] - return await self._try_destination_list("send_join", destinations, send_request) + return await self._try_destination_list( + "send_join", destinations, send_request, failover_errcodes=failover_errcodes + ) async def _do_send_join( self, room_version: RoomVersion, destination: str, pdu: EventBase From b7f7ca24b1ca79426289c9c26e9314df9a0a96f6 Mon Sep 17 00:00:00 2001 From: V02460 Date: Thu, 29 Jul 2021 22:34:14 +0200 Subject: [PATCH 37/61] Remove shebang line from module files (#10415) Signed-off-by: Kai A. Hiller --- changelog.d/10415.misc | 1 + synapse/_scripts/review_recent_signups.py | 1 - synapse/app/admin_cmd.py | 1 - synapse/app/appservice.py | 1 - synapse/app/client_reader.py | 1 - synapse/app/event_creator.py | 1 - synapse/app/federation_reader.py | 1 - synapse/app/federation_sender.py | 1 - synapse/app/frontend_proxy.py | 1 - synapse/app/generic_worker.py | 1 - synapse/app/homeserver.py | 1 - synapse/app/media_repository.py | 1 - synapse/app/pusher.py | 1 - synapse/app/synchrotron.py | 1 - synapse/app/user_dir.py | 1 - synapse/push/pusherpool.py | 1 - synapse/util/versionstring.py | 1 - 17 files changed, 1 insertion(+), 16 deletions(-) create mode 100644 changelog.d/10415.misc diff --git a/changelog.d/10415.misc b/changelog.d/10415.misc new file mode 100644 index 000000000000..3b9501acbbcb --- /dev/null +++ b/changelog.d/10415.misc @@ -0,0 +1 @@ +Remove shebang line from module files. diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py index 01dc0c42377f..9de913db889c 100644 --- a/synapse/_scripts/review_recent_signups.py +++ b/synapse/_scripts/review_recent_signups.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 2878d2c14077..3234d9ebba07 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2019 Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 57af28f10a57..885454ed44c0 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index c3d49925189e..3b7131af8fa2 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 920b34d97bf4..7dae163c1af3 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 2d50060ffb0b..de1bcee0a785 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index a368efb354ed..14bde2717943 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 85621f33ef1e..a1436f39305d 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index dfa30a62296a..cb08af7385eb 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); From c167e09fe58d3a256fb1c763b391ad6633d2507d Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Fri, 30 Jul 2021 12:34:21 +0100 Subject: [PATCH 38/61] Fix explicit assignment of PL 0 from being misinterpreted in rare circumstances (#10499) --- changelog.d/10499.bugfix | 1 + synapse/event_auth.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10499.bugfix diff --git a/changelog.d/10499.bugfix b/changelog.d/10499.bugfix new file mode 100644 index 000000000000..6487af6c9691 --- /dev/null +++ b/changelog.d/10499.bugfix @@ -0,0 +1 @@ +Fix a bug which caused an explicit assignment of power-level 0 to a user to be misinterpreted in rare circumstances. 
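The one-line change in the diff below is subtle: in Python, `not 0` evaluates to `True`, so a falsy check cannot distinguish an explicitly assigned power level of 0 from a missing entry. A minimal, self-contained sketch of the failure mode (the dicts here are simplified stand-ins for `m.room.power_levels` event content, not Synapse code):

```python
# Minimal sketch: `not 0` is True in Python, so a falsy check cannot
# distinguish "explicitly assigned power level 0" from "no entry at all".
# The dicts below are simplified stand-ins for power-levels content.


def power_level_buggy(user_id: str, content: dict) -> int:
    level = content.get("users", {}).get(user_id)
    if not level:  # BUG: also triggers for an explicit 0
        level = content.get("users_default", 0)
    return level


def power_level_fixed(user_id: str, content: dict) -> int:
    level = content.get("users", {}).get(user_id)
    if level is None:  # only fall back when there is genuinely no entry
        level = content.get("users_default", 0)
    return level


content = {"users": {"@alice:example.com": 0}, "users_default": 50}
assert power_level_buggy("@alice:example.com", content) == 50  # wrong: 0 was explicit
assert power_level_fixed("@alice:example.com", content) == 0   # correct
```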
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 0fa7ffc99fd4..4c92e9a2d40f 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -692,7 +692,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int: power_level_event = get_power_level_event(auth_events) if power_level_event: level = power_level_event.content.get("users", {}).get(user_id) - if not level: + if level is None: level = power_level_event.content.get("users_default", 0) if level is None: From 2afdb5c98470ab9d5aa793906c1710a65fb3028c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Sun, 1 Aug 2021 10:47:36 +0100 Subject: [PATCH 39/61] Fix deb build script to set prerelease flag correctly (#10500) --- changelog.d/10500.misc | 1 + docker/build_debian.sh | 9 ++++----- 2 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 changelog.d/10500.misc diff --git a/changelog.d/10500.misc b/changelog.d/10500.misc new file mode 100644 index 000000000000..dbaff57364d2 --- /dev/null +++ b/changelog.d/10500.misc @@ -0,0 +1 @@ +Fix a bug which caused production debian packages to be incorrectly marked as 'prerelease'. diff --git a/docker/build_debian.sh b/docker/build_debian.sh index f572ed9aa0ef..801ff454716c 100644 --- a/docker/build_debian.sh +++ b/docker/build_debian.sh @@ -11,10 +11,6 @@ DIST=`cut -d ':' -f2 <<< $distro` cp -aT /synapse/source /synapse/build cd /synapse/build -# add an entry to the changelog for this distribution -dch -M -l "+$DIST" "build for $DIST" -dch -M -r "" --force-distribution --distribution "$DIST" - # if this is a prerelease, set the Section accordingly. # # When the package is later added to the package repo, reprepro will use the @@ -23,11 +19,14 @@ dch -M -r "" --force-distribution --distribution "$DIST" DEB_VERSION=`dpkg-parsechangelog -SVersion` case $DEB_VERSION in - *rc*|*a*|*b*|*c*) + *~rc*|*~a*|*~b*|*~c*) sed -ie '/^Section:/c\Section: prerelease' debian/control ;; esac +# add an entry to the changelog for this distribution +dch -M -l "+$DIST" "build for $DIST" +dch -M -r "" --force-distribution --distribution "$DIST" dpkg-buildpackage -us -uc From ba5287f5e8be150551824493b3ad685dde00a543 Mon Sep 17 00:00:00 2001 From: Toni Spets Date: Mon, 2 Aug 2021 16:24:43 +0300 Subject: [PATCH 40/61] Allow setting transaction limit for db connections (#10440) Setting the value will help PostgreSQL free up memory by recycling the connections in the connection pool. Signed-off-by: Toni Spets --- changelog.d/10440.feature | 1 + docs/sample_config.yaml | 4 ++++ synapse/config/database.py | 4 ++++ synapse/storage/database.py | 21 +++++++++++++++++++ tests/storage/test_txn_limit.py | 36 +++++++++++++++++++++++++++++++++ tests/utils.py | 3 +++ 6 files changed, 69 insertions(+) create mode 100644 changelog.d/10440.feature create mode 100644 tests/storage/test_txn_limit.py diff --git a/changelog.d/10440.feature b/changelog.d/10440.feature new file mode 100644 index 000000000000..f1833b0bd726 --- /dev/null +++ b/changelog.d/10440.feature @@ -0,0 +1 @@ +Allow setting transaction limit for database connections. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 853c2f68990f..1a217f35dba9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -720,6 +720,9 @@ caches: # 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or # 'psycopg2' (for PostgreSQL). # +# 'txn_limit' gives the maximum number of transactions to run per connection +# before reconnecting. 
Defaults to 0, which means no limit. +# # 'args' gives options which are passed through to the database engine, # except for options starting 'cp_', which are used to configure the Twisted # connection pool. For a reference to valid arguments, see: @@ -740,6 +743,7 @@ caches: # #database: # name: psycopg2 +# txn_limit: 10000 # args: # user: synapse_user # password: secretpassword diff --git a/synapse/config/database.py b/synapse/config/database.py index 3d7d92f6152f..651e31b57621 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -33,6 +33,9 @@ # 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or # 'psycopg2' (for PostgreSQL). # +# 'txn_limit' gives the maximum number of transactions to run per connection +# before reconnecting. Defaults to 0, which means no limit. +# # 'args' gives options which are passed through to the database engine, # except for options starting 'cp_', which are used to configure the Twisted # connection pool. For a reference to valid arguments, see: @@ -53,6 +56,7 @@ # #database: # name: psycopg2 +# txn_limit: 10000 # args: # user: synapse_user # password: secretpassword diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 4d4643619f68..c8015a384857 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -15,6 +15,7 @@ # limitations under the License. import logging import time +from collections import defaultdict from sys import intern from time import monotonic as monotonic_time from typing import ( @@ -397,6 +398,7 @@ def __init__( ): self.hs = hs self._clock = hs.get_clock() + self._txn_limit = database_config.config.get("txn_limit", 0) self._database_config = database_config self._db_pool = make_pool(hs.get_reactor(), database_config, engine) @@ -406,6 +408,9 @@ def __init__( self._current_txn_total_time = 0.0 self._previous_loop_ts = 0.0 + # Transaction counter: key is the twisted thread id, value is the current count + self._txn_counters: Dict[int, int] = defaultdict(int) + # TODO(paul): These can eventually be removed once the metrics code # is running in mainline, and we have some nice monitoring frontends # to watch it @@ -750,10 +755,26 @@ def inner_func(conn, *args, **kwargs): sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) + if self._txn_limit > 0: + tid = self._db_pool.threadID() + self._txn_counters[tid] += 1 + + if self._txn_counters[tid] > self._txn_limit: + logger.debug( + "Reconnecting database connection over transaction limit" + ) + conn.reconnect() + opentracing.log_kv( + {"message": "reconnected due to txn limit"} + ) + self._txn_counters[tid] = 1 + if self.engine.is_connection_closed(conn): logger.debug("Reconnecting closed database connection") conn.reconnect() opentracing.log_kv({"message": "reconnected"}) + if self._txn_limit > 0: + self._txn_counters[tid] = 1 try: if db_autocommit: diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py new file mode 100644 index 000000000000..9be51f9ebd45 --- /dev/null +++ b/tests/storage/test_txn_limit.py @@ -0,0 +1,36 @@ +# Copyright 2014-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tests import unittest
+
+
+class SQLTransactionLimitTestCase(unittest.HomeserverTestCase):
+    """Test SQL transaction limit doesn't break transactions."""
+
+    def make_homeserver(self, reactor, clock):
+        return self.setup_test_homeserver(db_txn_limit=1000)
+
+    def test_config(self):
+        db_config = self.hs.config.get_single_database()
+        self.assertEqual(db_config.config["txn_limit"], 1000)
+
+    def test_select(self):
+        def do_select(txn):
+            txn.execute("SELECT 1")
+
+        db_pool = self.hs.get_datastores().databases[0]
+
+        # force txn limit to roll over at least once
+        for i in range(0, 1001):
+            self.get_success_or_raise(db_pool.runInteraction("test_select", do_select))
diff --git a/tests/utils.py b/tests/utils.py
index 6bd008dcfe21..f3458ca88dfc 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -239,6 +239,9 @@ def setup_test_homeserver(
         "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1},
     }

+    if "db_txn_limit" in kwargs:
+        database_config["txn_limit"] = kwargs["db_txn_limit"]
+
     database = DatabaseConnectionConfig("master", database_config)
     config.database.databases = [database]

From 01d45fe964d323e7f66358c2db57d00a44bf2274 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 2 Aug 2021 14:37:25 +0100
Subject: [PATCH 41/61] Prune inbound federation queues if they get too long
 (#10390)

---
 changelog.d/10390.misc                     |   1 +
 synapse/federation/federation_server.py    |  17 +++
 .../databases/main/event_federation.py     | 104 +++++++++++++++++-
 tests/storage/test_event_federation.py     |  57 ++++++++++
 4 files changed, 177 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/10390.misc

diff --git a/changelog.d/10390.misc b/changelog.d/10390.misc
new file mode 100644
index 000000000000..911a5733eee8
--- /dev/null
+++ b/changelog.d/10390.misc
@@ -0,0 +1 @@
+Prune inbound federation queues for a room if they get too large.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 2892a11d7d8e..145b9161d985 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -1024,6 +1024,23 @@ async def _process_incoming_pdus_in_room_inner(

             origin, event = next

+            # Prune the event queue if it's getting large.
+            #
+            # We do this *after* handling the first event as the common case is
+            # that the queue is empty (/has the single event in), and so there's
+            # no need to do this check.
+            pruned = await self.store.prune_staged_events_in_room(room_id, room_version)
+            if pruned:
+                # If we have pruned the queue, we need to refetch the next
+                # event to handle.
+                next = await self.store.get_next_staged_event_for_room(
+                    room_id, room_version
+                )
+                if not next:
+                    break
+
+                origin, event = next
+
             lock = await self.store.try_acquire_lock(
                 _INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
             )
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 547e43ab9844..44018c1c31ab 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -16,11 +16,11 @@
 from queue import Empty, PriorityQueue
 from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple

-from prometheus_client import Gauge
+from prometheus_client import Counter, Gauge

 from synapse.api.constants import MAX_DEPTH
 from synapse.api.errors import StoreError
-from synapse.api.room_versions import RoomVersion
+from synapse.api.room_versions import EventFormatVersions, RoomVersion
 from synapse.events import EventBase, make_event_from_dict
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -44,6 +44,12 @@
     "The total number of events in the inbound federation staging",
 )

+pdus_pruned_from_federation_queue = Counter(
+    "synapse_federation_server_number_inbound_pdu_pruned",
+    "The number of events in the inbound federation staging that have been "
+    "pruned due to the queue getting too long",
+)
+
 logger = logging.getLogger(__name__)


@@ -1277,6 +1283,100 @@ def _get_next_staged_event_for_room_txn(txn):

         return origin, event

+    async def prune_staged_events_in_room(
+        self,
+        room_id: str,
+        room_version: RoomVersion,
+    ) -> bool:
+        """Checks if there are lots of staged events for the room, and if so
+        prunes them down.
+
+        Returns:
+            Whether any events were pruned
+        """
+
+        # First check the size of the queue.
+        count = await self.db_pool.simple_select_one_onecol(
+            table="federation_inbound_events_staging",
+            keyvalues={"room_id": room_id},
+            retcol="COALESCE(COUNT(*), 0)",
+            desc="prune_staged_events_in_room_count",
+        )
+
+        if count < 100:
+            return False
+
+        # If the queue is too large, then we want to clear the entire queue,
+        # keeping only the forward extremities (i.e. the events not referenced
+        # by other events in the queue). We do this so that we can always
+        # backpaginate to fetch all the events we have dropped.
+        rows = await self.db_pool.simple_select_list(
+            table="federation_inbound_events_staging",
+            keyvalues={"room_id": room_id},
+            retcols=("event_id", "event_json"),
+            desc="prune_staged_events_in_room_fetch",
+        )
+
+        # Find the set of events referenced by those in the queue, as well as
+        # collecting all the event IDs in the queue.
+        referenced_events: Set[str] = set()
+        seen_events: Set[str] = set()
+        for row in rows:
+            event_id = row["event_id"]
+            seen_events.add(event_id)
+            event_d = db_to_json(row["event_json"])

+            # We don't bother parsing the dicts into full blown event objects,
+            # as that is needlessly expensive.
+
+            # We haven't checked that the `prev_events` have the right format
+            # yet, so we check as we go.
+            prev_events = event_d.get("prev_events", [])
+            if not isinstance(prev_events, list):
+                logger.info("Invalid prev_events for %s", event_id)
+                continue
+
+            if room_version.event_format == EventFormatVersions.V1:
+                for prev_event_tuple in prev_events:
+                    if not isinstance(prev_event_tuple, list) or len(prev_event_tuple) != 2:
+                        logger.info("Invalid prev_events for %s", event_id)
+                        break
+
+                    prev_event_id = prev_event_tuple[0]
+                    if not isinstance(prev_event_id, str):
+                        logger.info("Invalid prev_events for %s", event_id)
+                        break
+
+                    referenced_events.add(prev_event_id)
+            else:
+                for prev_event_id in prev_events:
+                    if not isinstance(prev_event_id, str):
+                        logger.info("Invalid prev_events for %s", event_id)
+                        break
+
+                    referenced_events.add(prev_event_id)
+
+        to_delete = referenced_events & seen_events
+        if not to_delete:
+            return False
+
+        pdus_pruned_from_federation_queue.inc(len(to_delete))
+        logger.info(
+            "Pruning %d events in room %s from federation queue",
+            len(to_delete),
+            room_id,
+        )
+
+        await self.db_pool.simple_delete_many(
+            table="federation_inbound_events_staging",
+            keyvalues={"room_id": room_id},
+            iterable=to_delete,
+            column="event_id",
+            desc="prune_staged_events_in_room_delete",
+        )
+
+        return True
+
     async def get_all_rooms_with_staged_incoming_events(self) -> List[str]:
         """Get the room IDs of all events currently staged."""
         return await self.db_pool.simple_select_onecol(
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index a0e22594785f..c3fcf7e7b405 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -15,7 +15,9 @@
 import attr
 from parameterized import parameterized

+from synapse.api.room_versions import RoomVersions
 from synapse.events import _EventInternalMetadata
+from synapse.util import json_encoder

 import tests.unittest
 import tests.utils
@@ -504,6 +506,61 @@ def insert_event(txn):
         )
         self.assertSetEqual(difference, set())

+    def test_prune_inbound_federation_queue(self):
+        "Test that pruning of inbound federation queues works"
+
+        room_id = "some_room_id"
+
+        # Insert a bunch of events that all reference the previous one.
+        self.get_success(
+            self.store.db_pool.simple_insert_many(
+                table="federation_inbound_events_staging",
+                values=[
+                    {
+                        "origin": "some_origin",
+                        "room_id": room_id,
+                        "received_ts": 0,
+                        "event_id": f"$fake_event_id_{i + 1}",
+                        "event_json": json_encoder.encode(
+                            {"prev_events": [f"$fake_event_id_{i}"]}
+                        ),
+                        "internal_metadata": "{}",
+                    }
+                    for i in range(500)
+                ],
+                desc="test_prune_inbound_federation_queue",
+            )
+        )
+
+        # Calling prune once should return True, i.e. a prune happens. The second
+        # time it shouldn't.
+        pruned = self.get_success(
+            self.store.prune_staged_events_in_room(room_id, RoomVersions.V6)
+        )
+        self.assertTrue(pruned)
+
+        pruned = self.get_success(
+            self.store.prune_staged_events_in_room(room_id, RoomVersions.V6)
+        )
+        self.assertFalse(pruned)
+
+        # Assert that we only have a single event left in the queue, and that it
+        # is the last one.
+        count = self.get_success(
+            self.store.db_pool.simple_select_one_onecol(
+                table="federation_inbound_events_staging",
+                keyvalues={"room_id": room_id},
+                retcol="COALESCE(COUNT(*), 0)",
+                desc="test_prune_inbound_federation_queue",
+            )
+        )
+        self.assertEqual(count, 1)
+
+        _, event_id = self.get_success(
+            self.store.get_next_staged_event_id_for_room(room_id)
+        )
+        self.assertEqual(event_id, "$fake_event_id_500")
+

 @attr.s
 class FakeEvent:

From fb086edaeddf8cdb8a03b8564d1b6883ac5cac6e Mon Sep 17 00:00:00 2001
From: reivilibre <38398653+reivilibre@users.noreply.github.com>
Date: Mon, 2 Aug 2021 16:50:22 +0100
Subject: [PATCH 42/61] Fix codestyle CI from #10440 (#10511)

Co-authored-by: Erik Johnston
---
 changelog.d/10511.feature       | 1 +
 tests/storage/test_txn_limit.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/10511.feature

diff --git a/changelog.d/10511.feature b/changelog.d/10511.feature
new file mode 100644
index 000000000000..f1833b0bd726
--- /dev/null
+++ b/changelog.d/10511.feature
@@ -0,0 +1 @@
+Allow setting transaction limit for database connections.
diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py
index 9be51f9ebd45..6ff3ebb13780 100644
--- a/tests/storage/test_txn_limit.py
+++ b/tests/storage/test_txn_limit.py
@@ -32,5 +32,5 @@ def do_select(txn):
         db_pool = self.hs.get_datastores().databases[0]

         # force txn limit to roll over at least once
-        for i in range(0, 1001):
+        for _ in range(0, 1001):
             self.get_success_or_raise(db_pool.runInteraction("test_select", do_select))

From a6ea32a79893b6ee694d036f3bc29a02a79d51e8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 2 Aug 2021 21:06:34 +0100
Subject: [PATCH 43/61] Fix the `tests-done` github actions step, again
 (#10512)

---
 .github/workflows/tests.yml | 21 ++++++++++++---------
 changelog.d/10512.misc      |  1 +
 2 files changed, 13 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/10512.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 0a62c62d02c3..239553ae138e 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -367,13 +367,16 @@ jobs:
       - name: Set build result
         env:
           NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines
+        # the `jq` incantation dumps out a series of "<job> <result>" lines.
+        # we set it to an intermediate variable to avoid a pipe, which makes it
+        # hard to set $rc.
         run: |
-          set -o pipefail
-          jq -r 'to_entries[] | [.key,.value.result] | join(" ")' \
-              <<< $NEEDS_CONTEXT |
-          while read job result; do
-            if [ "$result" != "success" ]; then
-              echo "::set-failed ::Job $job returned $result"
-            fi
-          done
+          rc=0
+          results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT)
+          while read job result ; do
+            if [ "$result" != "success" ]; then
+              echo "::set-failed ::Job $job returned $result"
+              rc=1
+            fi
+          done <<< $results
+          exit $rc
diff --git a/changelog.d/10512.misc b/changelog.d/10512.misc
new file mode 100644
index 000000000000..c012e89f4bbb
--- /dev/null
+++ b/changelog.d/10512.misc
@@ -0,0 +1 @@
+Update the `tests-done` Github Actions status.
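The queue-pruning strategy in `prune_staged_events_in_room` (patch 41 above) boils down to one set operation: delete every staged event that another staged event references via `prev_events`, so that only the forward extremities survive and everything dropped remains reachable through back-pagination. A standalone sketch of that idea, using a simplified dict-of-lists stand-in for the staging-table rows (illustrative only, not Synapse code):

```python
# Standalone sketch of the queue-pruning idea: `staged` maps event_id ->
# prev_event_ids for the events sitting in the staging area. Any staged
# event that another staged event points at is safe to drop, because it
# can be refetched later by backfilling from the surviving extremities.
from typing import Dict, List, Set


def prune_to_forward_extremities(staged: Dict[str, List[str]]) -> Set[str]:
    """Return the event IDs to delete from the staging area."""
    seen: Set[str] = set(staged)
    referenced: Set[str] = set()
    for prev_event_ids in staged.values():
        referenced.update(prev_event_ids)
    # Delete only events that are both staged and referenced by another
    # staged event; everything else is a forward extremity and is kept.
    return referenced & seen


queue = {
    "$a": [],            # referenced by $b -> pruned
    "$b": ["$a"],        # referenced by $c -> pruned
    "$c": ["$b"],        # forward extremity -> kept
    "$x": ["$unknown"],  # its prev event is not staged -> kept
}
assert prune_to_forward_extremities(queue) == {"$a", "$b"}
```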
From 2bae2c632ff595bda770212678521e04288f00a9 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 3 Aug 2021 05:08:57 -0500
Subject: [PATCH 44/61] Add developer documentation to explain room DAG
 concepts like `outliers` and `state_groups` (#10464)

---
 changelog.d/10464.doc                 |  1 +
 docs/SUMMARY.md                       |  1 +
 docs/development/room-dag-concepts.md | 79 +++++++++++++++++++++++++++
 3 files changed, 81 insertions(+)
 create mode 100644 changelog.d/10464.doc
 create mode 100644 docs/development/room-dag-concepts.md

diff --git a/changelog.d/10464.doc b/changelog.d/10464.doc
new file mode 100644
index 000000000000..764fb9f65c23
--- /dev/null
+++ b/changelog.d/10464.doc
@@ -0,0 +1 @@
+Add some developer docs to explain room DAG concepts like `outliers`, `state_groups`, `depth`, etc.
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index f1bde91420fc..10be12d63865 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -79,6 +79,7 @@
     - [Single Sign-On]()
       - [SAML](development/saml.md)
       - [CAS](development/cas.md)
+    - [Room DAG concepts](development/room-dag-concepts.md)
   - [State Resolution]()
     - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
 - [Media Repository](media_repository.md)
diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md
new file mode 100644
index 000000000000..5eed72bec662
--- /dev/null
+++ b/docs/development/room-dag-concepts.md
@@ -0,0 +1,79 @@
+# Room DAG concepts
+
+## Edges
+
+The word "edge" comes from graph theory lingo. An edge is just a connection
+between two events. In Synapse, we connect events by specifying their
+`prev_events`. A subsequent event points back at a previous event.
+
+```
+A (oldest) <---- B <---- C (most recent)
+```
+
+
+## Depth and stream ordering
+
+Events are normally sorted by `(topological_ordering, stream_ordering)` where
+`topological_ordering` is just `depth`. In other words, we first sort by `depth`
+and then tie-break based on `stream_ordering`. `depth` is incremented as new
+messages are added to the DAG. Normally, `stream_ordering` is an auto-incrementing
+integer, but backfilled events start with `stream_ordering=-1` and decrement.
+
+---
+
+ - `/sync` returns things in the order they arrive at the server (`stream_ordering`).
+ - `/messages` (and `/backfill` in the federation API) return them in the order determined by the event graph `(topological_ordering, stream_ordering)`.
+
+The general idea is that, if you're following a room in real-time (i.e.
+`/sync`), you probably want to see the messages as they arrive at your server,
+rather than skipping any that arrived late; whereas if you're looking at a
+historical section of timeline (i.e. `/messages`), you want to see the best
+representation of the state of the room as others were seeing it at the time.
+
+
+## Forward extremity
+
+Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+
+The forward extremities of a room are used as the `prev_events` when the next event is sent.
+
+
+## Backwards extremity
+
+The current marker of where we have backfilled up to; these will generally be the
+oldest-in-time events we know of in the DAG.
+
+A backwards extremity is an event for which we haven't yet fetched all of the
+`prev_events`.
+
+Once we have fetched all of its `prev_events`, it's unmarked as a backwards
+extremity (although we may have formed new backwards extremities from the prev
+events during the backfilling process).
+
+
+## Outliers
+
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet.
+
+We won't *necessarily* have the `prev_events` of an `outlier` in the database,
+but it's entirely possible that we *might*. The status of whether we have all of
+the `prev_events` is marked as a [backwards extremity](#backwards-extremity).
+
+For example, when we fetch the event auth chain or state for a given event, we
+mark all of those claimed auth events as outliers because we haven't done the
+state calculation ourselves.
+
+
+## State groups
+
+For every non-outlier event we need to know the state at that event. Instead of
+storing the full state for each event in the DB (i.e. a `event_id -> state`
+mapping), which is *very* space inefficient when state doesn't change, we
+instead assign each different set of state a "state group" and then have
+mappings of `event_id -> state_group` and `state_group -> state`.
+
+
+### State group edges
+
+TODO: `state_group_edges` is a further optimization...
+      notes from @Azrenbeth, https://pastebin.com/seUGVGeT

From a7bacccd8550b45fc1fa3dcff90f36125827b4ba Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 3 Aug 2021 11:23:45 +0100
Subject: [PATCH 45/61] Extend the release script to tag and create the
 releases. (#10496)

---
 changelog.d/10496.misc |   1 +
 scripts-dev/release.py | 311 ++++++++++++++++++++++++++++++++++++-----
 setup.py               |   2 +
 3 files changed, 278 insertions(+), 36 deletions(-)
 create mode 100644 changelog.d/10496.misc

diff --git a/changelog.d/10496.misc b/changelog.d/10496.misc
new file mode 100644
index 000000000000..6d0d3e539126
--- /dev/null
+++ b/changelog.d/10496.misc
@@ -0,0 +1 @@
+Extend release script to also tag and create GitHub releases.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index cff433af2af2..e864dc6ed53f 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -14,29 +14,57 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""An interactive script for doing a release. See `run()` below.
+"""An interactive script for doing a release. See `cli()` below.
 """

+import re
 import subprocess
 import sys
-from typing import Optional
+import urllib.request
+from os import path
+from tempfile import TemporaryDirectory
+from typing import List, Optional, Tuple

+import attr
 import click
+import commonmark
 import git
+import redbaron
+from click.exceptions import ClickException
+from github import Github
 from packaging import version
-from redbaron import RedBaron


-@click.command()
-def run():
-    """An interactive script to walk through the initial stages of creating a
-    release, including creating release branch, updating changelog and pushing to
-    GitHub.
+@click.group()
+def cli():
+    """An interactive script to walk through the parts of creating a release.

     Requires the dev dependencies be installed, which can be done via:

         pip install -e .[dev]

+    Then to use:
+
+        ./scripts-dev/release.py prepare
+
+        # ... ask others to look at the changelog ...
+
+        ./scripts-dev/release.py tag
+
+        # ... wait for assets to build ...
+
+        ./scripts-dev/release.py publish
+        ./scripts-dev/release.py upload
+
+    If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
+    `tag`/`publish` command, then a new draft release will be created/published.
+    """
+
+
+@cli.command()
+def prepare():
+    """Do the initial stages of creating a release, including creating release
+    branch, updating changelog and pushing to GitHub.
""" # Make sure we're in a git repo. @@ -51,32 +79,8 @@ def run(): click.secho("Updating git repo...") repo.remote().fetch() - # Parse the AST and load the `__version__` node so that we can edit it - # later. - with open("synapse/__init__.py") as f: - red = RedBaron(f.read()) - - version_node = None - for node in red: - if node.type != "assignment": - continue - - if node.target.type != "name": - continue - - if node.target.value != "__version__": - continue - - version_node = node - break - - if not version_node: - print("Failed to find '__version__' definition in synapse/__init__.py") - sys.exit(1) - - # Parse the current version. - current_version = version.parse(version_node.value.value.strip('"')) - assert isinstance(current_version, version.Version) + # Get the current version and AST from root Synapse module. + current_version, parsed_synapse_ast, version_node = parse_version_from_module() # Figure out what sort of release we're doing and calcuate the new version. rc = click.confirm("RC", default=True) @@ -190,7 +194,7 @@ def run(): # Update the `__version__` variable and write it back to the file. version_node.value = '"' + new_version + '"' with open("synapse/__init__.py", "w") as f: - f.write(red.dumps()) + f.write(parsed_synapse_ast.dumps()) # Generate changelogs subprocess.run("python3 -m towncrier", shell=True) @@ -240,6 +244,180 @@ def run(): ) +@cli.command() +@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"]) +def tag(gh_token: Optional[str]): + """Tags the release and generates a draft GitHub release""" + + # Make sure we're in a git repo. + try: + repo = git.Repo() + except git.InvalidGitRepositoryError: + raise click.ClickException("Not in Synapse repo.") + + if repo.is_dirty(): + raise click.ClickException("Uncommitted changes exist.") + + click.secho("Updating git repo...") + repo.remote().fetch() + + # Find out the version and tag name. + current_version, _, _ = parse_version_from_module() + tag_name = f"v{current_version}" + + # Check we haven't released this version. + if tag_name in repo.tags: + raise click.ClickException(f"Tag {tag_name} already exists!\n") + + # Get the appropriate changelogs and tag. + changes = get_changes_for_version(current_version) + + click.echo_via_pager(changes) + if click.confirm("Edit text?", default=False): + changes = click.edit(changes, require_save=False) + + repo.create_tag(tag_name, message=changes) + + if not click.confirm("Push tag to GitHub?", default=True): + print("") + print("Run when ready to push:") + print("") + print(f"\tgit push {repo.remote().name} tag {current_version}") + print("") + return + + repo.git.push(repo.remote().name, "tag", tag_name) + + # If no token was given, we bail here + if not gh_token: + click.launch(f"https://github.com/matrix-org/synapse/releases/edit/{tag_name}") + return + + # Create a new draft release + gh = Github(gh_token) + gh_repo = gh.get_repo("matrix-org/synapse") + release = gh_repo.create_git_release( + tag=tag_name, + name=tag_name, + message=changes, + draft=True, + prerelease=current_version.is_prerelease, + ) + + # Open the release and the actions where we are building the assets. + click.launch(release.url) + click.launch( + f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}" + ) + + click.echo("Wait for release assets to be built") + + +@cli.command() +@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) +def publish(gh_token: str): + """Publish release.""" + + # Make sure we're in a git repo. 
+    try:
+        repo = git.Repo()
+    except git.InvalidGitRepositoryError:
+        raise click.ClickException("Not in Synapse repo.")
+
+    if repo.is_dirty():
+        raise click.ClickException("Uncommitted changes exist.")
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    if not click.confirm(f"Publish {tag_name}?", default=True):
+        return
+
+    # Publish the draft release
+    gh = Github(gh_token)
+    gh_repo = gh.get_repo("matrix-org/synapse")
+    for release in gh_repo.get_releases():
+        if release.title == tag_name:
+            break
+    else:
+        raise ClickException(f"Failed to find GitHub release for {tag_name}")
+
+    assert release.title == tag_name
+
+    if not release.draft:
+        click.echo("Release already published.")
+        return
+
+    release = release.update_release(
+        name=release.title,
+        message=release.body,
+        tag_name=release.tag_name,
+        prerelease=release.prerelease,
+        draft=False,
+    )
+
+
+@cli.command()
+def upload():
+    """Upload release to pypi."""
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    pypi_asset_names = [
+        f"matrix_synapse-{current_version}-py3-none-any.whl",
+        f"matrix-synapse-{current_version}.tar.gz",
+    ]
+
+    with TemporaryDirectory(prefix=f"synapse_upload_{tag_name}_") as tmpdir:
+        for name in pypi_asset_names:
+            filename = path.join(tmpdir, name)
+            url = f"https://github.com/matrix-org/synapse/releases/download/{tag_name}/{name}"
+
+            click.echo(f"Downloading {name} into {filename}")
+            urllib.request.urlretrieve(url, filename=filename)
+
+        if click.confirm("Upload to PyPI?", default=True):
+            subprocess.run("twine upload *", shell=True, cwd=tmpdir)
+
+    click.echo(
+        f"Done! Remember to merge the tag {tag_name} into the appropriate branches"
+    )
+
+
+def parse_version_from_module() -> Tuple[
+    version.Version, redbaron.RedBaron, redbaron.Node
+]:
+    # Parse the AST and load the `__version__` node so that we can edit it
+    # later.
+    with open("synapse/__init__.py") as f:
+        red = redbaron.RedBaron(f.read())
+
+    version_node = None
+    for node in red:
+        if node.type != "assignment":
+            continue
+
+        if node.target.type != "name":
+            continue
+
+        if node.target.value != "__version__":
+            continue
+
+        version_node = node
+        break
+
+    if not version_node:
+        print("Failed to find '__version__' definition in synapse/__init__.py")
+        sys.exit(1)
+
+    # Parse the current version.
+    current_version = version.parse(version_node.value.value.strip('"'))
+    assert isinstance(current_version, version.Version)
+
+    return current_version, red, version_node
+
+
 def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
     """Find the branch/ref, looking first locally then in the remote."""
     if ref_name in repo.refs:
@@ -256,5 +434,66 @@ def update_branch(repo: git.Repo):
         repo.git.merge(repo.active_branch.tracking_branch().name)


+def get_changes_for_version(wanted_version: version.Version) -> str:
+    """Get the changelogs for the given version.
+
+    If an RC, then this will only get the changelog for that RC version;
+    otherwise, if it's a full release, it will get the changelog for the
+    release and all its RCs.
+    """
+
+    with open("CHANGES.md") as f:
+        changes = f.read()
+
+    # First we parse the changelog so that we can split it into sections based
+    # on the release headings.
+    ast = commonmark.Parser().parse(changes)
+
+    @attr.s(auto_attribs=True)
+    class VersionSection:
+        title: str
+
+        # These are 0-based.
+        start_line: int
+        end_line: Optional[int] = None  # Is None if it's the last entry
+
+    headings: List[VersionSection] = []
+    for node, _ in ast.walker():
+        # We look for all text nodes that are in a level 1 heading.
+        if node.t != "text":
+            continue
+
+        if node.parent.t != "heading" or node.parent.level != 1:
+            continue
+
+        # If we have a previous heading then we update its `end_line`.
+        if headings:
+            headings[-1].end_line = node.parent.sourcepos[0][0] - 1
+
+        headings.append(VersionSection(node.literal, node.parent.sourcepos[0][0] - 1))
+
+    changes_by_line = changes.split("\n")
+
+    version_changelog = []  # The lines we want to include in the changelog
+
+    # Go through each section and find any that match the requested version.
+    regex = re.compile(r"^Synapse v?(\S+)")
+    for section in headings:
+        groups = regex.match(section.title)
+        if not groups:
+            continue
+
+        heading_version = version.parse(groups.group(1))
+        heading_base_version = version.parse(heading_version.base_version)
+
+        # Check if the heading version matches the requested version, or if
+        # it's an RC of the requested version.
+        if wanted_version not in (heading_version, heading_base_version):
+            continue
+
+        version_changelog.extend(changes_by_line[section.start_line : section.end_line])
+
+    return "\n".join(version_changelog)
+
+
 if __name__ == "__main__":
-    run()
+    cli()
diff --git a/setup.py b/setup.py
index 1081548e0067..c47856351081 100755
--- a/setup.py
+++ b/setup.py
@@ -108,6 +108,8 @@ def exec_file(path_segments):
     "click==7.1.2",
     "redbaron==0.9.2",
     "GitPython==3.1.14",
+    "commonmark==0.9.1",
+    "pygithub==1.55",
 ]

 CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"]

From f4ac934afe1d91bb0cc1bb6dafc77a94165aa740 Mon Sep 17 00:00:00 2001
From: reivilibre <38398653+reivilibre@users.noreply.github.com>
Date: Tue, 3 Aug 2021 11:30:39 +0100
Subject: [PATCH 46/61] Revert use of PeriodicallyFlushingMemoryHandler by
 default (#10515)

---
 changelog.d/10515.feature   | 1 +
 docs/sample_log_config.yaml | 5 +----
 synapse/config/logger.py    | 5 +----
 3 files changed, 3 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/10515.feature

diff --git a/changelog.d/10515.feature b/changelog.d/10515.feature
new file mode 100644
index 000000000000..db277d9ecde0
--- /dev/null
+++ b/changelog.d/10515.feature
@@ -0,0 +1 @@
+Add a buffered logging handler which periodically flushes itself.
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index b088c834057d..669e60008113 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -28,7 +28,7 @@ handlers:
     # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
     # logs will still be flushed immediately.
     buffer:
-        class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler
+        class: logging.handlers.MemoryHandler
         target: file
         # The capacity is the number of log lines that are buffered before
         # being written to disk. Increasing this will lead to better
@@ -36,9 +36,6 @@ handlers:
         # be written to disk.
         capacity: 10
         flushLevel: 30  # Flush for WARNING logs as well
-        # The period of time, in seconds, between forced flushes.
-        # Messages will not be delayed for longer than this time.
-        period: 5

     # A handler that writes logs to stderr. Unused by default, but can be used
     # instead of "buffer" and "file" in the logger handlers.
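For context on the class the sample config above falls back to: the standard-library `logging.handlers.MemoryHandler` buffers records and writes them to its target only once the buffer holds `capacity` records or a record at `flushLevel` or above arrives; unlike the reverted `PeriodicallyFlushingMemoryHandler`, it performs no time-based flush, which is why the `period` option is removed. A minimal illustration (the logger name and log file name are made up for the example):

```python
# Minimal illustration of logging.handlers.MemoryHandler semantics.
# Records accumulate in memory and are written to `target` only when the
# buffer holds `capacity` records or a record at `flushLevel` (or above)
# arrives. There is no time-based flush. Names below are illustrative.
import logging
import logging.handlers

target = logging.FileHandler("homeserver.log")  # stands in for the `file` handler
buffer = logging.handlers.MemoryHandler(
    capacity=10,                 # flush after 10 buffered records...
    flushLevel=logging.WARNING,  # ...or immediately on WARNING/ERROR
    target=target,
)

logger = logging.getLogger("synapse.example")
logger.setLevel(logging.DEBUG)
logger.addHandler(buffer)

logger.info("buffered; may sit in memory until the buffer fills")
logger.warning("flushes this record and everything buffered before it")
```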
diff --git a/synapse/config/logger.py b/synapse/config/logger.py index dcd3ed1dac12..ad4e6e61c3bf 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -71,7 +71,7 @@ # will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR # logs will still be flushed immediately. buffer: - class: synapse.logging.handlers.PeriodicallyFlushingMemoryHandler + class: logging.handlers.MemoryHandler target: file # The capacity is the number of log lines that are buffered before # being written to disk. Increasing this will lead to better @@ -79,9 +79,6 @@ # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well - # The period of time, in seconds, between forced flushes. - # Messages will not be delayed for longer than this time. - period: 5 # A handler that writes logs to stderr. Unused by default, but can be used # instead of "buffer" and "file" in the logger handlers. From c8566191fcd7edf224db468f860c1b638fb8e763 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Aug 2021 11:32:10 +0100 Subject: [PATCH 47/61] 1.40.0rc1 --- CHANGES.md | 60 +++++++++++++++++++++++++++++++++++++++ changelog.d/10245.feature | 1 - changelog.d/10254.feature | 1 - changelog.d/10283.feature | 1 - changelog.d/10390.misc | 1 - changelog.d/10407.feature | 1 - changelog.d/10408.misc | 1 - changelog.d/10410.bugfix | 1 - changelog.d/10411.feature | 1 - changelog.d/10413.feature | 1 - changelog.d/10415.misc | 1 - changelog.d/10426.feature | 1 - changelog.d/10429.misc | 1 - changelog.d/10431.misc | 1 - changelog.d/10432.misc | 1 - changelog.d/10437.misc | 1 - changelog.d/10438.misc | 1 - changelog.d/10439.bugfix | 1 - changelog.d/10440.feature | 1 - changelog.d/10442.misc | 1 - changelog.d/10444.misc | 1 - changelog.d/10445.doc | 1 - changelog.d/10446.misc | 1 - changelog.d/10447.feature | 1 - changelog.d/10448.feature | 1 - changelog.d/10450.misc | 1 - changelog.d/10451.misc | 1 - changelog.d/10453.doc | 1 - changelog.d/10455.bugfix | 1 - changelog.d/10463.misc | 1 - changelog.d/10464.doc | 1 - changelog.d/10468.misc | 1 - changelog.d/10482.misc | 1 - changelog.d/10483.doc | 1 - changelog.d/10488.misc | 1 - changelog.d/10489.feature | 1 - changelog.d/10490.misc | 1 - changelog.d/10491.misc | 1 - changelog.d/10496.misc | 1 - changelog.d/10499.bugfix | 1 - changelog.d/10500.misc | 1 - changelog.d/10511.feature | 1 - changelog.d/10512.misc | 1 - changelog.d/10515.feature | 1 - changelog.d/9918.feature | 1 - debian/changelog | 8 ++++-- synapse/__init__.py | 2 +- 47 files changed, 67 insertions(+), 47 deletions(-) delete mode 100644 changelog.d/10245.feature delete mode 100644 changelog.d/10254.feature delete mode 100644 changelog.d/10283.feature delete mode 100644 changelog.d/10390.misc delete mode 100644 changelog.d/10407.feature delete mode 100644 changelog.d/10408.misc delete mode 100644 changelog.d/10410.bugfix delete mode 100644 changelog.d/10411.feature delete mode 100644 changelog.d/10413.feature delete mode 100644 changelog.d/10415.misc delete mode 100644 changelog.d/10426.feature delete mode 100644 changelog.d/10429.misc delete mode 100644 changelog.d/10431.misc delete mode 100644 changelog.d/10432.misc delete mode 100644 changelog.d/10437.misc delete mode 100644 changelog.d/10438.misc delete mode 100644 changelog.d/10439.bugfix delete mode 100644 changelog.d/10440.feature delete mode 100644 changelog.d/10442.misc delete mode 100644 changelog.d/10444.misc delete mode 100644 changelog.d/10445.doc delete mode 100644 changelog.d/10446.misc delete mode 100644 
changelog.d/10447.feature delete mode 100644 changelog.d/10448.feature delete mode 100644 changelog.d/10450.misc delete mode 100644 changelog.d/10451.misc delete mode 100644 changelog.d/10453.doc delete mode 100644 changelog.d/10455.bugfix delete mode 100644 changelog.d/10463.misc delete mode 100644 changelog.d/10464.doc delete mode 100644 changelog.d/10468.misc delete mode 100644 changelog.d/10482.misc delete mode 100644 changelog.d/10483.doc delete mode 100644 changelog.d/10488.misc delete mode 100644 changelog.d/10489.feature delete mode 100644 changelog.d/10490.misc delete mode 100644 changelog.d/10491.misc delete mode 100644 changelog.d/10496.misc delete mode 100644 changelog.d/10499.bugfix delete mode 100644 changelog.d/10500.misc delete mode 100644 changelog.d/10511.feature delete mode 100644 changelog.d/10512.misc delete mode 100644 changelog.d/10515.feature delete mode 100644 changelog.d/9918.feature diff --git a/CHANGES.md b/CHANGES.md index 653324928145..34274cfe9ca8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,63 @@ +Synapse 1.40.0rc1 (2021-08-03) +============================== + +Features +-------- + +- Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. ([\#9918](https://github.com/matrix-org/synapse/issues/9918)) +- Make historical events discoverable from backfill for servers without any scrollback history (part of MSC2716). ([\#10245](https://github.com/matrix-org/synapse/issues/10245)) +- Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. ([\#10254](https://github.com/matrix-org/synapse/issues/10254), [\#10447](https://github.com/matrix-org/synapse/issues/10447), [\#10489](https://github.com/matrix-org/synapse/issues/10489)) +- Initial support for MSC3244, Room version capabilities over the /capabilities API. ([\#10283](https://github.com/matrix-org/synapse/issues/10283)) +- Add a buffered logging handler which periodically flushes itself. ([\#10407](https://github.com/matrix-org/synapse/issues/10407), [\#10515](https://github.com/matrix-org/synapse/issues/10515)) +- Add support for https connections to a proxy server. Contributed by @Bubu and @dklimpel. ([\#10411](https://github.com/matrix-org/synapse/issues/10411)) +- Support for [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-doc/pull/2285). Contributed by @SimonBrandner. ([\#10413](https://github.com/matrix-org/synapse/issues/10413)) +- Email notifications now state whether an invitation is to a room or a space. ([\#10426](https://github.com/matrix-org/synapse/issues/10426)) +- Allow setting transaction limit for database connections. ([\#10440](https://github.com/matrix-org/synapse/issues/10440), [\#10511](https://github.com/matrix-org/synapse/issues/10511)) +- Add `creation_ts` to list users admin API. ([\#10448](https://github.com/matrix-org/synapse/issues/10448)) + + +Bugfixes +-------- + +- Improve character set detection in URL previews by supporting underscores (in addition to hyphens). Contributed by @srividyut. ([\#10410](https://github.com/matrix-org/synapse/issues/10410)) +- Fix events with floating outlier state being rejected over federation. 
([\#10439](https://github.com/matrix-org/synapse/issues/10439)) +- Fix `synapse_federation_server_oldest_inbound_pdu_in_staging` Prometheus metric to not report a max age of 51 years when the queue is empty. ([\#10455](https://github.com/matrix-org/synapse/issues/10455)) +- Fix a bug which caused an explicit assignment of power-level 0 to a user to be misinterpreted in rare circumstances. ([\#10499](https://github.com/matrix-org/synapse/issues/10499)) + + +Improved Documentation +---------------------- + +- Fix hierarchy of providers on the OpenID page. ([\#10445](https://github.com/matrix-org/synapse/issues/10445)) +- Consolidate development documentation to `docs/development/`. ([\#10453](https://github.com/matrix-org/synapse/issues/10453)) +- Add some developer docs to explain room DAG concepts like `outliers`, `state_groups`, `depth`, etc. ([\#10464](https://github.com/matrix-org/synapse/issues/10464)) +- Document how to use Complement while developing a new Synapse feature. ([\#10483](https://github.com/matrix-org/synapse/issues/10483)) + + +Internal Changes +---------------- + +- Prune inbound federation inbound queues for a room if they get too large. ([\#10390](https://github.com/matrix-org/synapse/issues/10390)) +- Add type hints to `synapse.federation.transport.client` module. ([\#10408](https://github.com/matrix-org/synapse/issues/10408)) +- Remove shebang line from module files. ([\#10415](https://github.com/matrix-org/synapse/issues/10415)) +- Drop backwards-compatibility code that was required to support Ubuntu Xenial. ([\#10429](https://github.com/matrix-org/synapse/issues/10429)) +- Use a docker image cache for the prerequisites for the debian package build. ([\#10431](https://github.com/matrix-org/synapse/issues/10431)) +- Connect historical chunks together with chunk events instead of a content field (MSC2716). ([\#10432](https://github.com/matrix-org/synapse/issues/10432)) +- Improve servlet type hints. ([\#10437](https://github.com/matrix-org/synapse/issues/10437), [\#10438](https://github.com/matrix-org/synapse/issues/10438)) +- Replace usage of `or_ignore` in `simple_insert` with `simple_upsert` usage, to stop spamming postgres logs with spurious ERROR messages. ([\#10442](https://github.com/matrix-org/synapse/issues/10442)) +- Update the `tests-done` Github Actions status. ([\#10444](https://github.com/matrix-org/synapse/issues/10444), [\#10512](https://github.com/matrix-org/synapse/issues/10512)) +- Update type annotations to work with forthcoming Twisted 21.7.0 release. ([\#10446](https://github.com/matrix-org/synapse/issues/10446), [\#10450](https://github.com/matrix-org/synapse/issues/10450)) +- Cancel redundant GHA workflows when a new commit is pushed. ([\#10451](https://github.com/matrix-org/synapse/issues/10451)) +- Disable `msc2716` Complement tests until Complement updates are merged. ([\#10463](https://github.com/matrix-org/synapse/issues/10463)) +- Mitigate media repo XSS attacks on IE11 via the non-standard X-Content-Security-Policy header. ([\#10468](https://github.com/matrix-org/synapse/issues/10468)) +- Additional type hints in the state handler. ([\#10482](https://github.com/matrix-org/synapse/issues/10482)) +- Update syntax used to run complement tests. ([\#10488](https://github.com/matrix-org/synapse/issues/10488)) +- Fix up type annotations to work with Twisted 21.7. 
([\#10490](https://github.com/matrix-org/synapse/issues/10490)) +- Improve type annotations for `ObservableDeferred`. ([\#10491](https://github.com/matrix-org/synapse/issues/10491)) +- Extend release script to also tag and create GitHub releases. ([\#10496](https://github.com/matrix-org/synapse/issues/10496)) +- Fix a bug which caused production debian packages to be incorrectly marked as 'prerelease'. ([\#10500](https://github.com/matrix-org/synapse/issues/10500)) + + Synapse 1.39.0 (2021-07-29) =========================== diff --git a/changelog.d/10245.feature b/changelog.d/10245.feature deleted file mode 100644 index b3c48cc2cc25..000000000000 --- a/changelog.d/10245.feature +++ /dev/null @@ -1 +0,0 @@ -Make historical events discoverable from backfill for servers without any scrollback history (part of MSC2716). diff --git a/changelog.d/10254.feature b/changelog.d/10254.feature deleted file mode 100644 index df8bb51167f7..000000000000 --- a/changelog.d/10254.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. diff --git a/changelog.d/10283.feature b/changelog.d/10283.feature deleted file mode 100644 index 99d633dbfb86..000000000000 --- a/changelog.d/10283.feature +++ /dev/null @@ -1 +0,0 @@ -Initial support for MSC3244, Room version capabilities over the /capabilities API. \ No newline at end of file diff --git a/changelog.d/10390.misc b/changelog.d/10390.misc deleted file mode 100644 index 911a5733eee8..000000000000 --- a/changelog.d/10390.misc +++ /dev/null @@ -1 +0,0 @@ -Prune inbound federation inbound queues for a room if they get too large. diff --git a/changelog.d/10407.feature b/changelog.d/10407.feature deleted file mode 100644 index db277d9ecde0..000000000000 --- a/changelog.d/10407.feature +++ /dev/null @@ -1 +0,0 @@ -Add a buffered logging handler which periodically flushes itself. diff --git a/changelog.d/10408.misc b/changelog.d/10408.misc deleted file mode 100644 index abccd210a924..000000000000 --- a/changelog.d/10408.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse.federation.transport.client` module. diff --git a/changelog.d/10410.bugfix b/changelog.d/10410.bugfix deleted file mode 100644 index 65b418fd35ee..000000000000 --- a/changelog.d/10410.bugfix +++ /dev/null @@ -1 +0,0 @@ -Improve character set detection in URL previews by supporting underscores (in addition to hyphens). Contributed by @srividyut. diff --git a/changelog.d/10411.feature b/changelog.d/10411.feature deleted file mode 100644 index ef0ab84b17b1..000000000000 --- a/changelog.d/10411.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for https connections to a proxy server. Contributed by @Bubu and @dklimpel. \ No newline at end of file diff --git a/changelog.d/10413.feature b/changelog.d/10413.feature deleted file mode 100644 index 3964db7e0eae..000000000000 --- a/changelog.d/10413.feature +++ /dev/null @@ -1 +0,0 @@ -Support for [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-doc/pull/2285). Contributed by @SimonBrandner. diff --git a/changelog.d/10415.misc b/changelog.d/10415.misc deleted file mode 100644 index 3b9501acbbcb..000000000000 --- a/changelog.d/10415.misc +++ /dev/null @@ -1 +0,0 @@ -Remove shebang line from module files. 
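For context on the logging change at the top of this section: the sample config's `buffer` handler is pointed back at the stdlib `logging.handlers.MemoryHandler`, dropping the `period` option that only the periodically-flushing variant understands, even though the changelog above still advertises "a buffered logging handler which periodically flushes itself". A minimal sketch of what such a periodically-flushing handler can look like, using only the stdlib `logging` API (an illustration, not Synapse's actual `synapse.logging.handlers` implementation; see PATCH 52/61 below for the real handler's flushing thread):

```python
import logging
import logging.handlers
import time
from threading import Thread


class PeriodicallyFlushingMemoryHandler(logging.handlers.MemoryHandler):
    """Buffer records like MemoryHandler, but also flush on a timer so
    low-severity lines are never delayed by more than `period` seconds."""

    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True, period=5.0):
        super().__init__(capacity, flushLevel, target, flushOnClose)
        self._period = period
        # daemon=True so this thread cannot keep the interpreter alive on
        # shutdown (the same property PATCH 52/61 restores in Synapse).
        self._flushing_thread = Thread(
            name="flushing thread",
            target=self._flush_periodically,
            daemon=True,
        )
        self._flushing_thread.start()

    def _flush_periodically(self) -> None:
        while True:
            time.sleep(self._period)
            self.flush()  # MemoryHandler.flush() takes its own lock
```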
diff --git a/changelog.d/10426.feature b/changelog.d/10426.feature deleted file mode 100644 index 9cca6dc456df..000000000000 --- a/changelog.d/10426.feature +++ /dev/null @@ -1 +0,0 @@ -Email notifications now state whether an invitation is to a room or a space. diff --git a/changelog.d/10429.misc b/changelog.d/10429.misc deleted file mode 100644 index ccb2217f64fd..000000000000 --- a/changelog.d/10429.misc +++ /dev/null @@ -1 +0,0 @@ -Drop backwards-compatibility code that was required to support Ubuntu Xenial. diff --git a/changelog.d/10431.misc b/changelog.d/10431.misc deleted file mode 100644 index 34b9b49da680..000000000000 --- a/changelog.d/10431.misc +++ /dev/null @@ -1 +0,0 @@ -Use a docker image cache for the prerequisites for the debian package build. diff --git a/changelog.d/10432.misc b/changelog.d/10432.misc deleted file mode 100644 index 3a8cdf0ae0e9..000000000000 --- a/changelog.d/10432.misc +++ /dev/null @@ -1 +0,0 @@ -Connect historical chunks together with chunk events instead of a content field (MSC2716). diff --git a/changelog.d/10437.misc b/changelog.d/10437.misc deleted file mode 100644 index a55757849903..000000000000 --- a/changelog.d/10437.misc +++ /dev/null @@ -1 +0,0 @@ -Improve servlet type hints. diff --git a/changelog.d/10438.misc b/changelog.d/10438.misc deleted file mode 100644 index a55757849903..000000000000 --- a/changelog.d/10438.misc +++ /dev/null @@ -1 +0,0 @@ -Improve servlet type hints. diff --git a/changelog.d/10439.bugfix b/changelog.d/10439.bugfix deleted file mode 100644 index 74e5a25126d9..000000000000 --- a/changelog.d/10439.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix events with floating outlier state being rejected over federation. diff --git a/changelog.d/10440.feature b/changelog.d/10440.feature deleted file mode 100644 index f1833b0bd726..000000000000 --- a/changelog.d/10440.feature +++ /dev/null @@ -1 +0,0 @@ -Allow setting transaction limit for database connections. diff --git a/changelog.d/10442.misc b/changelog.d/10442.misc deleted file mode 100644 index b8d412d73289..000000000000 --- a/changelog.d/10442.misc +++ /dev/null @@ -1 +0,0 @@ -Replace usage of `or_ignore` in `simple_insert` with `simple_upsert` usage, to stop spamming postgres logs with spurious ERROR messages. diff --git a/changelog.d/10444.misc b/changelog.d/10444.misc deleted file mode 100644 index c012e89f4bbb..000000000000 --- a/changelog.d/10444.misc +++ /dev/null @@ -1 +0,0 @@ -Update the `tests-done` Github Actions status. diff --git a/changelog.d/10445.doc b/changelog.d/10445.doc deleted file mode 100644 index 4c023ded7ca6..000000000000 --- a/changelog.d/10445.doc +++ /dev/null @@ -1 +0,0 @@ -Fix hierarchy of providers on the OpenID page. diff --git a/changelog.d/10446.misc b/changelog.d/10446.misc deleted file mode 100644 index a5a0ca80eb4d..000000000000 --- a/changelog.d/10446.misc +++ /dev/null @@ -1 +0,0 @@ -Update type annotations to work with forthcoming Twisted 21.7.0 release. diff --git a/changelog.d/10447.feature b/changelog.d/10447.feature deleted file mode 100644 index df8bb51167f7..000000000000 --- a/changelog.d/10447.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. diff --git a/changelog.d/10448.feature b/changelog.d/10448.feature deleted file mode 100644 index f6579e0ca85b..000000000000 --- a/changelog.d/10448.feature +++ /dev/null @@ -1 +0,0 @@ -Add `creation_ts` to list users admin API. 
\ No newline at end of file diff --git a/changelog.d/10450.misc b/changelog.d/10450.misc deleted file mode 100644 index aa646f0841c7..000000000000 --- a/changelog.d/10450.misc +++ /dev/null @@ -1 +0,0 @@ - Update type annotations to work with forthcoming Twisted 21.7.0 release. diff --git a/changelog.d/10451.misc b/changelog.d/10451.misc deleted file mode 100644 index e38f4b476dde..000000000000 --- a/changelog.d/10451.misc +++ /dev/null @@ -1 +0,0 @@ -Cancel redundant GHA workflows when a new commit is pushed. diff --git a/changelog.d/10453.doc b/changelog.d/10453.doc deleted file mode 100644 index 5d4db9bca29c..000000000000 --- a/changelog.d/10453.doc +++ /dev/null @@ -1 +0,0 @@ -Consolidate development documentation to `docs/development/`. diff --git a/changelog.d/10455.bugfix b/changelog.d/10455.bugfix deleted file mode 100644 index 23c74a3c8912..000000000000 --- a/changelog.d/10455.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `synapse_federation_server_oldest_inbound_pdu_in_staging` Prometheus metric to not report a max age of 51 years when the queue is empty. diff --git a/changelog.d/10463.misc b/changelog.d/10463.misc deleted file mode 100644 index d7b4d2222e10..000000000000 --- a/changelog.d/10463.misc +++ /dev/null @@ -1 +0,0 @@ -Disable `msc2716` Complement tests until Complement updates are merged. diff --git a/changelog.d/10464.doc b/changelog.d/10464.doc deleted file mode 100644 index 764fb9f65c23..000000000000 --- a/changelog.d/10464.doc +++ /dev/null @@ -1 +0,0 @@ -Add some developer docs to explain room DAG concepts like `outliers`, `state_groups`, `depth`, etc. diff --git a/changelog.d/10468.misc b/changelog.d/10468.misc deleted file mode 100644 index b9854bb4c16c..000000000000 --- a/changelog.d/10468.misc +++ /dev/null @@ -1 +0,0 @@ -Mitigate media repo XSS attacks on IE11 via the non-standard X-Content-Security-Policy header. diff --git a/changelog.d/10482.misc b/changelog.d/10482.misc deleted file mode 100644 index 4e9e2126e173..000000000000 --- a/changelog.d/10482.misc +++ /dev/null @@ -1 +0,0 @@ -Additional type hints in the state handler. diff --git a/changelog.d/10483.doc b/changelog.d/10483.doc deleted file mode 100644 index 0f699fafddba..000000000000 --- a/changelog.d/10483.doc +++ /dev/null @@ -1 +0,0 @@ -Document how to use Complement while developing a new Synapse feature. diff --git a/changelog.d/10488.misc b/changelog.d/10488.misc deleted file mode 100644 index a55502c16359..000000000000 --- a/changelog.d/10488.misc +++ /dev/null @@ -1 +0,0 @@ -Update syntax used to run complement tests. diff --git a/changelog.d/10489.feature b/changelog.d/10489.feature deleted file mode 100644 index df8bb51167f7..000000000000 --- a/changelog.d/10489.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. diff --git a/changelog.d/10490.misc b/changelog.d/10490.misc deleted file mode 100644 index 630c31adaeb9..000000000000 --- a/changelog.d/10490.misc +++ /dev/null @@ -1 +0,0 @@ -Fix up type annotations to work with Twisted 21.7. diff --git a/changelog.d/10491.misc b/changelog.d/10491.misc deleted file mode 100644 index 3867cf2682c1..000000000000 --- a/changelog.d/10491.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type annotations for `ObservableDeferred`. 
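The `10455` bugfix deleted just above ("not report a max age of 51 years when the queue is empty") describes a classic metrics pitfall: an "oldest item" gauge that treats an empty queue as timestamp 0, i.e. the 1970 Unix epoch, which in 2021 reads as roughly 51 years of lag. A sketch of the guard, assuming a gauge computed from received timestamps (Synapse's real metric is derived from a database query, which may differ):

```python
import time
from typing import Collection


def oldest_pdu_age_seconds(received_ts: Collection[float]) -> float:
    """Age of the oldest queued PDU, suitable for a Prometheus gauge.

    An empty queue must report 0: falling through to `now - 0` would
    export ~51 years (the seconds elapsed since the 1970 epoch) and
    swamp any sensible alerting threshold.
    """
    if not received_ts:
        return 0.0
    return time.time() - min(received_ts)
```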
diff --git a/changelog.d/10496.misc b/changelog.d/10496.misc deleted file mode 100644 index 6d0d3e539126..000000000000 --- a/changelog.d/10496.misc +++ /dev/null @@ -1 +0,0 @@ -Extend release script to also tag and create GitHub releases. diff --git a/changelog.d/10499.bugfix b/changelog.d/10499.bugfix deleted file mode 100644 index 6487af6c9691..000000000000 --- a/changelog.d/10499.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which caused an explicit assignment of power-level 0 to a user to be misinterpreted in rare circumstances. diff --git a/changelog.d/10500.misc b/changelog.d/10500.misc deleted file mode 100644 index dbaff57364d2..000000000000 --- a/changelog.d/10500.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which caused production debian packages to be incorrectly marked as 'prerelease'. diff --git a/changelog.d/10511.feature b/changelog.d/10511.feature deleted file mode 100644 index f1833b0bd726..000000000000 --- a/changelog.d/10511.feature +++ /dev/null @@ -1 +0,0 @@ -Allow setting transaction limit for database connections. diff --git a/changelog.d/10512.misc b/changelog.d/10512.misc deleted file mode 100644 index c012e89f4bbb..000000000000 --- a/changelog.d/10512.misc +++ /dev/null @@ -1 +0,0 @@ -Update the `tests-done` Github Actions status. diff --git a/changelog.d/10515.feature b/changelog.d/10515.feature deleted file mode 100644 index db277d9ecde0..000000000000 --- a/changelog.d/10515.feature +++ /dev/null @@ -1 +0,0 @@ -Add a buffered logging handler which periodically flushes itself. diff --git a/changelog.d/9918.feature b/changelog.d/9918.feature deleted file mode 100644 index 98f0a5089321..000000000000 --- a/changelog.d/9918.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 341c1ac99268..f0557c35ef87 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -matrix-synapse-py3 (1.39.0ubuntu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.40.0~rc1) stable; urgency=medium + [ Richard van der Hoff ] * Drop backwards-compatibility code that was required to support Ubuntu Xenial. - -- Richard van der Hoff Tue, 20 Jul 2021 00:10:03 +0100 + [ Synapse Packaging team ] + * New synapse release 1.40.0~rc1. + + -- Synapse Packaging team Tue, 03 Aug 2021 11:31:49 +0100 matrix-synapse-py3 (1.39.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index 5da6c924fcec..d6c176550897 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.39.0" +__version__ = "1.40.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From c80ec5d15386a8d3db03d0064b4e87d52d38dff2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Aug 2021 11:48:48 +0100 Subject: [PATCH 48/61] Fixup changelog --- CHANGES.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 34274cfe9ca8..8b78fe92f6a0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,22 +5,22 @@ Features -------- - Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. ([\#9918](https://github.com/matrix-org/synapse/issues/9918)) -- Make historical events discoverable from backfill for servers without any scrollback history (part of MSC2716). 
([\#10245](https://github.com/matrix-org/synapse/issues/10245)) +- Make historical events discoverable from backfill for servers without any scrollback history (part of [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716)). ([\#10245](https://github.com/matrix-org/synapse/issues/10245)) - Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. ([\#10254](https://github.com/matrix-org/synapse/issues/10254), [\#10447](https://github.com/matrix-org/synapse/issues/10447), [\#10489](https://github.com/matrix-org/synapse/issues/10489)) -- Initial support for MSC3244, Room version capabilities over the /capabilities API. ([\#10283](https://github.com/matrix-org/synapse/issues/10283)) +- Initial support for [MSC3244](https://github.com/matrix-org/matrix-doc/pull/3244), Room version capabilities over the /capabilities API. ([\#10283](https://github.com/matrix-org/synapse/issues/10283)) - Add a buffered logging handler which periodically flushes itself. ([\#10407](https://github.com/matrix-org/synapse/issues/10407), [\#10515](https://github.com/matrix-org/synapse/issues/10515)) - Add support for https connections to a proxy server. Contributed by @Bubu and @dklimpel. ([\#10411](https://github.com/matrix-org/synapse/issues/10411)) - Support for [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-doc/pull/2285). Contributed by @SimonBrandner. ([\#10413](https://github.com/matrix-org/synapse/issues/10413)) - Email notifications now state whether an invitation is to a room or a space. ([\#10426](https://github.com/matrix-org/synapse/issues/10426)) - Allow setting transaction limit for database connections. ([\#10440](https://github.com/matrix-org/synapse/issues/10440), [\#10511](https://github.com/matrix-org/synapse/issues/10511)) -- Add `creation_ts` to list users admin API. ([\#10448](https://github.com/matrix-org/synapse/issues/10448)) +- Add `creation_ts` to "list users" admin API. ([\#10448](https://github.com/matrix-org/synapse/issues/10448)) Bugfixes -------- - Improve character set detection in URL previews by supporting underscores (in addition to hyphens). Contributed by @srividyut. ([\#10410](https://github.com/matrix-org/synapse/issues/10410)) -- Fix events with floating outlier state being rejected over federation. ([\#10439](https://github.com/matrix-org/synapse/issues/10439)) +- Fix events being incorrectly rejected over federation if they reference auth events that the server needed to fetch. ([\#10439](https://github.com/matrix-org/synapse/issues/10439)) - Fix `synapse_federation_server_oldest_inbound_pdu_in_staging` Prometheus metric to not report a max age of 51 years when the queue is empty. ([\#10455](https://github.com/matrix-org/synapse/issues/10455)) - Fix a bug which caused an explicit assignment of power-level 0 to a user to be misinterpreted in rare circumstances. ([\#10499](https://github.com/matrix-org/synapse/issues/10499)) @@ -37,12 +37,12 @@ Improved Documentation Internal Changes ---------------- -- Prune inbound federation inbound queues for a room if they get too large. ([\#10390](https://github.com/matrix-org/synapse/issues/10390)) +- Prune inbound federation queues for a room if they get too large. ([\#10390](https://github.com/matrix-org/synapse/issues/10390)) - Add type hints to `synapse.federation.transport.client` module. 
([\#10408](https://github.com/matrix-org/synapse/issues/10408)) - Remove shebang line from module files. ([\#10415](https://github.com/matrix-org/synapse/issues/10415)) - Drop backwards-compatibility code that was required to support Ubuntu Xenial. ([\#10429](https://github.com/matrix-org/synapse/issues/10429)) - Use a docker image cache for the prerequisites for the debian package build. ([\#10431](https://github.com/matrix-org/synapse/issues/10431)) -- Connect historical chunks together with chunk events instead of a content field (MSC2716). ([\#10432](https://github.com/matrix-org/synapse/issues/10432)) +- Connect historical chunks together with chunk events instead of a content field ([MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716)). ([\#10432](https://github.com/matrix-org/synapse/issues/10432)) - Improve servlet type hints. ([\#10437](https://github.com/matrix-org/synapse/issues/10437), [\#10438](https://github.com/matrix-org/synapse/issues/10438)) - Replace usage of `or_ignore` in `simple_insert` with `simple_upsert` usage, to stop spamming postgres logs with spurious ERROR messages. ([\#10442](https://github.com/matrix-org/synapse/issues/10442)) - Update the `tests-done` Github Actions status. ([\#10444](https://github.com/matrix-org/synapse/issues/10444), [\#10512](https://github.com/matrix-org/synapse/issues/10512)) From da6cd82106636d4c8b5143d7c2839f11fb40fbd2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Aug 2021 12:11:26 +0100 Subject: [PATCH 49/61] Fixup changelog --- CHANGES.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8b78fe92f6a0..f2d694588618 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Features -------- - Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. ([\#9918](https://github.com/matrix-org/synapse/issues/9918)) -- Make historical events discoverable from backfill for servers without any scrollback history (part of [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716)). ([\#10245](https://github.com/matrix-org/synapse/issues/10245)) +- Add support for [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#10245](https://github.com/matrix-org/synapse/issues/10245), [\#10432](https://github.com/matrix-org/synapse/issues/10432), [\#10463](https://github.com/matrix-org/synapse/issues/10463)) - Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. ([\#10254](https://github.com/matrix-org/synapse/issues/10254), [\#10447](https://github.com/matrix-org/synapse/issues/10447), [\#10489](https://github.com/matrix-org/synapse/issues/10489)) - Initial support for [MSC3244](https://github.com/matrix-org/matrix-doc/pull/3244), Room version capabilities over the /capabilities API. ([\#10283](https://github.com/matrix-org/synapse/issues/10283)) - Add a buffered logging handler which periodically flushes itself. ([\#10407](https://github.com/matrix-org/synapse/issues/10407), [\#10515](https://github.com/matrix-org/synapse/issues/10515)) @@ -42,13 +42,11 @@ Internal Changes - Remove shebang line from module files. ([\#10415](https://github.com/matrix-org/synapse/issues/10415)) - Drop backwards-compatibility code that was required to support Ubuntu Xenial. 
([\#10429](https://github.com/matrix-org/synapse/issues/10429)) - Use a docker image cache for the prerequisites for the debian package build. ([\#10431](https://github.com/matrix-org/synapse/issues/10431)) -- Connect historical chunks together with chunk events instead of a content field ([MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716)). ([\#10432](https://github.com/matrix-org/synapse/issues/10432)) - Improve servlet type hints. ([\#10437](https://github.com/matrix-org/synapse/issues/10437), [\#10438](https://github.com/matrix-org/synapse/issues/10438)) - Replace usage of `or_ignore` in `simple_insert` with `simple_upsert` usage, to stop spamming postgres logs with spurious ERROR messages. ([\#10442](https://github.com/matrix-org/synapse/issues/10442)) - Update the `tests-done` Github Actions status. ([\#10444](https://github.com/matrix-org/synapse/issues/10444), [\#10512](https://github.com/matrix-org/synapse/issues/10512)) - Update type annotations to work with forthcoming Twisted 21.7.0 release. ([\#10446](https://github.com/matrix-org/synapse/issues/10446), [\#10450](https://github.com/matrix-org/synapse/issues/10450)) - Cancel redundant GHA workflows when a new commit is pushed. ([\#10451](https://github.com/matrix-org/synapse/issues/10451)) -- Disable `msc2716` Complement tests until Complement updates are merged. ([\#10463](https://github.com/matrix-org/synapse/issues/10463)) - Mitigate media repo XSS attacks on IE11 via the non-standard X-Content-Security-Policy header. ([\#10468](https://github.com/matrix-org/synapse/issues/10468)) - Additional type hints in the state handler. ([\#10482](https://github.com/matrix-org/synapse/issues/10482)) - Update syntax used to run complement tests. ([\#10488](https://github.com/matrix-org/synapse/issues/10488)) From 42225aa421efa9dd87fc63286f24f4697e4d2572 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Aug 2021 12:12:50 +0100 Subject: [PATCH 50/61] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f2d694588618..7ce28c4c182a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Features -------- - Add support for [MSC2033](https://github.com/matrix-org/matrix-doc/pull/2033): `device_id` on `/account/whoami`. ([\#9918](https://github.com/matrix-org/synapse/issues/9918)) -- Add support for [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#10245](https://github.com/matrix-org/synapse/issues/10245), [\#10432](https://github.com/matrix-org/synapse/issues/10432), [\#10463](https://github.com/matrix-org/synapse/issues/10463)) +- Update support for [MSC2716 - Incrementally importing history into existing rooms](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#10245](https://github.com/matrix-org/synapse/issues/10245), [\#10432](https://github.com/matrix-org/synapse/issues/10432), [\#10463](https://github.com/matrix-org/synapse/issues/10463)) - Update support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) to consider changes in the MSC around which servers can issue join events. 
([\#10254](https://github.com/matrix-org/synapse/issues/10254), [\#10447](https://github.com/matrix-org/synapse/issues/10447), [\#10489](https://github.com/matrix-org/synapse/issues/10489)) - Initial support for [MSC3244](https://github.com/matrix-org/matrix-doc/pull/3244), Room version capabilities over the /capabilities API. ([\#10283](https://github.com/matrix-org/synapse/issues/10283)) - Add a buffered logging handler which periodically flushes itself. ([\#10407](https://github.com/matrix-org/synapse/issues/10407), [\#10515](https://github.com/matrix-org/synapse/issues/10515)) From 6878e1065308caf0f79e380b4de1433ab1487a34 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Aug 2021 13:29:17 +0100 Subject: [PATCH 51/61] Fix release script URL (#10516) --- changelog.d/10516.misc | 1 + scripts-dev/release.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10516.misc diff --git a/changelog.d/10516.misc b/changelog.d/10516.misc new file mode 100644 index 000000000000..4d8c5e4805d6 --- /dev/null +++ b/changelog.d/10516.misc @@ -0,0 +1 @@ +Fix release script to open correct URL for the release. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index e864dc6ed53f..a339260c4371 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -305,7 +305,7 @@ def tag(gh_token: Optional[str]): ) # Open the release and the actions where we are building the assets. - click.launch(release.url) + click.launch(release.html_url) click.launch( f"https://github.com/matrix-org/synapse/actions?query=branch%3A{tag_name}" ) From 903db99ed552d06f0a9e0379e55e655c5761355b Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Tue, 3 Aug 2021 14:28:30 +0100 Subject: [PATCH 52/61] Fix PeriodicallyFlushingMemoryHandler inhibiting application shutdown (#10517) --- changelog.d/10517.bugfix | 1 + synapse/logging/handlers.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/10517.bugfix diff --git a/changelog.d/10517.bugfix b/changelog.d/10517.bugfix new file mode 100644 index 000000000000..5b044bb34dd8 --- /dev/null +++ b/changelog.d/10517.bugfix @@ -0,0 +1 @@ +Fix the `PeriodicallyFlushingMemoryHandler` inhibiting application shutdown because of its background thread. diff --git a/synapse/logging/handlers.py b/synapse/logging/handlers.py index a6c212f300fd..af5fc407a8e5 100644 --- a/synapse/logging/handlers.py +++ b/synapse/logging/handlers.py @@ -45,6 +45,7 @@ def __init__( self._flushing_thread: Thread = Thread( name="PeriodicallyFlushingMemoryHandler flushing thread", target=self._flush_periodically, + daemon=True, ) self._flushing_thread.start() From e8a3e8140291be0548ad80d0e942a9aaae6c2434 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Aug 2021 16:13:24 +0200 Subject: [PATCH 53/61] Don't fail on empty bodies when sending out read receipts (#10531) Fixes a bug introduced in rc1 that would cause Synapse to 400 on read receipts requests with empty bodies. 
Broken in #10413 --- changelog.d/10531.bugfix | 1 + synapse/rest/client/v2_alpha/receipts.py | 2 +- tests/rest/client/v2_alpha/test_sync.py | 12 ++++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10531.bugfix diff --git a/changelog.d/10531.bugfix b/changelog.d/10531.bugfix new file mode 100644 index 000000000000..aaa921ee91d1 --- /dev/null +++ b/changelog.d/10531.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.40.0rc1 that would cause Synapse to respond with an error when clients would update their read receipts. diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 4b98979b477d..d9ab836cd892 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -43,7 +43,7 @@ async def on_POST(self, request, room_id, receipt_type, event_id): if receipt_type != "m.read": raise SynapseError(400, "Receipt type must be 'm.read'") - body = parse_json_object_from_request(request) + body = parse_json_object_from_request(request, allow_empty_body=True) hidden = body.get(ReadReceiptEventFields.MSC2285_HIDDEN, False) if not isinstance(hidden, bool): diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index f6ae9ae18110..15748ed4fd9f 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -418,6 +418,18 @@ def test_hidden_read_receipts(self): # Test that the first user can't see the other user's hidden read receipt self.assertEqual(self._get_read_receipt(), None) + def test_read_receipt_with_empty_body(self): + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt for this message with an empty body + channel = self.make_request( + "POST", + "/rooms/%s/receipt/m.read/%s" % (self.room_id, res["event_id"]), + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + def _get_read_receipt(self): """Syncs and returns the read receipt.""" From 02c2f631aed5cc2ef4bcaea25b443b625616f816 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Aug 2021 17:09:27 +0100 Subject: [PATCH 54/61] 1.40.0rc2 --- CHANGES.md | 16 ++++++++++++++++ changelog.d/10516.misc | 1 - changelog.d/10517.bugfix | 1 - changelog.d/10531.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 23 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/10516.misc delete mode 100644 changelog.d/10517.bugfix delete mode 100644 changelog.d/10531.bugfix diff --git a/CHANGES.md b/CHANGES.md index 7ce28c4c182a..75031986d312 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +Synapse 1.40.0rc2 (2021-08-04) +============================== + +Bugfixes +-------- + +- Fix the `PeriodicallyFlushingMemoryHandler` inhibiting application shutdown because of its background thread. ([\#10517](https://github.com/matrix-org/synapse/issues/10517)) +- Fix a bug introduced in Synapse v1.40.0rc1 that would cause Synapse to respond with an error when clients would update their read receipts. ([\#10531](https://github.com/matrix-org/synapse/issues/10531)) + + +Internal Changes +---------------- + +- Fix release script to open correct URL for the release. 
([\#10516](https://github.com/matrix-org/synapse/issues/10516)) + + Synapse 1.40.0rc1 (2021-08-03) ============================== diff --git a/changelog.d/10516.misc b/changelog.d/10516.misc deleted file mode 100644 index 4d8c5e4805d6..000000000000 --- a/changelog.d/10516.misc +++ /dev/null @@ -1 +0,0 @@ -Fix release script to open correct URL for the release. diff --git a/changelog.d/10517.bugfix b/changelog.d/10517.bugfix deleted file mode 100644 index 5b044bb34dd8..000000000000 --- a/changelog.d/10517.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the `PeriodicallyFlushingMemoryHandler` inhibiting application shutdown because of its background thread. diff --git a/changelog.d/10531.bugfix b/changelog.d/10531.bugfix deleted file mode 100644 index aaa921ee91d1..000000000000 --- a/changelog.d/10531.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.40.0rc1 that would cause Synapse to respond with an error when clients would update their read receipts. diff --git a/debian/changelog b/debian/changelog index f0557c35ef87..c523101f9a39 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.40.0~rc2) stable; urgency=medium + + * New synapse release 1.40.0~rc2. + + -- Synapse Packaging team Wed, 04 Aug 2021 17:08:55 +0100 + matrix-synapse-py3 (1.40.0~rc1) stable; urgency=medium [ Richard van der Hoff ] diff --git a/synapse/__init__.py b/synapse/__init__.py index d6c176550897..da5246353162 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.40.0rc1" +__version__ = "1.40.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 167335bd3da0fcfa0b2ba5ca3dc9d2f7c953c1eb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Aug 2021 17:11:23 +0100 Subject: [PATCH 55/61] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 75031986d312..052ab49599f2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix the `PeriodicallyFlushingMemoryHandler` inhibiting application shutdown because of its background thread. ([\#10517](https://github.com/matrix-org/synapse/issues/10517)) -- Fix a bug introduced in Synapse v1.40.0rc1 that would cause Synapse to respond with an error when clients would update their read receipts. ([\#10531](https://github.com/matrix-org/synapse/issues/10531)) +- Fix a bug introduced in Synapse v1.40.0rc1 that could cause Synapse to respond with an error when clients would update their a receipts. ([\#10531](https://github.com/matrix-org/synapse/issues/10531)) Internal Changes From cc1cb0ab54654b1a1d938ae464a3471dd1b588a5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Aug 2021 17:14:55 +0100 Subject: [PATCH 56/61] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 052ab49599f2..7d3bdebbde4f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix the `PeriodicallyFlushingMemoryHandler` inhibiting application shutdown because of its background thread. ([\#10517](https://github.com/matrix-org/synapse/issues/10517)) -- Fix a bug introduced in Synapse v1.40.0rc1 that could cause Synapse to respond with an error when clients would update their a receipts. 
([\#10531](https://github.com/matrix-org/synapse/issues/10531)) +- Fix a bug introduced in Synapse v1.40.0rc1 that could cause Synapse to respond with an error when clients would update read receipts. ([\#10531](https://github.com/matrix-org/synapse/issues/10531)) Internal Changes From 05111f8f26252cc936fce685846322289039128d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Aug 2021 17:16:08 +0100 Subject: [PATCH 57/61] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 7d3bdebbde4f..62ea684e586a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -11,7 +11,7 @@ Bugfixes Internal Changes ---------------- -- Fix release script to open correct URL for the release. ([\#10516](https://github.com/matrix-org/synapse/issues/10516)) +- Fix release script to open the correct URL for the release. ([\#10516](https://github.com/matrix-org/synapse/issues/10516)) Synapse 1.40.0rc1 (2021-08-03) From 457853100240fc5e015e10a62ecffdd799128b0c Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 5 Aug 2021 20:00:44 +0200 Subject: [PATCH 58/61] fix broken links in `upgrade.md` (#10543) Signed-off-by: Dirk Klimpel dirk@klimpel.org --- changelog.d/10543.doc | 1 + docs/upgrade.md | 51 +++++++++++++++++++------------------------ 2 files changed, 24 insertions(+), 28 deletions(-) create mode 100644 changelog.d/10543.doc diff --git a/changelog.d/10543.doc b/changelog.d/10543.doc new file mode 100644 index 000000000000..6c06722eb447 --- /dev/null +++ b/changelog.d/10543.doc @@ -0,0 +1 @@ +Fix broken links in `upgrade.md`. Contributed by @dklimpel. diff --git a/docs/upgrade.md b/docs/upgrade.md index c8f4a2c17172..ce9167e6de13 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -142,9 +142,9 @@ SQLite databases are unaffected by this change. The current spam checker interface is deprecated in favour of a new generic modules system. Authors of spam checker modules can refer to [this -documentation](https://matrix-org.github.io/synapse/develop/modules.html#porting-an-existing-module-that-uses-the-old-interface) +documentation](modules.md#porting-an-existing-module-that-uses-the-old-interface) to update their modules. Synapse administrators can refer to [this -documentation](https://matrix-org.github.io/synapse/develop/modules.html#using-modules) +documentation](modules.md#using-modules) to update their configuration once the modules they are using have been updated. We plan to remove support for the current spam checker interface in August 2021. @@ -217,8 +217,7 @@ Instructions for doing so are provided ## Dropping support for old Python, Postgres and SQLite versions -In line with our [deprecation -policy](https://github.com/matrix-org/synapse/blob/release-v1.32.0/docs/deprecation_policy.md), +In line with our [deprecation policy](deprecation_policy.md), we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream. @@ -231,8 +230,7 @@ The deprecated v1 "list accounts" admin API (`GET /_synapse/admin/v1/users/`) has been removed in this version. -The [v2 list accounts -API](https://github.com/matrix-org/synapse/blob/master/docs/admin_api/user_admin_api.rst#list-accounts) +The [v2 list accounts API](admin_api/user_admin_api.md#list-accounts) has been available since Synapse 1.7.0 (2019-12-13), and is accessible under `GET /_synapse/admin/v2/users`. @@ -267,7 +265,7 @@ by the client. 
Synapse also requires the [Host]{.title-ref} header to be preserved. -See the [reverse proxy documentation](../reverse_proxy.md), where the +See the [reverse proxy documentation](reverse_proxy.md), where the example configurations have been updated to show how to set these headers. @@ -286,7 +284,7 @@ identity providers: `[synapse public baseurl]/_synapse/client/oidc/callback` to the list of permitted "redirect URIs" at the identity provider. - See the [OpenID docs](../openid.md) for more information on setting + See the [OpenID docs](openid.md) for more information on setting up OpenID Connect. - If your server is configured for single sign-on via a SAML2 identity @@ -486,8 +484,7 @@ lock down external access to the Admin API endpoints. This release deprecates use of the `structured: true` logging configuration for structured logging. If your logging configuration contains `structured: true` then it should be modified based on the -[structured logging -documentation](../structured_logging.md). +[structured logging documentation](structured_logging.md). The `structured` and `drains` logging options are now deprecated and should be replaced by standard logging configuration of `handlers` and @@ -517,14 +514,13 @@ acts the same as the `http_client` argument previously passed to ## Forwarding `/_synapse/client` through your reverse proxy -The [reverse proxy -documentation](https://github.com/matrix-org/synapse/blob/develop/docs/reverse_proxy.md) +The [reverse proxy documentation](reverse_proxy.md) has been updated to include reverse proxy directives for `/_synapse/client/*` endpoints. As the user password reset flow now uses endpoints under this prefix, **you must update your reverse proxy configurations for user password reset to work**. -Additionally, note that the [Synapse worker documentation](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md) has been updated to +Additionally, note that the [Synapse worker documentation](workers.md) has been updated to : state that the `/_synapse/client/password_reset/email/submit_token` endpoint can be handled @@ -588,7 +584,7 @@ updated. When setting up worker processes, we now recommend the use of a Redis server for replication. **The old direct TCP connection method is deprecated and will be removed in a future release.** See -[workers](../workers.md) for more details. +[workers](workers.md) for more details. # Upgrading to v1.14.0 @@ -720,8 +716,7 @@ participating in many rooms. omitting the `CONCURRENTLY` keyword. Note however that this operation may in itself cause Synapse to stop running for some time. Synapse admins are reminded that [SQLite is not recommended for use - outside a test - environment](https://github.com/matrix-org/synapse/blob/master/README.rst#using-postgresql). + outside a test environment](postgres.md). 3. Once the index has been created, the `SELECT` query in step 1 above should complete quickly. It is therefore safe to upgrade to Synapse @@ -739,7 +734,7 @@ participating in many rooms. Synapse will now log a warning on start up if used with a PostgreSQL database that has a non-recommended locale set. -See [Postgres](../postgres.md) for details. +See [Postgres](postgres.md) for details. # Upgrading to v1.8.0 @@ -856,8 +851,8 @@ section headed `email`, and be sure to have at least the You may also need to set `smtp_user`, `smtp_pass`, and `require_transport_security`. -See the [sample configuration file](docs/sample_config.yaml) for more -details on these settings. 
+See the [sample configuration file](usage/configuration/homeserver_sample_config.md) +for more details on these settings. #### Delegate email to an identity server @@ -959,7 +954,7 @@ back to v1.3.1, subject to the following: Some counter metrics have been renamed, with the old names deprecated. See [the metrics -documentation](../metrics-howto.md#renaming-of-metrics--deprecation-of-old-names-in-12) +documentation](metrics-howto.md#renaming-of-metrics--deprecation-of-old-names-in-12) for details. # Upgrading to v1.1.0 @@ -995,7 +990,7 @@ more details on upgrading your database. Synapse v1.0 is the first release to enforce validation of TLS certificates for the federation API. It is therefore essential that your certificates are correctly configured. See the -[FAQ](../MSC1711_certificates_FAQ.md) for more information. +[FAQ](MSC1711_certificates_FAQ.md) for more information. Note, v1.0 installations will also no longer be able to federate with servers that have not correctly configured their certificates. @@ -1010,8 +1005,8 @@ ways:- - Configure a whitelist of server domains to trust via `federation_certificate_verification_whitelist`. -See the [sample configuration file](docs/sample_config.yaml) for more -details on these settings. +See the [sample configuration file](usage/configuration/homeserver_sample_config.md) +for more details on these settings. ## Email @@ -1036,8 +1031,8 @@ If you are absolutely certain that you wish to continue using an identity server for password resets, set `trust_identity_server_for_password_resets` to `true`. -See the [sample configuration file](docs/sample_config.yaml) for more -details on these settings. +See the [sample configuration file](usage/configuration/homeserver_sample_config.md) +for more details on these settings. ## New email templates @@ -1057,11 +1052,11 @@ sent to them. Please be aware that, before Synapse v1.0 is released around March 2019, you will need to replace any self-signed certificates with those -verified by a root CA. Information on how to do so can be found at [the -ACME docs](../ACME.md). +verified by a root CA. Information on how to do so can be found at the +ACME docs. For more information on configuring TLS certificates see the -[FAQ](../MSC1711_certificates_FAQ.md). +[FAQ](MSC1711_certificates_FAQ.md). # Upgrading to v0.34.0 From 0c246dd4a09e21e677934d0d83efa573c9127a6f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 9 Aug 2021 04:46:39 -0400 Subject: [PATCH 59/61] Support MSC3289: Room version 8 (#10449) This adds support for MSC3289: room version 8. This is room version 7 + MSC3083. --- changelog.d/10449.bugfix | 1 + changelog.d/10449.feature | 1 + synapse/api/constants.py | 2 +- synapse/api/room_versions.py | 28 ++++++++++++++-------------- synapse/event_auth.py | 5 +---- synapse/handlers/event_auth.py | 2 +- tests/events/test_utils.py | 2 +- tests/handlers/test_space_summary.py | 12 ++++++------ tests/test_event_auth.py | 18 +++++++++--------- 9 files changed, 35 insertions(+), 36 deletions(-) create mode 100644 changelog.d/10449.bugfix create mode 100644 changelog.d/10449.feature diff --git a/changelog.d/10449.bugfix b/changelog.d/10449.bugfix new file mode 100644 index 000000000000..c5e23ba0191a --- /dev/null +++ b/changelog.d/10449.bugfix @@ -0,0 +1 @@ +Mark the experimental room version from [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) as unstable. 
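Before the code of PATCH 59/61 below: the change itself is mechanical (the unstable `org.matrix.msc3083.v2` room version becomes stable version `8`, and `JoinRules.MSC3083_RESTRICTED` is renamed to `JoinRules.RESTRICTED`), but the underlying pattern is worth spelling out: callers gate behaviour on per-version capability flags such as `msc3083_join_rules`, never on version strings. A short sketch of how downstream code consumes these names once the patch is applied, runnable only inside a post-patch Synapse checkout; the `allow` shape mirrors what the tests below construct, and the room ID is hypothetical:

```python
from synapse.api.constants import JoinRules, RestrictedJoinRuleTypes
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions

# Gate behaviour on the capability flag, not on the version identifier:
assert RoomVersions.V8.msc3083_join_rules

# Which known room versions understand restricted joins after this patch:
restricted_capable = [
    v.identifier
    for v in KNOWN_ROOM_VERSIONS.values()
    if v.msc3083_join_rules
]
# e.g. ["8"], now that org.matrix.msc3083.v2 has been retired

# The m.room.join_rules content such a room carries (same shape the
# space-summary tests below build):
join_rules_content = {
    "join_rule": JoinRules.RESTRICTED,
    "allow": [
        {
            "type": RestrictedJoinRuleTypes.ROOM_MEMBERSHIP,
            "room_id": "!space:example.org",
        }
    ],
}
```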
diff --git a/changelog.d/10449.feature b/changelog.d/10449.feature new file mode 100644 index 000000000000..a45a17cb28fd --- /dev/null +++ b/changelog.d/10449.feature @@ -0,0 +1 @@ +Support [MSC3289: room version 8](https://github.com/matrix-org/matrix-doc/pull/3289). diff --git a/synapse/api/constants.py b/synapse/api/constants.py index a986fdb47a6e..e0e24fddac35 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -62,7 +62,7 @@ class JoinRules: INVITE = "invite" PRIVATE = "private" # As defined for MSC3083. - MSC3083_RESTRICTED = "restricted" + RESTRICTED = "restricted" class RestrictedJoinRuleTypes: diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index bc678efe4973..f32a40ba4ae6 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -177,9 +177,9 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, ) - MSC3083 = RoomVersion( - "org.matrix.msc3083.v2", - RoomDisposition.UNSTABLE, + V7 = RoomVersion( + "7", + RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, enforce_key_validity=True, @@ -187,13 +187,13 @@ class RoomVersions: strict_canonicaljson=True, limit_notifications_power_levels=True, msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc2403_knocking=False, + msc3083_join_rules=False, + msc2403_knocking=True, msc2716_historical=False, ) - V7 = RoomVersion( - "7", - RoomDisposition.STABLE, + MSC2716 = RoomVersion( + "org.matrix.msc2716", + RoomDisposition.UNSTABLE, EventFormatVersions.V3, StateResolutionVersions.V2, enforce_key_validity=True, @@ -203,10 +203,10 @@ class RoomVersions: msc2176_redaction_rules=False, msc3083_join_rules=False, msc2403_knocking=True, - msc2716_historical=False, + msc2716_historical=True, ) - MSC2716 = RoomVersion( - "org.matrix.msc2716", + V8 = RoomVersion( + "8", RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, @@ -215,9 +215,9 @@ class RoomVersions: strict_canonicaljson=True, limit_notifications_power_levels=True, msc2176_redaction_rules=False, - msc3083_join_rules=False, + msc3083_join_rules=True, msc2403_knocking=True, - msc2716_historical=True, + msc2716_historical=False, ) @@ -231,9 +231,9 @@ class RoomVersions: RoomVersions.V5, RoomVersions.V6, RoomVersions.MSC2176, - RoomVersions.MSC3083, RoomVersions.V7, RoomVersions.MSC2716, + RoomVersions.V8, ) } diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 4c92e9a2d40f..c3a0c10499d9 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -370,10 +370,7 @@ def _is_membership_change_allowed( raise AuthError(403, "You are banned from this room") elif join_rule == JoinRules.PUBLIC: pass - elif ( - room_version.msc3083_join_rules - and join_rule == JoinRules.MSC3083_RESTRICTED - ): + elif room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED: # This is the same as public, but the event must contain a reference # to the server who authorised the join. If the event does not contain # the proper content it is rejected. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 53fac1f8a3a4..e2410e482f8e 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -240,7 +240,7 @@ async def has_restricted_join_rules( # If the join rule is not restricted, this doesn't apply. 
join_rules_event = await self._store.get_event(join_rules_event_id) - return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED + return join_rules_event.content.get("join_rule") == JoinRules.RESTRICTED async def get_rooms_that_allow_join( self, state_ids: StateMap[str] diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index e2a5fc018cdf..7a826c086edd 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -341,7 +341,7 @@ def test_join_rules(self): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC3083, + room_version=RoomVersions.V8, ) diff --git a/tests/handlers/test_space_summary.py b/tests/handlers/test_space_summary.py index 3f73ad7f9478..01975c13d4fc 100644 --- a/tests/handlers/test_space_summary.py +++ b/tests/handlers/test_space_summary.py @@ -231,13 +231,13 @@ def test_filtering(self): invited_room = self._create_room_with_join_rule(JoinRules.INVITE) self.helper.invite(invited_room, targ=user2, tok=self.token) restricted_room = self._create_room_with_join_rule( - JoinRules.MSC3083_RESTRICTED, - room_version=RoomVersions.MSC3083.identifier, + JoinRules.RESTRICTED, + room_version=RoomVersions.V8.identifier, allow=[], ) restricted_accessible_room = self._create_room_with_join_rule( - JoinRules.MSC3083_RESTRICTED, - room_version=RoomVersions.MSC3083.identifier, + JoinRules.RESTRICTED, + room_version=RoomVersions.V8.identifier, allow=[ { "type": RestrictedJoinRuleTypes.ROOM_MEMBERSHIP, @@ -459,13 +459,13 @@ async def summarize_remote_room( { "room_id": restricted_room, "world_readable": False, - "join_rules": JoinRules.MSC3083_RESTRICTED, + "join_rules": JoinRules.RESTRICTED, "allowed_spaces": [], }, { "room_id": restricted_accessible_room, "world_readable": False, - "join_rules": JoinRules.MSC3083_RESTRICTED, + "join_rules": JoinRules.RESTRICTED, "allowed_spaces": [self.room], }, { diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e5550aec4d2a..6ebd01bcbe78 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -384,7 +384,7 @@ def test_join_rules_msc3083_restricted(self): }, ) event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, authorised_join_event, auth_events, do_sig_check=False, @@ -400,7 +400,7 @@ def test_join_rules_msc3083_restricted(self): "@inviter:foo.test" ) event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, _join_event( pleb, additional_content={ @@ -414,7 +414,7 @@ def test_join_rules_msc3083_restricted(self): # A join which is missing an authorised server is rejected. with self.assertRaises(AuthError): event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, _join_event(pleb), auth_events, do_sig_check=False, @@ -427,7 +427,7 @@ def test_join_rules_msc3083_restricted(self): ) with self.assertRaises(AuthError): event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, _join_event( pleb, additional_content={ @@ -442,7 +442,7 @@ def test_join_rules_msc3083_restricted(self): # *would* be valid, but is sent be a different user.) with self.assertRaises(AuthError): event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, _member_event( pleb, "join", @@ -459,7 +459,7 @@ def test_join_rules_msc3083_restricted(self): auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") with self.assertRaises(AuthError): event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, authorised_join_event, auth_events, do_sig_check=False, @@ -468,7 +468,7 @@ def test_join_rules_msc3083_restricted(self): # A user who left can re-join. 
auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, authorised_join_event, auth_events, do_sig_check=False, @@ -478,7 +478,7 @@ def test_join_rules_msc3083_restricted(self): # be authorised since the user is already joined.) auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, _join_event(pleb), auth_events, do_sig_check=False, @@ -490,7 +490,7 @@ def test_join_rules_msc3083_restricted(self): pleb, "invite", sender=creator ) event_auth.check( - RoomVersions.MSC3083, + RoomVersions.V8, _join_event(pleb), auth_events, do_sig_check=False, From ad35b7739e72fe198fa78fa4279f58cacfc9fa37 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 9 Aug 2021 13:41:29 +0100 Subject: [PATCH 60/61] 1.40.0rc3 --- CHANGES.md | 21 +++++++++++++++++++++ changelog.d/10449.bugfix | 1 - changelog.d/10449.feature | 1 - changelog.d/10543.doc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 28 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/10449.bugfix delete mode 100644 changelog.d/10449.feature delete mode 100644 changelog.d/10543.doc diff --git a/CHANGES.md b/CHANGES.md index 62ea684e586a..b04abbeb4d7d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.40.0rc3 (2021-08-09) +============================== + +Features +-------- + +- Support [MSC3289: room version 8](https://github.com/matrix-org/matrix-doc/pull/3289). ([\#10449](https://github.com/matrix-org/synapse/issues/10449)) + + +Bugfixes +-------- + +- Mark the experimental room version from [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) as unstable. ([\#10449](https://github.com/matrix-org/synapse/issues/10449)) + + +Improved Documentation +---------------------- + +- Fix broken links in `upgrade.md`. Contributed by @dklimpel. ([\#10543](https://github.com/matrix-org/synapse/issues/10543)) + + Synapse 1.40.0rc2 (2021-08-04) ============================== diff --git a/changelog.d/10449.bugfix b/changelog.d/10449.bugfix deleted file mode 100644 index c5e23ba0191a..000000000000 --- a/changelog.d/10449.bugfix +++ /dev/null @@ -1 +0,0 @@ -Mark the experimental room version from [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) as unstable. diff --git a/changelog.d/10449.feature b/changelog.d/10449.feature deleted file mode 100644 index a45a17cb28fd..000000000000 --- a/changelog.d/10449.feature +++ /dev/null @@ -1 +0,0 @@ -Support [MSC3289: room version 8](https://github.com/matrix-org/matrix-doc/pull/3289). diff --git a/changelog.d/10543.doc b/changelog.d/10543.doc deleted file mode 100644 index 6c06722eb447..000000000000 --- a/changelog.d/10543.doc +++ /dev/null @@ -1 +0,0 @@ -Fix broken links in `upgrade.md`. Contributed by @dklimpel. diff --git a/debian/changelog b/debian/changelog index c523101f9a39..7b44341bc6cb 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.40.0~rc3) stable; urgency=medium + + * New synapse release 1.40.0~rc3. + + -- Synapse Packaging team Mon, 09 Aug 2021 13:41:08 +0100 + matrix-synapse-py3 (1.40.0~rc2) stable; urgency=medium * New synapse release 1.40.0~rc2. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index da5246353162..5cca899f7d8b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.40.0rc2" +__version__ = "1.40.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 9f7c038272318bab09535e85e6bb4345ed2f1368 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Aug 2021 13:50:58 +0100 Subject: [PATCH 61/61] 1.40.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index b04abbeb4d7d..0e5e052951a9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.40.0 (2021-08-10) +=========================== + +No significant changes. + + Synapse 1.40.0rc3 (2021-08-09) ============================== diff --git a/debian/changelog b/debian/changelog index 7b44341bc6cb..d3da448b0fc6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.40.0) stable; urgency=medium + + * New synapse release 1.40.0. + + -- Synapse Packaging team Tue, 10 Aug 2021 13:50:48 +0100 + matrix-synapse-py3 (1.40.0~rc3) stable; urgency=medium * New synapse release 1.40.0~rc3. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5cca899f7d8b..919293cd80c5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.40.0rc3" +__version__ = "1.40.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when
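A closing note on the version strings in these last two patches: `1.40.0rc3` (PEP 440, in `synapse/__init__.py`) and `1.40.0~rc3` (Debian, in `debian/changelog`) are two spellings of the same pre-release, and both schemes deliberately sort the candidate before the final `1.40.0`. In Debian's ordering, `~` compares lower than the empty string; on the Python side, the `packaging` library demonstrates the PEP 440 rule:

```python
from packaging.version import Version

# Release candidates order strictly before the final release, so the
# rc3 -> 1.40.0 bump needs no epoch or other version trickery:
assert Version("1.40.0rc3") < Version("1.40.0")
assert Version("1.40.0rc2") < Version("1.40.0rc3")
```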