12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866 |
- # -*- coding: utf-8 -*-
- # Copyright 2015, 2016 OpenMarket Ltd
- # Copyright 2018 New Vector Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import logging
- import six
- from six import iteritems
- from canonicaljson import json
- from prometheus_client import Counter
- from twisted.internet import defer
- from twisted.internet.abstract import isIPAddress
- from twisted.python import failure
- from synapse.api.constants import EventTypes, Membership
- from synapse.api.errors import (
- AuthError,
- Codes,
- FederationError,
- IncompatibleRoomVersionError,
- NotFoundError,
- SynapseError,
- UnsupportedRoomVersionError,
- )
- from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
- from synapse.crypto.event_signing import compute_event_signature
- from synapse.events import room_version_to_event_format
- from synapse.federation.federation_base import FederationBase, event_from_pdu_json
- from synapse.federation.persistence import TransactionActions
- from synapse.federation.units import Edu, Transaction
- from synapse.http.endpoint import parse_server_name
- from synapse.logging.context import nested_logging_context
- from synapse.logging.utils import log_function
- from synapse.replication.http.federation import (
- ReplicationFederationSendEduRestServlet,
- ReplicationGetQueryRestServlet,
- )
- from synapse.types import get_domain_from_id
- from synapse.util import glob_to_regex
- from synapse.util.async_helpers import Linearizer, concurrently_execute
- from synapse.util.caches.response_cache import ResponseCache
# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

# Prometheus counters for incoming federation traffic: PDUs (events), EDUs
# (ephemeral data), and queries, the latter labelled by query type.
received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

received_edus_counter = Counter("synapse_federation_server_received_edus", "")

received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"]
)
class FederationServer(FederationBase):
    """Handles incoming federation requests from remote homeservers.

    Each ``on_*`` method corresponds to an incoming federation API request;
    most check the room's server ACL and then delegate to the federation
    handler and/or storage layer.
    """

    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler

        # Linearizers to stop remote servers hammering us with concurrent
        # requests for the same (origin, room) / (origin, transaction).
        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        # Tracks which transactions we have already responded to, for dedup.
        self.transaction_actions = TransactionActions(self.store)

        self.registry = hs.get_federation_registry()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        """Handle a backfill request: return up to `limit` PDUs for the room.

        Checks the room's server ACL first.

        Returns:
            Deferred[(int, dict)]: HTTP status code and a transaction dict
                containing the PDUs.
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            pdus = yield self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )

            res = self._transaction_from_pdus(pdus).get_dict()

        return (200, res)

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, origin, transaction_data):
        """Entry point for an incoming /send/ transaction.

        Args:
            origin (str): server which sent the transaction
            transaction_data (dict): the body of the transaction

        Returns:
            Deferred[(int, object)]: HTTP response code and body
        """
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()

        transaction = Transaction(**transaction_data)

        if not transaction.transaction_id:
            raise Exception("Transaction missing transaction_id")

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        # use a linearizer to ensure that we don't process the same transaction
        # multiple times in parallel.
        with (
            yield self._transaction_linearizer.queue(
                (origin, transaction.transaction_id)
            )
        ):
            result = yield self._handle_incoming_transaction(
                origin, transaction, request_time
            )

        return result

    @defer.inlineCallbacks
    def _handle_incoming_transaction(self, origin, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin (unicode): the server making the request
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        # Dedup: if we've already processed this transaction, replay the
        # stored response rather than processing the PDUs again.
        response = yield self.transaction_actions.have_responded(origin, transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id,
            )
            return response

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # Reject if PDU count > 50 and EDU count > 100
        if len(transaction.pdus) > 50 or (
            hasattr(transaction, "edus") and len(transaction.edus) > 100
        ):
            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            yield self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return (400, response)

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)

        # Group the PDUs by room so that rooms can be processed in parallel,
        # discarding any PDU we can't make sense of.
        pdus_by_room = {}

        for p in transaction.pdus:
            # Convert a relative "age" into an absolute "age_ts" based on when
            # the request arrived.
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = yield self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue

            try:
                format_ver = room_version_to_event_format(room_version)
            except UnsupportedRoomVersionError:
                # this can happen if support for a given room version is withdrawn,
                # so that we still get events for said room.
                logger.info(
                    "Ignoring PDU for room %s with unknown version %s",
                    room_id,
                    room_version,
                )
                continue

            event = event_from_pdu_json(p, format_ver)
            pdus_by_room.setdefault(room_id, []).append(event)

        # event_id -> {} on success, or {"error": ...} on failure; sent back
        # to the origin in the response body.
        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                yield self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                # The sender is banned by the room's ACL: reject every PDU for
                # this room, but carry on with the rest of the transaction.
                logger.warn("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with nested_logging_context(event_id):
                    try:
                        yield self._handle_received_pdu(origin, pdu)
                        pdu_results[event_id] = {}
                    except FederationError as e:
                        logger.warn("Error handling PDU %s: %s", event_id, e)
                        pdu_results[event_id] = {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        pdu_results[event_id] = {"error": str(e)}
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),
                        )

        yield concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        # EDUs are handled after all the PDUs, sequentially.
        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(origin, edu.edu_type, edu.content)

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        # Record the response so that a retransmitted transaction gets the
        # same answer without being reprocessed.
        yield self.transaction_actions.set_response(origin, transaction, 200, response)
        return (200, response)

    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        """Dispatch a received EDU to the federation handler registry."""
        received_edus_counter.inc()
        yield self.registry.on_edu(edu_type, origin, content)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        """Handle a request for the full room state at a given event.

        Returns:
            Deferred[(int, dict)]: 200 and a dict with "pdus" and
                "auth_chain" lists.
        """
        if not event_id:
            raise NotImplementedError("Specify an event")

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (yield self._server_linearizer.queue((origin, room_id))):
            resp = yield self._state_resp_cache.wrap(
                (room_id, event_id),
                self._on_context_state_request_compute,
                room_id,
                event_id,
            )

        return (200, resp)

    @defer.inlineCallbacks
    def on_state_ids_request(self, origin, room_id, event_id):
        """Like on_context_state_request, but returns only event IDs."""
        if not event_id:
            raise NotImplementedError("Specify an event")

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)

        return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})

    @defer.inlineCallbacks
    def _on_context_state_request_compute(self, room_id, event_id):
        """Compute the (cacheable) body of a state request response."""
        pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
        auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])

        for event in auth_chain:
            # We sign these again because there was a bug where we
            # incorrectly signed things the first time round
            if self.hs.is_mine_id(event.event_id):
                event.signatures.update(
                    compute_event_signature(
                        event.get_pdu_json(),
                        self.hs.hostname,
                        self.hs.config.signing_key[0],
                    )
                )

        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }

    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        """Handle a request for a single PDU; 404 if we don't have it."""
        pdu = yield self.handler.get_persisted_pdu(origin, event_id)

        if pdu:
            return (200, self._transaction_from_pdus([pdu]).get_dict())
        else:
            return (404, "")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        """Dispatch an incoming federation query to the registry."""
        received_queries_counter.labels(query_type).inc()
        resp = yield self.registry.on_query(query_type, args)
        return (200, resp)

    @defer.inlineCallbacks
    def on_make_join_request(self, origin, room_id, user_id, supported_versions):
        """Build a join event template for a remote user joining this room.

        Raises:
            IncompatibleRoomVersionError: if the requesting server does not
                support the room's version.
        """
        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)

        room_version = yield self.store.get_room_version(room_id)
        if room_version not in supported_versions:
            logger.warn("Room version %s not in %s", room_version, supported_versions)
            raise IncompatibleRoomVersionError(room_version=room_version)

        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content, room_version):
        """Handle an invite sent to one of our users by a remote server."""
        if room_version not in KNOWN_ROOM_VERSIONS:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        format_ver = room_version_to_event_format(room_version)

        pdu = event_from_pdu_json(content, format_ver)
        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, pdu.room_id)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        return {"event": ret_pdu.get_pdu_json(time_now)}

    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content, room_id):
        """Handle a signed join event; returns the room state and auth chain."""
        logger.debug("on_send_join_request: content: %s", content)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        pdu = event_from_pdu_json(content, format_ver)

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        return (
            200,
            {
                "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
                "auth_chain": [
                    p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
                ],
            },
        )

    @defer.inlineCallbacks
    def on_make_leave_request(self, origin, room_id, user_id):
        """Build a leave event template for a remote user leaving this room."""
        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, room_id)
        pdu = yield self.handler.on_make_leave_request(room_id, user_id)

        room_version = yield self.store.get_room_version(room_id)

        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    @defer.inlineCallbacks
    def on_send_leave_request(self, origin, content, room_id):
        """Handle a signed leave event from a remote server."""
        logger.debug("on_send_leave_request: content: %s", content)

        room_version = yield self.store.get_room_version(room_id)
        format_ver = room_version_to_event_format(room_version)
        pdu = event_from_pdu_json(content, format_ver)

        origin_host, _ = parse_server_name(origin)
        yield self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        return (200, {})

    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        """Return the auth chain for the given event."""
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            time_now = self._clock.time_msec()
            auth_pdus = yield self.handler.on_event_auth(event_id)
            res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
        return (200, res)

    @defer.inlineCallbacks
    def on_query_auth_request(self, origin, content, room_id, event_id):
        """
        Content is a dict with keys::
            auth_chain (list): A list of events that give the auth chain.
            missing (list): A list of event_ids indicating what the other
              side (`origin`) think we're missing.
            rejects (dict): A mapping from event_id to a 2-tuple of reason
              string and a proof (or None) of why the event was rejected.
              The keys of this dict give the list of events the `origin` has
              rejected.

        Args:
            origin (str)
            content (dict)
            event_id (str)

        Returns:
            Deferred: Results in `dict` with the same format as `content`
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            room_version = yield self.store.get_room_version(room_id)
            format_ver = room_version_to_event_format(room_version)

            auth_chain = [
                event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
            ]

            signed_auth = yield self._check_sigs_and_hash_and_fetch(
                origin, auth_chain, outlier=True, room_version=room_version
            )

            ret = yield self.handler.on_query_auth(
                origin,
                event_id,
                room_id,
                signed_auth,
                content.get("rejects", []),
                content.get("missing", []),
            )

            time_now = self._clock.time_msec()
            send_content = {
                "auth_chain": [e.get_pdu_json(time_now) for e in ret["auth_chain"]],
                "rejects": ret.get("rejects", []),
                "missing": ret.get("missing", []),
            }

        return (200, send_content)

    @log_function
    def on_query_client_keys(self, origin, content):
        return self.on_query_request("client_keys", content)

    def on_query_user_devices(self, origin, user_id):
        return self.on_query_request("user_devices", user_id)

    @defer.inlineCallbacks
    @log_function
    def on_claim_client_keys(self, origin, content):
        """Claim one-time E2E keys on behalf of a remote server's users."""
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        results = yield self.store.claim_e2e_one_time_keys(query)

        json_result = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        logger.info(
            "Claimed one-time-keys: %s",
            ",".join(
                (
                    "%s for %s:%s" % (key_id, user_id, device_id)
                    for user_id, user_keys in iteritems(json_result)
                    for device_id, device_keys in iteritems(user_keys)
                    for key_id, _ in iteritems(device_keys)
                )
            ),
        )

        return {"one_time_keys": json_result}

    @defer.inlineCallbacks
    @log_function
    def on_get_missing_events(
        self, origin, room_id, earliest_events, latest_events, limit
    ):
        """Return events on paths between latest_events and earliest_events."""
        with (yield self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            yield self.check_server_matches_acl(origin_host, room_id)

            logger.info(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d",
                earliest_events,
                latest_events,
                limit,
            )

            missing_events = yield self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit
            )

            # only log the full event list at small sizes, to keep logs sane
            if len(missing_events) < 5:
                logger.info(
                    "Returning %d events: %r", len(missing_events), missing_events
                )
            else:
                logger.info("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}

    @log_function
    def on_openid_userinfo(self, token):
        ts_now_ms = self._clock.time_msec()
        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    @defer.inlineCallbacks
    def _handle_received_pdu(self, origin, pdu):
        """ Process a PDU received in a federation /send/ transaction.

        If the event is invalid, then this method throws a FederationError.
        (The error will then be logged and sent back to the sender (which
        probably won't do anything with it), and other events in the
        transaction will be processed as normal).

        It is likely that we'll then receive other events which refer to
        this rejected_event in their prev_events, etc.  When that happens,
        we'll attempt to fetch the rejected event again, which will presumably
        fail, so those second-generation events will also get rejected.

        Eventually, we get to the point where there are more than 10 events
        between any new events and the original rejected event. Since we
        only try to backfill 10 events deep on received pdu, we then accept the
        new event, possibly introducing a discontinuity in the DAG, with new
        forward extremities, so normal service is approximately returned,
        until we try to backfill across the discontinuity.

        Args:
            origin (str): server which sent the pdu
            pdu (FrozenEvent): received pdu

        Returns (Deferred): completes with None

        Raises: FederationError if the signatures / hash do not match, or
            if the event was unacceptable for any other reason (eg, too large,
            too many prev_events, couldn't find the prev_events)
        """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.sender):
            # We continue to accept join events from any server; this is
            # necessary for the federation join dance to work correctly.
            # (When we join over federation, the "helper" server is
            # responsible for sending out the join event, rather than the
            # origin. See bug #1893. This is also true for some third party
            # invites).
            if not (
                pdu.type == "m.room.member"
                and pdu.content
                and pdu.content.get("membership", None)
                in (Membership.JOIN, Membership.INVITE)
            ):
                logger.info(
                    "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
                )
                return
            else:
                logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

        # We've already checked that we know the room version by this point
        room_version = yield self.store.get_room_version(pdu.room_id)

        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(room_version, pdu)
        except SynapseError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

        yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    @defer.inlineCallbacks
    def exchange_third_party_invite(
        self, sender_user_id, target_user_id, room_id, signed
    ):
        """Delegate a third-party-invite exchange to the federation handler."""
        ret = yield self.handler.exchange_third_party_invite(
            sender_user_id, target_user_id, room_id, signed
        )
        return ret

    @defer.inlineCallbacks
    def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
        """Handle a remote server's third-party-invite exchange request."""
        ret = yield self.handler.on_exchange_third_party_invite_request(
            origin, room_id, event_dict
        )
        return ret

    @defer.inlineCallbacks
    def check_server_matches_acl(self, server_name, room_id):
        """Check if the given server is allowed by the server ACLs in the room

        Args:
            server_name (str): name of server, *without any port part*
            room_id (str): ID of the room to check

        Raises:
            AuthError if the server does not match the ACL
        """
        state_ids = yield self.store.get_current_state_ids(room_id)
        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

        if not acl_event_id:
            # No ACL event in the room: all servers are allowed.
            return

        acl_event = yield self.store.get_event(acl_event_id)
        if server_matches_acl_event(server_name, acl_event):
            return

        raise AuthError(code=403, msg="Server is banned from room")
def server_matches_acl_event(server_name, acl_event):
    """Check if the given server is allowed by the ACL event

    Malformed ACL fields (wrong types) are ignored rather than treated as
    matching, so a broken ACL event fails open for that field.

    Args:
        server_name (str): name of server, without any port part
        acl_event (EventBase): m.room.server_acl event

    Returns:
        bool: True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        # logger.warn is deprecated; use logger.warning.
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        # (startswith avoids an IndexError if server_name is somehow empty)
        if server_name.startswith("["):
            return False

        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False

    # next, check the deny list
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False

    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True

    # everything else should be rejected.
    # logger.info("%s fell through", server_name)
    return False
def _acl_entry_matches(server_name, acl_entry):
    """Test a single ACL entry (a glob pattern) against a server name.

    Args:
        server_name (str): server name, without any port part
        acl_entry: one entry from an ACL allow/deny list; non-string entries
            are ignored (treated as not matching) rather than raising

    Returns:
        A truthy regex match object if the entry matches, else False/None.
    """
    if not isinstance(acl_entry, six.string_types):
        # logger.warn is deprecated; use logger.warning.
        logger.warning(
            "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
        )
        return False
    regex = glob_to_regex(acl_entry)
    return regex.match(server_name)
class FederationHandlerRegistry(object):
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """

    def __init__(self):
        # edu_type (str) -> handler callable taking (origin, content)
        self.edu_handlers = {}
        # query_type (str) -> handler callable taking (args)
        self.query_handlers = {}

    def register_edu_handler(self, edu_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

        Args:
            edu_type (str): The type of the incoming EDU to register handler for
            handler (Callable[[str, dict]]): A callable invoked on incoming EDU
                of the given type. The arguments are the origin server name and
                the EDU contents.

        Raises:
            KeyError: if a handler is already registered for this type
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        logger.info("Registering federation EDU handler for %r", edu_type)

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (Callable[[dict], Deferred[dict]]): Invoked to handle
                incoming queries of this type. The return will be yielded
                on and the result used as the response to the query request.

        Raises:
            KeyError: if a handler is already registered for this type
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" % (query_type,))

        logger.info("Registering federation query handler for %r", query_type)

        self.query_handlers[query_type] = handler

    @defer.inlineCallbacks
    def on_edu(self, edu_type, origin, content):
        """Dispatch an incoming EDU to its registered handler, if any.

        EDUs are best-effort: handler failures are logged and swallowed.
        """
        handler = self.edu_handlers.get(edu_type)
        if not handler:
            logger.warn("No handler registered for EDU type %s", edu_type)
            # Bail out here: without this return we would fall through and
            # call None(origin, content), logging a spurious TypeError
            # traceback for every EDU of an unknown type.
            return

        try:
            yield handler(origin, content)
        except SynapseError as e:
            logger.info("Failed to handle edu %r: %r", edu_type, e)
        except Exception:
            logger.exception("Failed to handle edu %r", edu_type)

    def on_query(self, query_type, args):
        """Dispatch an incoming federation query to its registered handler.

        Raises:
            NotFoundError: if no handler is registered for query_type
        """
        handler = self.query_handlers.get(query_type)
        if not handler:
            logger.warn("No handler registered for query type %s", query_type)
            raise NotFoundError("No handler for Query type '%s'" % (query_type,))

        return handler(args)
class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
    """A FederationHandlerRegistry for worker processes.

    When receiving EDU or queries it will check if an appropriate handler has
    been registered on the worker, if there isn't one then it calls off to the
    master process.
    """

    def __init__(self, hs):
        self.config = hs.config
        self.http_client = hs.get_simple_http_client()
        self.clock = hs.get_clock()

        # Replication clients used to forward unhandled traffic to the master.
        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)

        super(ReplicationFederationHandlerRegistry, self).__init__()

    def on_edu(self, edu_type, origin, content):
        """Overrides FederationHandlerRegistry
        """
        # Drop presence EDUs entirely when presence is disabled in the config.
        if edu_type == "m.presence" and not self.config.use_presence:
            return

        if not self.edu_handlers.get(edu_type):
            # Not handled on this worker: ship the EDU off to the master.
            return self._send_edu(edu_type=edu_type, origin=origin, content=content)

        return super(ReplicationFederationHandlerRegistry, self).on_edu(
            edu_type, origin, content
        )

    def on_query(self, query_type, args):
        """Overrides FederationHandlerRegistry
        """
        local_handler = self.query_handlers.get(query_type)
        if not local_handler:
            # Not handled on this worker: forward the query to the master.
            return self._get_query_client(query_type=query_type, args=args)

        return local_handler(args)
|