Browse Source
Draft: feat: rate limiting — Closes #4. See merge request famedly/conduit!783
176 changed files with 3670 additions and 1338 deletions
@ -1,2 +1,5 @@
|
||||
[env] |
||||
RUMA_UNSTABLE_EXHAUSTIVE_TYPES = "1" |
||||
|
||||
[alias] |
||||
xtask = "run --package xtask --" |
||||
|
||||
@ -0,0 +1,39 @@
|
||||
[package] |
||||
edition.workspace = true |
||||
homepage.workspace = true |
||||
name = "conduit-config" |
||||
repository.workspace = true |
||||
rust-version.workspace = true |
||||
version = "0.11.0-alpha" |
||||
|
||||
[dependencies] |
||||
# Self explanitory, used for deserializing the configuration |
||||
serde.workspace = true |
||||
# Parsing media retention policies |
||||
bytesize = { workspace = true, features = ["serde"] } |
||||
humantime-serde = "1" |
||||
# Validating urls |
||||
url = { version = "2", features = ["serde"] } |
||||
# Error type |
||||
thiserror.workspace = true |
||||
# Validating S3 config |
||||
rusty-s3.workspace = true |
||||
# Proxy config |
||||
reqwest.workspace = true |
||||
# Generating documentation |
||||
conduit-macros.workspace = true |
||||
|
||||
# default room version, server name, ignored keys |
||||
[dependencies.ruma] |
||||
features = ["client-api", "federation-api"] |
||||
workspace = true |
||||
|
||||
[features] |
||||
rocksdb = [] |
||||
sqlite = [] |
||||
|
||||
# Used to generate docs, shouldn't be used outside of xtask |
||||
doc-generators = ["conduit-macros/doc-generators"] |
||||
|
||||
[lints] |
||||
workspace = true |
||||
@ -0,0 +1,23 @@
|
||||
use thiserror::Error; |
||||
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>; |
||||
|
||||
#[derive(Error, Debug)] |
||||
pub enum Error { |
||||
#[error(
|
||||
"The media directory structure depth multiplied by the length is equal to or greater than a sha256 hex hash, please reduce at least one of the two so that their product is less than 64" |
||||
)] |
||||
DirectoryStructureLengthDepthTooLarge, |
||||
|
||||
#[error("Invalid S3 config")] |
||||
S3, |
||||
|
||||
#[error("Failed to construct proxy config: {source}")] |
||||
Proxy { |
||||
#[from] |
||||
source: reqwest::Error, |
||||
}, |
||||
|
||||
#[error("Registration token is empty")] |
||||
EmptyRegistrationToken, |
||||
} |
||||
@ -0,0 +1,909 @@
|
||||
use std::{collections::HashMap, num::NonZeroU64}; |
||||
|
||||
use bytesize::ByteSize; |
||||
use ruma::api::Metadata; |
||||
use serde::Deserialize; |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct WrappedShadowConfig { |
||||
#[serde(default)] |
||||
pub inherits: ConfigPreset, |
||||
#[serde(flatten)] |
||||
pub config: ShadowConfig, |
||||
} |
||||
|
||||
impl From<WrappedShadowConfig> for Config { |
||||
fn from(value: WrappedShadowConfig) -> Self { |
||||
Config::get_preset(value.inherits).apply_overrides(value.config) |
||||
} |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize, Default, Copy)] |
||||
#[cfg_attr(feature = "doc-generators", derive(serde::Serialize))] |
||||
#[serde(rename_all = "snake_case")] |
||||
pub enum ConfigPreset { |
||||
/// Default rate-limiting configuration, recommended for small private servers (i.e. single-user
|
||||
/// or for family and/or friends)
|
||||
#[default] |
||||
PrivateSmall, |
||||
PrivateMedium, |
||||
PublicMedium, |
||||
PublicLarge, |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct ShadowConfig { |
||||
pub client: |
||||
ShadowConfigFragment<ClientRestriction, ShadowClientMediaConfig, AuthenticationFailures>, |
||||
pub federation: |
||||
ShadowConfigFragment<FederationRestriction, ShadowFederationMediaConfig, Nothing>, |
||||
} |
||||
|
||||
pub trait RestrictionGeneric: ConfigPart + std::hash::Hash + Eq {} |
||||
impl<T> RestrictionGeneric for T where T: ConfigPart + std::hash::Hash + Eq {} |
||||
|
||||
pub trait ConfigPart: Clone + std::fmt::Debug + serde::de::DeserializeOwned {} |
||||
impl<T> ConfigPart for T where T: Clone + std::fmt::Debug + serde::de::DeserializeOwned {} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct ShadowConfigFragment<R, M, T> |
||||
where |
||||
R: RestrictionGeneric, |
||||
M: ConfigPart, |
||||
T: ConfigPart, |
||||
{ |
||||
#[serde(bound(deserialize = "R: RestrictionGeneric, M: ConfigPart, T: ConfigPart"))] |
||||
pub target: Option<ShadowConfigFragmentFragment<R, M, T>>, |
||||
#[serde(bound(deserialize = "R: RestrictionGeneric, M: ConfigPart"))] |
||||
pub global: Option<ShadowConfigFragmentFragment<R, M, Nothing>>, |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct ShadowConfigFragmentFragment<R, M, T> |
||||
where |
||||
R: RestrictionGeneric, |
||||
M: ConfigPart, |
||||
T: ConfigPart, |
||||
{ |
||||
#[serde(
|
||||
flatten, |
||||
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2024&gist=fe75063b73c6d9860991c41572e00035
|
||||
//
|
||||
// For some reason specifying the default function fixes the issue in the playground link
|
||||
// above. Makes no sense to me, but hey, it works.
|
||||
default = "HashMap::new", |
||||
bound(deserialize = "R: RestrictionGeneric") |
||||
)] |
||||
pub map: HashMap<R, RequestLimitation>, |
||||
#[serde(bound(deserialize = "M: ConfigPart"))] |
||||
pub media: Option<M>, |
||||
#[serde(flatten)] |
||||
#[serde(bound(deserialize = "T: ConfigPart"))] |
||||
pub additional_fields: Option<T>, |
||||
} |
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize)] |
||||
pub struct ShadowClientMediaConfig { |
||||
pub download: Option<MediaLimitation>, |
||||
pub upload: Option<MediaLimitation>, |
||||
pub fetch: Option<MediaLimitation>, |
||||
} |
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize)] |
||||
pub struct ShadowFederationMediaConfig { |
||||
pub download: Option<MediaLimitation>, |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
#[serde(from = "WrappedShadowConfig")] |
||||
pub struct Config { |
||||
pub target: ConfigFragment<AuthenticationFailures>, |
||||
pub global: ConfigFragment<Nothing>, |
||||
} |
||||
|
||||
impl Default for Config { |
||||
fn default() -> Self { |
||||
Self::get_preset(ConfigPreset::default()) |
||||
} |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct ConfigFragment<T> |
||||
where |
||||
T: ConfigPart, |
||||
{ |
||||
#[serde(bound(deserialize = "T: ConfigPart"))] |
||||
pub client: ConfigFragmentFragment<ClientRestriction, ClientMediaConfig, T>, |
||||
pub federation: ConfigFragmentFragment<FederationRestriction, FederationMediaConfig, Nothing>, |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct ConfigFragmentFragment<R, M, T> |
||||
where |
||||
R: RestrictionGeneric, |
||||
M: ConfigPart, |
||||
T: ConfigPart, |
||||
{ |
||||
#[serde(flatten)] |
||||
#[serde(bound(deserialize = "R: RestrictionGeneric"))] |
||||
pub map: HashMap<R, RequestLimitation>, |
||||
#[serde(bound(deserialize = "M: ConfigPart"))] |
||||
pub media: M, |
||||
#[serde(flatten)] |
||||
#[serde(bound(deserialize = "T: ConfigPart"))] |
||||
pub additional_fields: T, |
||||
} |
||||
|
||||
impl<R, M, T> ConfigFragmentFragment<R, M, T> |
||||
where |
||||
R: RestrictionGeneric, |
||||
M: ConfigPart + MediaConfig, |
||||
T: ConfigPart, |
||||
{ |
||||
pub fn apply_overrides( |
||||
self, |
||||
shadow: Option<ShadowConfigFragmentFragment<R, M::Shadow, T>>, |
||||
) -> Self { |
||||
let Some(shadow) = shadow else { |
||||
return self; |
||||
}; |
||||
|
||||
let ConfigFragmentFragment { |
||||
mut map, |
||||
media, |
||||
additional_fields, |
||||
} = self; |
||||
|
||||
map.extend(shadow.map); |
||||
|
||||
Self { |
||||
map, |
||||
media: if let Some(sm) = shadow.media { |
||||
media.apply_overrides(sm) |
||||
} else { |
||||
media |
||||
}, |
||||
additional_fields: shadow.additional_fields.unwrap_or(additional_fields), |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct AuthenticationFailures { |
||||
pub authentication_failures: RequestLimitation, |
||||
} |
||||
|
||||
impl AuthenticationFailures { |
||||
fn new(timeframe: Timeframe, burst_capacity: NonZeroU64) -> Self { |
||||
Self { |
||||
authentication_failures: RequestLimitation::new(timeframe, burst_capacity), |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Debug, Clone, Deserialize)] |
||||
pub struct Nothing; |
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] |
||||
pub enum Restriction { |
||||
Client(ClientRestriction), |
||||
Federation(FederationRestriction), |
||||
} |
||||
|
||||
impl From<ClientRestriction> for Restriction { |
||||
fn from(value: ClientRestriction) -> Self { |
||||
Self::Client(value) |
||||
} |
||||
} |
||||
|
||||
impl From<FederationRestriction> for Restriction { |
||||
fn from(value: FederationRestriction) -> Self { |
||||
Self::Federation(value) |
||||
} |
||||
} |
||||
|
||||
#[cfg(feature = "doc-generators")] |
||||
pub trait DocumentRestrictions: Sized { |
||||
fn variant_doc_comments() -> Vec<(Self, String)>; |
||||
fn container_doc_comment() -> String; |
||||
} |
||||
|
||||
/// Applies for endpoints on the client-server API, which are used by clients, appservices, and
|
||||
/// bots. Appservices can bypass rate-limiting though if `rate_limited` is set to `false` in their
|
||||
/// registration file.
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd)] |
||||
#[cfg_attr(
|
||||
feature = "doc-generators", |
||||
derive(conduit_macros::DocumentRestrictions, serde::Serialize) |
||||
)] |
||||
#[serde(rename_all = "snake_case")] |
||||
pub enum ClientRestriction { |
||||
/// For registering a new user account. May be called multiples times for a single
|
||||
/// registration if there are extra steps, e.g. providing a registration token.
|
||||
Registration, |
||||
/// For logging into an existing account.
|
||||
Login, |
||||
/// For checking whether a given registration token would allow the user to register an
|
||||
/// account.
|
||||
RegistrationTokenValidity, |
||||
|
||||
/// For sending an event to a room.
|
||||
///
|
||||
/// Note that this is not used for state events, but for users who are unprivliged in a room,
|
||||
/// the only state event they'll be able to send are ones to update their room profile.
|
||||
SendEvent, |
||||
|
||||
/// For joining a room.
|
||||
Join, |
||||
/// For inviting a user to a room.
|
||||
Invite, |
||||
/// For knocking on a room.
|
||||
Knock, |
||||
|
||||
/// For reporting a user, event, or room.
|
||||
SendReport, |
||||
|
||||
/// For adding an alias to a room.
|
||||
CreateAlias, |
||||
|
||||
/// For downloading a media file.
|
||||
///
|
||||
/// For rate-limiting based on the size of files downloaded, see the media rate-limiting
|
||||
/// configuration.
|
||||
MediaDownload, |
||||
/// For uploading a media file.
|
||||
///
|
||||
/// For rate-limiting based on the size of files uploaded, see the media rate-limiting
|
||||
/// configuration.
|
||||
MediaCreate, |
||||
} |
||||
|
||||
/// Applies for endpoints on the federation API of this server, hence restricting how
|
||||
/// many times other servers can use these endpoints on this server in a given timeframe.
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd)] |
||||
#[cfg_attr(
|
||||
feature = "doc-generators", |
||||
derive(conduit_macros::DocumentRestrictions, serde::Serialize) |
||||
)] |
||||
#[serde(rename_all = "snake_case")] |
||||
pub enum FederationRestriction { |
||||
/// For joining a room.
|
||||
Join, |
||||
/// For knocking on a room.
|
||||
Knock, |
||||
/// For inviting a local user to a room.
|
||||
Invite, |
||||
|
||||
// Transactions should be handled by a completely dedicated rate-limiter
|
||||
/* /// For sending transactions of PDU/EDUs.
|
||||
///
|
||||
///
|
||||
Transaction, */ |
||||
/// For downloading media.
|
||||
MediaDownload, |
||||
} |
||||
|
||||
impl TryFrom<Metadata> for Restriction { |
||||
type Error = (); |
||||
|
||||
fn try_from(value: Metadata) -> Result<Self, Self::Error> { |
||||
use Restriction::*; |
||||
use ruma::api::{ |
||||
IncomingRequest, |
||||
client::{ |
||||
account::{check_registration_token_validity, register}, |
||||
alias::create_alias, |
||||
authenticated_media::{ |
||||
get_content, get_content_as_filename, get_content_thumbnail, get_media_preview, |
||||
}, |
||||
knock::knock_room, |
||||
media::{self, create_content, create_mxc_uri}, |
||||
membership::{invite_user, join_room_by_id, join_room_by_id_or_alias}, |
||||
message::send_message_event, |
||||
reporting::report_user, |
||||
room::{report_content, report_room}, |
||||
session::login, |
||||
state::send_state_event, |
||||
}, |
||||
federation::{ |
||||
authenticated_media::{ |
||||
get_content as federation_get_content, |
||||
get_content_thumbnail as federation_get_content_thumbnail, |
||||
}, |
||||
membership::{create_invite, create_join_event, create_knock_event}, |
||||
}, |
||||
}; |
||||
|
||||
Ok(match value { |
||||
register::v3::Request::METADATA => Client(ClientRestriction::Registration), |
||||
check_registration_token_validity::v1::Request::METADATA => { |
||||
Client(ClientRestriction::RegistrationTokenValidity) |
||||
} |
||||
login::v3::Request::METADATA => Client(ClientRestriction::Login), |
||||
send_message_event::v3::Request::METADATA | send_state_event::v3::Request::METADATA => { |
||||
Client(ClientRestriction::SendEvent) |
||||
} |
||||
join_room_by_id::v3::Request::METADATA |
||||
| join_room_by_id_or_alias::v3::Request::METADATA => Client(ClientRestriction::Join), |
||||
invite_user::v3::Request::METADATA => Client(ClientRestriction::Invite), |
||||
knock_room::v3::Request::METADATA => Client(ClientRestriction::Knock), |
||||
report_user::v3::Request::METADATA |
||||
| report_content::v3::Request::METADATA |
||||
| report_room::v3::Request::METADATA => Client(ClientRestriction::SendReport), |
||||
create_alias::v3::Request::METADATA => Client(ClientRestriction::CreateAlias), |
||||
// NOTE: handle async media upload in a way that doesn't half the number of uploads you can do within a short timeframe, while not allowing pre-generation of MXC uris to allow uploading double the number of media at once
|
||||
create_content::v3::Request::METADATA | create_mxc_uri::v1::Request::METADATA => { |
||||
Client(ClientRestriction::MediaCreate) |
||||
} |
||||
// Unauthenticate media is deprecated
|
||||
#[allow(deprecated)] |
||||
media::get_content::v3::Request::METADATA |
||||
| media::get_content_as_filename::v3::Request::METADATA |
||||
| media::get_content_thumbnail::v3::Request::METADATA |
||||
| media::get_media_preview::v3::Request::METADATA |
||||
| get_content::v1::Request::METADATA |
||||
| get_content_as_filename::v1::Request::METADATA |
||||
| get_content_thumbnail::v1::Request::METADATA |
||||
| get_media_preview::v1::Request::METADATA => Client(ClientRestriction::MediaDownload), |
||||
federation_get_content::v1::Request::METADATA |
||||
| federation_get_content_thumbnail::v1::Request::METADATA => { |
||||
Federation(FederationRestriction::MediaDownload) |
||||
} |
||||
// v1 is deprecated
|
||||
#[allow(deprecated)] |
||||
create_join_event::v1::Request::METADATA | create_join_event::v2::Request::METADATA => { |
||||
Federation(FederationRestriction::Join) |
||||
} |
||||
create_knock_event::v1::Request::METADATA => Federation(FederationRestriction::Knock), |
||||
create_invite::v1::Request::METADATA | create_invite::v2::Request::METADATA => { |
||||
Federation(FederationRestriction::Invite) |
||||
} |
||||
|
||||
_ => return Err(()), |
||||
}) |
||||
} |
||||
} |
||||
|
||||
impl<T> ConfigFragment<T> |
||||
where |
||||
T: ConfigPart, |
||||
{ |
||||
pub fn get(&self, restriction: &Restriction) -> &RequestLimitation { |
||||
// Maybe look into https://github.com/moriyoshi-kasuga/enum-table
|
||||
match restriction { |
||||
Restriction::Client(client_restriction) => { |
||||
self.client.map.get(client_restriction).unwrap() |
||||
} |
||||
Restriction::Federation(federation_restriction) => { |
||||
self.federation.map.get(federation_restriction).unwrap() |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize)] |
||||
pub struct RequestLimitation { |
||||
#[serde(flatten)] |
||||
pub timeframe: Timeframe, |
||||
pub burst_capacity: NonZeroU64, |
||||
} |
||||
|
||||
impl RequestLimitation { |
||||
pub fn new(timeframe: Timeframe, burst_capacity: NonZeroU64) -> Self { |
||||
Self { |
||||
timeframe, |
||||
burst_capacity, |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Deserialize, Clone, Copy, Debug)] |
||||
#[serde(rename_all = "snake_case")] |
||||
// When deserializing, we want this prefix
|
||||
#[allow(clippy::enum_variant_names)] |
||||
pub enum Timeframe { |
||||
PerSecond(NonZeroU64), |
||||
PerMinute(NonZeroU64), |
||||
PerHour(NonZeroU64), |
||||
PerDay(NonZeroU64), |
||||
} |
||||
|
||||
#[cfg(feature = "doc-generators")] |
||||
impl std::fmt::Display for Timeframe { |
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |
||||
let value; |
||||
|
||||
let string = match self { |
||||
Self::PerSecond(v) => { |
||||
value = v; |
||||
"second" |
||||
} |
||||
Self::PerMinute(v) => { |
||||
value = v; |
||||
"minute" |
||||
} |
||||
Self::PerHour(v) => { |
||||
value = v; |
||||
"hour" |
||||
} |
||||
Self::PerDay(v) => { |
||||
value = v; |
||||
"day" |
||||
} |
||||
}; |
||||
write!(f, "{value} requests per {string}") |
||||
} |
||||
} |
||||
|
||||
impl Timeframe { |
||||
pub fn nano_gap(&self) -> u64 { |
||||
match self { |
||||
Timeframe::PerSecond(t) => 1000 * 1000 * 1000 / t.get(), |
||||
Timeframe::PerMinute(t) => 1000 * 1000 * 1000 * 60 / t.get(), |
||||
Timeframe::PerHour(t) => 1000 * 1000 * 1000 * 60 * 60 / t.get(), |
||||
Timeframe::PerDay(t) => 1000 * 1000 * 1000 * 60 * 60 * 24 / t.get(), |
||||
} |
||||
} |
||||
} |
||||
|
||||
pub trait MediaConfig { |
||||
type Shadow: ConfigPart; |
||||
|
||||
fn apply_overrides(self, shadow: Self::Shadow) -> Self; |
||||
} |
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize)] |
||||
pub struct ClientMediaConfig { |
||||
pub download: MediaLimitation, |
||||
pub upload: MediaLimitation, |
||||
pub fetch: MediaLimitation, |
||||
} |
||||
|
||||
impl MediaConfig for ClientMediaConfig { |
||||
type Shadow = ShadowClientMediaConfig; |
||||
|
||||
fn apply_overrides(self, shadow: Self::Shadow) -> Self { |
||||
let Self::Shadow { |
||||
download, |
||||
upload, |
||||
fetch, |
||||
} = shadow; |
||||
|
||||
Self { |
||||
download: download.unwrap_or(self.download), |
||||
upload: upload.unwrap_or(self.upload), |
||||
fetch: fetch.unwrap_or(self.fetch), |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize)] |
||||
pub struct FederationMediaConfig { |
||||
pub download: MediaLimitation, |
||||
} |
||||
|
||||
impl MediaConfig for FederationMediaConfig { |
||||
type Shadow = ShadowFederationMediaConfig; |
||||
|
||||
fn apply_overrides(self, shadow: Self::Shadow) -> Self { |
||||
Self { |
||||
download: shadow.download.unwrap_or(self.download), |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize)] |
||||
pub struct MediaLimitation { |
||||
#[serde(flatten)] |
||||
pub timeframe: MediaTimeframe, |
||||
pub burst_capacity: ByteSize, |
||||
} |
||||
|
||||
impl MediaLimitation { |
||||
pub fn new(timeframe: MediaTimeframe, burst_capacity: ByteSize) -> Self { |
||||
Self { |
||||
timeframe, |
||||
burst_capacity, |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[derive(Deserialize, Clone, Copy, Debug)] |
||||
#[serde(rename_all = "snake_case")] |
||||
// When deserializing, we want this prefix
|
||||
#[allow(clippy::enum_variant_names)] |
||||
pub enum MediaTimeframe { |
||||
PerSecond(ByteSize), |
||||
PerMinute(ByteSize), |
||||
PerHour(ByteSize), |
||||
PerDay(ByteSize), |
||||
} |
||||
|
||||
impl MediaTimeframe { |
||||
pub fn bytes_per_sec(&self) -> u64 { |
||||
match self { |
||||
MediaTimeframe::PerSecond(t) => t.as_u64(), |
||||
MediaTimeframe::PerMinute(t) => t.as_u64() / 60, |
||||
MediaTimeframe::PerHour(t) => t.as_u64() / (60 * 60), |
||||
MediaTimeframe::PerDay(t) => t.as_u64() / (60 * 60 * 24), |
||||
} |
||||
} |
||||
} |
||||
|
||||
fn nz(int: u64) -> NonZeroU64 { |
||||
NonZeroU64::new(int).expect("Values are static") |
||||
} |
||||
|
||||
macro_rules! default_restriction_map { |
||||
($restriction_type:ident; $($restriction:ident, $timeframe:ident, $timeframe_value:expr, $burst_capacity:expr;)*) => { |
||||
HashMap::from_iter([ |
||||
$(( |
||||
$restriction_type::$restriction, |
||||
RequestLimitation::new(Timeframe::$timeframe(nz($timeframe_value)), nz($burst_capacity)), |
||||
),)* |
||||
]) |
||||
} |
||||
} |
||||
|
||||
macro_rules! media_config { |
||||
($config_type:ident; $($key:ident: $timeframe:ident, $timeframe_value:expr, $burst_capacity:expr;)*) => { |
||||
$config_type { |
||||
$($key: MediaLimitation::new(MediaTimeframe::$timeframe($timeframe_value), $burst_capacity),)* |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl Config { |
||||
fn apply_overrides(self, shadow: ShadowConfig) -> Self { |
||||
let ShadowConfig { |
||||
client: |
||||
ShadowConfigFragment { |
||||
target: client_target, |
||||
global: client_global, |
||||
}, |
||||
federation: |
||||
ShadowConfigFragment { |
||||
target: federation_target, |
||||
global: federation_global, |
||||
}, |
||||
} = shadow; |
||||
|
||||
Self { |
||||
target: ConfigFragment { |
||||
client: self.target.client.apply_overrides(client_target), |
||||
federation: self.target.federation.apply_overrides(federation_target), |
||||
}, |
||||
global: ConfigFragment { |
||||
client: self.global.client.apply_overrides(client_global), |
||||
federation: self.global.federation.apply_overrides(federation_global), |
||||
}, |
||||
} |
||||
} |
||||
|
||||
pub fn get_preset(preset: ConfigPreset) -> Self { |
||||
// The client target map shouldn't really differ between presets, as individual user's
|
||||
// behaviours shouldn't differ depending on the size of the server or whether it's private
|
||||
// or public, but maybe I'm wrong.
|
||||
let target_client_map = default_restriction_map!( |
||||
ClientRestriction; |
||||
|
||||
Registration, PerDay, 3, 10; |
||||
Login, PerDay, 5, 20; |
||||
RegistrationTokenValidity, PerDay, 10, 20; |
||||
SendEvent, PerMinute, 15, 60; |
||||
Join, PerHour, 5, 30; |
||||
Knock, PerHour, 5, 30; |
||||
Invite, PerHour, 2, 20; |
||||
SendReport, PerDay, 5, 20; |
||||
CreateAlias, PerDay, 2, 20; |
||||
MediaDownload, PerHour, 30, 100; |
||||
MediaCreate, PerMinute, 4, 20; |
||||
); |
||||
// Same goes for media
|
||||
let target_client_media = media_config! { |
||||
ClientMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(100), ByteSize::mb(50); |
||||
upload: PerMinute, ByteSize::mb(10), ByteSize::mb(100); |
||||
fetch: PerMinute, ByteSize::mb(100), ByteSize::mb(50); |
||||
}; |
||||
|
||||
// Currently, these values are completely arbitrary, not informed by any sort of
|
||||
// knowledge. In the future, it would be good to have some sort of analytics to
|
||||
// determine what some good defaults could be. Maybe getting some percentiles for
|
||||
// burst_capacity & timeframes used. How we'd tell the difference between power users
|
||||
// and malicilous attacks, I'm not sure.
|
||||
match preset { |
||||
ConfigPreset::PrivateSmall => Self { |
||||
target: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: target_client_map, |
||||
media: target_client_media, |
||||
additional_fields: AuthenticationFailures::new( |
||||
Timeframe::PerHour(nz(1)), |
||||
nz(20), |
||||
), |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerHour, 10, 10; |
||||
Knock, PerHour, 10, 10; |
||||
Invite, PerHour, 10, 10; |
||||
MediaDownload, PerMinute, 10, 50; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(100), ByteSize::mb(100); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
global: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
ClientRestriction; |
||||
|
||||
Registration, PerDay, 10, 20; |
||||
Login, PerHour, 10, 10; |
||||
RegistrationTokenValidity, PerDay, 10, 20; |
||||
SendEvent, PerSecond, 2, 100; |
||||
Join, PerMinute, 1, 30; |
||||
Knock, PerMinute, 1, 30; |
||||
Invite, PerHour, 10, 20; |
||||
SendReport, PerHour, 1, 25; |
||||
CreateAlias, PerHour, 5, 20; |
||||
MediaDownload, PerMinute, 5, 150; |
||||
MediaCreate, PerMinute, 20, 50; |
||||
), |
||||
media: media_config! { |
||||
ClientMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(250), ByteSize::mb(100); |
||||
upload: PerMinute, ByteSize::mb(50), ByteSize::mb(100); |
||||
fetch: PerMinute, ByteSize::mb(250), ByteSize::mb(100); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerMinute, 10, 10; |
||||
Knock, PerMinute, 10, 10; |
||||
Invite, PerMinute, 10, 10; |
||||
MediaDownload, PerSecond, 10, 250; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(250), ByteSize::mb(250); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
}, |
||||
ConfigPreset::PrivateMedium => Self { |
||||
target: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: target_client_map, |
||||
media: target_client_media, |
||||
additional_fields: AuthenticationFailures::new( |
||||
Timeframe::PerHour(nz(10)), |
||||
nz(20), |
||||
), |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerHour, 30, 10; |
||||
Knock, PerHour, 30, 10; |
||||
Invite, PerHour, 30, 10; |
||||
MediaDownload, PerMinute, 100, 50; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(200), ByteSize::mb(200); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
global: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
ClientRestriction; |
||||
|
||||
Registration, PerDay, 20, 20; |
||||
Login, PerHour, 25, 15; |
||||
RegistrationTokenValidity, PerDay, 20, 20; |
||||
SendEvent, PerSecond, 10, 100; |
||||
Join, PerMinute, 5, 30; |
||||
Knock, PerMinute, 5, 30; |
||||
Invite, PerMinute, 1, 20; |
||||
SendReport, PerHour, 10, 25; |
||||
CreateAlias, PerMinute, 1, 50; |
||||
MediaDownload, PerSecond, 1, 200; |
||||
MediaCreate, PerSecond, 2, 20; |
||||
), |
||||
media: media_config! { |
||||
ClientMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(500), ByteSize::mb(200); |
||||
upload: PerMinute, ByteSize::mb(100), ByteSize::mb(200); |
||||
fetch: PerMinute, ByteSize::mb(500), ByteSize::mb(200); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerMinute, 25, 25; |
||||
Knock, PerMinute, 25, 25; |
||||
Invite, PerMinute, 25, 25; |
||||
MediaDownload, PerSecond, 10, 100; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(500), ByteSize::mb(500); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
}, |
||||
ConfigPreset::PublicMedium => Self { |
||||
target: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: target_client_map, |
||||
media: target_client_media, |
||||
additional_fields: AuthenticationFailures::new( |
||||
Timeframe::PerHour(nz(10)), |
||||
nz(20), |
||||
), |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerHour, 30, 10; |
||||
Knock, PerHour, 30, 10; |
||||
Invite, PerHour, 30, 10; |
||||
MediaDownload, PerMinute, 100, 50; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(200), ByteSize::mb(200); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
global: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
ClientRestriction; |
||||
|
||||
Registration, PerHour, 5, 20; |
||||
Login, PerHour, 25, 15; |
||||
// Public servers don't have registration tokens, so let's rate limit
|
||||
// heavily so that if they revert to a private server again, it's a
|
||||
// reminder to change their preset.
|
||||
RegistrationTokenValidity, PerDay, 1, 1; |
||||
SendEvent, PerSecond, 10, 100; |
||||
Join, PerMinute, 5, 30; |
||||
Knock, PerMinute, 5, 30; |
||||
Invite, PerMinute, 1, 20; |
||||
SendReport, PerHour, 10, 25; |
||||
CreateAlias, PerMinute, 1, 50; |
||||
MediaDownload, PerSecond, 1, 200; |
||||
MediaCreate, PerSecond, 2, 20; |
||||
), |
||||
media: media_config! { |
||||
ClientMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(500), ByteSize::mb(200); |
||||
upload: PerMinute, ByteSize::mb(100), ByteSize::mb(200); |
||||
fetch: PerMinute, ByteSize::mb(500), ByteSize::mb(200); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerMinute, 25, 25; |
||||
Knock, PerMinute, 25, 25; |
||||
Invite, PerMinute, 25, 25; |
||||
MediaDownload, PerSecond, 10, 100; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(500), ByteSize::mb(500); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
}, |
||||
ConfigPreset::PublicLarge => Self { |
||||
target: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: target_client_map, |
||||
media: target_client_media, |
||||
additional_fields: AuthenticationFailures::new( |
||||
Timeframe::PerMinute(nz(1)), |
||||
nz(20), |
||||
), |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerHour, 90, 30; |
||||
Knock, PerHour, 90, 30; |
||||
Invite, PerHour, 90, 30; |
||||
MediaDownload, PerMinute, 100, 50; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::mb(600), ByteSize::mb(300); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
global: ConfigFragment { |
||||
client: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
ClientRestriction; |
||||
|
||||
Registration, PerMinute, 4, 25; |
||||
Login, PerMinute, 10, 25; |
||||
// Public servers don't have registration tokens, so let's rate limit
|
||||
// heavily so that if they revert to a private server again, it's a
|
||||
// reminder to change their preset.
|
||||
RegistrationTokenValidity, PerDay, 1, 1; |
||||
SendEvent, PerSecond, 100, 50; |
||||
Join, PerSecond, 1, 20; |
||||
Knock, PerSecond, 1, 20; |
||||
Invite, PerMinute, 10, 40; |
||||
SendReport, PerMinute, 5, 25; |
||||
CreateAlias, PerMinute, 30, 20; |
||||
MediaDownload, PerSecond, 25, 200; |
||||
MediaCreate, PerSecond, 10, 30; |
||||
), |
||||
media: media_config! { |
||||
ClientMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::gb(2), ByteSize::mb(500); |
||||
upload: PerMinute, ByteSize::mb(500), ByteSize::mb(500); |
||||
fetch: PerMinute, ByteSize::gb(2), ByteSize::mb(500); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
federation: ConfigFragmentFragment { |
||||
map: default_restriction_map!( |
||||
FederationRestriction; |
||||
|
||||
Join, PerSecond, 1, 50; |
||||
Knock, PerSecond, 1, 50; |
||||
Invite, PerSecond, 1, 50; |
||||
MediaDownload, PerSecond, 50, 100; |
||||
), |
||||
media: media_config! { |
||||
FederationMediaConfig; |
||||
|
||||
download: PerMinute, ByteSize::gb(2), ByteSize::gb(1); |
||||
}, |
||||
additional_fields: Nothing, |
||||
}, |
||||
}, |
||||
}, |
||||
} |
||||
} |
||||
} |
||||
@ -0,0 +1,23 @@
|
||||
[package] |
||||
edition.workspace = true |
||||
homepage.workspace = true |
||||
name = "conduit-macros" |
||||
repository.workspace = true |
||||
rust-version.workspace = true |
||||
version = "0.11.0-alpha" |
||||
|
||||
[lib] |
||||
proc-macro = true |
||||
|
||||
[dependencies] |
||||
# Parsing and quoting tokens |
||||
proc-macro2 = "1" |
||||
quote = "1" |
||||
syn = { version = "2", features = ["full"] } |
||||
|
||||
[features] |
||||
default = ["doc-generators"] |
||||
doc-generators = [] |
||||
|
||||
[lints] |
||||
workspace = true |
||||
@ -0,0 +1,95 @@
|
||||
use proc_macro2::TokenStream as TokenStream2; |
||||
use quote::{ToTokens, quote}; |
||||
use syn::{Attribute, Expr, Ident, ItemEnum, Lit, MetaNameValue, Variant, parse::Parse}; |
||||
|
||||
pub(super) struct Restrictions { |
||||
ident: Ident, |
||||
doc_comment: String, |
||||
variants: Vec<Restriction>, |
||||
} |
||||
|
||||
struct Restriction { |
||||
ident: Ident, |
||||
doc_comment: String, |
||||
} |
||||
|
||||
impl Parse for Restrictions { |
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> { |
||||
let ItemEnum { |
||||
ident, |
||||
variants, |
||||
attrs, |
||||
.. |
||||
} = ItemEnum::parse(input)?; |
||||
|
||||
let variants = variants |
||||
.into_iter() |
||||
.map(|Variant { attrs, ident, .. }| { |
||||
let doc_comment = attrs_to_doc_comment(attrs); |
||||
|
||||
Ok(Restriction { ident, doc_comment }) |
||||
}) |
||||
.collect::<syn::Result<Vec<_>>>()?; |
||||
|
||||
let doc_comment = attrs_to_doc_comment(attrs); |
||||
|
||||
Ok(Self { |
||||
ident, |
||||
variants, |
||||
doc_comment, |
||||
}) |
||||
} |
||||
} |
||||
|
||||
fn attrs_to_doc_comment(attrs: Vec<Attribute>) -> String { |
||||
attrs |
||||
.into_iter() |
||||
.filter_map(|attr| { |
||||
if let syn::Meta::NameValue(MetaNameValue { path, value, .. }) = attr.meta |
||||
&& path.is_ident("doc") |
||||
&& let Expr::Lit(lit) = value |
||||
&& let Lit::Str(string) = lit.lit |
||||
{ |
||||
Some(string.value().trim().to_owned()) |
||||
} else { |
||||
None |
||||
} |
||||
}) |
||||
.collect::<Vec<_>>() |
||||
.join("\n") |
||||
} |
||||
|
||||
/// Produces the following function on said restriction:
|
||||
/// - `variant_doc_comments`, returning each variant and it's doc comment.
|
||||
/// - `container_doc_comment`, returning each variant and it's doc comment.
|
||||
impl ToTokens for Restrictions { |
||||
fn to_tokens(&self, tokens: &mut TokenStream2) { |
||||
let Self { |
||||
ident, |
||||
variants, |
||||
doc_comment, /* , doc_comments */ |
||||
} = self; |
||||
let output = quote! { |
||||
impl DocumentRestrictions for #ident { |
||||
fn variant_doc_comments() -> Vec<(Self, String)> { |
||||
vec![#((#variants)),*] |
||||
} |
||||
|
||||
fn container_doc_comment() -> String { |
||||
#doc_comment.to_owned() |
||||
} |
||||
} |
||||
}; |
||||
|
||||
tokens.extend(output); |
||||
} |
||||
} |
||||
|
||||
impl ToTokens for Restriction { |
||||
fn to_tokens(&self, tokens: &mut TokenStream2) { |
||||
let Self { ident, doc_comment } = self; |
||||
|
||||
// `clone` because `to_tokens` takes a reference to self.
|
||||
tokens.extend(quote!( (Self::#ident, #doc_comment.to_owned() ) )) |
||||
} |
||||
} |
||||
@ -0,0 +1,17 @@
|
||||
use proc_macro::TokenStream; |
||||
use quote::quote; |
||||
|
||||
#[cfg(feature = "doc-generators")] |
||||
mod doc_generators; |
||||
|
||||
/// Allows for the doc comments of restrictions to be accessed at runtime.
|
||||
#[cfg(feature = "doc-generators")] |
||||
#[proc_macro_derive(DocumentRestrictions)] |
||||
pub fn document_restrictions(item: TokenStream) -> TokenStream { |
||||
use doc_generators::Restrictions; |
||||
use syn::parse_macro_input; |
||||
|
||||
let restrictions = parse_macro_input!(item as Restrictions); |
||||
|
||||
quote! { #restrictions }.into() |
||||
} |
||||
@ -0,0 +1,210 @@
|
||||
[package] |
||||
authors = ["timokoesters <timo@koesters.xyz>"] |
||||
description = "A Matrix homeserver written in Rust" |
||||
edition.workspace = true |
||||
homepage.workspace = true |
||||
license = "Apache-2.0" |
||||
name = "conduit" |
||||
readme = "README.md" |
||||
repository.workspace = true |
||||
rust-version.workspace = true |
||||
version = "0.11.0-alpha" |
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html |
||||
|
||||
[lints] |
||||
workspace = true |
||||
|
||||
[dependencies] |
||||
# For the configuration |
||||
conduit-config.workspace = true |
||||
|
||||
# Web framework |
||||
axum = { version = "0.8", default-features = false, features = [ |
||||
"form", |
||||
"http1", |
||||
"http2", |
||||
"json", |
||||
"matched-path", |
||||
"tokio", |
||||
], optional = true } |
||||
axum-extra = { version = "0.10", features = ["typed-header"] } |
||||
axum-server = { version = "0.7", features = ["tls-rustls"] } |
||||
tower = { version = "0.5", features = ["util"] } |
||||
tower-http = { version = "0.6", features = [ |
||||
"add-extension", |
||||
"cors", |
||||
"sensitive-headers", |
||||
"trace", |
||||
"util", |
||||
] } |
||||
tower-service = "0.3" |
||||
|
||||
# Async runtime and utilities |
||||
tokio = { version = "1", features = ["fs", "macros", "signal", "sync"] } |
||||
|
||||
# Used for the http request / response body type for Ruma endpoints used with reqwest |
||||
bytes = "1" |
||||
http = "1" |
||||
# Used for ruma wrapper |
||||
serde_json = { workspace = true, features = ["raw_value"] } |
||||
# Used for appservice registration files |
||||
serde_yaml = "0.9" |
||||
# Used for pdu definition |
||||
serde = { version = "1", features = ["rc"] } |
||||
# Used for secure identifiers |
||||
rand = "0.9" |
||||
# Used to hash passwords |
||||
rust-argon2 = "2" |
||||
# Used to send requests |
||||
hyper = "1" |
||||
hyper-util = { version = "0.1", features = [ |
||||
"client", |
||||
"client-legacy", |
||||
"http1", |
||||
"http2", |
||||
] } |
||||
reqwest = { workspace = true, features = ["rustls-tls-native-roots", "socks"] } |
||||
# Used for conduit::Error type |
||||
thiserror.workspace = true #TODO: 2 |
||||
# Used to generate thumbnails for images |
||||
image = { version = "0.25", default-features = false, features = [ |
||||
"gif", |
||||
"jpeg", |
||||
"png", |
||||
"webp", |
||||
] } |
||||
# Used for creating media filenames |
||||
hex = "0.4" |
||||
sha2 = "0.10" |
||||
|
||||
# Used for parsing admin commands and purging media files for space limitations |
||||
bytesize.workspace = true |
||||
# Used to encode server public key |
||||
base64 = "0.22" |
||||
# Used when hashing the state |
||||
ring = "0.17" |
||||
# Used when querying the SRV record of other servers |
||||
hickory-resolver = "0.25" |
||||
# Used to find matching events for appservices |
||||
regex = "1" |
||||
# jwt jsonwebtokens |
||||
jsonwebtoken = "9" |
||||
# Performance measurements |
||||
opentelemetry = "0.29" |
||||
opentelemetry-jaeger-propagator = "0.29" |
||||
opentelemetry-otlp = { version = "0.29", features = ["grpc-tonic"] } |
||||
opentelemetry_sdk = { version = "0.29", features = ["rt-tokio"] } |
||||
tracing = "0.1" |
||||
tracing-flame = "0.2.0" |
||||
tracing-opentelemetry = "0.30" |
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] } |
||||
|
||||
lru-cache = "0.1.2" |
||||
parking_lot = { version = "0.12", optional = true } |
||||
rusqlite = { version = "0.35", optional = true, features = ["bundled"] } |
||||
|
||||
# crossbeam = { version = "0.8.2", optional = true } |
||||
num_cpus = "1" |
||||
threadpool = "1" |
||||
# Used for ruma wrapper |
||||
serde_html_form = "0.2" |
||||
|
||||
thread_local = "1" |
||||
# used for TURN server authentication |
||||
hmac = "0.12" |
||||
sha-1 = "0.10" |
||||
# used for conduit's CLI and admin room command parsing |
||||
chrono = "0.4" |
||||
clap.workspace = true |
||||
humantime = "2" |
||||
shell-words = "1.1.0" |
||||
|
||||
futures-util = { version = "0.3", default-features = false } |
||||
# Used for reading the configuration from conduit.toml & environment variables |
||||
figment = { version = "0.10", features = ["env", "toml"] } |
||||
|
||||
async-trait = "0.1" |
||||
tikv-jemallocator = { version = "0.6", features = [ |
||||
"unprefixed_malloc_on_supported_platforms", |
||||
], optional = true } |
||||
|
||||
sd-notify = { version = "0.4", optional = true } |
||||
# Used for inspecting request errors |
||||
http-body-util = "0.1.3" |
||||
# Used for S3 media backend |
||||
rusty-s3.workspace = true |
||||
|
||||
# Used for matrix spec type definitions and helpers |
||||
[dependencies.ruma] |
||||
features = [ |
||||
"appservice-api-c", |
||||
"canonical-json", |
||||
"client-api", |
||||
"compat-empty-string-null", |
||||
"compat-get-3pids", |
||||
"compat-null", |
||||
"compat-optional", |
||||
"compat-optional-txn-pdus", |
||||
"compat-server-signing-key-version", |
||||
"compat-tag-info", |
||||
"compat-unset-avatar", |
||||
"federation-api", |
||||
"push-gateway-api-c", |
||||
"rand", |
||||
"ring-compat", |
||||
"state-res", |
||||
"unstable-msc2448", |
||||
"unstable-msc4186", |
||||
"unstable-msc4311", |
||||
] |
||||
workspace = true |
||||
|
||||
[dependencies.rocksdb] |
||||
features = ["lz4", "multi-threaded-cf", "zstd"] |
||||
optional = true |
||||
package = "rust-rocksdb" |
||||
version = "0.43" |
||||
|
||||
[target.'cfg(unix)'.dependencies] |
||||
nix = { version = "0.30", features = ["resource"] } |
||||
|
||||
[features] |
||||
backend_rocksdb = ["conduit-config/rocksdb", "rocksdb"] |
||||
backend_sqlite = ["conduit-config/sqlite", "sqlite"] |
||||
conduit_bin = ["axum"] |
||||
default = ["backend_rocksdb", "backend_sqlite", "conduit_bin", "systemd"] |
||||
jemalloc = ["tikv-jemallocator"] |
||||
sqlite = ["parking_lot", "rusqlite", "tokio/signal"] |
||||
systemd = ["sd-notify"] |
||||
|
||||
enforce_msc4311 = [] |
||||
|
||||
[[bin]] |
||||
name = "conduit" |
||||
path = "src/main.rs" |
||||
required-features = ["conduit_bin"] |
||||
|
||||
[lib] |
||||
name = "conduit" |
||||
path = "src/lib.rs" |
||||
|
||||
[package.metadata.deb] |
||||
assets = [ |
||||
{ mode = "644", source = "../README.md", dest = "usr/share/doc/matrix-conduit" }, |
||||
{ mode = "644", source = "../debian/README.md", dest = "usr/share/doc/matrix-conduit/README.Debian" }, |
||||
{ mode = "755", source = "target/release/conduit", dest = "usr/sbin/matrix-conduit" }, |
||||
] |
||||
conf-files = ["/etc/matrix-conduit/conduit.toml"] |
||||
copyright = "2020, Timo Kösters <timo@koesters.xyz>" |
||||
depends = "$auto, ca-certificates" |
||||
extended-description = """\ |
||||
A fast Matrix homeserver that is optimized for smaller, personal servers, \ |
||||
instead of a server that has high scalability.""" |
||||
license-file = ["../LICENSE", "3"] |
||||
maintainer = "Paul van Tilburg <paul@luon.net>" |
||||
maintainer-scripts = "debian/" |
||||
name = "matrix-conduit" |
||||
priority = "optional" |
||||
section = "net" |
||||
systemd-units = { unit-name = "matrix-conduit" } |
||||
@ -1,6 +1,6 @@
|
||||
use crate::{services, utils, Error, Result, SUPPORTED_VERSIONS}; |
||||
use crate::{Error, Result, SUPPORTED_VERSIONS, services, utils}; |
||||
use bytes::BytesMut; |
||||
use ruma::api::{appservice::Registration, IncomingResponse, OutgoingRequest, SendAccessToken}; |
||||
use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken, appservice::Registration}; |
||||
use std::{fmt::Debug, mem, time::Duration}; |
||||
use tracing::warn; |
||||
|
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::api::client::{ |
||||
alias::{create_alias, delete_alias, get_alias}, |
||||
error::ErrorKind, |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::api::client::{ |
||||
backup::{ |
||||
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, Result, Ruma}; |
||||
use crate::{Result, Ruma, services}; |
||||
use ruma::api::client::discovery::get_capabilities::{ |
||||
self, |
||||
v3::{Capabilities, RoomVersionStability, RoomVersionsCapability}, |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::{ |
||||
api::client::{ |
||||
config::{ |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::{ |
||||
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, |
||||
events::StateEventType, |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::api::client::{ |
||||
error::ErrorKind, |
||||
filter::{create_filter, get_filter}, |
||||
@ -1,6 +1,7 @@
|
||||
use crate::{ |
||||
Error, Result, Ruma, |
||||
service::{pdu::PduBuilder, rooms::timeline::PduCount}, |
||||
services, utils, Error, Result, Ruma, |
||||
services, utils, |
||||
}; |
||||
use ruma::{ |
||||
api::client::{ |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, utils, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services, utils}; |
||||
use ruma::api::client::{ |
||||
error::ErrorKind, |
||||
presence::{get_presence, set_presence}, |
||||
@ -1,9 +1,9 @@
|
||||
use std::sync::Arc; |
||||
|
||||
use crate::{service::pdu::PduBuilder, services, Result, Ruma}; |
||||
use crate::{Result, Ruma, service::pdu::PduBuilder, services}; |
||||
use ruma::{ |
||||
api::client::redact::redact_event, |
||||
events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, |
||||
events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, |
||||
}; |
||||
|
||||
use serde_json::value::to_raw_value; |
||||
@ -1,12 +1,12 @@
|
||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; |
||||
use crate::{services, utils, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services, utils}; |
||||
use ruma::{ |
||||
UserId, |
||||
api::client::{ |
||||
error::ErrorKind, |
||||
session::{get_login_types, login, logout, logout_all}, |
||||
uiaa::UserIdentifier, |
||||
}, |
||||
UserId, |
||||
}; |
||||
use serde::Deserialize; |
||||
use tracing::{info, warn}; |
||||
@ -1,9 +1,9 @@
|
||||
use std::str::FromStr; |
||||
|
||||
use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, service::rooms::spaces::PagnationToken, services}; |
||||
use ruma::{ |
||||
api::client::{error::ErrorKind, space::get_hierarchy}, |
||||
UInt, |
||||
api::client::{error::ErrorKind, space::get_hierarchy}, |
||||
}; |
||||
|
||||
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
|
||||
@ -1,9 +1,9 @@
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::{ |
||||
api::client::tag::{create_tag, delete_tag, get_tags}, |
||||
events::{ |
||||
tag::{TagEvent, TagEventContent}, |
||||
RoomAccountDataEventType, |
||||
tag::{TagEvent, TagEventContent}, |
||||
}, |
||||
}; |
||||
use std::collections::BTreeMap; |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::api::client::{error::ErrorKind, threads::get_threads}; |
||||
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
|
||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
|
||||
pub async fn get_threads_route( |
||||
@ -1,6 +1,6 @@
|
||||
use std::collections::BTreeMap; |
||||
|
||||
use crate::{services, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use ruma::{ |
||||
api::{ |
||||
client::{error::ErrorKind, to_device::send_event_to_device}, |
||||
@ -1,4 +1,4 @@
|
||||
use crate::{services, utils, Error, Result, Ruma}; |
||||
use crate::{Error, Result, Ruma, services, utils}; |
||||
use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; |
||||
|
||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
||||
@ -1,9 +1,9 @@
|
||||
use crate::{services, Result, Ruma}; |
||||
use crate::{Result, Ruma, services}; |
||||
use ruma::{ |
||||
api::client::user_directory::search_users, |
||||
events::{ |
||||
room::join_rules::{JoinRule, RoomJoinRulesEventContent}, |
||||
StateEventType, |
||||
room::join_rules::{JoinRule, RoomJoinRulesEventContent}, |
||||
}, |
||||
}; |
||||
|
||||
@ -1,9 +1,10 @@
|
||||
use crate::{config::TurnAuth, services, Error, Result, Ruma}; |
||||
use base64::{engine::general_purpose, Engine as _}; |
||||
use crate::{Error, Result, Ruma, services}; |
||||
use base64::{Engine as _, engine::general_purpose}; |
||||
use conduit_config::TurnAuth; |
||||
use hmac::{Hmac, Mac}; |
||||
use ruma::{ |
||||
api::client::{error::ErrorKind, voip::get_turn_server_info}, |
||||
SecondsSinceUnixEpoch, |
||||
api::client::{error::ErrorKind, voip::get_turn_server_info}, |
||||
}; |
||||
use sha1::Sha1; |
||||
use std::time::{Duration, SystemTime}; |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::api::client::discovery::discover_homeserver::{self, HomeserverInfo}; |
||||
|
||||
use crate::{services, Result, Ruma}; |
||||
use crate::{Result, Ruma, services}; |
||||
|
||||
/// # `GET /.well-known/matrix/client`
|
||||
///
|
||||
@ -0,0 +1,452 @@
|
||||
use std::{ |
||||
collections::BTreeMap, |
||||
error::Error as _, |
||||
iter::FromIterator, |
||||
net::{IpAddr, SocketAddr}, |
||||
str::{self, FromStr}, |
||||
}; |
||||
|
||||
use axum::{ |
||||
RequestPartsExt, |
||||
body::Body, |
||||
extract::{ConnectInfo, FromRequest, Path}, |
||||
response::{IntoResponse, Response}, |
||||
}; |
||||
use axum_extra::{ |
||||
TypedHeader, |
||||
headers::{Authorization, authorization::Bearer}, |
||||
typed_header::TypedHeaderRejectionReason, |
||||
}; |
||||
use bytes::{BufMut, BytesMut}; |
||||
use conduit_config::IpAddrDetection; |
||||
use http::{Request, StatusCode}; |
||||
use ruma::{ |
||||
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId, |
||||
api::{ |
||||
AuthScheme, IncomingRequest, OutgoingResponse, client::error::ErrorKind, |
||||
federation::authentication::XMatrix, |
||||
}, |
||||
}; |
||||
use serde::Deserialize; |
||||
use tracing::{debug, error, warn}; |
||||
|
||||
use super::{Ruma, RumaResponse}; |
||||
use crate::{ |
||||
Error, Result, |
||||
service::{appservice::RegistrationInfo, rate_limiting::Target}, |
||||
services, |
||||
}; |
||||
|
||||
enum Token { |
||||
Appservice(Box<RegistrationInfo>), |
||||
User((OwnedUserId, OwnedDeviceId)), |
||||
AuthRateLimited(Error), |
||||
Invalid, |
||||
None, |
||||
} |
||||
|
||||
impl<T, S> FromRequest<S> for Ruma<T> |
||||
where |
||||
T: IncomingRequest, |
||||
S: Sync, |
||||
{ |
||||
type Rejection = Error; |
||||
|
||||
async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> { |
||||
#[derive(Deserialize)] |
||||
struct QueryParams { |
||||
access_token: Option<String>, |
||||
user_id: Option<String>, |
||||
} |
||||
|
||||
let (mut parts, mut body) = { |
||||
let (parts, body) = req.into_parts(); |
||||
let body = axum::body::to_bytes( |
||||
body, |
||||
services() |
||||
.globals |
||||
.max_request_size() |
||||
.try_into() |
||||
.unwrap_or(usize::MAX), |
||||
) |
||||
.await |
||||
.map_err(|err| { |
||||
if err |
||||
.source() |
||||
.is_some_and(|err| err.is::<http_body_util::LengthLimitError>()) |
||||
{ |
||||
Error::BadRequest(ErrorKind::TooLarge, "Reached maximum request size") |
||||
} else { |
||||
error!("An unknown error has occurred: {err}"); |
||||
Error::BadRequest(ErrorKind::Unknown, "An unknown error has occurred") |
||||
} |
||||
})?; |
||||
(parts, body) |
||||
}; |
||||
|
||||
let metadata = T::METADATA; |
||||
let auth_header: Option<TypedHeader<Authorization<Bearer>>> = |
||||
// If X-Matrix signatures are used, it causes this extraction to fail with an error
|
||||
if metadata.authentication != AuthScheme::ServerSignatures { |
||||
parts.extract().await? |
||||
} else { |
||||
None |
||||
}; |
||||
let path_params: Path<Vec<String>> = parts.extract().await?; |
||||
|
||||
let query = parts.uri.query().unwrap_or_default(); |
||||
let query_params: QueryParams = match serde_html_form::from_str(query) { |
||||
Ok(params) => params, |
||||
Err(e) => { |
||||
error!(%query, "Failed to deserialize query parameters: {}", e); |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::Unknown, |
||||
"Failed to read query parameters", |
||||
)); |
||||
} |
||||
}; |
||||
|
||||
let token = match &auth_header { |
||||
Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), |
||||
None => query_params.access_token.as_deref(), |
||||
}; |
||||
|
||||
let sender_ip_address: Option<IpAddr> = |
||||
match &services().globals.config.ip_address_detection { |
||||
IpAddrDetection::SocketAddress => { |
||||
let addr: ConnectInfo<SocketAddr> = parts.extract().await?; |
||||
Some(addr.ip()) |
||||
} |
||||
IpAddrDetection::Header(name) => parts |
||||
.headers |
||||
.get(name) |
||||
.and_then(|header| header.to_str().ok()) |
||||
.map(|header| header.split_once(',').map(|(ip, _)| ip).unwrap_or(header)) |
||||
.and_then(|ip| IpAddr::from_str(ip).ok()), |
||||
}; |
||||
|
||||
let token = if let Some(token) = token { |
||||
let mut rate_limited = None; |
||||
|
||||
if let Some(ip_addr) = sender_ip_address { |
||||
if let Err(instant) = services().rate_limiting.pre_auth_check(ip_addr).await { |
||||
rate_limited = Some(instant); |
||||
} |
||||
} |
||||
|
||||
if let Some(instant) = rate_limited { |
||||
Token::AuthRateLimited(instant) |
||||
} else if let Some(reg_info) = services().appservice.find_from_token(token).await { |
||||
Token::Appservice(Box::new(reg_info.clone())) |
||||
} else if let Some((user_id, device_id)) = services().users.find_from_token(token)? { |
||||
Token::User((user_id, device_id)) |
||||
} else { |
||||
Token::Invalid |
||||
} |
||||
} else { |
||||
Token::None |
||||
}; |
||||
|
||||
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok(); |
||||
|
||||
let (sender_user, sender_device, sender_servername, appservice_info) = match ( |
||||
metadata.authentication, |
||||
token, |
||||
) { |
||||
(_, Token::AuthRateLimited(instant)) => { |
||||
return Err(instant); |
||||
} |
||||
(_, Token::Invalid) => { |
||||
// OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
|
||||
if query_params.access_token.is_some() { |
||||
(None, None, None, None) |
||||
} else { |
||||
if let Some(addr) = sender_ip_address { |
||||
services() |
||||
.rate_limiting |
||||
.update_post_auth_failure(addr) |
||||
.await; |
||||
} else { |
||||
error!( |
||||
"Auth failure occurred, but IP address was not extracted. Please check your Conduit & reverse proxy configuration, as if nothing is done, an attacker can brute-force access tokens and login to user's accounts" |
||||
); |
||||
} |
||||
|
||||
return Err(Error::BadRequest( |
||||
ErrorKind::UnknownToken { soft_logout: false }, |
||||
"Unknown access token.", |
||||
)); |
||||
} |
||||
} |
||||
(AuthScheme::AccessToken, Token::Appservice(info)) => { |
||||
let user_id = query_params |
||||
.user_id |
||||
.map_or_else( |
||||
|| { |
||||
UserId::parse_with_server_name( |
||||
info.registration.sender_localpart.as_str(), |
||||
services().globals.server_name(), |
||||
) |
||||
}, |
||||
UserId::parse, |
||||
) |
||||
.map_err(|_| { |
||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") |
||||
})?; |
||||
|
||||
if !info.is_user_match(&user_id) { |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::Exclusive, |
||||
"User is not in namespace.", |
||||
)); |
||||
} |
||||
|
||||
if !services().users.exists(&user_id)? { |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::forbidden(), |
||||
"User does not exist.", |
||||
)); |
||||
} |
||||
|
||||
(Some(user_id), None, None, Some(*info)) |
||||
} |
||||
( |
||||
AuthScheme::None |
||||
| AuthScheme::AppserviceToken |
||||
| AuthScheme::AppserviceTokenOptional |
||||
| AuthScheme::AccessTokenOptional, |
||||
Token::Appservice(info), |
||||
) => (None, None, None, Some(*info)), |
||||
(AuthScheme::AppserviceToken | AuthScheme::AccessToken, Token::None) => { |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::MissingToken, |
||||
"Missing access token.", |
||||
)); |
||||
} |
||||
( |
||||
AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None, |
||||
Token::User((user_id, device_id)), |
||||
) => (Some(user_id), Some(device_id), None, None), |
||||
(AuthScheme::ServerSignatures, Token::None) => { |
||||
let TypedHeader(Authorization(x_matrix)) = parts |
||||
.extract::<TypedHeader<Authorization<XMatrix>>>() |
||||
.await |
||||
.map_err(|e| { |
||||
warn!("Missing or invalid Authorization header: {}", e); |
||||
|
||||
let msg = match e.reason() { |
||||
TypedHeaderRejectionReason::Missing => "Missing Authorization header.", |
||||
TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", |
||||
_ => "Unknown header-related error", |
||||
}; |
||||
|
||||
Error::BadRequest(ErrorKind::forbidden(), msg) |
||||
})?; |
||||
|
||||
if let Some(dest) = x_matrix.destination { |
||||
if dest != services().globals.server_name() { |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::Unauthorized, |
||||
"X-Matrix destination field does not match server name.", |
||||
)); |
||||
} |
||||
}; |
||||
|
||||
let origin_signatures = BTreeMap::from_iter([( |
||||
x_matrix.key.clone(), |
||||
CanonicalJsonValue::String(x_matrix.sig.to_string()), |
||||
)]); |
||||
|
||||
let signatures = BTreeMap::from_iter([( |
||||
x_matrix.origin.as_str().to_owned(), |
||||
CanonicalJsonValue::Object( |
||||
origin_signatures |
||||
.into_iter() |
||||
.map(|(k, v)| (k.to_string(), v)) |
||||
.collect(), |
||||
), |
||||
)]); |
||||
|
||||
let mut request_map = BTreeMap::from_iter([ |
||||
( |
||||
"method".to_owned(), |
||||
CanonicalJsonValue::String(parts.method.to_string()), |
||||
), |
||||
( |
||||
"uri".to_owned(), |
||||
CanonicalJsonValue::String(parts.uri.to_string()), |
||||
), |
||||
( |
||||
"origin".to_owned(), |
||||
CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), |
||||
), |
||||
( |
||||
"destination".to_owned(), |
||||
CanonicalJsonValue::String( |
||||
services().globals.server_name().as_str().to_owned(), |
||||
), |
||||
), |
||||
( |
||||
"signatures".to_owned(), |
||||
CanonicalJsonValue::Object(signatures), |
||||
), |
||||
]); |
||||
|
||||
if let Some(json_body) = &json_body { |
||||
request_map.insert("content".to_owned(), json_body.clone()); |
||||
}; |
||||
|
||||
let keys_result = services() |
||||
.rooms |
||||
.event_handler |
||||
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false) |
||||
.await; |
||||
|
||||
let keys = match keys_result { |
||||
Ok(b) => b, |
||||
Err(e) => { |
||||
warn!("Failed to fetch signing keys: {}", e); |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::forbidden(), |
||||
"Failed to fetch signing keys.", |
||||
)); |
||||
} |
||||
}; |
||||
|
||||
// Only verify_keys that are currently valid should be used for validating requests
|
||||
// as per MSC4029
|
||||
let pub_key_map = BTreeMap::from_iter([( |
||||
x_matrix.origin.as_str().to_owned(), |
||||
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() { |
||||
keys.verify_keys |
||||
.into_iter() |
||||
.map(|(id, key)| (id, key.key)) |
||||
.collect() |
||||
} else { |
||||
BTreeMap::new() |
||||
}, |
||||
)]); |
||||
|
||||
match ruma::signatures::verify_json(&pub_key_map, &request_map) { |
||||
Ok(()) => (None, None, Some(x_matrix.origin), None), |
||||
Err(e) => { |
||||
warn!( |
||||
"Failed to verify json request from {}: {}\n{:?}", |
||||
x_matrix.origin, e, request_map |
||||
); |
||||
|
||||
if parts.uri.to_string().contains('@') { |
||||
warn!( |
||||
"Request uri contained '@' character. Make sure your \ |
||||
reverse proxy gives Conduit the raw uri (apache: use \ |
||||
nocanon)" |
||||
); |
||||
} |
||||
|
||||
return Err(Error::BadRequest( |
||||
ErrorKind::forbidden(), |
||||
"Failed to verify X-Matrix signatures.", |
||||
)); |
||||
} |
||||
} |
||||
} |
||||
( |
||||
AuthScheme::None |
||||
| AuthScheme::AppserviceTokenOptional |
||||
| AuthScheme::AccessTokenOptional, |
||||
Token::None, |
||||
) => (None, None, None, None), |
||||
(AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => { |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::Unauthorized, |
||||
"Only server signatures should be used on this endpoint.", |
||||
)); |
||||
} |
||||
(AuthScheme::AppserviceToken | AuthScheme::AppserviceTokenOptional, Token::User(_)) => { |
||||
return Err(Error::BadRequest( |
||||
ErrorKind::Unauthorized, |
||||
"Only appservice access tokens should be used on this endpoint.", |
||||
)); |
||||
} |
||||
}; |
||||
|
||||
let sender_ip_address = parts |
||||
.headers |
||||
.get("X-Forwarded-For") |
||||
.and_then(|header| header.to_str().ok()) |
||||
.map(|header| header.split_once(',').map(|(ip, _)| ip).unwrap_or(header)) |
||||
.and_then(|ip| IpAddr::from_str(ip).ok()); |
||||
|
||||
let target = if let Some(server_name) = sender_servername.clone() { |
||||
Some(Target::Server(server_name)) |
||||
} else if let Some(user) = &sender_user { |
||||
Some(Target::from_client_request(appservice_info.clone(), user)) |
||||
} else { |
||||
sender_ip_address.map(Target::Ip) |
||||
}; |
||||
|
||||
services().rate_limiting.check(target, metadata).await?; |
||||
|
||||
let mut http_request = Request::builder().uri(parts.uri).method(parts.method); |
||||
*http_request.headers_mut().unwrap() = parts.headers; |
||||
|
||||
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { |
||||
let user_id = sender_user.clone().unwrap_or_else(|| { |
||||
UserId::parse_with_server_name("", services().globals.server_name()) |
||||
.expect("we know this is valid") |
||||
}); |
||||
|
||||
let uiaa_request = json_body |
||||
.get("auth") |
||||
.and_then(|auth| auth.as_object()) |
||||
.and_then(|auth| auth.get("session")) |
||||
.and_then(|session| session.as_str()) |
||||
.and_then(|session| { |
||||
services().uiaa.get_uiaa_request( |
||||
&user_id, |
||||
&sender_device.clone().unwrap_or_else(|| "".into()), |
||||
session, |
||||
) |
||||
}); |
||||
|
||||
if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { |
||||
for (key, value) in initial_request { |
||||
json_body.entry(key).or_insert(value); |
||||
} |
||||
} |
||||
|
||||
let mut buf = BytesMut::new().writer(); |
||||
serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail"); |
||||
body = buf.into_inner().freeze(); |
||||
} |
||||
|
||||
let http_request = http_request.body(&*body).unwrap(); |
||||
|
||||
debug!("{:?}", http_request); |
||||
|
||||
let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { |
||||
warn!("try_from_http_request failed: {:?}", e); |
||||
debug!("JSON body: {:?}", json_body); |
||||
Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") |
||||
})?; |
||||
|
||||
Ok(Ruma { |
||||
body, |
||||
sender_user, |
||||
sender_device, |
||||
sender_servername, |
||||
appservice_info, |
||||
json_body, |
||||
sender_ip_address, |
||||
}) |
||||
} |
||||
} |
||||
|
||||
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> { |
||||
fn into_response(self) -> Response { |
||||
match self.0.try_into_http_response::<BytesMut>() { |
||||
Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(), |
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), |
||||
} |
||||
} |
||||
} |
||||
@ -1,6 +1,7 @@
|
||||
use super::Config; |
||||
use crate::Result; |
||||
|
||||
use conduit_config::Config; |
||||
|
||||
use std::{future::Future, pin::Pin, sync::Arc}; |
||||
|
||||
#[cfg(feature = "sqlite")] |
||||
@ -1,5 +1,6 @@
|
||||
use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree}; |
||||
use crate::{utils, Result}; |
||||
use super::{KeyValueDatabaseEngine, KvTree, watchers::Watchers}; |
||||
use crate::{Result, utils}; |
||||
use conduit_config::Config; |
||||
use std::{ |
||||
future::Future, |
||||
pin::Pin, |
||||
@ -1,5 +1,6 @@
|
||||
use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; |
||||
use crate::{database::Config, Result}; |
||||
use super::{KeyValueDatabaseEngine, KvTree, watchers::Watchers}; |
||||
use crate::Result; |
||||
use conduit_config::Config; |
||||
use parking_lot::{Mutex, MutexGuard}; |
||||
use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; |
||||
use std::{ |
||||
@ -1,5 +1,5 @@
|
||||
use std::{ |
||||
collections::{hash_map, HashMap}, |
||||
collections::{HashMap, hash_map}, |
||||
future::Future, |
||||
pin::Pin, |
||||
sync::RwLock, |
||||
@ -1,13 +1,13 @@
|
||||
use std::collections::HashMap; |
||||
|
||||
use ruma::{ |
||||
RoomId, UserId, |
||||
api::client::error::ErrorKind, |
||||
events::{AnyGlobalAccountDataEvent, AnyRoomAccountDataEvent, RoomAccountDataEventType}, |
||||
serde::Raw, |
||||
RoomId, UserId, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::account_data::Data for KeyValueDatabase { |
||||
/// Places one event in the account data of the user and removes the previous entry.
|
||||
@ -1,6 +1,6 @@
|
||||
use ruma::api::appservice::Registration; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, utils}; |
||||
|
||||
impl service::appservice::Data for KeyValueDatabase { |
||||
/// Registers an appservice and returns the ID to the caller
|
||||
@ -1,18 +1,19 @@
|
||||
use std::collections::HashMap; |
||||
|
||||
use async_trait::async_trait; |
||||
use futures_util::{stream::FuturesUnordered, StreamExt}; |
||||
use futures_util::{StreamExt, stream::FuturesUnordered}; |
||||
use lru_cache::LruCache; |
||||
use ruma::{ |
||||
DeviceId, ServerName, UserId, |
||||
api::federation::discovery::{OldVerifyKey, ServerSigningKeys}, |
||||
signatures::Ed25519KeyPair, |
||||
DeviceId, ServerName, UserId, |
||||
}; |
||||
|
||||
use crate::{ |
||||
Error, Result, |
||||
database::KeyValueDatabase, |
||||
service::{self, globals::SigningKeys}, |
||||
services, utils, Error, Result, |
||||
services, utils, |
||||
}; |
||||
|
||||
pub const COUNTER: &[u8] = b"c"; |
||||
@ -1,15 +1,15 @@
|
||||
use std::collections::BTreeMap; |
||||
|
||||
use ruma::{ |
||||
OwnedRoomId, RoomId, UserId, |
||||
api::client::{ |
||||
backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, |
||||
error::ErrorKind, |
||||
}, |
||||
serde::Raw, |
||||
OwnedRoomId, RoomId, UserId, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::key_backups::Data for KeyValueDatabase { |
||||
fn create_backup( |
||||
@ -1,9 +1,9 @@
|
||||
use ruma::{ |
||||
api::client::push::{set_pusher, Pusher}, |
||||
UserId, |
||||
api::client::push::{Pusher, set_pusher}, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, utils}; |
||||
|
||||
impl service::pusher::Data for KeyValueDatabase { |
||||
fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { |
||||
@ -1,9 +1,9 @@
|
||||
use ruma::{ |
||||
api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, |
||||
UserId, |
||||
OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId, |
||||
api::client::error::ErrorKind, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::rooms::alias::Data for KeyValueDatabase { |
||||
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { |
||||
@ -1,6 +1,6 @@
|
||||
use std::{collections::HashSet, mem::size_of, sync::Arc}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, utils, Result}; |
||||
use crate::{Result, database::KeyValueDatabase, service, utils}; |
||||
|
||||
impl service::rooms::auth_chain::Data for KeyValueDatabase { |
||||
fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Option<Arc<HashSet<u64>>>> { |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::{OwnedRoomId, RoomId}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, utils}; |
||||
|
||||
impl service::rooms::directory::Data for KeyValueDatabase { |
||||
fn set_public(&self, room_id: &RoomId) -> Result<()> { |
||||
@ -1,10 +1,10 @@
|
||||
use std::collections::HashMap; |
||||
|
||||
use ruma::{ |
||||
events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, |
||||
OwnedUserId, RoomId, UInt, UserId, events::presence::PresenceEvent, presence::PresenceState, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::rooms::edus::presence::Data for KeyValueDatabase { |
||||
fn update_presence( |
||||
@ -1,8 +1,8 @@
|
||||
use ruma::{ |
||||
events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId, |
||||
CanonicalJsonObject, OwnedUserId, RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { |
||||
fn readreceipt_update( |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::{DeviceId, RoomId, UserId}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, Result}; |
||||
use crate::{Result, database::KeyValueDatabase, service}; |
||||
|
||||
impl service::rooms::lazy_loading::Data for KeyValueDatabase { |
||||
fn lazy_load_was_sent_before( |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::{OwnedRoomId, RoomId}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::rooms::metadata::Data for KeyValueDatabase { |
||||
fn exists(&self, room_id: &RoomId) -> Result<bool> { |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::{CanonicalJsonObject, EventId}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; |
||||
use crate::{Error, PduEvent, Result, database::KeyValueDatabase, service}; |
||||
|
||||
impl service::rooms::outlier::Data for KeyValueDatabase { |
||||
fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> { |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::RoomId; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Result}; |
||||
use crate::{Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
/// Splits a string into tokens used as keys in the search inverted index
|
||||
///
|
||||
@ -1,8 +1,8 @@
|
||||
use std::sync::Arc; |
||||
|
||||
use ruma::{events::StateEventType, EventId, RoomId}; |
||||
use ruma::{EventId, RoomId, events::StateEventType}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::rooms::short::Data for KeyValueDatabase { |
||||
fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result<u64> { |
||||
@ -1,8 +1,8 @@
|
||||
use std::{collections::HashMap, sync::Arc}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; |
||||
use crate::{Error, PduEvent, Result, database::KeyValueDatabase, service, services, utils}; |
||||
use async_trait::async_trait; |
||||
use ruma::{events::StateEventType, EventId, RoomId}; |
||||
use ruma::{EventId, RoomId, events::StateEventType}; |
||||
|
||||
#[async_trait] |
||||
impl service::rooms::state_accessor::Data for KeyValueDatabase { |
||||
@ -1,15 +1,16 @@
|
||||
use std::{collections::HashSet, sync::Arc}; |
||||
|
||||
use ruma::{ |
||||
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, |
||||
events::{AnyStrippedStateEvent, AnySyncStateEvent}, |
||||
serde::Raw, |
||||
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, |
||||
}; |
||||
|
||||
use crate::{ |
||||
database::{abstraction::KvTree, KeyValueDatabase}, |
||||
Error, Result, |
||||
database::{KeyValueDatabase, abstraction::KvTree}, |
||||
service::{self, appservice::RegistrationInfo}, |
||||
services, utils, Error, Result, |
||||
services, utils, |
||||
}; |
||||
|
||||
use super::{get_room_and_user_byte_ids, get_userroom_id_bytes}; |
||||
@ -1,9 +1,10 @@
|
||||
use std::{collections::HashSet, mem::size_of, sync::Arc}; |
||||
|
||||
use crate::{ |
||||
Error, Result, |
||||
database::KeyValueDatabase, |
||||
service::{self, rooms::state_compressor::data::StateDiff}, |
||||
utils, Error, Result, |
||||
utils, |
||||
}; |
||||
|
||||
impl service::rooms::state_compressor::Data for KeyValueDatabase { |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; |
||||
use ruma::{OwnedUserId, RoomId, UserId, api::client::threads::get_threads::v1::IncludeThreads}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; |
||||
use crate::{Error, PduEvent, Result, database::KeyValueDatabase, service, services, utils}; |
||||
|
||||
impl service::rooms::threads::Data for KeyValueDatabase { |
||||
fn threads_until<'a>( |
||||
@ -1,12 +1,13 @@
|
||||
use ruma::{ServerName, UserId}; |
||||
|
||||
use crate::{ |
||||
Error, Result, |
||||
database::KeyValueDatabase, |
||||
service::{ |
||||
self, |
||||
sending::{OutgoingKind, SendingEventType}, |
||||
}, |
||||
services, utils, Error, Result, |
||||
services, utils, |
||||
}; |
||||
|
||||
impl service::sending::Data for KeyValueDatabase { |
||||
@ -1,6 +1,6 @@
|
||||
use ruma::{DeviceId, TransactionId, UserId}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, Result}; |
||||
use crate::{Result, database::KeyValueDatabase, service}; |
||||
|
||||
impl service::transaction_ids::Data for KeyValueDatabase { |
||||
fn add_txnid( |
||||
@ -1,9 +1,9 @@
|
||||
use ruma::{ |
||||
api::client::{error::ErrorKind, uiaa::UiaaInfo}, |
||||
CanonicalJsonValue, DeviceId, UserId, |
||||
api::client::{error::ErrorKind, uiaa::UiaaInfo}, |
||||
}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, Error, Result}; |
||||
use crate::{Error, Result, database::KeyValueDatabase, service}; |
||||
|
||||
impl service::uiaa::Data for KeyValueDatabase { |
||||
fn set_uiaa_request( |
||||
@ -1,20 +1,21 @@
|
||||
use std::{collections::BTreeMap, mem::size_of}; |
||||
|
||||
use ruma::{ |
||||
DeviceId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedMxcUri, |
||||
OwnedOneTimeKeyId, OwnedUserId, UInt, UserId, |
||||
api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, |
||||
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, |
||||
events::{AnyToDeviceEvent, StateEventType}, |
||||
serde::Raw, |
||||
DeviceId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedMxcUri, |
||||
OwnedOneTimeKeyId, OwnedUserId, UInt, UserId, |
||||
}; |
||||
use tracing::warn; |
||||
|
||||
use crate::{ |
||||
Error, Result, |
||||
api::client_server::TOKEN_LENGTH, |
||||
database::KeyValueDatabase, |
||||
service::{self, users::clean_signatures}, |
||||
services, utils, Error, Result, |
||||
services, utils, |
||||
}; |
||||
|
||||
impl service::users::Data for KeyValueDatabase { |
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue