mirror of https://gitlab.com/famedly/conduit.git
Browse Source
Merge branch 'next' into 'master' See merge request famedly/conduit!646merge-requests/582/merge v0.7.0
110 changed files with 5372 additions and 4085 deletions
@ -0,0 +1,15 @@
|
||||
# EditorConfig is awesome: https://EditorConfig.org |
||||
|
||||
root = true |
||||
|
||||
[*] |
||||
charset = utf-8 |
||||
end_of_line = lf |
||||
tab_width = 4 |
||||
indent_size = 4 |
||||
indent_style = space |
||||
insert_final_newline = true |
||||
max_line_length = 120 |
||||
|
||||
[*.nix] |
||||
indent_size = 2 |
||||
@ -1,244 +1,184 @@
|
||||
stages: |
||||
- build |
||||
- build docker image |
||||
- test |
||||
- upload artifacts |
||||
- ci |
||||
- artifacts |
||||
- publish |
||||
|
||||
variables: |
||||
# Make GitLab CI go fast: |
||||
GIT_SUBMODULE_STRATEGY: recursive |
||||
FF_USE_FASTZIP: 1 |
||||
CACHE_COMPRESSION_LEVEL: fastest |
||||
|
||||
# --------------------------------------------------------------------- # |
||||
# Create and publish docker image # |
||||
# --------------------------------------------------------------------- # |
||||
|
||||
.docker-shared-settings: |
||||
stage: "build docker image" |
||||
needs: [] |
||||
tags: [ "docker" ] |
||||
variables: |
||||
# Docker in Docker: |
||||
DOCKER_BUILDKIT: 1 |
||||
image: |
||||
name: docker.io/docker |
||||
services: |
||||
- name: docker.io/docker:dind |
||||
alias: docker |
||||
script: |
||||
- apk add openssh-client |
||||
- eval $(ssh-agent -s) |
||||
- mkdir -p ~/.ssh && chmod 700 ~/.ssh |
||||
- printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config |
||||
- sh .gitlab/setup-buildx-remote-builders.sh |
||||
# Authorize against this project's own image registry: |
||||
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY |
||||
# Build multiplatform image and push to temporary tag: |
||||
- > |
||||
docker buildx build |
||||
--platform "linux/arm/v7,linux/arm64,linux/amd64" |
||||
--pull |
||||
--tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" |
||||
--push |
||||
--provenance=false |
||||
--file "Dockerfile" . |
||||
# Build multiplatform image to deb stage and extract their .deb files: |
||||
- > |
||||
docker buildx build |
||||
--platform "linux/arm/v7,linux/arm64,linux/amd64" |
||||
--target "packager-result" |
||||
--output="type=local,dest=/tmp/build-output" |
||||
--provenance=false |
||||
--file "Dockerfile" . |
||||
# Build multiplatform image to binary stage and extract their binaries: |
||||
- > |
||||
docker buildx build |
||||
--platform "linux/arm/v7,linux/arm64,linux/amd64" |
||||
--target "builder-result" |
||||
--output="type=local,dest=/tmp/build-output" |
||||
--provenance=false |
||||
--file "Dockerfile" . |
||||
# Copy to GitLab container registry: |
||||
- > |
||||
docker buildx imagetools create |
||||
--tag "$CI_REGISTRY_IMAGE/$TAG" |
||||
--tag "$CI_REGISTRY_IMAGE/$TAG-bullseye" |
||||
--tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" |
||||
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" |
||||
# if DockerHub credentials exist, also copy to dockerhub: |
||||
- if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi |
||||
- > |
||||
if [ -n "${DOCKER_HUB}" ]; then |
||||
docker buildx imagetools create |
||||
--tag "$DOCKER_HUB_IMAGE/$TAG" |
||||
--tag "$DOCKER_HUB_IMAGE/$TAG-bullseye" |
||||
--tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" |
||||
"$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" |
||||
; fi |
||||
- mv /tmp/build-output ./ |
||||
artifacts: |
||||
paths: |
||||
- "./build-output/" |
||||
# Makes some things print in color |
||||
TERM: ansi |
||||
|
||||
docker:next: |
||||
extends: .docker-shared-settings |
||||
# Avoid duplicate pipelines |
||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines |
||||
workflow: |
||||
rules: |
||||
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"' |
||||
variables: |
||||
TAG: "matrix-conduit:next" |
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event" |
||||
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS |
||||
when: never |
||||
- if: $CI |
||||
|
||||
docker:master: |
||||
extends: .docker-shared-settings |
||||
rules: |
||||
- if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"' |
||||
variables: |
||||
TAG: "matrix-conduit:latest" |
||||
before_script: |
||||
# Enable nix-command and flakes |
||||
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi |
||||
|
||||
docker:tags: |
||||
extends: .docker-shared-settings |
||||
rules: |
||||
- if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG" |
||||
variables: |
||||
TAG: "matrix-conduit:$CI_COMMIT_TAG" |
||||
# Add our own binary cache |
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi |
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi |
||||
|
||||
# Add alternate binary cache |
||||
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi |
||||
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi |
||||
|
||||
docker build debugging: |
||||
extends: .docker-shared-settings |
||||
rules: |
||||
- if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/" |
||||
variables: |
||||
TAG: "matrix-conduit-docker-tests:latest" |
||||
# Add crane binary cache |
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi |
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi |
||||
|
||||
# --------------------------------------------------------------------- # |
||||
# Run tests # |
||||
# --------------------------------------------------------------------- # |
||||
# Add nix-community binary cache |
||||
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi |
||||
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi |
||||
|
||||
cargo check: |
||||
stage: test |
||||
image: docker.io/rust:1.70.0-bullseye |
||||
needs: [] |
||||
interruptible: true |
||||
before_script: |
||||
- "rustup show && rustc --version && cargo --version" # Print version info for debugging |
||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb |
||||
script: |
||||
- cargo check |
||||
# Install direnv and nix-direnv |
||||
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi |
||||
|
||||
# Allow .envrc |
||||
- if command -v nix > /dev/null; then direnv allow; fi |
||||
|
||||
.test-shared-settings: |
||||
stage: "test" |
||||
needs: [] |
||||
image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" |
||||
tags: ["docker"] |
||||
variables: |
||||
CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow |
||||
interruptible: true |
||||
# Set CARGO_HOME to a cacheable path |
||||
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" |
||||
|
||||
test:cargo: |
||||
extends: .test-shared-settings |
||||
before_script: |
||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb |
||||
ci: |
||||
stage: ci |
||||
image: nixos/nix:2.20.4 |
||||
script: |
||||
- rustc --version && cargo --version # Print version info for debugging |
||||
- "cargo test --color always --workspace --verbose --locked --no-fail-fast" |
||||
# Cache the inputs required for the devShell |
||||
- ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation |
||||
|
||||
test:clippy: |
||||
extends: .test-shared-settings |
||||
allow_failure: true |
||||
before_script: |
||||
- rustup component add clippy |
||||
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb |
||||
- direnv exec . engage |
||||
cache: |
||||
key: nix |
||||
paths: |
||||
- target |
||||
- .gitlab-ci.d |
||||
rules: |
||||
# CI on upstream runners (only available for maintainers) |
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" |
||||
# Manual CI on unprotected branches that are not MRs |
||||
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" |
||||
when: manual |
||||
# Manual CI on forks |
||||
- if: $IS_UPSTREAM_CI != "true" |
||||
when: manual |
||||
- if: $CI |
||||
interruptible: true |
||||
|
||||
artifacts: |
||||
stage: artifacts |
||||
image: nixos/nix:2.20.4 |
||||
script: |
||||
- rustc --version && cargo --version # Print version info for debugging |
||||
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" |
||||
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl |
||||
- cp result/bin/conduit x86_64-unknown-linux-musl |
||||
|
||||
- mkdir -p target/release |
||||
- cp result/bin/conduit target/release |
||||
- direnv exec . cargo deb --no-build |
||||
- mv target/debian/*.deb x86_64-unknown-linux-musl.deb |
||||
|
||||
# Since the OCI image package is based on the binary package, this has the |
||||
# fun side effect of uploading the normal binary too. Conduit users who are |
||||
# deploying with Nix can leverage this fact by adding our binary cache to |
||||
# their systems. |
||||
# |
||||
# Note that although we have an `oci-image-x86_64-unknown-linux-musl` |
||||
# output, we don't build it because it would be largely redundant to this |
||||
# one since it's all containerized anyway. |
||||
- ./bin/nix-build-and-cache .#oci-image |
||||
- cp result oci-image-amd64.tar.gz |
||||
|
||||
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl |
||||
- cp result/bin/conduit aarch64-unknown-linux-musl |
||||
|
||||
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl |
||||
- cp result oci-image-arm64v8.tar.gz |
||||
|
||||
- ./bin/nix-build-and-cache .#book |
||||
# We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 |
||||
- cp -r --dereference result public |
||||
artifacts: |
||||
when: always |
||||
reports: |
||||
codequality: gl-code-quality-report.json |
||||
paths: |
||||
- x86_64-unknown-linux-musl |
||||
- aarch64-unknown-linux-musl |
||||
- x86_64-unknown-linux-musl.deb |
||||
- oci-image-amd64.tar.gz |
||||
- oci-image-arm64v8.tar.gz |
||||
- public |
||||
rules: |
||||
# CI required for all MRs |
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event" |
||||
# Optional CI on forks |
||||
- if: $IS_UPSTREAM_CI != "true" |
||||
when: manual |
||||
allow_failure: true |
||||
- if: $CI |
||||
interruptible: true |
||||
|
||||
test:format: |
||||
extends: .test-shared-settings |
||||
before_script: |
||||
- rustup component add rustfmt |
||||
.push-oci-image: |
||||
stage: publish |
||||
image: docker:25.0.0 |
||||
services: |
||||
- docker:25.0.0-dind |
||||
variables: |
||||
IMAGE_SUFFIX_AMD64: amd64 |
||||
IMAGE_SUFFIX_ARM64V8: arm64v8 |
||||
script: |
||||
- cargo fmt --all -- --check |
||||
- docker load -i oci-image-amd64.tar.gz |
||||
- IMAGE_ID_AMD64=$(docker images -q conduit:next) |
||||
- docker load -i oci-image-arm64v8.tar.gz |
||||
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next) |
||||
# Tag and push the architecture specific images |
||||
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 |
||||
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 |
||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 |
||||
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 |
||||
# Tag the multi-arch image |
||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 |
||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA |
||||
# Tag and push the git ref |
||||
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 |
||||
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME |
||||
# Tag git tags as 'latest' |
||||
- | |
||||
if [[ -n "$CI_COMMIT_TAG" ]]; then |
||||
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 |
||||
docker manifest push $IMAGE_NAME:latest |
||||
fi |
||||
dependencies: |
||||
- artifacts |
||||
only: |
||||
- next |
||||
- master |
||||
- tags |
||||
|
||||
oci-image:push-gitlab: |
||||
extends: .push-oci-image |
||||
variables: |
||||
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit |
||||
before_script: |
||||
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY |
||||
|
||||
test:audit: |
||||
extends: .test-shared-settings |
||||
allow_failure: true |
||||
script: |
||||
- cargo audit --color always || true |
||||
- cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json |
||||
artifacts: |
||||
when: always |
||||
reports: |
||||
sast: gl-sast-report.json |
||||
|
||||
test:dockerlint: |
||||
stage: "test" |
||||
needs: [] |
||||
image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine |
||||
interruptible: true |
||||
oci-image:push-dockerhub: |
||||
extends: .push-oci-image |
||||
variables: |
||||
IMAGE_NAME: matrixconduit/matrix-conduit |
||||
before_script: |
||||
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD |
||||
|
||||
pages: |
||||
stage: publish |
||||
dependencies: |
||||
- artifacts |
||||
only: |
||||
- next |
||||
script: |
||||
- hadolint --version |
||||
# First pass: Print for CI log: |
||||
- > |
||||
hadolint |
||||
--no-fail --verbose |
||||
./Dockerfile |
||||
# Then output the results into a json for GitLab to pretty-print this in the MR: |
||||
- > |
||||
hadolint |
||||
--format gitlab_codeclimate |
||||
--failure-threshold error |
||||
./Dockerfile > dockerlint.json |
||||
- "true" |
||||
artifacts: |
||||
when: always |
||||
reports: |
||||
codequality: dockerlint.json |
||||
paths: |
||||
- dockerlint.json |
||||
rules: |
||||
- if: '$CI_COMMIT_REF_NAME != "master"' |
||||
changes: |
||||
- docker/*Dockerfile |
||||
- Dockerfile |
||||
- .gitlab-ci.yml |
||||
- if: '$CI_COMMIT_REF_NAME == "master"' |
||||
- if: '$CI_COMMIT_REF_NAME == "next"' |
||||
|
||||
# --------------------------------------------------------------------- # |
||||
# Store binaries as package so they have download urls # |
||||
# --------------------------------------------------------------------- # |
||||
|
||||
# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME: |
||||
|
||||
#publish:package: |
||||
# stage: "upload artifacts" |
||||
# needs: |
||||
# - "docker:tags" |
||||
# rules: |
||||
# - if: "$CI_COMMIT_TAG" |
||||
# image: curlimages/curl:latest |
||||
# tags: ["docker"] |
||||
# variables: |
||||
# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts |
||||
# script: |
||||
# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' |
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' |
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"' |
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' |
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' |
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"' |
||||
# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"' |
||||
|
||||
# Avoid duplicate pipelines |
||||
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines |
||||
workflow: |
||||
rules: |
||||
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"' |
||||
- if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" |
||||
when: never |
||||
- if: "$CI_COMMIT_BRANCH" |
||||
- if: "$CI_COMMIT_TAG" |
||||
- public |
||||
|
||||
@ -0,0 +1,3 @@
|
||||
# Docs: Map markdown to html files |
||||
- source: /docs/(.+)\.md/ |
||||
public: '\1.html' |
||||
@ -1,132 +0,0 @@
|
||||
# syntax=docker/dockerfile:1 |
||||
FROM docker.io/rust:1.70-bullseye AS base |
||||
|
||||
FROM base AS builder |
||||
WORKDIR /usr/src/conduit |
||||
|
||||
# Install required packages to build Conduit and it's dependencies |
||||
RUN apt-get update && \ |
||||
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5 |
||||
|
||||
# == Build dependencies without our own code separately for caching == |
||||
# |
||||
# Need a fake main.rs since Cargo refuses to build anything otherwise. |
||||
# |
||||
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature |
||||
# request that would allow just dependencies to be compiled, presumably |
||||
# regardless of whether source files are available. |
||||
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs |
||||
COPY Cargo.toml Cargo.lock ./ |
||||
RUN cargo build --release && rm -r src |
||||
|
||||
# Copy over actual Conduit sources |
||||
COPY src src |
||||
|
||||
# main.rs and lib.rs need their timestamp updated for this to work correctly since |
||||
# otherwise the build with the fake main.rs from above is newer than the |
||||
# source files (COPY preserves timestamps). |
||||
# |
||||
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit |
||||
RUN touch src/main.rs && touch src/lib.rs && cargo build --release |
||||
|
||||
|
||||
# ONLY USEFUL FOR CI: target stage to extract build artifacts |
||||
FROM scratch AS builder-result |
||||
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit |
||||
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------------------------------- |
||||
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems: |
||||
# --------------------------------------------------------------------------------------------------------------- |
||||
FROM base AS build-cargo-deb |
||||
|
||||
RUN apt-get update && \ |
||||
apt-get install -y --no-install-recommends \ |
||||
dpkg \ |
||||
dpkg-dev \ |
||||
liblzma-dev |
||||
|
||||
RUN cargo install cargo-deb |
||||
# => binary is in /usr/local/cargo/bin/cargo-deb |
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------------------------------- |
||||
# Package conduit build-result into a .deb package: |
||||
# --------------------------------------------------------------------------------------------------------------- |
||||
FROM builder AS packager |
||||
WORKDIR /usr/src/conduit |
||||
|
||||
COPY ./LICENSE ./LICENSE |
||||
COPY ./README.md ./README.md |
||||
COPY debian ./debian |
||||
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb |
||||
|
||||
# --no-build makes cargo-deb reuse already compiled project |
||||
RUN cargo deb --no-build |
||||
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb |
||||
|
||||
|
||||
# ONLY USEFUL FOR CI: target stage to extract build artifacts |
||||
FROM scratch AS packager-result |
||||
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb |
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------------------------------- |
||||
# Stuff below this line actually ends up in the resulting docker image |
||||
# --------------------------------------------------------------------------------------------------------------- |
||||
FROM docker.io/debian:bullseye-slim AS runner |
||||
|
||||
# Standard port on which Conduit launches. |
||||
# You still need to map the port when using the docker command or docker-compose. |
||||
EXPOSE 6167 |
||||
|
||||
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit |
||||
|
||||
ENV CONDUIT_PORT=6167 \ |
||||
CONDUIT_ADDRESS="0.0.0.0" \ |
||||
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ |
||||
CONDUIT_CONFIG='' |
||||
# └─> Set no config file to do all configuration with env vars |
||||
|
||||
# Conduit needs: |
||||
# dpkg: to install conduit.deb |
||||
# ca-certificates: for https |
||||
# iproute2 & wget: for the healthcheck script |
||||
RUN apt-get update && apt-get -y --no-install-recommends install \ |
||||
dpkg \ |
||||
ca-certificates \ |
||||
iproute2 \ |
||||
wget \ |
||||
&& rm -rf /var/lib/apt/lists/* |
||||
|
||||
# Test if Conduit is still alive, uses the same endpoint as Element |
||||
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh |
||||
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh |
||||
|
||||
# Install conduit.deb: |
||||
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/ |
||||
RUN dpkg -i /srv/conduit/*.deb |
||||
|
||||
# Improve security: Don't run stuff as root, that does not need to run as root |
||||
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. |
||||
ARG USER_ID=1000 |
||||
ARG GROUP_ID=1000 |
||||
RUN set -x ; \ |
||||
groupadd -r -g ${GROUP_ID} conduit ; \ |
||||
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 |
||||
|
||||
# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable: |
||||
RUN chown -cR conduit:conduit /srv/conduit && \ |
||||
chmod +x /srv/conduit/healthcheck.sh && \ |
||||
mkdir -p ${DEFAULT_DB_PATH} && \ |
||||
chown -cR conduit:conduit ${DEFAULT_DB_PATH} |
||||
|
||||
# Change user to conduit, no root permissions afterwards: |
||||
USER conduit |
||||
# Set container home directory |
||||
WORKDIR /srv/conduit |
||||
|
||||
# Run Conduit and print backtraces on panics |
||||
ENV RUST_BACKTRACE=1 |
||||
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ] |
||||
@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env bash |
||||
|
||||
set -euo pipefail |
||||
|
||||
# Path to Complement's source code |
||||
COMPLEMENT_SRC="$1" |
||||
|
||||
# A `.jsonl` file to write test logs to |
||||
LOG_FILE="$2" |
||||
|
||||
# A `.jsonl` file to write test results to |
||||
RESULTS_FILE="$3" |
||||
|
||||
OCI_IMAGE="complement-conduit:dev" |
||||
|
||||
env \ |
||||
-C "$(git rev-parse --show-toplevel)" \ |
||||
docker build \ |
||||
--tag "$OCI_IMAGE" \ |
||||
--file complement/Dockerfile \ |
||||
. |
||||
|
||||
# It's okay (likely, even) that `go test` exits nonzero |
||||
set +o pipefail |
||||
env \ |
||||
-C "$COMPLEMENT_SRC" \ |
||||
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ |
||||
go test -json ./tests | tee "$LOG_FILE" |
||||
set -o pipefail |
||||
|
||||
# Post-process the results into an easy-to-compare format |
||||
cat "$LOG_FILE" | jq -c ' |
||||
select( |
||||
(.Action == "pass" or .Action == "fail" or .Action == "skip") |
||||
and .Test != null |
||||
) | {Action: .Action, Test: .Test} |
||||
' | sort > "$RESULTS_FILE" |
||||
@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env bash |
||||
|
||||
set -euo pipefail |
||||
|
||||
# The first argument must be the desired installable |
||||
INSTALLABLE="$1" |
||||
|
||||
# Build the installable and forward any other arguments too |
||||
nix build "$@" |
||||
|
||||
if [ ! -z ${ATTIC_TOKEN+x} ]; then |
||||
nix run --inputs-from . attic -- \ |
||||
login \ |
||||
conduit \ |
||||
"${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \ |
||||
"$ATTIC_TOKEN" |
||||
|
||||
# Push the target installable and its build dependencies |
||||
nix run --inputs-from . attic -- \ |
||||
push \ |
||||
conduit \ |
||||
"$(nix path-info "$INSTALLABLE" --derivation)" \ |
||||
"$(nix path-info "$INSTALLABLE")" |
||||
else |
||||
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" |
||||
fi |
||||
@ -0,0 +1,18 @@
|
||||
[book] |
||||
title = "Conduit" |
||||
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol" |
||||
language = "en" |
||||
multilingual = false |
||||
src = "docs" |
||||
|
||||
[build] |
||||
build-dir = "public" |
||||
create-missing = true |
||||
|
||||
[output.html] |
||||
git-repository-url = "https://gitlab.com/famedly/conduit" |
||||
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}" |
||||
git-repository-icon = "fa-git-square" |
||||
|
||||
[output.html.search] |
||||
limit-results = 15 |
||||
@ -1,13 +1,11 @@
|
||||
# Running Conduit on Complement |
||||
# Complement |
||||
|
||||
This assumes that you're familiar with complement, if not, please readme |
||||
[their readme](https://github.com/matrix-org/complement#running). |
||||
## What's that? |
||||
|
||||
Complement works with "base images", this directory (and Dockerfile) helps build the conduit complement-ready docker |
||||
image. |
||||
Have a look at [its repository](https://github.com/matrix-org/complement). |
||||
|
||||
To build, `cd` to the base directory of the workspace, and run this: |
||||
## How do I use it with Conduit? |
||||
|
||||
`docker build -t complement-conduit:dev -f complement/Dockerfile .` |
||||
|
||||
Then use `complement-conduit:dev` as a base image for running complement tests. |
||||
The script at [`../bin/complement`](../bin/complement) has automation for this. |
||||
It takes a few command line arguments, you can read the script to find out what |
||||
those are. |
||||
|
||||
@ -0,0 +1,10 @@
|
||||
(import |
||||
( |
||||
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in |
||||
fetchTarball { |
||||
url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; |
||||
sha256 = lock.nodes.flake-compat.locked.narHash; |
||||
} |
||||
) |
||||
{ src = ./.; } |
||||
).defaultNix |
||||
@ -0,0 +1,12 @@
|
||||
# Summary |
||||
|
||||
- [Introduction](introduction.md) |
||||
|
||||
- [Configuration](configuration.md) |
||||
- [Deploying](deploying.md) |
||||
- [Generic](deploying/generic.md) |
||||
- [Debian](deploying/debian.md) |
||||
- [Docker](deploying/docker.md) |
||||
- [NixOS](deploying/nixos.md) |
||||
- [TURN](turn.md) |
||||
- [Appservices](appservices.md) |
||||
@ -0,0 +1,3 @@
|
||||
# Deploying |
||||
|
||||
This chapter describes various ways to deploy Conduit. |
||||
@ -0,0 +1,18 @@
|
||||
# Conduit for NixOS |
||||
|
||||
Conduit can be acquired by Nix from various places: |
||||
|
||||
* The `flake.nix` at the root of the repo |
||||
* The `default.nix` at the root of the repo |
||||
* From Nixpkgs |
||||
|
||||
The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so |
||||
(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to |
||||
configure Conduit. |
||||
|
||||
If you want to run the latest code, you should get Conduit from the `flake.nix` |
||||
or `default.nix` and set [`services.matrix-conduit.package`][package] |
||||
appropriately. |
||||
|
||||
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit |
||||
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package |
||||
@ -0,0 +1,13 @@
|
||||
# Conduit |
||||
|
||||
{{#include ../README.md:catchphrase}} |
||||
|
||||
{{#include ../README.md:body}} |
||||
|
||||
#### How can I deploy my own? |
||||
|
||||
- [Deployment options](deploying.md) |
||||
|
||||
If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md). |
||||
|
||||
{{#include ../README.md:footer}} |
||||
@ -0,0 +1,74 @@
|
||||
interpreter = ["bash", "-euo", "pipefail", "-c"] |
||||
|
||||
[[task]] |
||||
name = "engage" |
||||
group = "versions" |
||||
script = "engage --version" |
||||
|
||||
[[task]] |
||||
name = "rustc" |
||||
group = "versions" |
||||
script = "rustc --version" |
||||
|
||||
[[task]] |
||||
name = "cargo" |
||||
group = "versions" |
||||
script = "cargo --version" |
||||
|
||||
[[task]] |
||||
name = "cargo-fmt" |
||||
group = "versions" |
||||
script = "cargo fmt --version" |
||||
|
||||
[[task]] |
||||
name = "rustdoc" |
||||
group = "versions" |
||||
script = "rustdoc --version" |
||||
|
||||
[[task]] |
||||
name = "cargo-clippy" |
||||
group = "versions" |
||||
script = "cargo clippy -- --version" |
||||
|
||||
[[task]] |
||||
name = "lychee" |
||||
group = "versions" |
||||
script = "lychee --version" |
||||
|
||||
[[task]] |
||||
name = "cargo-fmt" |
||||
group = "lints" |
||||
script = "cargo fmt --check -- --color=always" |
||||
|
||||
[[task]] |
||||
name = "cargo-doc" |
||||
group = "lints" |
||||
script = """ |
||||
RUSTDOCFLAGS="-D warnings" cargo doc \ |
||||
--workspace \ |
||||
--no-deps \ |
||||
--document-private-items \ |
||||
--color always |
||||
""" |
||||
|
||||
[[task]] |
||||
name = "cargo-clippy" |
||||
group = "lints" |
||||
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" |
||||
|
||||
[[task]] |
||||
name = "lychee" |
||||
group = "lints" |
||||
script = "lychee --offline docs" |
||||
|
||||
[[task]] |
||||
name = "cargo" |
||||
group = "tests" |
||||
script = """ |
||||
cargo test \ |
||||
--workspace \ |
||||
--all-targets \ |
||||
--color=always \ |
||||
-- \ |
||||
--color=always |
||||
""" |
||||
@ -1,198 +0,0 @@
|
||||
# Conduit for Nix/NixOS |
||||
|
||||
This guide assumes you have a recent version of Nix (^2.4) installed. |
||||
|
||||
Since Conduit ships as a Nix flake, you'll first need to [enable |
||||
flakes][enable_flakes]. |
||||
|
||||
You can now use the usual Nix commands to interact with Conduit's flake. For |
||||
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need |
||||
to provide configuration and such manually as usual). |
||||
|
||||
If your NixOS configuration is defined as a flake, you can depend on this flake |
||||
to provide a more up-to-date version than provided by `nixpkgs`. In your flake, |
||||
add the following to your `inputs`: |
||||
|
||||
```nix |
||||
conduit = { |
||||
url = "gitlab:famedly/conduit"; |
||||
|
||||
# Assuming you have an input for nixpkgs called `nixpkgs`. If you experience |
||||
# build failures while using this, try commenting/deleting this line. This |
||||
# will probably also require you to always build from source. |
||||
inputs.nixpkgs.follows = "nixpkgs"; |
||||
}; |
||||
``` |
||||
|
||||
Next, make sure you're passing your flake inputs to the `specialArgs` argument |
||||
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will |
||||
assume you've named the group `flake-inputs`. |
||||
|
||||
Now you can configure Conduit and a reverse proxy for it. Add the following to |
||||
a new Nix file and include it in your configuration: |
||||
|
||||
```nix |
||||
{ config |
||||
, pkgs |
||||
, flake-inputs |
||||
, ... |
||||
}: |
||||
|
||||
let |
||||
# You'll need to edit these values |
||||
|
||||
# The hostname that will appear in your user and room IDs |
||||
server_name = "example.com"; |
||||
|
||||
# The hostname that Conduit actually runs on |
||||
# |
||||
# This can be the same as `server_name` if you want. This is only necessary |
||||
# when Conduit is running on a different machine than the one hosting your |
||||
# root domain. This configuration also assumes this is all running on a single |
||||
# machine, some tweaks will need to be made if this is not the case. |
||||
matrix_hostname = "matrix.${server_name}"; |
||||
|
||||
# An admin email for TLS certificate notifications |
||||
admin_email = "admin@${server_name}"; |
||||
|
||||
# These ones you can leave alone |
||||
|
||||
# Build a derivation that stores the content of `${server_name}/.well-known/matrix/server` |
||||
well_known_server = pkgs.writeText "well-known-matrix-server" '' |
||||
{ |
||||
"m.server": "${matrix_hostname}" |
||||
} |
||||
''; |
||||
|
||||
# Build a derivation that stores the content of `${server_name}/.well-known/matrix/client` |
||||
well_known_client = pkgs.writeText "well-known-matrix-client" '' |
||||
{ |
||||
"m.homeserver": { |
||||
"base_url": "https://${matrix_hostname}" |
||||
} |
||||
} |
||||
''; |
||||
in |
||||
|
||||
{ |
||||
# Configure Conduit itself |
||||
services.matrix-conduit = { |
||||
enable = true; |
||||
|
||||
# This causes NixOS to use the flake defined in this repository instead of |
||||
# the build of Conduit built into nixpkgs. |
||||
package = flake-inputs.conduit.packages.${pkgs.system}.default; |
||||
|
||||
settings.global = { |
||||
inherit server_name; |
||||
}; |
||||
}; |
||||
|
||||
# Configure automated TLS acquisition/renewal |
||||
security.acme = { |
||||
acceptTerms = true; |
||||
defaults = { |
||||
email = admin_email; |
||||
}; |
||||
}; |
||||
|
||||
# ACME data must be readable by the NGINX user |
||||
users.users.nginx.extraGroups = [ |
||||
"acme" |
||||
]; |
||||
|
||||
# Configure NGINX as a reverse proxy |
||||
services.nginx = { |
||||
enable = true; |
||||
recommendedProxySettings = true; |
||||
|
||||
virtualHosts = { |
||||
"${matrix_hostname}" = { |
||||
forceSSL = true; |
||||
enableACME = true; |
||||
|
||||
listen = [ |
||||
{ |
||||
addr = "0.0.0.0"; |
||||
port = 443; |
||||
ssl = true; |
||||
} |
||||
{ |
||||
addr = "[::]"; |
||||
port = 443; |
||||
ssl = true; |
||||
} { |
||||
addr = "0.0.0.0"; |
||||
port = 8448; |
||||
ssl = true; |
||||
} |
||||
{ |
||||
addr = "[::]"; |
||||
port = 8448; |
||||
ssl = true; |
||||
} |
||||
]; |
||||
|
||||
locations."/_matrix/" = { |
||||
proxyPass = "http://backend_conduit$request_uri"; |
||||
proxyWebsockets = true; |
||||
extraConfig = '' |
||||
proxy_set_header Host $host; |
||||
proxy_buffering off; |
||||
''; |
||||
}; |
||||
|
||||
extraConfig = '' |
||||
merge_slashes off; |
||||
''; |
||||
}; |
||||
|
||||
"${server_name}" = { |
||||
forceSSL = true; |
||||
enableACME = true; |
||||
|
||||
locations."=/.well-known/matrix/server" = { |
||||
# Use the contents of the derivation built previously |
||||
alias = "${well_known_server}"; |
||||
|
||||
extraConfig = '' |
||||
# Set the header since by default NGINX thinks it's just bytes |
||||
default_type application/json; |
||||
''; |
||||
}; |
||||
|
||||
locations."=/.well-known/matrix/client" = { |
||||
# Use the contents of the derivation built previously |
||||
alias = "${well_known_client}"; |
||||
|
||||
extraConfig = '' |
||||
# Set the header since by default NGINX thinks it's just bytes |
||||
default_type application/json; |
||||
|
||||
# https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients |
||||
add_header Access-Control-Allow-Origin "*"; |
||||
''; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
upstreams = { |
||||
"backend_conduit" = { |
||||
servers = { |
||||
"[::1]:${toString config.services.matrix-conduit.settings.global.port}" = { }; |
||||
}; |
||||
}; |
||||
}; |
||||
}; |
||||
|
||||
# Open firewall ports for HTTP, HTTPS, and Matrix federation |
||||
networking.firewall.allowedTCPPorts = [ 80 443 8448 ]; |
||||
networking.firewall.allowedUDPPorts = [ 80 443 8448 ]; |
||||
} |
||||
``` |
||||
|
||||
Now you can rebuild your system configuration and you should be good to go! |
||||
|
||||
[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes |
||||
|
||||
[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS |
||||
@ -0,0 +1,22 @@
|
||||
# This is the authoritative configuration of this project's Rust toolchain. |
||||
# |
||||
# Other files that need upkeep when this changes: |
||||
# |
||||
# * `.gitlab-ci.yml` |
||||
# * `Cargo.toml` |
||||
# * `flake.nix` |
||||
# |
||||
# Search in those files for `rust-toolchain.toml` to find the relevant places. |
||||
# If you're having trouble making the relevant changes, bug a maintainer. |
||||
|
||||
[toolchain] |
||||
channel = "1.75.0" |
||||
components = [ |
||||
# For rust-analyzer |
||||
"rust-src", |
||||
] |
||||
targets = [ |
||||
"x86_64-unknown-linux-gnu", |
||||
"x86_64-unknown-linux-musl", |
||||
"aarch64-unknown-linux-musl", |
||||
] |
||||
@ -0,0 +1,27 @@
|
||||
//! Integration with `clap`
|
||||
|
||||
use clap::Parser; |
||||
|
||||
/// Returns the current version of the crate with extra info if supplied
|
||||
///
|
||||
/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
|
||||
/// include it in parenthesis after the SemVer version. A common value are git
|
||||
/// commit hashes.
|
||||
fn version() -> String { |
||||
let cargo_pkg_version = env!("CARGO_PKG_VERSION"); |
||||
|
||||
match option_env!("CONDUIT_VERSION_EXTRA") { |
||||
Some(x) => format!("{} ({})", cargo_pkg_version, x), |
||||
None => cargo_pkg_version.to_owned(), |
||||
} |
||||
} |
||||
|
||||
/// Command line arguments
///
/// Conduit currently accepts no flags or positional arguments of its own;
/// this struct exists so `clap` can provide the standard `--help` and
/// `--version` handling, with the version string produced by `version()`.
#[derive(Parser)]
#[clap(about, version = version())]
pub struct Args {}
||||
|
||||
/// Parse command line arguments into structured data
|
||||
pub fn parse() -> Args { |
||||
Args::parse() |
||||
} |
||||
@ -1,127 +0,0 @@
|
||||
use std::{collections::HashSet, mem}; |
||||
|
||||
use ruma::{OwnedUserId, RoomId, UserId}; |
||||
|
||||
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; |
||||
|
||||
impl service::rooms::edus::typing::Data for KeyValueDatabase {
    /// Records that `user_id` is typing in `room_id` until `timeout`.
    ///
    /// Key layout in `typingid_userid`:
    /// `roomid 0xff timeout_be 0xff count_be` -> user id bytes. The
    /// big-endian timeout makes keys under one room prefix sort by expiry,
    /// which `typings_maintain` relies on.
    fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        // Globally increasing counter; also stored as the room's last
        // typing-update marker so clients can detect changes.
        let count = services().globals.next_count()?.to_be_bytes();

        let mut room_typing_id = prefix;
        room_typing_id.extend_from_slice(&timeout.to_be_bytes());
        room_typing_id.push(0xff);
        room_typing_id.extend_from_slice(&count);

        self.typingid_userid
            .insert(&room_typing_id, user_id.as_bytes())?;

        self.roomid_lasttypingupdate
            .insert(room_id.as_bytes(), &count)?;

        Ok(())
    }

    /// Removes every typing entry for `user_id` in `room_id` and, if any
    /// were found, bumps the room's last-typing-update counter.
    fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let user_id = user_id.to_string();

        let mut found_outdated = false;

        // There may be several entries for this user if `typing_add` was
        // called multiple times; remove them all.
        for outdated_edu in self
            .typingid_userid
            .scan_prefix(prefix)
            .filter(|(_, v)| &**v == user_id.as_bytes())
        {
            self.typingid_userid.remove(&outdated_edu.0)?;
            found_outdated = true;
        }

        if found_outdated {
            self.roomid_lasttypingupdate.insert(
                room_id.as_bytes(),
                &services().globals.next_count()?.to_be_bytes(),
            )?;
        }

        Ok(())
    }

    /// Removes typing entries whose timeout lies in the past, bumping the
    /// room's last-typing-update counter if anything was removed.
    fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let current_timestamp = utils::millis_since_unix_epoch();

        let mut found_outdated = false;

        // Find all outdated edus before inserting a new one. The timeout is
        // the first 8 bytes after the first 0xff separator (see the key
        // layout documented on `typing_add`); because it is big-endian,
        // prefix iteration yields entries in expiry order, so `take_while`
        // can stop at the first non-expired entry.
        for outdated_edu in self
            .typingid_userid
            .scan_prefix(prefix)
            .map(|(key, _)| {
                Ok::<_, Error>((
                    key.clone(),
                    utils::u64_from_bytes(
                        &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| {
                            Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
                        })?[0..mem::size_of::<u64>()],
                    )
                    .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
                ))
            })
            // Malformed keys are skipped rather than aborting maintenance.
            .filter_map(|r| r.ok())
            .take_while(|&(_, timestamp)| timestamp < current_timestamp)
        {
            // This is an outdated edu (expiry time < current timestamp).
            self.typingid_userid.remove(&outdated_edu.0)?;
            found_outdated = true;
        }

        if found_outdated {
            self.roomid_lasttypingupdate.insert(
                room_id.as_bytes(),
                &services().globals.next_count()?.to_be_bytes(),
            )?;
        }

        Ok(())
    }

    /// Returns the count of the last typing update in this room, or 0 if
    /// the room has never seen a typing update.
    fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
        Ok(self
            .roomid_lasttypingupdate
            .get(room_id.as_bytes())?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
                })
            })
            .transpose()?
            .unwrap_or(0))
    }

    /// Returns the set of user ids currently recorded as typing in the room.
    /// Callers are expected to run `typings_maintain` first; this does not
    /// itself filter out expired entries.
    fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let mut user_ids = HashSet::new();

        for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
            let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
                Error::bad_database("User ID in typingid_userid is invalid unicode.")
            })?)
            .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;

            user_ids.insert(user_id);
        }

        Ok(user_ids)
    }
}
||||
@ -1,37 +1,224 @@
|
||||
mod data; |
||||
|
||||
use std::collections::BTreeMap; |
||||
|
||||
pub use data::Data; |
||||
|
||||
use crate::Result; |
||||
use futures_util::Future; |
||||
use regex::RegexSet; |
||||
use ruma::{ |
||||
api::appservice::{Namespace, Registration}, |
||||
RoomAliasId, RoomId, UserId, |
||||
}; |
||||
use tokio::sync::RwLock; |
||||
|
||||
use crate::{services, Result}; |
||||
|
||||
/// Compiled regular expressions for a namespace.
///
/// Each field is `None` when the appservice registration declared no
/// patterns of that kind, avoiding an empty `RegexSet`.
#[derive(Clone, Debug)]
pub struct NamespaceRegex {
    // Patterns the appservice claims exclusive ownership of.
    pub exclusive: Option<RegexSet>,
    // Patterns the appservice is interested in but does not own exclusively.
    pub non_exclusive: Option<RegexSet>,
}
||||
|
||||
impl NamespaceRegex { |
||||
/// Checks if this namespace has rights to a namespace
|
||||
pub fn is_match(&self, heystack: &str) -> bool { |
||||
if self.is_exclusive_match(heystack) { |
||||
return true; |
||||
} |
||||
|
||||
if let Some(non_exclusive) = &self.non_exclusive { |
||||
if non_exclusive.is_match(heystack) { |
||||
return true; |
||||
} |
||||
} |
||||
false |
||||
} |
||||
|
||||
/// Checks if this namespace has exlusive rights to a namespace
|
||||
pub fn is_exclusive_match(&self, heystack: &str) -> bool { |
||||
if let Some(exclusive) = &self.exclusive { |
||||
if exclusive.is_match(heystack) { |
||||
return true; |
||||
} |
||||
} |
||||
false |
||||
} |
||||
} |
||||
|
||||
impl TryFrom<Vec<Namespace>> for NamespaceRegex { |
||||
fn try_from(value: Vec<Namespace>) -> Result<Self, regex::Error> { |
||||
let mut exclusive = vec![]; |
||||
let mut non_exclusive = vec![]; |
||||
|
||||
for namespace in value { |
||||
if namespace.exclusive { |
||||
exclusive.push(namespace.regex); |
||||
} else { |
||||
non_exclusive.push(namespace.regex); |
||||
} |
||||
} |
||||
|
||||
Ok(NamespaceRegex { |
||||
exclusive: if exclusive.is_empty() { |
||||
None |
||||
} else { |
||||
Some(RegexSet::new(exclusive)?) |
||||
}, |
||||
non_exclusive: if non_exclusive.is_empty() { |
||||
None |
||||
} else { |
||||
Some(RegexSet::new(non_exclusive)?) |
||||
}, |
||||
}) |
||||
} |
||||
|
||||
type Error = regex::Error; |
||||
} |
||||
|
||||
/// Appservice registration combined with its compiled regular expressions.
#[derive(Clone, Debug)]
pub struct RegistrationInfo {
    // The raw registration as supplied when the appservice was registered.
    pub registration: Registration,
    // Compiled user-namespace patterns.
    pub users: NamespaceRegex,
    // Compiled room-alias-namespace patterns.
    pub aliases: NamespaceRegex,
    // Compiled room-id-namespace patterns.
    pub rooms: NamespaceRegex,
}
||||
|
||||
impl RegistrationInfo { |
||||
pub fn is_user_match(&self, user_id: &UserId) -> bool { |
||||
self.users.is_match(user_id.as_str()) |
||||
|| self.registration.sender_localpart == user_id.localpart() |
||||
} |
||||
|
||||
pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { |
||||
self.users.is_exclusive_match(user_id.as_str()) |
||||
|| self.registration.sender_localpart == user_id.localpart() |
||||
} |
||||
} |
||||
|
||||
impl TryFrom<Registration> for RegistrationInfo { |
||||
fn try_from(value: Registration) -> Result<RegistrationInfo, regex::Error> { |
||||
Ok(RegistrationInfo { |
||||
users: value.namespaces.users.clone().try_into()?, |
||||
aliases: value.namespaces.aliases.clone().try_into()?, |
||||
rooms: value.namespaces.rooms.clone().try_into()?, |
||||
registration: value, |
||||
}) |
||||
} |
||||
|
||||
type Error = regex::Error; |
||||
} |
||||
|
||||
pub struct Service {
    // Storage backend holding appservice registrations.
    pub db: &'static dyn Data,
    // In-memory cache of all registrations with their compiled namespace
    // regexes, keyed by appservice ID.
    registration_info: RwLock<BTreeMap<String, RegistrationInfo>>,
}
||||
|
||||
impl Service { |
||||
/// Registers an appservice and returns the ID to the caller
|
||||
pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> { |
||||
pub fn build(db: &'static dyn Data) -> Result<Self> { |
||||
let mut registration_info = BTreeMap::new(); |
||||
// Inserting registrations into cache
|
||||
for appservice in db.all()? { |
||||
registration_info.insert( |
||||
appservice.0, |
||||
appservice |
||||
.1 |
||||
.try_into() |
||||
.expect("Should be validated on registration"), |
||||
); |
||||
} |
||||
|
||||
Ok(Self { |
||||
db, |
||||
registration_info: RwLock::new(registration_info), |
||||
}) |
||||
} |
||||
/// Registers an appservice and returns the ID to the caller.
|
||||
pub async fn register_appservice(&self, yaml: Registration) -> Result<String> { |
||||
//TODO: Check for collisions between exclusive appservice namespaces
|
||||
services() |
||||
.appservice |
||||
.registration_info |
||||
.write() |
||||
.await |
||||
.insert(yaml.id.clone(), yaml.clone().try_into()?); |
||||
|
||||
self.db.register_appservice(yaml) |
||||
} |
||||
|
||||
/// Remove an appservice registration
|
||||
/// Removes an appservice registration.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `service_name` - the name you send to register the service previously
|
||||
pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { |
||||
pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> { |
||||
services() |
||||
.appservice |
||||
.registration_info |
||||
.write() |
||||
.await |
||||
.remove(service_name) |
||||
.ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?; |
||||
|
||||
self.db.unregister_appservice(service_name) |
||||
} |
||||
|
||||
pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> { |
||||
self.db.get_registration(id) |
||||
pub async fn get_registration(&self, id: &str) -> Option<Registration> { |
||||
self.registration_info |
||||
.read() |
||||
.await |
||||
.get(id) |
||||
.cloned() |
||||
.map(|info| info.registration) |
||||
} |
||||
|
||||
pub async fn iter_ids(&self) -> Vec<String> { |
||||
self.registration_info |
||||
.read() |
||||
.await |
||||
.keys() |
||||
.cloned() |
||||
.collect() |
||||
} |
||||
|
||||
pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> { |
||||
self.read() |
||||
.await |
||||
.values() |
||||
.find(|info| info.registration.as_token == token) |
||||
.cloned() |
||||
} |
||||
|
||||
// Checks if a given user id matches any exclusive appservice regex
|
||||
pub async fn is_exclusive_user_id(&self, user_id: &UserId) -> bool { |
||||
self.read() |
||||
.await |
||||
.values() |
||||
.any(|info| info.is_exclusive_user_match(user_id)) |
||||
} |
||||
|
||||
// Checks if a given room alias matches any exclusive appservice regex
|
||||
pub async fn is_exclusive_alias(&self, alias: &RoomAliasId) -> bool { |
||||
self.read() |
||||
.await |
||||
.values() |
||||
.any(|info| info.aliases.is_exclusive_match(alias.as_str())) |
||||
} |
||||
|
||||
pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> { |
||||
self.db.iter_ids() |
||||
// Checks if a given room id matches any exclusive appservice regex
|
||||
pub async fn is_exclusive_room_id(&self, room_id: &RoomId) -> bool { |
||||
self.read() |
||||
.await |
||||
.values() |
||||
.any(|info| info.rooms.is_exclusive_match(room_id.as_str())) |
||||
} |
||||
|
||||
pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> { |
||||
self.db.all() |
||||
pub fn read( |
||||
&self, |
||||
) -> impl Future<Output = tokio::sync::RwLockReadGuard<'_, BTreeMap<String, RegistrationInfo>>> |
||||
{ |
||||
self.registration_info.read() |
||||
} |
||||
} |
||||
|
||||
@ -1,21 +0,0 @@
|
||||
use crate::Result; |
||||
use ruma::{OwnedUserId, RoomId, UserId}; |
||||
use std::collections::HashSet; |
||||
|
||||
/// Storage interface for per-room typing notifications (EDUs).
pub trait Data: Send + Sync {
    /// Sets a user as typing until the timeout timestamp is reached or
    /// `typing_remove` is called.
    fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>;

    /// Removes a user from typing before the timeout is reached.
    fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>;

    /// Makes sure that typing events with old timestamps get removed.
    fn typings_maintain(&self, room_id: &RoomId) -> Result<()>;

    /// Returns the count of the last typing update in this room.
    fn last_typing_update(&self, room_id: &RoomId) -> Result<u64>;

    /// Returns all user ids currently typing.
    fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>>;
}
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue