Mirror of https://github.com/hoernschen/dendrite.git (synced 2024-12-26 15:08:28 +00:00)

parent 11b557097c
commit f956a8c1d9

46 changed files with 447 additions and 855 deletions
.github/workflows/helm.yml (vendored, 1 addition)
@@ -38,3 +38,4 @@ jobs:
       with:
         config: helm/cr.yaml
         charts_dir: helm/
+        mark_as_latest: false
README.md
@@ -13,7 +13,7 @@ It intends to provide an **efficient**, **reliable** and **scalable** alternative

 Dendrite is **beta** software, which means:

-- Dendrite is ready for early adopters. We recommend running in Monolith mode with a PostgreSQL database.
+- Dendrite is ready for early adopters. We recommend running Dendrite with a PostgreSQL database.
 - Dendrite has periodic releases. We intend to release new versions as we fix bugs and land significant features.
 - Dendrite supports database schema upgrades between releases. This means you should never lose your messages when upgrading Dendrite.

@@ -21,7 +21,7 @@ This does not mean:

 - Dendrite is bug-free. It has not yet been battle-tested in the real world and so will be error prone initially.
 - Dendrite is feature-complete. There may be client or federation APIs that are not implemented.
-- Dendrite is ready for massive homeserver deployments. There is no sharding of microservices (although it is possible to run them on separate machines) and there is no high-availability/clustering support.
+- Dendrite is ready for massive homeserver deployments. There is no high-availability/clustering support.

 Currently, we expect Dendrite to function well for small (10s/100s of users) homeserver deployments as well as P2P Matrix nodes in-browser or on mobile devices.

@@ -60,7 +60,7 @@ The following instructions are enough to get Dendrite started as a non-federating

 ```bash
 $ git clone https://github.com/matrix-org/dendrite
 $ cd dendrite
-$ ./build.sh
+$ go build -o bin/ ./cmd/...

 # Generate a Matrix signing key for federation (required)
 $ ./bin/generate-keys --private-key matrix_key.pem
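Note: after this change the binaries are written by `go build` directly rather than by `build.sh`. A quick sanity check (a sketch; assumes the build above completed from the repository root):

```bash
# The compiled command binaries land in ./bin
ls bin/
# Expect entries such as dendrite, generate-keys and create-account
```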
@@ -85,7 +85,7 @@ Then point your favourite Matrix client at `http://localhost:8008` or `https://l

 ## Progress

-We use a script called Are We Synapse Yet which checks Sytest compliance rates. Sytest is a black-box homeserver
+We use a script called "Are We Synapse Yet" which checks Sytest compliance rates. Sytest is a black-box homeserver
 test rig with around 900 tests. The script works out how many of these tests are passing on Dendrite and it
 updates with CI. As of January 2023, we have 100% server-server parity with Synapse, and the client-server parity is at 93%, though check
 CI for the latest numbers. In practice, this means you can communicate locally and via federation with Synapse
build.cmd (deleted, 51 lines)
@@ -1,51 +0,0 @@
-@echo off
-
-:ENTRY_POINT
-setlocal EnableDelayedExpansion
-
-REM script base dir
-set SCRIPTDIR=%~dp0
-set PROJDIR=%SCRIPTDIR:~0,-1%
-
-REM Put installed packages into ./bin
-set GOBIN=%PROJDIR%\bin
-
-set FLAGS=
-
-REM Check if sources are under Git control
-if not exist ".git" goto :CHECK_BIN
-
-REM set BUILD=`git rev-parse --short HEAD \\ ""`
-FOR /F "tokens=*" %%X IN ('git rev-parse --short HEAD') DO (
-    set BUILD=%%X
-)
-
-REM set BRANCH=`(git symbolic-ref --short HEAD \ tr -d \/ ) \\ ""`
-FOR /F "tokens=*" %%X IN ('git symbolic-ref --short HEAD') DO (
-    set BRANCHRAW=%%X
-    set BRANCH=!BRANCHRAW:/=!
-)
-if "%BRANCH%" == "main" set BRANCH=
-
-set FLAGS=-X github.com/matrix-org/dendrite/internal.branch=%BRANCH% -X github.com/matrix-org/dendrite/internal.build=%BUILD%
-
-:CHECK_BIN
-if exist "bin" goto :ALL_SET
-mkdir "bin"
-
-:ALL_SET
-set CGO_ENABLED=1
-for /D %%P in (cmd\*) do (
-    go build -trimpath -ldflags "%FLAGS%" -v -o ".\bin" ".\%%P"
-)
-
-set CGO_ENABLED=0
-set GOOS=js
-set GOARCH=wasm
-go build -trimpath -ldflags "%FLAGS%" -o bin\main.wasm .\cmd\dendritejs-pinecone
-
-goto :DONE
-
-:DONE
-echo Done
-endlocal
build.sh (deleted, 24 lines)
@@ -1,24 +0,0 @@
-#!/bin/sh -eu
-
-# Put installed packages into ./bin
-export GOBIN=$PWD/`dirname $0`/bin
-
-if [ -d ".git" ]
-then
-    export BUILD=`git rev-parse --short HEAD || ""`
-    export BRANCH=`(git symbolic-ref --short HEAD | tr -d \/ ) || ""`
-    if [ "$BRANCH" = main ]
-    then
-        export BRANCH=""
-    fi
-
-    export FLAGS="-X github.com/matrix-org/dendrite/internal.branch=$BRANCH -X github.com/matrix-org/dendrite/internal.build=$BUILD"
-else
-    export FLAGS=""
-fi
-
-mkdir -p bin
-
-CGO_ENABLED=1 go build -trimpath -ldflags "$FLAGS" -v -o "bin/" ./cmd/...
-
-# CGO_ENABLED=0 GOOS=js GOARCH=wasm go build -trimpath -ldflags "$FLAGS" -o bin/main.wasm ./cmd/dendritejs-pinecone
build/docker/README.md
@@ -6,23 +6,20 @@ They can be found on Docker Hub:

 - [matrixdotorg/dendrite-monolith](https://hub.docker.com/r/matrixdotorg/dendrite-monolith) for monolith deployments

-## Dockerfiles
+## Dockerfile

-The `Dockerfile` is a multistage file which can build all four Dendrite
-images depending on the supplied `--target`. From the root of the Dendrite
+The `Dockerfile` is a multistage file which can build Dendrite. From the root of the Dendrite
 repository, run:

 ```
-docker build . --target monolith -t matrixdotorg/dendrite-monolith
-docker build . --target demo-pinecone -t matrixdotorg/dendrite-demo-pinecone
-docker build . --target demo-yggdrasil -t matrixdotorg/dendrite-demo-yggdrasil
+docker build . -t matrixdotorg/dendrite-monolith
 ```

-## Compose files
+## Compose file

-There are two sample `docker-compose` files:
+There is one sample `docker-compose` file:

-- `docker-compose.monolith.yml` which runs a monolith Dendrite deployment
+- `docker-compose.yml` which runs a Dendrite deployment with Postgres

 ## Configuration

@@ -55,7 +52,7 @@ Create your config based on the [`dendrite-sample.yaml`](https://github.com/matr

 Then start the deployment:

 ```
-docker-compose -f docker-compose.monolith.yml up
+docker-compose -f docker-compose.yml up
 ```

 ## Building the images
build/docker/docker-compose.monolith.yml (deleted, 44 lines)
@@ -1,44 +0,0 @@
-version: "3.4"
-services:
-  postgres:
-    hostname: postgres
-    image: postgres:14
-    restart: always
-    volumes:
-      - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
-      # To persist your PostgreSQL databases outside of the Docker image,
-      # to prevent data loss, modify the following ./path_to path:
-      - ./path_to/postgresql:/var/lib/postgresql/data
-    environment:
-      POSTGRES_PASSWORD: itsasecret
-      POSTGRES_USER: dendrite
-    healthcheck:
-      test: ["CMD-SHELL", "pg_isready -U dendrite"]
-      interval: 5s
-      timeout: 5s
-      retries: 5
-    networks:
-      - internal
-
-  monolith:
-    hostname: monolith
-    image: matrixdotorg/dendrite-monolith:latest
-    command: [
-      "--tls-cert=server.crt",
-      "--tls-key=server.key"
-    ]
-    ports:
-      - 8008:8008
-      - 8448:8448
-    volumes:
-      - ./config:/etc/dendrite
-      - ./media:/var/dendrite/media
-    depends_on:
-      - postgres
-    networks:
-      - internal
-    restart: unless-stopped
-
-networks:
-  internal:
-    attachable: true
build/docker/docker-compose.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
+version: "3.4"
+services:
+  postgres:
+    hostname: postgres
+    image: postgres:15-alpine
+    restart: always
+    volumes:
+      # This will create a docker volume to persist the database files in.
+      # If you prefer those files to be outside of docker, you'll need to change this.
+      - dendrite_postgres_data:/var/lib/postgresql/data
+    environment:
+      POSTGRES_PASSWORD: itsasecret
+      POSTGRES_USER: dendrite
+      POSTGRES_DATABASE: dendrite
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U dendrite"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+    networks:
+      - internal
+
+  monolith:
+    hostname: monolith
+    image: matrixdotorg/dendrite-monolith:latest
+    ports:
+      - 8008:8008
+      - 8448:8448
+    volumes:
+      - ./config:/etc/dendrite
+      # The following volumes use docker volumes, change this
+      # if you prefer to have those files outside of docker.
+      - dendrite_media:/var/dendrite/media
+      - dendrite_jetstream:/var/dendrite/jetstream
+      - dendrite_search_index:/var/dendrite/searchindex
+    depends_on:
+      postgres:
+        condition: service_healthy
+    networks:
+      - internal
+    restart: unless-stopped
+
+networks:
+  internal:
+    attachable: true
+
+volumes:
+  dendrite_postgres_data:
+  dendrite_media:
+  dendrite_jetstream:
+  dendrite_search_index:
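Note: the new compose file persists state in named Docker volumes rather than bind mounts. A hedged sketch for confirming where that data lives after a first start (Compose may prefix volume names with the project directory name, e.g. `docker_dendrite_media`):

```bash
# List the named volumes created by the compose file
docker volume ls --filter name=dendrite

# Show where Docker stores the media volume on the host
docker volume inspect dendrite_media --format '{{ .Mountpoint }}'
```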
build/docker/postgres/create_db.sh (deleted, 5 lines)
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-for db in userapi_accounts mediaapi syncapi roomserver keyserver federationapi appservice mscs; do
-    createdb -U dendrite -O dendrite dendrite_$db
-done
@@ -133,7 +133,11 @@ func TestPurgeRoom(t *testing.T) {
 	cfg, processCtx, close := testrig.CreateConfig(t, dbType)
 	caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
 	natsInstance := jetstream.NATSInstance{}
-	defer close()
+	defer func() {
+		// give components the time to process purge requests
+		time.Sleep(time.Millisecond * 50)
+		close()
+	}()

 	routers := httputil.NewRouters()
 	cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
@@ -124,6 +124,7 @@ func AdminResetPassword(req *http.Request, cfg *config.ClientAPI, device *api.Device
 	}
 	request := struct {
 		Password string `json:"password"`
+		LogoutDevices bool `json:"logout_devices"`
 	}{}
 	if err = json.NewDecoder(req.Body).Decode(&request); err != nil {
 		return util.JSONResponse{

@@ -146,7 +147,7 @@ func AdminResetPassword(req *http.Request, cfg *config.ClientAPI, device *api.Device
 		Localpart:     localpart,
 		ServerName:    serverName,
 		Password:      request.Password,
-		LogoutDevices: true,
+		LogoutDevices: request.LogoutDevices,
 	}
 	updateRes := &api.PerformPasswordUpdateResponse{}
 	if err := userAPI.PerformPasswordUpdate(req.Context(), updateReq, updateRes); err != nil {
dendrite-sample.yaml
@@ -69,8 +69,7 @@ global:
   # e.g. localhost:443
   well_known_server_name: ""

-  # The server name to delegate client-server communications to, with optional port
-  # e.g. localhost:443
+  # The base URL to delegate client-server communications to e.g. https://localhost
   well_known_client_name: ""

   # Lists of domains that the server will trust as identity servers to verify third
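Note: once `well_known_client_name` is set to a base URL, the delegation can be checked from outside; a sketch, with `example.com` as a placeholder for the real server name:

```bash
# The homeserver should now advertise the delegated base URL here
curl -s https://example.com/.well-known/matrix/client
# Expected shape: {"m.homeserver": {"base_url": "https://..."}}
```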
docs/FAQ.md
@@ -24,7 +24,7 @@ No, although a good portion of the Matrix specification has been implemented. Mo

 Dendrite development is currently supported by a small team of developers and due to those limited resources, the majority of the effort is focused on getting Dendrite to be
 specification complete. If there are major features you're requesting (e.g. new administration endpoints), we'd like to strongly encourage you to join the community in supporting
-the development efforts through [contributing](https://matrix-org.github.io/dendrite/development/contributing).
+the development efforts through [contributing](../development/contributing).

 ## Is there a migration path from Synapse to Dendrite?

@@ -103,7 +103,7 @@ This can be done by performing a room upgrade. Use the command `/upgraderoom <ve

 ## How do I reset somebody's password on my server?

-Use the admin endpoint [resetpassword](https://matrix-org.github.io/dendrite/administration/adminapi#post-_dendriteadminresetpassworduserid)
+Use the admin endpoint [resetpassword](./administration/adminapi#post-_dendriteadminresetpassworduserid)

 ## Should I use PostgreSQL or SQLite for my databases?

@@ -157,7 +157,7 @@ You may need to revisit the connection limit of your PostgreSQL server and/or ma

 ## VOIP and Video Calls don't appear to work on Dendrite

-There is likely an issue with your STUN/TURN configuration on the server. If you believe your configuration to be correct, please see the [troubleshooting](administration/5_troubleshooting.md) for troubleshooting recommendations.
+There is likely an issue with your STUN/TURN configuration on the server. If you believe your configuration to be correct, please see the [troubleshooting page](administration/6_troubleshooting.md) for troubleshooting recommendations.

 ## What is being reported when enabling phone-home statistics?
@@ -6,8 +6,8 @@ or alternatively, in the [installation](installation/) folder:

 1. [Planning your deployment](installation/1_planning.md)
 2. [Setting up the domain](installation/2_domainname.md)
-3. [Preparing database storage](installation/3_database.md)
-4. [Generating signing keys](installation/4_signingkey.md)
-5. [Installing as a monolith](installation/5_install_monolith.md)
-6. [Populate the configuration](installation/7_configuration.md)
-7. [Starting the monolith](installation/8_starting_monolith.md)
+3. [Installing Dendrite](installation/manual/1_build.md)
+4. [Preparing database storage](installation/manual/2_database.md)
+5. [Populate the configuration](installation/manual/3_configuration.md)
+6. [Generating signing keys](installation/manual/4_signingkey.md)
+7. [Starting Dendrite](installation/manual/5_starting_dendrite.md)
@@ -11,10 +11,9 @@ User accounts can be created on a Dendrite instance in a number of ways.

 ## From the command line

-The `create-account` tool is built in the `bin` folder when building Dendrite with
-the `build.sh` script.
+The `create-account` tool is built in the `bin` folder when [building](../installation/build) Dendrite.

-It uses the `dendrite.yaml` configuration file to connect to a running Dendrite instance and requires
+It uses the `dendrite.yaml` configuration file to connect to a **running** Dendrite instance and requires
 shared secret registration to be enabled as explained below.

 An example of using `create-account` to create a **normal account**:
@@ -1,6 +1,7 @@
 ---
 title: Supported admin APIs
 parent: Administration
+nav_order: 4
 permalink: /administration/adminapi
 ---

@@ -51,11 +52,15 @@ the room IDs of all affected rooms.

 Reset the password of a local user.

+**If `logout_devices` is set to `true`, all `access_tokens` will be invalidated, resulting
+in the potential loss of encrypted messages.**
+
 Request body format:

-```
+```json
 {
-  "password": "new_password_here"
+  "password": "new_password_here",
+  "logout_devices": false
 }
 ```
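Note: for illustration, a hedged example of calling this endpoint with the new field. It assumes the server listens on `localhost:8008`, that `$ADMIN_TOKEN` holds an admin user's access token, and that the path casing matches this heading's anchor; all three are assumptions about the deployment:

```bash
curl -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"password": "new_password_here", "logout_devices": false}' \
  "http://localhost:8008/_dendrite/admin/resetPassword/@alice:example.com"
```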
@@ -68,11 +73,14 @@ Indexing is done in the background, the server logs every 1000 events (or below)

 This endpoint instructs Dendrite to immediately query `/devices/{userID}` on a federated server. An empty JSON body will be returned on success, updating all locally stored user devices/keys. This can be used to possibly resolve E2EE issues, where the remote user can't decrypt messages.

+## POST `/_dendrite/admin/purgeRoom/{roomID}`
+
+This endpoint instructs Dendrite to remove the given room from its database. Before doing so, it will evacuate all local users from the room. It does **NOT** remove media files. Depending on the size of the room, this may take a while. It will return an empty JSON body once the other components have been instructed to delete the room.
+
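Note: a hedged usage sketch for the new purge endpoint, with the same placeholder server and admin token as above; the room ID must be URL-encoded, so the leading `!` becomes `%21`:

```bash
curl -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  "http://localhost:8008/_dendrite/admin/purgeRoom/%21roomid:example.com"
```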
 ## POST `/_synapse/admin/v1/send_server_notice`

 Request body format:

-```
+```json
 {
   "user_id": "@target_user:server_name",
   "content": {
@@ -85,7 +93,7 @@ Request body format:

 Send a server notice to a specific user. See the [Matrix Spec](https://spec.matrix.org/v1.3/client-server-api/#server-notices) for additional details on server notice behaviour.
 If successfully sent, the API will return the following response:

-```
+```json
 {
   "event_id": "<event_id>"
 }
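Note: a hedged sketch of sending a server notice via this endpoint (placeholder server and admin token as above; the `content` shape with `msgtype`/`body` follows the spec link and is an assumption about the full request body, which is truncated in this view):

```bash
curl -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"user_id": "@target_user:server_name", "content": {"msgtype": "m.text", "body": "Scheduled maintenance tonight"}}' \
  "http://localhost:8008/_synapse/admin/v1/send_server_notice"
```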
@@ -1,9 +1,9 @@
 ---
 title: Optimise your installation
-parent: Installation
+parent: Administration
 has_toc: true
-nav_order: 11
-permalink: /installation/start/optimisation
+nav_order: 5
+permalink: /administration/optimisation
 ---

 # Optimise your installation

@@ -36,11 +36,6 @@ connections it will open to the database.

 **If you are using the `global` database pool** then you only need to configure the
 `max_open_conns` setting once in the `global` section.

-**If you are defining a `database` config per component** then you will need to ensure that
-the **sum total** of all configured `max_open_conns` to a given database server do not exceed
-the connection limit. If you configure a total that adds up to more connections than are available
-then this will cause database queries to fail.
-
 You may wish to raise the `max_connections` limit on your PostgreSQL server to accommodate
 additional connections, in which case you should also update the `max_open_conns` in your
 Dendrite configuration accordingly. However be aware that this is only advisable on particularly
@@ -1,6 +1,7 @@
 ---
 title: Troubleshooting
 parent: Administration
+nav_order: 6
 permalink: /administration/troubleshooting
 ---

@@ -18,7 +19,7 @@ be clues in the logs.

 You can increase this log level to the more verbose `debug` level if necessary by adding
 this to the config and restarting Dendrite:

-```
+```yaml
 logging:
 - type: std
   level: debug
@@ -56,12 +57,7 @@ number of database connections does not exceed the maximum allowed by PostgreSQL

 Open your `postgresql.conf` configuration file and check the value of `max_connections`
 (which is typically `100` by default). Then open your `dendrite.yaml` configuration file
-and ensure that:
-
-1. If you are using the `global.database` section, that `max_open_conns` does not exceed
-   that number;
-2. If you are **not** using the `global.database` section, that the sum total of all
-   `max_open_conns` across all `database` blocks does not exceed that number.
+and ensure that in the `global.database` section, `max_open_conns` does not exceed that number.

 ## 5. File descriptors
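Note: a quick way to compare the two values from a shell; a sketch assuming local `psql` access as the `postgres` superuser and a `dendrite.yaml` in the current directory:

```bash
# What PostgreSQL will actually allow
sudo -u postgres psql -c 'SHOW max_connections;'

# What Dendrite is configured to open
grep max_open_conns dendrite.yaml
```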
@@ -77,7 +73,7 @@ If there aren't, you will see log lines like this:

 level=warning msg="IMPORTANT: Process file descriptor limit is currently 65535, it is recommended to raise the limit for Dendrite to at least 65535 to avoid issues"
 ```

-Follow the [Optimisation](../installation/11_optimisation.md) instructions to correct the
+Follow the [Optimisation](5_optimisation.md) instructions to correct the
 available number of file descriptors.

 ## 6. STUN/TURN Server tester
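Note: for reference, checking and raising the limit for the current shell looks like the sketch below; systemd deployments would instead set `LimitNOFILE=65535` in the unit file (an assumption about the setup in use):

```bash
# Show the current soft limit on open file descriptors
ulimit -n

# Raise it for this shell session before starting Dendrite
ulimit -n 65535
```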
Sample Caddyfile (deleted, 85 lines)
@@ -1,85 +0,0 @@
-# Sample Caddyfile for using Caddy in front of Dendrite
-#
-# Customize email address and domain names
-# Optional settings commented out
-#
-# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST
-# Documentation: <https://caddyserver.com/docs/>
-#
-# Bonus tip: If your IP address changes, use Caddy's
-# dynamic DNS plugin to update your DNS records to
-# point to your new IP automatically
-# <https://github.com/mholt/caddy-dynamicdns>
-#
-# Global options block
-{
-	# In case there is a problem with your certificates.
-	# email example@example.com
-
-	# Turn off the admin endpoint if you don't need graceful config
-	# changes and/or are running untrusted code on your machine.
-	# admin off
-
-	# Enable this if your clients don't send ServerName in TLS handshakes.
-	# default_sni example.com
-
-	# Enable debug mode for verbose logging.
-	# debug
-
-	# Use Let's Encrypt's staging endpoint for testing.
-	# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
-
-	# If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
-	# else, enable these and put the alternate port numbers here.
-	# http_port 8080
-	# https_port 8443
-}
-
-# The server name of your matrix homeserver. This example shows
-# "well-known delegation" from the registered domain to a subdomain
-# which is only needed if your server_name doesn't match your Matrix
-# homeserver URL (i.e. you can show users a vanity domain that looks
-# nice and is easy to remember but still have your Matrix server on
-# its own subdomain or hosted service)
-example.com {
-	header /.well-known/matrix/* Content-Type application/json
-	header /.well-known/matrix/* Access-Control-Allow-Origin *
-	respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
-	respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
-}
-
-# The actual domain name whereby your Matrix server is accessed
-matrix.example.com {
-	# Change the end of each reverse_proxy line to the correct
-	# address for your various services.
-	@sync_api {
-		path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$
-	}
-	reverse_proxy @sync_api sync_api:8073
-
-	reverse_proxy /_matrix/client* client_api:8071
-	reverse_proxy /_matrix/federation* federation_api:8071
-	reverse_proxy /_matrix/key* federation_api:8071
-	reverse_proxy /_matrix/media* media_api:8071
-}
@@ -1,6 +1,7 @@
 ---
 title: Contributing
 parent: Development
+nav_order: 1
 permalink: /development/contributing
 ---

@@ -1,6 +1,7 @@
 ---
 title: Profiling
 parent: Development
+nav_order: 4
 permalink: /development/profiling
 ---

@@ -1,52 +1,48 @@
 ---
 title: Coverage
 parent: Development
+nav_order: 3
 permalink: /development/coverage
 ---

-To generate a test coverage report for Sytest, a small patch needs to be applied to the Sytest repository to compile and use the instrumented binary:
-
-```patch
-diff --git a/lib/SyTest/Homeserver/Dendrite.pm b/lib/SyTest/Homeserver/Dendrite.pm
-index 8f0e209c..ad057e52 100644
---- a/lib/SyTest/Homeserver/Dendrite.pm
-+++ b/lib/SyTest/Homeserver/Dendrite.pm
-@@ -337,7 +337,7 @@ sub _start_monolith
-
-    $output->diag( "Starting monolith server" );
-    my @command = (
--      $self->{bindir} . '/dendrite',
-+      $self->{bindir} . '/dendrite', '--test.coverprofile=' . $self->{hs_dir} . '/integrationcover.log', "DEVEL",
-       '--config', $self->{paths}{config},
-       '--http-bind-address', $self->{bind_host} . ':' . $self->unsecure_port,
-       '--https-bind-address', $self->{bind_host} . ':' . $self->secure_port,
-diff --git a/scripts/dendrite_sytest.sh b/scripts/dendrite_sytest.sh
-index f009332b..7ea79869 100755
---- a/scripts/dendrite_sytest.sh
-+++ b/scripts/dendrite_sytest.sh
-@@ -34,7 +34,8 @@ export GOBIN=/tmp/bin
- echo >&2 "--- Building dendrite from source"
- cd /src
- mkdir -p $GOBIN
--go install -v ./cmd/dendrite
-+# go install -v ./cmd/dendrite
-+go test -c -cover -covermode=atomic -o $GOBIN/dendrite -coverpkg "github.com/matrix-org/..." ./cmd/dendrite
- go install -v ./cmd/generate-keys
- cd -
-```
-
-Then run Sytest. This will generate a new file `integrationcover.log` in each server's directory e.g. `server-0/integrationcover.log`. To parse it,
-ensure your working directory is under the Dendrite repository then run:
+## Running unit tests with coverage enabled
+
+Running unit tests with coverage enabled can be done with the following commands; this will generate an `integrationcover.log` file:
+
+```bash
+go test -covermode=atomic -coverpkg=./... -coverprofile=integrationcover.log $(go list ./... | grep -v '/cmd/')
+go tool cover -func=integrationcover.log
+```
+
+## Running Sytest with coverage enabled
+
+To run Sytest with coverage enabled:
+
+```bash
+docker run --rm --name sytest -v "/Users/kegan/github/sytest:/sytest" \
+	-v "/Users/kegan/github/dendrite:/src" -v "$(pwd)/sytest_logs:/logs" \
+	-v "/Users/kegan/go/:/gopath" -e "POSTGRES=1" \
+	-e "COVER=1" \
+	matrixdotorg/sytest-dendrite:latest
+
+# to get a more accurate coverage you may also need to run Sytest using SQLite as the database:
+docker run --rm --name sytest -v "/Users/kegan/github/sytest:/sytest" \
+	-v "/Users/kegan/github/dendrite:/src" -v "$(pwd)/sytest_logs:/logs" \
+	-v "/Users/kegan/go/:/gopath" \
+	-e "COVER=1" \
+	matrixdotorg/sytest-dendrite:latest
+```
+
+This will generate a folder `covdatafiles` in each server's directory, e.g. `server-0/covdatafiles`. To parse them,
+ensure your working directory is under the Dendrite repository then run:

 ```bash
-go tool cover -func=/path/to/server-0/integrationcover.log
+go tool covdata func -i="$(find -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
 ```
 which will produce an output like:
 ```
 ...
-github.com/matrix-org/util/json.go:83: NewJSONRequestHandler 100.0%
-github.com/matrix-org/util/json.go:90: Protect 57.1%
-github.com/matrix-org/util/json.go:110: RequestWithLogging 100.0%
 github.com/matrix-org/util/json.go:132: MakeJSONAPI 70.0%
-github.com/matrix-org/util/json.go:151: respond 61.5%
+github.com/matrix-org/util/json.go:151: respond 84.6%
 github.com/matrix-org/util/json.go:180: WithCORSOptions 0.0%
 github.com/matrix-org/util/json.go:191: SetCORSHeaders 100.0%
 github.com/matrix-org/util/json.go:202: RandomString 100.0%

@@ -54,25 +50,81 @@ github.com/matrix-org/util/json.go:210: init 100.0%
 github.com/matrix-org/util/unique.go:13: Unique 91.7%
 github.com/matrix-org/util/unique.go:48: SortAndUnique 100.0%
 github.com/matrix-org/util/unique.go:55: UniqueStrings 100.0%
-total: (statements) 53.7%
+total (statements) 64.0%
 ```
+(after running Sytest for Postgres _and_ SQLite)

-The total coverage for this run is the last line at the bottom. However, this value is misleading because Dendrite can run in many different configurations,
-which will never be tested in a single test run (e.g. sqlite or postgres). To get a more accurate value, additional processing is required
-to remove packages which will never be tested and extension MSCs:
+The total coverage for this run is the last line at the bottom. However, this value is misleading because Dendrite can run in different configurations,
+which will never be tested in a single test run (e.g. sqlite or postgres). To get a more accurate value, you'll need to run Sytest for Postgres and SQLite (see commands above).
+Additional processing is also required to remove packages which will never be tested and extension MSCs:

 ```bash
-# These commands are all similar but change which package paths are _removed_ from the output.
-
-# For Postgres
-go tool cover -func=/path/to/server-0/integrationcover.log | grep 'github.com/matrix-org/dendrite' | grep -Ev 'inthttp|sqlite|setup/mscs|api_trace' > coverage.txt
-
-# For SQLite
-go tool cover -func=/path/to/server-0/integrationcover.log | grep 'github.com/matrix-org/dendrite' | grep -Ev 'inthttp|postgres|setup/mscs|api_trace' > coverage.txt
+# If you executed both commands from above, you can get the total coverage using the following commands:
+go tool covdata textfmt -i="$(find -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o sytest.cov
+grep -Ev 'relayapi|setup/mscs' sytest.cov > final.cov
+go tool cover -func=final.cov
+
+# If you only executed the one for Postgres:
+go tool covdata textfmt -i="$(find -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o sytest.cov
+grep -Ev 'relayapi|sqlite|setup/mscs' sytest.cov > final.cov
+go tool cover -func=final.cov
+
+# If you only executed the one for SQLite:
+go tool covdata textfmt -i="$(find -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o sytest.cov
+grep -Ev 'relayapi|postgres|setup/mscs' sytest.cov > final.cov
+go tool cover -func=final.cov
 ```

-A total value can then be calculated using:
-
-```bash
-cat coverage.txt | awk -F '\t+' '{x = x + $3} END {print x/NR}'
-```
-
-We currently do not have a way to combine Sytest/Complement/Unit Tests into a single coverage report.
+## Getting coverage from Complement
+
+Getting the coverage for Complement runs is a bit more involved.
+
+First you'll need a docker image compatible with Complement; one can be built using
+```bash
+docker build -t complement-dendrite -f build/scripts/Complement.Dockerfile .
+```
+from within the Dendrite repository.
+
+Clone Complement to a directory of your liking:
+```bash
+git clone https://github.com/matrix-org/complement.git
+cd complement
+```
+
+Next we'll need a script to execute after a test finishes. Create a new file `posttest.sh`, make the file executable (`chmod +x posttest.sh`)
+and add the following content:
+```bash
+#!/bin/bash
+
+mkdir -p /tmp/Complement/logs/$2/$1/
+docker cp $1:/tmp/covdatafiles/. /tmp/Complement/logs/$2/$1/
+```
+This will copy the `covdatafiles` files from each container to something like
+`/tmp/Complement/logs/TestLogin/94f9c428de95779d2b62a3ccd8eab9d5ddcf65cc259a40ece06bdc61687ffed3/`. (`$1` is the container ID, `$2` the test name.)
+
+Now that we have set up everything we need, we can finally execute Complement:
+```bash
+COMPLEMENT_BASE_IMAGE=complement-dendrite \
+COMPLEMENT_SHARE_ENV_PREFIX=COMPLEMENT_DENDRITE_ \
+COMPLEMENT_DENDRITE_COVER=1 \
+COMPLEMENT_POST_TEST_SCRIPT=$(pwd)/posttest.sh \
+go test -tags dendrite_blacklist ./tests/... -count=1 -v -timeout=30m -failfast=false
+```
+
+Once this is done, you can copy the resulting `covdatafiles` files to your Dendrite repository for the next step:
+```bash
+cp -pr /tmp/Complement/logs PathToYourDendriteRepository
+```
+
+You can also run the following to get the coverage for Complement runs alone:
+```bash
+go tool covdata func -i="$(find /tmp/Complement -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
+```
+
+## Combining the results of (almost) all runs
+
+Now that we have all our `covdatafiles` files within the Dendrite repository, you can execute the following command to get the
+overall coverage (excluding unit tests):
+```bash
+go tool covdata func -i="$(find -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
+```
@@ -1,6 +1,7 @@
 ---
 title: SyTest
 parent: Development
+nav_order: 2
 permalink: /development/sytest
 ---

@@ -23,7 +24,7 @@ After running the tests, a script will print the tests you need to add to

 You should proceed after you see no build problems for dendrite after running:

 ```sh
-./build.sh
+go build -o bin/ ./cmd/...
 ```

 If you are fixing an issue marked with

@@ -61,6 +62,8 @@ When debugging, the following Docker `run` options may also be useful:

 * `-e "DENDRITE_TRACE_HTTP=1"`: Adds HTTP tracing to server logs.
 * `-e "DENDRITE_TRACE_INTERNAL=1"`: Adds roomserver internal API tracing to
   server logs.
+* `-e "COVER=1"`: Run Sytest with an instrumented binary, producing a Go coverage file per server.
+* `-e "RACE_DETECTION=1"`: Build the binaries with the `-race` flag. (Note: this will significantly slow down test runs.)

 The docker command also supports a single positional argument for the test file to
 run, so you can run a single `.pl` file rather than the whole test suite. For example:

@@ -71,68 +74,3 @@ docker run --rm --name sytest -v "/Users/kegan/github/sytest:/sytest"
 -v "/Users/kegan/go/:/gopath" -e "POSTGRES=1" -e "DENDRITE_TRACE_HTTP=1"
 matrixdotorg/sytest-dendrite:latest tests/50federation/40devicelists.pl
 ```
-
-### Manually Setting up SyTest
-
-**We advise AGAINST using manual SyTest setups.**
-
-If you don't want to use the Docker image, you can also run SyTest by hand. Make
-sure you have Perl 5 or above, and get SyTest with:
-
-(Note that this guide assumes your SyTest checkout is next to your
-`dendrite` checkout.)
-
-```sh
-git clone -b develop https://github.com/matrix-org/sytest
-cd sytest
-./install-deps.pl
-```
-
-Set up the database:
-
-```sh
-sudo -u postgres psql -c "CREATE USER dendrite PASSWORD 'itsasecret'"
-sudo -u postgres psql -c "ALTER USER dendrite CREATEDB"
-for i in dendrite0 dendrite1 sytest_template; do sudo -u postgres psql -c "CREATE DATABASE $i OWNER dendrite;"; done
-mkdir -p "server-0"
-cat > "server-0/database.yaml" << EOF
-args:
-    user: dendrite
-    password: itsasecret
-    database: dendrite0
-    host: 127.0.0.1
-    sslmode: disable
-type: pg
-EOF
-mkdir -p "server-1"
-cat > "server-1/database.yaml" << EOF
-args:
-    user: dendrite
-    password: itsasecret
-    database: dendrite1
-    host: 127.0.0.1
-    sslmode: disable
-type: pg
-EOF
-```
-
-Run the tests:
-
-```sh
-POSTGRES=1 ./run-tests.pl -I Dendrite::Monolith -d ../dendrite/bin -W ../dendrite/sytest-whitelist -O tap --all | tee results.tap
-```
-
-where `tee` lets you see the results while they're being piped to the file, and
-`POSTGRES=1` enables testing with PostgreSQL. If the `POSTGRES` environment
-variable is not set or is set to 0, SyTest will fall back to SQLite 3. For more
-flags and options, see <https://github.com/matrix-org/sytest#running>.
-
-Once the tests are complete, run the helper script to see if you need to add
-any newly passing test names to `sytest-whitelist` in the project's root
-directory:
-
-```sh
-../dendrite/show-expected-fail-tests.sh results.tap ../dendrite/sytest-whitelist ../dendrite/sytest-blacklist
-```
-
-If the script prints nothing/exits with 0, then you're good to go.
@@ -1,114 +0,0 @@
----
-title: OpenTracing
-has_children: true
-parent: Development
-permalink: /development/opentracing
----
-
-# OpenTracing
-
-Dendrite extensively uses the [opentracing.io](http://opentracing.io) framework
-to trace work across the different logical components.
-
-At its most basic opentracing tracks "spans" of work; recording start and end
-times as well as any parent span that caused the piece of work.
-
-A typical example would be a new span being created on an incoming request that
-finishes when the response is sent. When the code needs to hit out to a
-different component a new span is created with the initial span as its parent.
-This would end up looking roughly like:
-
-```
-Received request                              Sent response
-    |<───────────────────────────────────────>|
-        |<────────────────────>|
-     RPC call             RPC call returns
-```
-
-This is useful to see where the time is being spent processing a request on a
-component. However, opentracing allows tracking of spans across components. This
-makes it possible to see exactly what work goes into processing a request:
-
-```
-Component 1  |<─────────────────── HTTP ────────────────────>|
-                 |<──────────────── RPC ─────────────────>|
-Component 2          |<─ SQL ─>|      |<── RPC ───>|
-Component 3                              |<─ SQL ─>|
-```
-
-This is achieved by serializing span information during all communication
-between components. For HTTP requests, this is achieved by the sender
-serializing the span into a HTTP header, and the receiver deserializing the span
-on receipt. (Generally a new span is then immediately created with the
-deserialized span as the parent).
-
-A collection of spans that are related is called a trace.
-
-Spans are passed through the code via contexts, rather than manually. It is
-therefore important that all spans that are created are immediately added to the
-current context. Thankfully the opentracing library gives helper functions for
-doing this:
-
-```golang
-span, ctx := opentracing.StartSpanFromContext(ctx, spanName)
-defer span.Finish()
-```
-
-This will create a new span, adding any span already in `ctx` as a parent to the
-new span.
-
-Adding Information
-------------------
-
-Opentracing allows adding information to a trace via three mechanisms:
-
-- "tags" ─ A span can be tagged with a key/value pair. This is typically
-  information that relates to the span, e.g. spans created for incoming HTTP
-  requests could include the request path and response code as tags, spans for
-  SQL could include the query being executed.
-- "logs" ─ Key/value pairs can be logged at a particular instance in a trace.
-  This can be useful to log e.g. any errors that happen.
-- "baggage" ─ Arbitrary key/value pairs can be added to a span to which all
-  child spans have access. Baggage isn't saved and so isn't available when
-  inspecting the traces, but can be used to add context to logs or tags in child
-  spans.
-
-See
-[specification.md](https://github.com/opentracing/specification/blob/master/specification.md)
-for some of the common tags and log fields used.
-
-Span Relationships
-------------------
-
-Spans can be related to each other. The most common relation is `childOf`, which
-indicates the child span somehow depends on the parent span ─ typically the
-parent span cannot complete until all child spans are completed.
-
-A second relation type is `followsFrom`, where the parent has no dependence on
-the child span. This usually indicates some sort of fire and forget behaviour,
-e.g. adding a message to a pipeline or inserting into a kafka topic.
-
-Jaeger
-------
-
-Opentracing is just a framework. We use
-[jaeger](https://github.com/jaegertracing/jaeger) as the actual implementation.
-
-Jaeger is responsible for recording, sending and saving traces, as well as
-giving a UI for viewing and interacting with traces.
-
-To enable jaeger a `Tracer` object must be instantiated from the config (as well
-as having a jaeger server running somewhere, usually locally). A `Tracer` does
-several things:
-
-- Decides which traces to save and send to the server. There are multiple
-  schemes for doing this, with a simple example being to save a certain fraction
-  of traces.
-- Communicating with the jaeger backend. If not explicitly specified uses the
-  default port on localhost.
-- Associates a service name to all spans created by the tracer. This service
-  name equates to a logical component, e.g. spans created by clientapi will have
-  a different service name than ones created by the syncapi. Database access
-  will also typically use a different service name.
-
-This means that there is a tracer per service name/component.
@@ -1,57 +0,0 @@
----
-title: Setup
-parent: OpenTracing
-grand_parent: Development
-permalink: /development/opentracing/setup
----
-
-# OpenTracing Setup
-
-Dendrite uses [Jaeger](https://www.jaegertracing.io/) for tracing between microservices.
-Tracing shows the nesting of logical spans which provides visibility on how the microservices interact.
-This document explains how to set up Jaeger locally on a single machine.
-
-## Set up the Jaeger backend
-
-The [easiest way](https://www.jaegertracing.io/docs/1.18/getting-started/) is to use the all-in-one Docker image:
-
-```
-$ docker run -d --name jaeger \
-    -e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \
-    -p 5775:5775/udp \
-    -p 6831:6831/udp \
-    -p 6832:6832/udp \
-    -p 5778:5778 \
-    -p 16686:16686 \
-    -p 14268:14268 \
-    -p 14250:14250 \
-    -p 9411:9411 \
-    jaegertracing/all-in-one:1.18
-```
-
-## Configuring Dendrite to talk to Jaeger
-
-Modify your config to look like: (this will send every single span to Jaeger which will be slow on large instances, but for local testing it's fine)
-
-```
-tracing:
-  enabled: true
-  jaeger:
-    serviceName: "dendrite"
-    disabled: false
-    rpc_metrics: true
-    tags: []
-    sampler:
-      type: const
-      param: 1
-```
-
-then run the monolith server:
-
-```
-./dendrite --tls-cert server.crt --tls-key server.key --config dendrite.yaml
-```
-
-## Checking traces
-
-Visit <http://localhost:16686> to see traces under `DendriteMonolith`.
@@ -1,35 +0,0 @@
-# Depending on which port is used for federation (.well-known/matrix/server or SRV record),
-# ensure there's a binding for that port in the configuration. Replace "FEDPORT" with port
-# number, (e.g. "8448"), and "IPV4" with your server's ipv4 address (separate binding for
-# each ip address, e.g. if you use both ipv4 and ipv6 addresses).
-
-Binding {
-    Port = FEDPORT
-    Interface = IPV4
-    TLScertFile = /path/to/fullchainandprivkey.pem
-}
-
-VirtualHost {
-    ...
-    # route requests to:
-    # /_matrix/client/.*/sync
-    # /_matrix/client/.*/user/{userId}/filter
-    # /_matrix/client/.*/user/{userId}/filter/{filterID}
-    # /_matrix/client/.*/keys/changes
-    # /_matrix/client/.*/rooms/{roomId}/messages
-    # /_matrix/client/.*/rooms/{roomId}/context/{eventID}
-    # /_matrix/client/.*/rooms/{roomId}/event/{eventID}
-    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}
-    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}
-    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}/{eventType}
-    # /_matrix/client/.*/rooms/{roomId}/members
-    # /_matrix/client/.*/rooms/{roomId}/joined_members
-    # to sync_api
-    ReverseProxy = /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$ http://localhost:8073 600
-    ReverseProxy = /_matrix/client http://localhost:8071 600
-    ReverseProxy = /_matrix/federation http://localhost:8072 600
-    ReverseProxy = /_matrix/key http://localhost:8072 600
-    ReverseProxy = /_matrix/media http://localhost:8074 600
-    ...
-}
@@ -7,23 +7,13 @@ permalink: /installation/planning
 
 # Planning your installation
 
-## Modes
-
-Dendrite consists of several components, each responsible for a different aspect of the Matrix protocol.
-Users can run Dendrite in one of two modes which dictate how these components are executed and communicate.
-
-* **Monolith mode** runs all components in a single process. Components communicate through an internal NATS
-  server with generally low overhead. This mode dramatically simplifies deployment complexity and offers the
-  best balance between performance and resource usage for low-to-mid volume deployments.
-
-## Databases
+## Database
 
 Dendrite can run with either a PostgreSQL or a SQLite backend. There are considerable tradeoffs
 to consider:
 
 * **PostgreSQL**: Needs to run separately to Dendrite, needs to be installed and configured separately
-  and and will use more resources over all, but will be **considerably faster** than SQLite. PostgreSQL
+  and will use more resources overall, but will be **considerably faster** than SQLite. PostgreSQL
   has much better write concurrency which will allow Dendrite to process more tasks in parallel. This
   will be necessary for federated deployments to perform adequately.
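To make the tradeoff above concrete: the two backends differ only in the `connection_string` used in the `database` section of the configuration (a sketch with placeholder credentials; both forms appear in the configuration docs later in this changeset):

```yaml
database:
  # PostgreSQL: separate server process, much better write concurrency
  connection_string: postgres://dendrite:password@localhost/dendrite?sslmode=disable
  # SQLite: a single file on disk, no extra service, slower under concurrent writes
  # connection_string: file:dendrite.db
```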
@@ -80,18 +70,17 @@ If using the PostgreSQL database engine, you should install PostgreSQL 12 or later
 
 ### NATS Server
 
 Dendrite comes with a built-in [NATS Server](https://github.com/nats-io/nats-server) and
-therefore does not need this to be manually installed. If you are planning a monolith installation, you
-do not need to do anything.
+therefore does not need this to be manually installed.
 
 ### Reverse proxy
 
 A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or
-[HAProxy](http://www.haproxy.org) is useful for deployments. Configuring those is not covered in this documentation, although sample configurations
+[HAProxy](http://www.haproxy.org) is useful for deployments. Configuring this is not covered in this documentation, although sample configurations
 for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and
 [NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided.
 
 ### Windows
 
-Finally, if you want to build Dendrite on Windows, you will need need `gcc` in the path. The best
+Finally, if you want to build Dendrite on Windows, you will need `gcc` in the path. The best
 way to achieve this is by installing and building Dendrite under [MinGW-w64](https://www.mingw-w64.org/).
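To illustrate the reverse proxy's role (a minimal sketch, not one of the provided sample configurations; `example.com` is a placeholder), an NGINX server block forwarding Matrix traffic to Dendrite's default port might look like:

```
server {
    listen 443 ssl;
    server_name example.com;  # placeholder domain

    # forward all Matrix API traffic to Dendrite (default HTTP port 8008)
    location /_matrix {
        proxy_pass http://localhost:8008;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
```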
@@ -20,7 +20,7 @@ Matrix servers usually discover each other when federating using the following methods:
 
    well-known file to connect to the remote homeserver;
 2. If a DNS SRV delegation exists on `example.com`, use the IP address and port from the DNS SRV
    record to connect to the remote homeserver;
-3. If neither well-known or DNS SRV delegation are configured, attempt to connect to the remote
+3. If neither well-known nor DNS SRV delegation is configured, attempt to connect to the remote
    homeserver by connecting to `example.com` port TCP/8448 using HTTPS.
 
 The exact details of how server name resolution works can be found in
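For reference, the two delegation mechanisms described above look like this (placeholder hostnames; the formats follow the Matrix specification):

```
# 1. Well-known delegation: served at https://example.com/.well-known/matrix/server
{ "m.server": "matrix.example.com:443" }

# 2. DNS SRV delegation for example.com
_matrix._tcp.example.com. 3600 IN SRV 10 5 8448 matrix.example.com.
```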
@@ -1,21 +0,0 @@
----
-title: Installing as a monolith
-parent: Installation
-has_toc: true
-nav_order: 5
-permalink: /installation/install/monolith
----
-
-# Installing as a monolith
-
-You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:
-
-```sh
-go install ./cmd/dendrite
-```
-
-Alternatively, you can specify a custom path for the binary to be written to using `go build`:
-
-```sh
-go build -o /usr/local/bin/ ./cmd/dendrite
-```
@@ -1,42 +0,0 @@
----
-title: Starting the monolith
-parent: Installation
-has_toc: true
-nav_order: 9
-permalink: /installation/start/monolith
----
-
-# Starting the monolith
-
-Once you have completed all of the preparation and installation steps,
-you can start your Dendrite monolith deployment by starting `dendrite`:
-
-```bash
-./dendrite -config /path/to/dendrite.yaml
-```
-
-By default, Dendrite will listen HTTP on port 8008. If you want to change the addresses
-or ports that Dendrite listens on, you can use the `-http-bind-address` and
-`-https-bind-address` command line arguments:
-
-```bash
-./dendrite -config /path/to/dendrite.yaml \
-  -http-bind-address 1.2.3.4:12345 \
-  -https-bind-address 1.2.3.4:54321
-```
-
-## Running under systemd
-
-A common deployment pattern is to run the monolith under systemd. For this, you
-will need to create a service unit file. An example service unit file is available
-in the [GitHub repository](https://github.com/matrix-org/dendrite/blob/main/docs/systemd/monolith-example.service).
-
-Once you have installed the service unit, you can notify systemd, enable and start
-the service:
-
-```bash
-systemctl daemon-reload
-systemctl enable dendrite
-systemctl start dendrite
-journalctl -fu dendrite
-```
docs/installation/docker.md (new file, 11 lines)

@@ -0,0 +1,11 @@
+---
+title: Docker
+parent: Installation
+has_children: true
+nav_order: 4
+permalink: /docker
+---
+
+# Installation using Docker
+
+This section contains documentation on how to install Dendrite using Docker.
docs/installation/docker/1_docker.md (new file, 57 lines)

@@ -0,0 +1,57 @@
+---
+title: Installation
+parent: Docker
+grand_parent: Installation
+has_toc: true
+nav_order: 1
+permalink: /installation/docker/install
+---
+
+# Installing Dendrite using Docker Compose
+
+Dendrite provides an [example](https://github.com/matrix-org/dendrite/blob/main/build/docker/docker-compose.yml)
+Docker Compose file, which needs some preparation to start successfully.
+Please note that this compose file only has Postgres as a dependency, and you need to configure
+a [reverse proxy](../planning#reverse-proxy).
+
+## Preparations
+
+### Generate a private key
+
+First we'll generate a private key, which is used to sign events. The following will create one in `./config`:
+
+```bash
+mkdir -p ./config
+docker run --rm --entrypoint="/usr/bin/generate-keys" \
+  -v $(pwd)/config:/mnt \
+  matrixdotorg/dendrite-monolith:latest \
+  -private-key /mnt/matrix_key.pem
+```
+
+(**NOTE**: This only needs to be executed **once**, as you would otherwise overwrite the key.)
+
+### Generate a config
+
+Similar to the command above, we can generate a config to be used, which will use the correct paths
+as specified in the example docker-compose file. Change `server` to your domain and `db` according to your changes
+to the docker-compose file (`services.postgres.environment` values):
+
+```bash
+mkdir -p ./config
+docker run --rm --entrypoint="/bin/sh" \
+  -v $(pwd)/config:/mnt \
+  matrixdotorg/dendrite-monolith:latest \
+  -c "/usr/bin/generate-config \
+    -dir /var/dendrite/ \
+    -db postgres://dendrite:itsasecret@postgres/dendrite?sslmode=disable \
+    -server YourDomainHere > /mnt/dendrite.yaml"
+```
+
+You can then change `config/dendrite.yaml` to your liking.
+
+## Starting Dendrite
+
+Once you're done changing the config, you can now start up Dendrite with
+
+```bash
+docker-compose -f docker-compose.yml up
+```
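Once that works, you will probably want Compose to run in the background. A sketch of the usual pattern:

```bash
# start detached, then follow the logs to watch Dendrite come up
docker-compose -f docker-compose.yml up -d
docker-compose -f docker-compose.yml logs -f
```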
docs/installation/helm.md (new file, 11 lines)

@@ -0,0 +1,11 @@
+---
+title: Helm
+parent: Installation
+has_children: true
+nav_order: 3
+permalink: /helm
+---
+
+# Helm
+
+This section contains documentation on how to use [Helm](https://helm.sh/) to install Dendrite on a [Kubernetes](https://kubernetes.io/) cluster.
docs/installation/helm/1_helm.md (new file, 58 lines)

@@ -0,0 +1,58 @@
+---
+title: Installation
+parent: Helm
+grand_parent: Installation
+has_toc: true
+nav_order: 1
+permalink: /installation/helm/install
+---
+
+# Installing Dendrite using Helm
+
+To install Dendrite using the Helm chart, you first have to add the repository using the following commands:
+
+```bash
+helm repo add dendrite https://matrix-org.github.io/dendrite/
+helm repo update
+```
+
+Next you'll need to create a `values.yaml` file and configure it to your liking. All possible values can be found
+[here](https://github.com/matrix-org/dendrite/blob/main/helm/dendrite/values.yaml), but at a minimum you need to configure
+a `server_name`, otherwise the chart will complain about it:
+
+```yaml
+dendrite_config:
+  global:
+    server_name: "localhost"
+```
+
+If you are going to use an existing Postgres database, you'll also need to configure this connection:
+
+```yaml
+dendrite_config:
+  global:
+    database:
+      connection_string: "postgresql://PostgresUser:PostgresPassword@PostgresHostName/DendriteDatabaseName"
+      max_open_conns: 90
+      max_idle_conns: 5
+      conn_max_lifetime: -1
+```
+
+## Installing with PostgreSQL
+
+The chart comes with a dependency on Postgres, which can be installed alongside Dendrite. This needs to be enabled in
+the `values.yaml`:
+
+```yaml
+postgresql:
+  enabled: true # this installs Postgres
+  primary:
+    persistence:
+      size: 1Gi # defines the size for $PGDATA
+
+dendrite_config:
+  global:
+    server_name: "localhost"
+```
+
+Using this option, the `database.connection_string` will be set for you automatically.
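With the repository added and a `values.yaml` prepared, the install itself is a single command (a sketch; `my-dendrite` is a placeholder release name, and `dendrite/dendrite` assumes the chart is published under the name `dendrite` in the repository added above):

```bash
helm upgrade --install my-dendrite dendrite/dendrite -f values.yaml
```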
docs/installation/manual.md (new file, 11 lines)

@@ -0,0 +1,11 @@
+---
+title: Manual
+parent: Installation
+has_children: true
+nav_order: 5
+permalink: /manual
+---
+
+# Manual Installation
+
+This section contains documentation on how to manually install Dendrite.
@@ -1,31 +1,26 @@
 ---
-title: Building Dendrite
-parent: Installation
+title: Building/Installing Dendrite
+parent: Manual
+grand_parent: Installation
 has_toc: true
-nav_order: 3
-permalink: /installation/build
+nav_order: 1
+permalink: /installation/manual/build
 ---
 
 # Build all Dendrite commands
 
 Dendrite has numerous utility commands in addition to the actual server binaries.
-Build them all from the root of the source repo with `build.sh` (Linux/Mac):
+Build them all from the root of the source repo with:
 
 ```sh
-./build.sh
+go build -o bin/ ./cmd/...
 ```
-
-or `build.cmd` (Windows):
-
-```powershell
-build.cmd
-```
 
 The resulting binaries will be placed in the `bin` subfolder.
 
-# Installing as a monolith
+# Installing Dendrite
 
-You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:
+You can install the Dendrite binary into `$GOPATH/bin` by using `go install`:
 
 ```sh
 go install ./cmd/dendrite
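A quick way to sanity-check the build output (the exact set of binaries depends on which commands exist under `cmd/` in your checkout):

```sh
ls bin/
# expect the dendrite server binary plus utilities such as generate-keys and generate-config
```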
@@ -1,8 +1,10 @@
 ---
 title: Preparing database storage
 parent: Installation
-nav_order: 3
-permalink: /installation/database
+nav_order: 2
+parent: Manual
+grand_parent: Installation
+permalink: /installation/manual/database
 ---
 
 # Preparing database storage
@@ -13,31 +15,22 @@ may need to perform some manual steps outlined below.
 
 ## PostgreSQL
 
 Dendrite can automatically populate the database with the relevant tables and indexes, but
-it is not capable of creating the databases themselves. You will need to create the databases
+it is not capable of creating the database itself. You will need to create the database
 manually.
 
-The databases **must** be created with UTF-8 encoding configured or you will likely run into problems
+The database **must** be created with UTF-8 encoding configured, or you will likely run into problems
 with your Dendrite deployment.
 
-At this point, you can choose to either use a single database for all Dendrite components,
-or you can run each component with its own separate database:
-
-* **Single database**: You will need to create a single PostgreSQL database. Monolith deployments
-  can use a single global connection pool, which makes updating the configuration file much easier.
-  Only one database connection string to manage and likely simpler to back up the database. All
-  components will be sharing the same database resources (CPU, RAM, storage).
-
-* **Separate databases**: You will need to create a separate PostgreSQL database for each
-  component. You will need to configure each component that has storage in the Dendrite
-  configuration file with its own connection parameters. Allows running a different database engine
-  for each component on a different machine if needs be, each with their own CPU, RAM and storage —
-  almost certainly overkill unless you are running a very large Dendrite deployment.
-
-For either configuration, you will want to:
+You will need to create a single PostgreSQL database. Deployments
+can use a single global connection pool, which makes updating the configuration file much easier:
+there is only one database connection string to manage, and backing up the database is likely simpler. All
+components will share the same database resources (CPU, RAM, storage).
+
+You will most likely want to:
 
 1. Configure a role (with a username and password) which Dendrite can use to connect to the
    database;
-2. Create the database(s) themselves, ensuring that the Dendrite role has privileges over them.
+2. Create the database itself, ensuring that the Dendrite role has privileges over it.
    As Dendrite will create and manage the database tables, indexes and sequences by itself, the
    Dendrite role must have suitable privileges over the database.

@@ -71,27 +64,6 @@ Create the database itself, using the `dendrite` role from above:
 sudo -u postgres createdb -O dendrite -E UTF-8 dendrite
 ```
 
-### Multiple database creation
-
-The following eight components require a database. In this example they will be named:
-
-| Appservice API | `dendrite_appservice` |
-| Federation API | `dendrite_federationapi` |
-| Media API | `dendrite_mediaapi` |
-| MSCs | `dendrite_mscs` |
-| Roomserver | `dendrite_roomserver` |
-| Sync API | `dendrite_syncapi` |
-| Key server | `dendrite_keyserver` |
-| User API | `dendrite_userapi` |
-
-... therefore you will need to create eight different databases:
-
-```bash
-for i in appservice federationapi mediaapi mscs roomserver syncapi keyserver userapi; do
-    sudo -u postgres createdb -O dendrite -E UTF-8 dendrite_$i
-done
-```
-
 ## SQLite
 
 **WARNING:** The Dendrite SQLite backend is slower, less reliable and not recommended for
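Before pointing Dendrite at the database, it can be worth confirming the role, database and encoding created above (a sketch; adjust the host and authentication to your PostgreSQL setup):

```bash
psql "postgres://dendrite@localhost/dendrite" \
  -c "SELECT datname, pg_encoding_to_char(encoding) FROM pg_database WHERE datname = 'dendrite';"
```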
@@ -1,8 +1,9 @@
 ---
 title: Configuring Dendrite
-parent: Installation
-nav_order: 7
-permalink: /installation/configuration
+parent: Manual
+grand_parent: Installation
+nav_order: 3
+permalink: /installation/manual/configuration
 ---
 
 # Configuring Dendrite

@@ -20,7 +21,7 @@ sections:
 
 First of all, you will need to configure the server name of your Matrix homeserver.
 This must match the domain name that you have selected whilst [configuring the domain
-name delegation](domainname).
+name delegation](domainname#delegation).
 
 In the `global` section, set the `server_name` to your delegated domain name:

@@ -44,7 +45,7 @@ global:
 
 ## JetStream configuration
 
-Monolith deployments can use the built-in NATS Server rather than running a standalone
+Dendrite deployments can use the built-in NATS Server rather than running a standalone
 server. If you want to use a standalone NATS Server anyway, you can also configure that too.
 
 ### Built-in NATS Server

@@ -56,7 +57,6 @@ configured and set a `storage_path` to a persistent folder on the filesystem:
 global:
   # ...
   jetstream:
-    in_memory: false
     storage_path: /path/to/storage/folder
     topic_prefix: Dendrite
 ```

@@ -79,22 +79,17 @@ You do not need to configure the `storage_path` when using a standalone NATS Server.
 In the case that you are connecting to a multi-node NATS cluster, you can configure more than
 one address in the `addresses` field.
 
-## Database connections
+## Database connection using a global connection pool
 
-Configuring database connections varies based on the [database configuration](database)
-that you chose.
-
-### Global connection pool
-
-If you want to use a single connection pool to a single PostgreSQL database, then you must
-uncomment and configure the `database` section within the `global` section:
+If you want to use a single connection pool to a single PostgreSQL database,
+then you must uncomment and configure the `database` section within the `global` section:
 
 ```yaml
 global:
   # ...
   database:
     connection_string: postgres://user:pass@hostname/database?sslmode=disable
-    max_open_conns: 100
+    max_open_conns: 90
     max_idle_conns: 5
     conn_max_lifetime: -1
 ```

@@ -104,42 +99,13 @@ configuration file, e.g. under the `app_service_api`, `federation_api`, `key_server`,
 `media_api`, `mscs`, `relay_api`, `room_server`, `sync_api` and `user_api` blocks, otherwise
 these will override the `global` database configuration.
 
-### Per-component connections (all other configurations)
-
-If you are using SQLite databases or separate PostgreSQL
-databases per component, then you must instead configure the `database` sections under each
-of the component blocks, e.g. under the `app_service_api`, `federation_api`, `key_server`,
-`media_api`, `mscs`, `relay_api`, `room_server`, `sync_api` and `user_api` blocks.
-
-For example, with PostgreSQL:
-
-```yaml
-room_server:
-  # ...
-  database:
-    connection_string: postgres://user:pass@hostname/dendrite_component?sslmode=disable
-    max_open_conns: 10
-    max_idle_conns: 2
-    conn_max_lifetime: -1
-```
-
-... or with SQLite:
-
-```yaml
-room_server:
-  # ...
-  database:
-    connection_string: file:roomserver.db
-    max_open_conns: 10
-    max_idle_conns: 2
-    conn_max_lifetime: -1
-```
-
 ## Full-text search
 
-Dendrite supports experimental full-text indexing using [Bleve](https://github.com/blevesearch/bleve). It is configured in the `sync_api` section as follows.
+Dendrite supports full-text indexing using [Bleve](https://github.com/blevesearch/bleve). It is configured in the `sync_api` section as follows.
 
-Depending on the language most likely to be used on the server, it might make sense to change the `language` used when indexing, to ensure the returned results match the expectations. A full list of possible languages can be found [here](https://github.com/blevesearch/bleve/tree/master/analysis/lang).
+Depending on the language most likely to be used on the server, it might make sense to change the `language` used when indexing,
+to ensure the returned results match the expectations. A full list of possible languages
+can be found [here](https://github.com/matrix-org/dendrite/blob/5b73592f5a4dddf64184fcbe33f4c1835c656480/internal/fulltext/bleve.go#L25-L46).
 
 ```yaml
 sync_api:
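The hunk above is truncated at the start of the `sync_api` block; for orientation, a minimal search configuration typically has this shape (a hedged sketch; verify the field names against the sample `dendrite.yaml` shipped in the repository):

```yaml
sync_api:
  search:
    enabled: true
    index_path: ./searchindex
    language: en  # one of the analyzer languages linked above
```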
@@ -1,8 +1,9 @@
 ---
 title: Generating signing keys
-parent: Installation
-nav_order: 8
-permalink: /installation/signingkeys
+parent: Manual
+grand_parent: Installation
+nav_order: 4
+permalink: /installation/manual/signingkeys
 ---
 
 # Generating signing keys

@@ -11,7 +12,7 @@ All Matrix homeservers require a signing private key, which will be used to authenticate
 federation requests and events.
 
 The `generate-keys` utility can be used to generate a private key. Assuming that Dendrite was
-built using `build.sh`, you should find the `generate-keys` utility in the `bin` folder.
+built using `go build -o bin/ ./cmd/...`, you should find the `generate-keys` utility in the `bin` folder.
 
 To generate a Matrix signing private key:
docs/installation/manual/5_starting_dendrite.md (new file, 26 lines)

@@ -0,0 +1,26 @@
+---
+title: Starting Dendrite
+parent: Manual
+grand_parent: Installation
+nav_order: 5
+permalink: /installation/manual/start
+---
+
+# Starting Dendrite
+
+Once you have completed all preparation and installation steps,
+you can start your Dendrite deployment by executing the `dendrite` binary:
+
+```bash
+./dendrite -config /path/to/dendrite.yaml
+```
+
+By default, Dendrite will listen for HTTP on port 8008. If you want to change the addresses
+or ports that Dendrite listens on, you can use the `-http-bind-address` and
+`-https-bind-address` command line arguments:
+
+```bash
+./dendrite -config /path/to/dendrite.yaml \
+  -http-bind-address 1.2.3.4:12345 \
+  -https-bind-address 1.2.3.4:54321
+```
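Once started, a quick way to confirm the server is listening is to hit an unauthenticated Matrix endpoint (`/_matrix/client/versions` is part of the standard client-server API):

```bash
curl http://localhost:8008/_matrix/client/versions
# a JSON list of supported spec versions indicates Dendrite is up
```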
@@ -1,58 +0,0 @@
-server {
-    listen 443 ssl; # IPv4
-    listen [::]:443 ssl; # IPv6
-    server_name my.hostname.com;
-
-    ssl_certificate /path/to/fullchain.pem;
-    ssl_certificate_key /path/to/privkey.pem;
-    ssl_dhparam /path/to/ssl-dhparams.pem;
-
-    proxy_set_header Host $host;
-    proxy_set_header X-Real-IP $remote_addr;
-    proxy_read_timeout 600;
-
-    location /.well-known/matrix/server {
-        return 200 '{ "m.server": "my.hostname.com:443" }';
-    }
-
-    location /.well-known/matrix/client {
-        # If your server_name here doesn't match your matrix homeserver URL
-        # (e.g. hostname.com as server_name and matrix.hostname.com as homeserver URL)
-        # add_header Access-Control-Allow-Origin '*';
-        return 200 '{ "m.homeserver": { "base_url": "https://my.hostname.com" } }';
-    }
-
-    # route requests to:
-    # /_matrix/client/.*/sync
-    # /_matrix/client/.*/user/{userId}/filter
-    # /_matrix/client/.*/user/{userId}/filter/{filterID}
-    # /_matrix/client/.*/keys/changes
-    # /_matrix/client/.*/rooms/{roomId}/messages
-    # /_matrix/client/.*/rooms/{roomId}/context/{eventID}
-    # /_matrix/client/.*/rooms/{roomId}/event/{eventID}
-    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}
-    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}
-    # /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}/{eventType}
-    # /_matrix/client/.*/rooms/{roomId}/members
-    # /_matrix/client/.*/rooms/{roomId}/joined_members
-    # to sync_api
-    location ~ /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$ {
-        proxy_pass http://sync_api:8073;
-    }
-
-    location /_matrix/client {
-        proxy_pass http://client_api:8071;
-    }
-
-    location /_matrix/federation {
-        proxy_pass http://federation_api:8072;
-    }
-
-    location /_matrix/key {
-        proxy_pass http://federation_api:8072;
-    }
-
-    location /_matrix/media {
-        proxy_pass http://media_api:8074;
-    }
-}
@@ -1,19 +0,0 @@
-[Unit]
-Description=Dendrite (Matrix Homeserver)
-After=syslog.target
-After=network.target
-After=postgresql.service
-
-[Service]
-Environment=GODEBUG=madvdontneed=1
-RestartSec=2s
-Type=simple
-User=dendrite
-Group=dendrite
-WorkingDirectory=/opt/dendrite/
-ExecStart=/opt/dendrite/bin/dendrite
-Restart=always
-LimitNOFILE=65535
-
-[Install]
-WantedBy=multi-user.target
@@ -136,7 +136,7 @@ func (r *Admin) PerformAdminEvacuateRoom(
 
 	inputReq := &api.InputRoomEventsRequest{
 		InputRoomEvents: inputEvents,
-		Asynchronous:    true,
+		Asynchronous:    false,
 	}
 	inputRes := &api.InputRoomEventsResponse{}
 	r.Inputer.InputRoomEvents(ctx, inputReq, inputRes)
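The switch to `Asynchronous: false` above means the roomserver processes the evacuation events before the call returns, which matters when a destructive follow-up (such as a purge) runs next. A self-contained sketch of that ordering, using hypothetical stand-ins for the roomserver input API rather than the real `api` package types:

```go
package roomadmin

import "context"

// Hypothetical stand-ins for the roomserver input API, to illustrate why
// synchronous submission matters before a destructive follow-up.
type inputRequest struct {
	Events       []string
	Asynchronous bool
}

type inputResponse struct{ err error }

func (r *inputResponse) Err() error { return r.err }

type inputer interface {
	InputRoomEvents(ctx context.Context, req *inputRequest, res *inputResponse)
}

// evacuateThenPurge sketches the ordering the diff enforces: the evacuation
// events are fully processed (synchronously) and checked for errors before
// the room is purged.
func evacuateThenPurge(ctx context.Context, in inputer, events []string, purge func() error) error {
	req := &inputRequest{Events: events, Asynchronous: false} // block until processed
	res := &inputResponse{}
	in.InputRoomEvents(ctx, req, res)
	if err := res.Err(); err != nil {
		return err // surface the failure before purging anything
	}
	return purge()
}
```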
@@ -200,18 +200,24 @@ func (r *Admin) PerformAdminPurgeRoom(
 	}
 
 	// Evacuate the room before purging it from the database
-	if _, err := r.PerformAdminEvacuateRoom(ctx, roomID); err != nil {
+	evacAffected, err := r.PerformAdminEvacuateRoom(ctx, roomID)
+	if err != nil {
 		logrus.WithField("room_id", roomID).WithError(err).Warn("Failed to evacuate room before purging")
 		return err
 	}
 
+	logrus.WithFields(logrus.Fields{
+		"room_id":         roomID,
+		"evacuated_users": len(evacAffected),
+	}).Warn("Evacuated room, purging room from roomserver now")
+
 	logrus.WithField("room_id", roomID).Warn("Purging room from roomserver")
 	if err := r.DB.PurgeRoom(ctx, roomID); err != nil {
 		logrus.WithField("room_id", roomID).WithError(err).Warn("Failed to purge room from roomserver")
 		return err
 	}
 
-	logrus.WithField("room_id", roomID).Warn("Room purged from roomserver")
+	logrus.WithField("room_id", roomID).Warn("Room purged from roomserver, informing other components")
 
 	return r.Inputer.OutputProducer.ProduceRoomEvents(roomID, []api.OutputEvent{
 		{
@@ -130,7 +130,7 @@ func (s *accountDataStatements) SelectAccountDataInRange(
 	if pos == 0 {
 		pos = r.High()
 	}
-	return data, pos, nil
+	return data, pos, rows.Err()
 }
 
 func (s *accountDataStatements) SelectMaxAccountDataID(
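Returning `rows.Err()` instead of `nil` is the standard `database/sql` idiom: `rows.Next()` returning false can mean either "no more rows" or "iteration aborted", and only `rows.Err()` distinguishes the two. A generic sketch of the pattern with illustrative names (not this file's actual query):

```go
package storage

import (
	"context"
	"database/sql"
)

// selectContentSince demonstrates the iteration idiom this change adopts.
func selectContentSince(ctx context.Context, db *sql.DB, pos int64) ([]string, error) {
	rows, err := db.QueryContext(ctx, "SELECT content FROM account_data WHERE id > $1", pos)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // always release the result set

	var results []string
	for rows.Next() {
		var content string
		if err := rows.Scan(&content); err != nil {
			return nil, err
		}
		results = append(results, content)
	}
	// rows.Err() reports any error that ended the loop early; returning nil
	// here (as the old code did) would silently truncate the result set.
	return results, rows.Err()
}
```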