Merge branch 'master' into neilalexander/rsconcurrency

Commit ea4ae5d85d. 53 changed files with 462 additions and 369 deletions.

CHANGES.md (20 lines changed)
@@ -1,5 +1,25 @@
# Changelog

+## Dendrite 0.3.8 (2021-01-28)
+
+### Fixes
+
+* A well-known lookup regression in version 0.3.7 has been fixed
+
+## Dendrite 0.3.7 (2021-01-26)
+
+### Features
+
+* Sync filtering support (for event types, senders and limits)
+* In-process DNS caching support for deployments where a local DNS caching resolver is not available (disabled by default)
+* Experimental support for MSC2444 (Peeking over Federation) has been merged
+* Experimental federation support for MSC2946 (Spaces Summary) has been merged
+
+### Fixes
+
+* Dendrite will no longer load a given event more than once for state resolution, which may help to reduce memory usage and database I/O slightly in some cases
+* Large well-known responses will no longer use significant amounts of memory
+
## Dendrite 0.3.6 (2021-01-18)

### Features
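The state resolution fix noted above (loading a given event no more than once) shows up later in this diff as a cache of already-loaded events inside StateResolution, keyed by numeric event ID. A minimal, self-contained Go sketch of that memoisation pattern follows; the EventNID and Event types and the load callback are illustrative stand-ins rather than Dendrite's real roomserver API:

package main

import "fmt"

// EventNID and Event stand in for the real roomserver types (illustration only).
type EventNID int64
type Event struct{ ID string }

// eventCache remembers every event it has loaded so the same event is fetched
// from the database at most once, mirroring the events map added to
// StateResolution further down in this diff.
type eventCache struct {
    events map[EventNID]*Event
    load   func(EventNID) (*Event, error) // stands in for the database call
}

func (c *eventCache) get(nid EventNID) (*Event, error) {
    if ev, ok := c.events[nid]; ok {
        return ev, nil // cache hit: no second database read
    }
    ev, err := c.load(nid)
    if err != nil {
        return nil, err
    }
    c.events[nid] = ev
    return ev, nil
}

func main() {
    loads := 0
    c := &eventCache{
        events: map[EventNID]*Event{},
        load: func(nid EventNID) (*Event, error) {
            loads++
            return &Event{ID: fmt.Sprintf("$event%d", nid)}, nil
        },
    }
    for _, nid := range []EventNID{1, 2, 1, 1, 2} {
        if _, err := c.get(nid); err != nil {
            panic(err)
        }
    }
    fmt.Println("database loads:", loads) // prints 2, not 5
}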
@@ -89,7 +89,7 @@ func NewInternalAPI(
// We can't add ASes at runtime so this is safe to do.
if len(workerStates) > 0 {
consumer := consumers.NewOutputRoomEventConsumer(
-base.Cfg, consumer, appserviceDB,
+base.ProcessContext, base.Cfg, consumer, appserviceDB,
rsAPI, workerStates,
)
if err := consumer.Start(); err != nil {
@@ -23,6 +23,7 @@ import (
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"

"github.com/Shopify/sarama"

@@ -41,6 +42,7 @@ type OutputRoomEventConsumer struct {
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call
// Start() to begin consuming from room servers.
func NewOutputRoomEventConsumer(
+process *process.ProcessContext,
cfg *config.Dendrite,
kafkaConsumer sarama.Consumer,
appserviceDB storage.Database,

@@ -48,6 +50,7 @@ func NewOutputRoomEventConsumer(
workerStates []types.ApplicationServiceWorkerState,
) *OutputRoomEventConsumer {
consumer := internal.ContinualConsumer{
+Process: process,
ComponentName: "appservice/roomserver",
Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
Consumer: kafkaConsumer,
@@ -166,6 +166,7 @@ func (m *DendriteMonolith) Start() {
),
}
monolith.AddAllPublicRoutes(
+base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,
@@ -192,6 +192,7 @@ func main() {
ExtPublicRoomsProvider: provider,
}
monolith.AddAllPublicRoutes(
+base.Base.ProcessContext,
base.Base.PublicClientAPIMux,
base.Base.PublicFederationAPIMux,
base.Base.PublicKeyAPIMux,

@@ -234,5 +235,5 @@ func main() {
}

// We want to block forever to let the HTTP and HTTPS handler serve the APIs
-select {}
+base.Base.WaitForShutdown()
}
@@ -150,6 +150,7 @@ func main() {
),
}
monolith.AddAllPublicRoutes(
+base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,

@@ -200,5 +201,6 @@ func main() {
}
}()

-select {}
+// We want to block forever to let the HTTP and HTTPS handler serve the APIs
+base.WaitForShutdown()
}
@@ -144,6 +144,7 @@ func main() {
KeyAPI: keyAPI,
}
monolith.AddAllPublicRoutes(
+base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,

@@ -176,5 +177,5 @@ func main() {
}

// We want to block forever to let the HTTP and HTTPS handler serve the APIs
-select {}
+base.WaitForShutdown()
}
@@ -74,5 +74,6 @@ func main() {
base := setup.NewBaseDendrite(cfg, component, false) // TODO
defer base.Close() // nolint: errcheck

-start(base, cfg)
+go start(base, cfg)
+base.WaitForShutdown()
}
@@ -27,6 +27,7 @@ func SyncAPI(base *setup.BaseDendrite, cfg *config.Dendrite) {
rsAPI := base.RoomserverHTTPClient()

syncapi.AddPublicRoutes(
+base.ProcessContext,
base.PublicClientAPIMux, userAPI, rsAPI,
base.KeyServerHTTPClient(),
federation, &cfg.SyncAPI,
@@ -231,6 +231,7 @@ func main() {
ExtPublicRoomsProvider: p2pPublicRoomProvider,
}
monolith.AddAllPublicRoutes(
+base.ProcessContext,
base.PublicClientAPIMux,
base.PublicFederationAPIMux,
base.PublicKeyAPIMux,
@@ -25,6 +25,7 @@ import (
"github.com/matrix-org/dendrite/federationsender/storage"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
log "github.com/sirupsen/logrus"

@@ -44,6 +45,7 @@ type OutputEDUConsumer struct {

// NewOutputEDUConsumer creates a new OutputEDUConsumer. Call Start() to begin consuming from EDU servers.
func NewOutputEDUConsumer(
+process *process.ProcessContext,
cfg *config.FederationSender,
kafkaConsumer sarama.Consumer,
queues *queue.OutgoingQueues,

@@ -51,18 +53,21 @@ func NewOutputEDUConsumer(
) *OutputEDUConsumer {
c := &OutputEDUConsumer{
typingConsumer: &internal.ContinualConsumer{
+Process: process,
ComponentName: "eduserver/typing",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
},
sendToDeviceConsumer: &internal.ContinualConsumer{
+Process: process,
ComponentName: "eduserver/sendtodevice",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
},
receiptConsumer: &internal.ContinualConsumer{
+Process: process,
ComponentName: "eduserver/receipt",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
Consumer: kafkaConsumer,
@@ -26,6 +26,7 @@ import (
"github.com/matrix-org/dendrite/keyserver/api"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)

@@ -41,6 +42,7 @@ type KeyChangeConsumer struct {

// NewKeyChangeConsumer creates a new KeyChangeConsumer. Call Start() to begin consuming from key servers.
func NewKeyChangeConsumer(
+process *process.ProcessContext,
cfg *config.KeyServer,
kafkaConsumer sarama.Consumer,
queues *queue.OutgoingQueues,

@@ -49,6 +51,7 @@ func NewKeyChangeConsumer(
) *KeyChangeConsumer {
c := &KeyChangeConsumer{
consumer: &internal.ContinualConsumer{
+Process: process,
ComponentName: "federationsender/keychange",
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
Consumer: kafkaConsumer,
@@ -26,6 +26,7 @@ import (
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)

@@ -41,6 +42,7 @@ type OutputRoomEventConsumer struct {

// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers.
func NewOutputRoomEventConsumer(
+process *process.ProcessContext,
cfg *config.FederationSender,
kafkaConsumer sarama.Consumer,
queues *queue.OutgoingQueues,

@@ -48,6 +50,7 @@ func NewOutputRoomEventConsumer(
rsAPI api.RoomserverInternalAPI,
) *OutputRoomEventConsumer {
consumer := internal.ContinualConsumer{
+Process: process,
ComponentName: "federationsender/roomserver",
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
Consumer: kafkaConsumer,
@@ -59,7 +59,8 @@ func NewInternalAPI(
consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)

queues := queue.NewOutgoingQueues(
-federationSenderDB, cfg.Matrix.DisableFederation,
+federationSenderDB, base.ProcessContext,
+cfg.Matrix.DisableFederation,
cfg.Matrix.ServerName, federation, rsAPI, stats,
&queue.SigningInfo{
KeyID: cfg.Matrix.KeyID,

@@ -69,7 +70,7 @@ func NewInternalAPI(
)

rsConsumer := consumers.NewOutputRoomEventConsumer(
-cfg, consumer, queues,
+base.ProcessContext, cfg, consumer, queues,
federationSenderDB, rsAPI,
)
if err = rsConsumer.Start(); err != nil {

@@ -77,13 +78,13 @@ func NewInternalAPI(
}

tsConsumer := consumers.NewOutputEDUConsumer(
-cfg, consumer, queues, federationSenderDB,
+base.ProcessContext, cfg, consumer, queues, federationSenderDB,
)
if err := tsConsumer.Start(); err != nil {
logrus.WithError(err).Panic("failed to start typing server consumer")
}
keyConsumer := consumers.NewKeyChangeConsumer(
-&base.Cfg.KeyServer, consumer, queues, federationSenderDB, rsAPI,
+base.ProcessContext, &base.Cfg.KeyServer, consumer, queues, federationSenderDB, rsAPI,
)
if err := keyConsumer.Start(); err != nil {
logrus.WithError(err).Panic("failed to start key server consumer")
@@ -25,6 +25,7 @@ import (
"github.com/matrix-org/dendrite/federationsender/storage"
"github.com/matrix-org/dendrite/federationsender/storage/shared"
"github.com/matrix-org/dendrite/roomserver/api"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrix"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"

@@ -46,6 +47,7 @@ const (
// at a time.
type destinationQueue struct {
db storage.Database
+process *process.ProcessContext
signing *SigningInfo
rsAPI api.RoomserverInternalAPI
client *gomatrixserverlib.FederationClient // federation client

@@ -411,7 +413,7 @@ func (oq *destinationQueue) nextTransaction(
// TODO: we should check for 500-ish fails vs 400-ish here,
// since we shouldn't queue things indefinitely in response
// to a 400-ish error
-ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
+ctx, cancel := context.WithTimeout(oq.process.Context(), time.Minute*5)
defer cancel()
_, err := oq.client.SendTransaction(ctx, t)
switch err.(type) {

@@ -442,7 +444,7 @@ func (oq *destinationQueue) nextTransaction(
log.WithFields(log.Fields{
"destination": oq.destination,
log.ErrorKey: err,
-}).Infof("Failed to send transaction %q", t.TransactionID)
+}).Debugf("Failed to send transaction %q", t.TransactionID)
return false, 0, 0, err
}
}
@@ -26,6 +26,7 @@ import (
"github.com/matrix-org/dendrite/federationsender/storage"
"github.com/matrix-org/dendrite/federationsender/storage/shared"
"github.com/matrix-org/dendrite/roomserver/api"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"

@@ -36,6 +37,7 @@ import (
// matrix servers
type OutgoingQueues struct {
db storage.Database
+process *process.ProcessContext
disabled bool
rsAPI api.RoomserverInternalAPI
origin gomatrixserverlib.ServerName

@@ -80,6 +82,7 @@ var destinationQueueBackingOff = prometheus.NewGauge(
// NewOutgoingQueues makes a new OutgoingQueues
func NewOutgoingQueues(
db storage.Database,
+process *process.ProcessContext,
disabled bool,
origin gomatrixserverlib.ServerName,
client *gomatrixserverlib.FederationClient,

@@ -89,6 +92,7 @@ func NewOutgoingQueues(
) *OutgoingQueues {
queues := &OutgoingQueues{
disabled: disabled,
+process: process,
db: db,
rsAPI: rsAPI,
origin: origin,

@@ -151,6 +155,7 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
destinationQueueTotal.Inc()
oq = &destinationQueue{
db: oqs.db,
+process: oqs.process,
rsAPI: oqs.rsAPI,
origin: oqs.origin,
destination: destination,
@@ -0,0 +1,46 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package deltas

import (
"database/sql"
"fmt"

"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)

func LoadFromGoose() {
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func UpRemoveRoomsTable(tx *sql.Tx) error {
_, err := tx.Exec(`
DROP TABLE IF EXISTS federationsender_rooms;
`)
if err != nil {
return fmt.Errorf("failed to execute upgrade: %w", err)
}
return nil
}

func DownRemoveRoomsTable(tx *sql.Tx) error {
// We can't reverse this.
return nil
}
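The delta above is exposed through two paths: LoadFromGoose registers it with the goose tool, while LoadRemoveRoomsTable adds it to Dendrite's own sqlutil.Migrations runner, which the NewDatabase constructors further down in this diff call at startup. A short sketch of that startup flow in isolation, assuming an already-open *sql.DB and a *config.DatabaseOptions supplied by the caller (the wrapper function and package name are illustrative):

package storage

import (
    "database/sql"

    "github.com/matrix-org/dendrite/federationsender/storage/postgres/deltas"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/dendrite/setup/config"
)

// runRoomsTableDelta mirrors the migration wiring added to NewDatabase in this
// diff: register the delta, then run any deltas that have not been applied yet.
func runRoomsTableDelta(db *sql.DB, dbProperties *config.DatabaseOptions) error {
    m := sqlutil.NewMigrations()
    deltas.LoadRemoveRoomsTable(m) // registers UpRemoveRoomsTable / DownRemoveRoomsTable
    return m.RunDeltas(db, dbProperties)
}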
@@ -1,104 +0,0 @@
// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgres

import (
"context"
"database/sql"

"github.com/matrix-org/dendrite/internal/sqlutil"
)

const roomSchema = `
CREATE TABLE IF NOT EXISTS federationsender_rooms (
-- The string ID of the room
room_id TEXT PRIMARY KEY,
-- The most recent event state by the room server.
-- We can use this to tell if our view of the room state has become
-- desynchronised.
last_event_id TEXT NOT NULL
);`

const insertRoomSQL = "" +
"INSERT INTO federationsender_rooms (room_id, last_event_id) VALUES ($1, '')" +
" ON CONFLICT DO NOTHING"

const selectRoomForUpdateSQL = "" +
"SELECT last_event_id FROM federationsender_rooms WHERE room_id = $1 FOR UPDATE"

const updateRoomSQL = "" +
"UPDATE federationsender_rooms SET last_event_id = $2 WHERE room_id = $1"

type roomStatements struct {
db *sql.DB
insertRoomStmt *sql.Stmt
selectRoomForUpdateStmt *sql.Stmt
updateRoomStmt *sql.Stmt
}

func NewPostgresRoomsTable(db *sql.DB) (s *roomStatements, err error) {
s = &roomStatements{
db: db,
}
_, err = s.db.Exec(roomSchema)
if err != nil {
return
}
if s.insertRoomStmt, err = s.db.Prepare(insertRoomSQL); err != nil {
return
}
if s.selectRoomForUpdateStmt, err = s.db.Prepare(selectRoomForUpdateSQL); err != nil {
return
}
if s.updateRoomStmt, err = s.db.Prepare(updateRoomSQL); err != nil {
return
}
return
}

// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *roomStatements) InsertRoom(
ctx context.Context, txn *sql.Tx, roomID string,
) error {
_, err := sqlutil.TxStmt(txn, s.insertRoomStmt).ExecContext(ctx, roomID)
return err
}

// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *roomStatements) SelectRoomForUpdate(
ctx context.Context, txn *sql.Tx, roomID string,
) (string, error) {
var lastEventID string
stmt := sqlutil.TxStmt(txn, s.selectRoomForUpdateStmt)
err := stmt.QueryRowContext(ctx, roomID).Scan(&lastEventID)
if err != nil {
return "", err
}
return lastEventID, nil
}

// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *roomStatements) UpdateRoom(
ctx context.Context, txn *sql.Tx, roomID, lastEventID string,
) error {
stmt := sqlutil.TxStmt(txn, s.updateRoomStmt)
_, err := stmt.ExecContext(ctx, roomID, lastEventID)
return err
}
@@ -18,6 +18,7 @@ package postgres
import (
"database/sql"

+"github.com/matrix-org/dendrite/federationsender/storage/postgres/deltas"
"github.com/matrix-org/dendrite/federationsender/storage/shared"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/sqlutil"

@@ -56,10 +57,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
if err != nil {
return nil, err
}
-rooms, err := NewPostgresRoomsTable(d.db)
-if err != nil {
-return nil, err
-}
blacklist, err := NewPostgresBlacklistTable(d.db)
if err != nil {
return nil, err

@@ -72,6 +69,11 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
if err != nil {
return nil, err
}
+m := sqlutil.NewMigrations()
+deltas.LoadRemoveRoomsTable(m)
+if err = m.RunDeltas(d.db, dbProperties); err != nil {
+return nil, err
+}
d.Database = shared.Database{
DB: d.db,
Cache: cache,

@@ -80,7 +82,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
FederationSenderQueuePDUs: queuePDUs,
FederationSenderQueueEDUs: queueEDUs,
FederationSenderQueueJSON: queueJSON,
-FederationSenderRooms: rooms,
FederationSenderBlacklist: blacklist,
FederationSenderInboundPeeks: inboundPeeks,
FederationSenderOutboundPeeks: outboundPeeks,
@@ -34,7 +34,6 @@ type Database struct {
FederationSenderQueueEDUs tables.FederationSenderQueueEDUs
FederationSenderQueueJSON tables.FederationSenderQueueJSON
FederationSenderJoinedHosts tables.FederationSenderJoinedHosts
-FederationSenderRooms tables.FederationSenderRooms
FederationSenderBlacklist tables.FederationSenderBlacklist
FederationSenderOutboundPeeks tables.FederationSenderOutboundPeeks
FederationSenderInboundPeeks tables.FederationSenderInboundPeeks

@@ -64,29 +63,6 @@ func (d *Database) UpdateRoom(
removeHosts []string,
) (joinedHosts []types.JoinedHost, err error) {
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
-err = d.FederationSenderRooms.InsertRoom(ctx, txn, roomID)
-if err != nil {
-return err
-}
-
-lastSentEventID, err := d.FederationSenderRooms.SelectRoomForUpdate(ctx, txn, roomID)
-if err != nil {
-return err
-}
-
-if lastSentEventID == newEventID {
-// We've handled this message before, so let's just ignore it.
-// We can only get a duplicate for the last message we processed,
-// so its enough just to compare the newEventID with lastSentEventID
-return nil
-}
-
-if lastSentEventID != "" && lastSentEventID != oldEventID {
-return types.EventIDMismatchError{
-DatabaseID: lastSentEventID, RoomServerID: oldEventID,
-}
-}
-
joinedHosts, err = d.FederationSenderJoinedHosts.SelectJoinedHostsWithTx(ctx, txn, roomID)
if err != nil {
return err

@@ -101,7 +77,7 @@ func (d *Database) UpdateRoom(
if err = d.FederationSenderJoinedHosts.DeleteJoinedHosts(ctx, txn, removeHosts); err != nil {
return err
}
-return d.FederationSenderRooms.UpdateRoom(ctx, txn, roomID, newEventID)
+return nil
})
return
}
@@ -0,0 +1,46 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package deltas

import (
"database/sql"
"fmt"

"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)

func LoadFromGoose() {
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func UpRemoveRoomsTable(tx *sql.Tx) error {
_, err := tx.Exec(`
DROP TABLE IF EXISTS federationsender_rooms;
`)
if err != nil {
return fmt.Errorf("failed to execute upgrade: %w", err)
}
return nil
}

func DownRemoveRoomsTable(tx *sql.Tx) error {
// We can't reverse this.
return nil
}
@@ -1,105 +0,0 @@
// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
"context"
"database/sql"

"github.com/matrix-org/dendrite/internal/sqlutil"
)

const roomSchema = `
CREATE TABLE IF NOT EXISTS federationsender_rooms (
-- The string ID of the room
room_id TEXT PRIMARY KEY,
-- The most recent event state by the room server.
-- We can use this to tell if our view of the room state has become
-- desynchronised.
last_event_id TEXT NOT NULL
);`

const insertRoomSQL = "" +
"INSERT INTO federationsender_rooms (room_id, last_event_id) VALUES ($1, '')" +
" ON CONFLICT DO NOTHING"

const selectRoomForUpdateSQL = "" +
"SELECT last_event_id FROM federationsender_rooms WHERE room_id = $1"

const updateRoomSQL = "" +
"UPDATE federationsender_rooms SET last_event_id = $2 WHERE room_id = $1"

type roomStatements struct {
db *sql.DB
insertRoomStmt *sql.Stmt
selectRoomForUpdateStmt *sql.Stmt
updateRoomStmt *sql.Stmt
}

func NewSQLiteRoomsTable(db *sql.DB) (s *roomStatements, err error) {
s = &roomStatements{
db: db,
}
_, err = db.Exec(roomSchema)
if err != nil {
return
}

if s.insertRoomStmt, err = db.Prepare(insertRoomSQL); err != nil {
return
}
if s.selectRoomForUpdateStmt, err = db.Prepare(selectRoomForUpdateSQL); err != nil {
return
}
if s.updateRoomStmt, err = db.Prepare(updateRoomSQL); err != nil {
return
}
return
}

// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *roomStatements) InsertRoom(
ctx context.Context, txn *sql.Tx, roomID string,
) error {
_, err := sqlutil.TxStmt(txn, s.insertRoomStmt).ExecContext(ctx, roomID)
return err
}

// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *roomStatements) SelectRoomForUpdate(
ctx context.Context, txn *sql.Tx, roomID string,
) (string, error) {
var lastEventID string
stmt := sqlutil.TxStmt(txn, s.selectRoomForUpdateStmt)
err := stmt.QueryRowContext(ctx, roomID).Scan(&lastEventID)
if err != nil {
return "", err
}
return lastEventID, nil
}

// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *roomStatements) UpdateRoom(
ctx context.Context, txn *sql.Tx, roomID, lastEventID string,
) error {
stmt := sqlutil.TxStmt(txn, s.updateRoomStmt)
_, err := stmt.ExecContext(ctx, roomID, lastEventID)
return err
}
@@ -21,6 +21,7 @@ import (
_ "github.com/mattn/go-sqlite3"

"github.com/matrix-org/dendrite/federationsender/storage/shared"
+"github.com/matrix-org/dendrite/federationsender/storage/sqlite3/deltas"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/setup/config"

@@ -46,10 +47,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
if err != nil {
return nil, err
}
-rooms, err := NewSQLiteRoomsTable(d.db)
-if err != nil {
-return nil, err
-}
queuePDUs, err := NewSQLiteQueuePDUsTable(d.db)
if err != nil {
return nil, err

@@ -74,6 +71,11 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
if err != nil {
return nil, err
}
+m := sqlutil.NewMigrations()
+deltas.LoadRemoveRoomsTable(m)
+if err = m.RunDeltas(d.db, dbProperties); err != nil {
+return nil, err
+}
d.Database = shared.Database{
DB: d.db,
Cache: cache,

@@ -82,7 +84,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
FederationSenderQueuePDUs: queuePDUs,
FederationSenderQueueEDUs: queueEDUs,
FederationSenderQueueJSON: queueJSON,
-FederationSenderRooms: rooms,
FederationSenderBlacklist: blacklist,
FederationSenderOutboundPeeks: outboundPeeks,
FederationSenderInboundPeeks: inboundPeeks,
@@ -56,12 +56,6 @@ type FederationSenderJoinedHosts interface {
SelectJoinedHostsForRooms(ctx context.Context, roomIDs []string) ([]gomatrixserverlib.ServerName, error)
}

-type FederationSenderRooms interface {
-InsertRoom(ctx context.Context, txn *sql.Tx, roomID string) error
-SelectRoomForUpdate(ctx context.Context, txn *sql.Tx, roomID string) (string, error)
-UpdateRoom(ctx context.Context, txn *sql.Tx, roomID, lastEventID string) error
-}
-
type FederationSenderBlacklist interface {
InsertBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
SelectBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) (bool, error)
@@ -15,8 +15,6 @@
package types

import (
-"fmt"
-
"github.com/matrix-org/gomatrixserverlib"
)

@@ -34,22 +32,6 @@ func (s ServerNames) Len() int { return len(s) }
func (s ServerNames) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ServerNames) Less(i, j int) bool { return s[i] < s[j] }

-// A EventIDMismatchError indicates that we have got out of sync with the
-// room server.
-type EventIDMismatchError struct {
-// The event ID we have stored in our local database.
-DatabaseID string
-// The event ID received from the room server.
-RoomServerID string
-}
-
-func (e EventIDMismatchError) Error() string {
-return fmt.Sprintf(
-"mismatched last sent event ID: had %q in database got %q from room server",
-e.DatabaseID, e.RoomServerID,
-)
-}
-
// tracks peeks we're performing on another server over federation
type OutboundPeek struct {
PeekID string
go.mod (2 lines changed)

@@ -22,7 +22,7 @@ require (
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4
github.com/matrix-org/go-sqlite3-js v0.0.0-20200522092705-bc8506ccbcf3
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd
-github.com/matrix-org/gomatrixserverlib v0.0.0-20210122154608-a38974bd8a37
+github.com/matrix-org/gomatrixserverlib v0.0.0-20210129163316-dd4d53729ead
github.com/matrix-org/naffka v0.0.0-20200901083833-bcdd62999a91
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
github.com/mattn/go-sqlite3 v1.14.2
go.sum (4 lines changed)

@@ -567,8 +567,8 @@ github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26 h1:Hr3zjRsq2bh
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd h1:xVrqJK3xHREMNjwjljkAUaadalWc0rRbmVuQatzmgwg=
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
-github.com/matrix-org/gomatrixserverlib v0.0.0-20210122154608-a38974bd8a37 h1:si2CZZpwOLWZfDXfgHPkaTlaAkdJvpJzr1zVqyKXd0I=
-github.com/matrix-org/gomatrixserverlib v0.0.0-20210122154608-a38974bd8a37/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
+github.com/matrix-org/gomatrixserverlib v0.0.0-20210129163316-dd4d53729ead h1:VmGJybKUQin8+NyA9ZkrHJpE8ygXzcON9peQH9LC92c=
+github.com/matrix-org/gomatrixserverlib v0.0.0-20210129163316-dd4d53729ead/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
github.com/matrix-org/naffka v0.0.0-20200901083833-bcdd62999a91 h1:HJ6U3S3ljJqNffYMcIeAncp5qT/i+ZMiJ2JC2F0aXP4=
github.com/matrix-org/naffka v0.0.0-20200901083833-bcdd62999a91/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE=
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7 h1:ntrLa/8xVzeSs8vHFHK25k0C+NV74sYMJnNSg5NoSRo=
@@ -20,6 +20,8 @@ import (

"github.com/Shopify/sarama"
"github.com/matrix-org/dendrite/internal/sqlutil"
+"github.com/matrix-org/dendrite/setup/process"
+"github.com/sirupsen/logrus"
)

// A PartitionStorer has the storage APIs needed by the consumer.

@@ -33,6 +35,9 @@ type PartitionStorer interface {
// A ContinualConsumer continually consumes logs even across restarts. It requires a PartitionStorer to
// remember the offset it reached.
type ContinualConsumer struct {
+// The parent context for the listener, stop consuming when this context is done
+Process *process.ProcessContext
+// The component name
ComponentName string
// The kafkaesque topic to consume events from.
// This is the name used in kafka to identify the stream to consume events from.

@@ -100,6 +105,15 @@ func (c *ContinualConsumer) StartOffsets() ([]sqlutil.PartitionOffset, error) {
}
for _, pc := range partitionConsumers {
go c.consumePartition(pc)
+if c.Process != nil {
+c.Process.ComponentStarted()
+go func(pc sarama.PartitionConsumer) {
+<-c.Process.WaitForShutdown()
+_ = pc.Close()
+c.Process.ComponentFinished()
+logrus.Infof("Stopped consumer for %q topic %q", c.ComponentName, c.Topic)
+}(pc)
+}
}

return storedOffsets, nil
@@ -17,7 +17,7 @@ var build string
const (
VersionMajor = 0
VersionMinor = 3
-VersionPatch = 6
+VersionPatch = 8
VersionTag = "" // example: "rc1"
)

@@ -107,13 +107,6 @@ func (r *Inputer) updateMembership(
return updates, nil
}

-if add == nil {
-// This can happen when we have rejoined a room and suddenly we have a
-// divergence between the former state and the new one. We don't want to
-// act on removals and apparently there are no adds, so stop here.
-return updates, nil
-}
-
mu, err := updater.MembershipUpdater(targetUserNID, r.isLocalTarget(add))
if err != nil {
return nil, err
@@ -225,7 +225,7 @@ func buildInviteStrippedState(
for _, t := range []string{
gomatrixserverlib.MRoomName, gomatrixserverlib.MRoomCanonicalAlias,
gomatrixserverlib.MRoomAliases, gomatrixserverlib.MRoomJoinRules,
-"m.room.avatar", "m.room.encryption",
+"m.room.avatar", "m.room.encryption", gomatrixserverlib.MRoomCreate,
} {
stateWanted = append(stateWanted, gomatrixserverlib.StateKeyTuple{
EventType: t,
@@ -33,19 +33,21 @@ import (
type StateResolution struct {
db storage.Database
roomInfo types.RoomInfo
+events map[types.EventNID]*gomatrixserverlib.Event
}

func NewStateResolution(db storage.Database, roomInfo types.RoomInfo) StateResolution {
return StateResolution{
db: db,
roomInfo: roomInfo,
+events: make(map[types.EventNID]*gomatrixserverlib.Event),
}
}

// LoadStateAtSnapshot loads the full state of a room at a particular snapshot.
// This is typically the state before an event or the current state of a room.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
-func (v StateResolution) LoadStateAtSnapshot(
+func (v *StateResolution) LoadStateAtSnapshot(
ctx context.Context, stateNID types.StateSnapshotNID,
) ([]types.StateEntry, error) {
stateBlockNIDLists, err := v.db.StateBlockNIDs(ctx, []types.StateSnapshotNID{stateNID})
@@ -83,7 +85,7 @@ func (v StateResolution) LoadStateAtSnapshot(
}

// LoadStateAtEvent loads the full state of a room before a particular event.
-func (v StateResolution) LoadStateAtEvent(
+func (v *StateResolution) LoadStateAtEvent(
ctx context.Context, eventID string,
) ([]types.StateEntry, error) {
snapshotNID, err := v.db.SnapshotNIDFromEventID(ctx, eventID)

@@ -105,7 +107,7 @@ func (v StateResolution) LoadStateAtEvent(
// LoadCombinedStateAfterEvents loads a snapshot of the state after each of the events
// and combines those snapshots together into a single list. At this point it is
// possible to run into duplicate (type, state key) tuples.
-func (v StateResolution) LoadCombinedStateAfterEvents(
+func (v *StateResolution) LoadCombinedStateAfterEvents(
ctx context.Context, prevStates []types.StateAtEvent,
) ([]types.StateEntry, error) {
stateNIDs := make([]types.StateSnapshotNID, len(prevStates))

@@ -177,7 +179,7 @@ func (v StateResolution) LoadCombinedStateAfterEvents(
}

// DifferenceBetweeenStateSnapshots works out which state entries have been added and removed between two snapshots.
-func (v StateResolution) DifferenceBetweeenStateSnapshots(
+func (v *StateResolution) DifferenceBetweeenStateSnapshots(
ctx context.Context, oldStateNID, newStateNID types.StateSnapshotNID,
) (removed, added []types.StateEntry, err error) {
if oldStateNID == newStateNID {

@@ -236,7 +238,7 @@ func (v StateResolution) DifferenceBetweeenStateSnapshots(
// If there is no entry for a given event type and state key pair then it will be discarded.
// This is typically the state before an event or the current state of a room.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
-func (v StateResolution) LoadStateAtSnapshotForStringTuples(
+func (v *StateResolution) LoadStateAtSnapshotForStringTuples(
ctx context.Context,
stateNID types.StateSnapshotNID,
stateKeyTuples []gomatrixserverlib.StateKeyTuple,

@@ -251,7 +253,7 @@ func (v StateResolution) LoadStateAtSnapshotForStringTuples(
// stringTuplesToNumericTuples converts the string state key tuples into numeric IDs
// If there isn't a numeric ID for either the event type or the event state key then the tuple is discarded.
// Returns an error if there was a problem talking to the database.
-func (v StateResolution) stringTuplesToNumericTuples(
+func (v *StateResolution) stringTuplesToNumericTuples(
ctx context.Context,
stringTuples []gomatrixserverlib.StateKeyTuple,
) ([]types.StateKeyTuple, error) {

@@ -292,7 +294,7 @@ func (v StateResolution) stringTuplesToNumericTuples(
// If there is no entry for a given event type and state key pair then it will be discarded.
// This is typically the state before an event or the current state of a room.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
-func (v StateResolution) loadStateAtSnapshotForNumericTuples(
+func (v *StateResolution) loadStateAtSnapshotForNumericTuples(
ctx context.Context,
stateNID types.StateSnapshotNID,
stateKeyTuples []types.StateKeyTuple,

@@ -340,7 +342,7 @@ func (v StateResolution) loadStateAtSnapshotForNumericTuples(
// If there is no entry for a given event type and state key pair then it will be discarded.
// This is typically the state before an event.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
-func (v StateResolution) LoadStateAfterEventsForStringTuples(
+func (v *StateResolution) LoadStateAfterEventsForStringTuples(
ctx context.Context,
prevStates []types.StateAtEvent,
stateKeyTuples []gomatrixserverlib.StateKeyTuple,

@@ -352,7 +354,7 @@ func (v StateResolution) LoadStateAfterEventsForStringTuples(
return v.loadStateAfterEventsForNumericTuples(ctx, prevStates, numericTuples)
}

-func (v StateResolution) loadStateAfterEventsForNumericTuples(
+func (v *StateResolution) loadStateAfterEventsForNumericTuples(
ctx context.Context,
prevStates []types.StateAtEvent,
stateKeyTuples []types.StateKeyTuple,

@@ -520,7 +522,7 @@ func init() {
// CalculateAndStoreStateBeforeEvent calculates a snapshot of the state of a room before an event.
// Stores the snapshot of the state in the database.
// Returns a numeric ID for the snapshot of the state before the event.
-func (v StateResolution) CalculateAndStoreStateBeforeEvent(
+func (v *StateResolution) CalculateAndStoreStateBeforeEvent(
ctx context.Context,
event *gomatrixserverlib.Event,
isRejected bool,

@@ -537,7 +539,7 @@ func (v StateResolution) CalculateAndStoreStateBeforeEvent(

// CalculateAndStoreStateAfterEvents finds the room state after the given events.
// Stores the resulting state in the database and returns a numeric ID for that snapshot.
-func (v StateResolution) CalculateAndStoreStateAfterEvents(
+func (v *StateResolution) CalculateAndStoreStateAfterEvents(
ctx context.Context,
prevStates []types.StateAtEvent,
) (types.StateSnapshotNID, error) {

@@ -607,7 +609,7 @@ const maxStateBlockNIDs = 64
// calculateAndStoreStateAfterManyEvents finds the room state after the given events.
// This handles the slow path of calculateAndStoreStateAfterEvents for when there is more than one event.
// Stores the resulting state and returns a numeric ID for the snapshot.
-func (v StateResolution) calculateAndStoreStateAfterManyEvents(
+func (v *StateResolution) calculateAndStoreStateAfterManyEvents(
ctx context.Context,
roomNID types.RoomNID,
prevStates []types.StateAtEvent,

@@ -627,7 +629,7 @@ func (v StateResolution) calculateAndStoreStateAfterManyEvents(
return metrics.stop(v.db.AddState(ctx, roomNID, nil, state))
}

-func (v StateResolution) calculateStateAfterManyEvents(
+func (v *StateResolution) calculateStateAfterManyEvents(
ctx context.Context, roomVersion gomatrixserverlib.RoomVersion,
prevStates []types.StateAtEvent,
) (state []types.StateEntry, algorithm string, conflictLength int, err error) {

@@ -754,7 +756,7 @@ func ResolveConflictsAdhoc(
return resolved, nil
}

-func (v StateResolution) resolveConflicts(
+func (v *StateResolution) resolveConflicts(
ctx context.Context, version gomatrixserverlib.RoomVersion,
notConflicted, conflicted []types.StateEntry,
) ([]types.StateEntry, error) {

@@ -778,7 +780,7 @@ func (v StateResolution) resolveConflicts(
// Returns a list that combines the entries without conflicts with the result of state resolution for the entries with conflicts.
// The returned list is sorted by state key tuple.
// Returns an error if there was a problem talking to the database.
-func (v StateResolution) resolveConflictsV1(
+func (v *StateResolution) resolveConflictsV1(
ctx context.Context,
notConflicted, conflicted []types.StateEntry,
) ([]types.StateEntry, error) {

@@ -842,7 +844,7 @@ func (v StateResolution) resolveConflictsV1(
// The returned list is sorted by state key tuple.
// Returns an error if there was a problem talking to the database.
// nolint:gocyclo
-func (v StateResolution) resolveConflictsV2(
+func (v *StateResolution) resolveConflictsV2(
ctx context.Context,
notConflicted, conflicted []types.StateEntry,
) ([]types.StateEntry, error) {

@@ -959,7 +961,7 @@ func (v StateResolution) resolveConflictsV2(
}

// stateKeyTuplesNeeded works out which numeric state key tuples we need to authenticate some events.
-func (v StateResolution) stateKeyTuplesNeeded(stateKeyNIDMap map[string]types.EventStateKeyNID, stateNeeded gomatrixserverlib.StateNeeded) []types.StateKeyTuple {
+func (v *StateResolution) stateKeyTuplesNeeded(stateKeyNIDMap map[string]types.EventStateKeyNID, stateNeeded gomatrixserverlib.StateNeeded) []types.StateKeyTuple {
var keyTuples []types.StateKeyTuple
if stateNeeded.Create {
keyTuples = append(keyTuples, types.StateKeyTuple{
@@ -1004,26 +1006,33 @@
// Returns a list of state events in no particular order and a map from string event ID back to state entry.
// The map can be used to recover which numeric state entry a given event is for.
// Returns an error if there was a problem talking to the database.
-func (v StateResolution) loadStateEvents(
+func (v *StateResolution) loadStateEvents(
ctx context.Context, entries []types.StateEntry,
) ([]*gomatrixserverlib.Event, map[string]types.StateEntry, error) {
-eventNIDs := make([]types.EventNID, len(entries))
-for i := range entries {
-eventNIDs[i] = entries[i].EventNID
+result := make([]*gomatrixserverlib.Event, 0, len(entries))
+eventEntries := make([]types.StateEntry, 0, len(entries))
+eventNIDs := make([]types.EventNID, 0, len(entries))
+for _, entry := range entries {
+if e, ok := v.events[entry.EventNID]; ok {
+result = append(result, e)
+} else {
+eventEntries = append(eventEntries, entry)
+eventNIDs = append(eventNIDs, entry.EventNID)
+}
}
events, err := v.db.Events(ctx, eventNIDs)
if err != nil {
return nil, nil, err
}
eventIDMap := map[string]types.StateEntry{}
-result := make([]*gomatrixserverlib.Event, len(entries))
-for i := range entries {
-event, ok := eventMap(events).lookup(entries[i].EventNID)
+for _, entry := range eventEntries {
+event, ok := eventMap(events).lookup(entry.EventNID)
if !ok {
-panic(fmt.Errorf("Corrupt DB: Missing event numeric ID %d", entries[i].EventNID))
+panic(fmt.Errorf("Corrupt DB: Missing event numeric ID %d", entry.EventNID))
}
-result[i] = event.Event
-eventIDMap[event.Event.EventID()] = entries[i]
+result = append(result, event.Event)
+eventIDMap[event.Event.EventID()] = entry
+v.events[entry.EventNID] = event.Event
}
return result, eventIDMap, nil
}
@@ -15,22 +15,28 @@
package setup

import (
+"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
+"os"
+"os/signal"
+"syscall"
"time"

"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/gomatrixserverlib"
"github.com/prometheus/client_golang/prometheus/promhttp"
+"go.uber.org/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"

"github.com/matrix-org/dendrite/internal"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/userapi/storage/accounts"

"github.com/gorilla/mux"
@@ -61,6 +67,7 @@ import (
// should only be used during start up.
// Must be closed when shutting down.
type BaseDendrite struct {
+*process.ProcessContext
componentName string
tracerCloser io.Closer
PublicClientAPIMux *mux.Router

@@ -161,7 +168,9 @@ func NewBaseDendrite(cfg *config.Dendrite, componentName string, useHTTPAPIs boo
// We need to be careful with media APIs if they read from a filesystem to make sure they
// are not inadvertently reading paths without cleaning, else this could introduce a
// directory traversal attack e.g /../../../etc/passwd

return &BaseDendrite{
+ProcessContext: process.NewProcessContext(),
componentName: componentName,
UseHTTPAPIs: useHTTPAPIs,
tracerCloser: closer,
@@ -354,14 +363,26 @@ func (b *BaseDendrite) SetupAndServeHTTP(

if internalAddr != NoListener && internalAddr != externalAddr {
go func() {
+var internalShutdown atomic.Bool // RegisterOnShutdown can be called more than once
logrus.Infof("Starting internal %s listener on %s", b.componentName, internalServ.Addr)
+b.ProcessContext.ComponentStarted()
+internalServ.RegisterOnShutdown(func() {
+if internalShutdown.CAS(false, true) {
+b.ProcessContext.ComponentFinished()
+logrus.Infof("Stopped internal HTTP listener")
+}
+})
if certFile != nil && keyFile != nil {
if err := internalServ.ListenAndServeTLS(*certFile, *keyFile); err != nil {
-logrus.WithError(err).Fatal("failed to serve HTTPS")
+if err != http.ErrServerClosed {
+logrus.WithError(err).Fatal("failed to serve HTTPS")
+}
}
} else {
if err := internalServ.ListenAndServe(); err != nil {
-logrus.WithError(err).Fatal("failed to serve HTTP")
+if err != http.ErrServerClosed {
+logrus.WithError(err).Fatal("failed to serve HTTP")
+}
}
}
logrus.Infof("Stopped internal %s listener on %s", b.componentName, internalServ.Addr)
@@ -370,19 +391,52 @@ func (b *BaseDendrite) SetupAndServeHTTP(

if externalAddr != NoListener {
go func() {
+var externalShutdown atomic.Bool // RegisterOnShutdown can be called more than once
logrus.Infof("Starting external %s listener on %s", b.componentName, externalServ.Addr)
+b.ProcessContext.ComponentStarted()
+externalServ.RegisterOnShutdown(func() {
+if externalShutdown.CAS(false, true) {
+b.ProcessContext.ComponentFinished()
+logrus.Infof("Stopped external HTTP listener")
+}
+})
if certFile != nil && keyFile != nil {
if err := externalServ.ListenAndServeTLS(*certFile, *keyFile); err != nil {
-logrus.WithError(err).Fatal("failed to serve HTTPS")
+if err != http.ErrServerClosed {
+logrus.WithError(err).Fatal("failed to serve HTTPS")
+}
}
} else {
if err := externalServ.ListenAndServe(); err != nil {
-logrus.WithError(err).Fatal("failed to serve HTTP")
+if err != http.ErrServerClosed {
+logrus.WithError(err).Fatal("failed to serve HTTP")
+}
}
}
logrus.Infof("Stopped external %s listener on %s", b.componentName, externalServ.Addr)
}()
}

-select {}
+<-b.ProcessContext.WaitForShutdown()
+
+ctx, cancel := context.WithCancel(context.Background())
+cancel()
+
+_ = internalServ.Shutdown(ctx)
+_ = externalServ.Shutdown(ctx)
+logrus.Infof("Stopped HTTP listeners")
}
+
+func (b *BaseDendrite) WaitForShutdown() {
+sigs := make(chan os.Signal, 1)
+signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+<-sigs
+signal.Reset(syscall.SIGINT, syscall.SIGTERM)
+
+logrus.Warnf("Shutdown signal received")
+
+b.ProcessContext.ShutdownDendrite()
+b.ProcessContext.WaitForComponentsToFinish()
+
+logrus.Warnf("Dendrite is exiting now")
+}
@@ -27,6 +27,7 @@ import (
"github.com/matrix-org/dendrite/mediaapi"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
serverKeyAPI "github.com/matrix-org/dendrite/signingkeyserver/api"
"github.com/matrix-org/dendrite/syncapi"
userapi "github.com/matrix-org/dendrite/userapi/api"

@@ -56,7 +57,7 @@ type Monolith struct {
}

// AddAllPublicRoutes attaches all public paths to the given router
-func (m *Monolith) AddAllPublicRoutes(csMux, ssMux, keyMux, mediaMux *mux.Router) {
+func (m *Monolith) AddAllPublicRoutes(process *process.ProcessContext, csMux, ssMux, keyMux, mediaMux *mux.Router) {
clientapi.AddPublicRoutes(
csMux, &m.Config.ClientAPI, m.AccountDB,
m.FedClient, m.RoomserverAPI,

@@ -71,7 +72,7 @@ func (m *Monolith) AddAllPublicRoutes(csMux, ssMux, keyMux, mediaMux *mux.Router
)
mediaapi.AddPublicRoutes(mediaMux, &m.Config.MediaAPI, m.UserAPI, m.Client)
syncapi.AddPublicRoutes(
-csMux, m.UserAPI, m.RoomserverAPI,
+process, csMux, m.UserAPI, m.RoomserverAPI,
m.KeyAPI, m.FedClient, &m.Config.SyncAPI,
)
}
setup/process/process.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package process

import (
"context"
"sync"
)

type ProcessContext struct {
wg *sync.WaitGroup // used to wait for components to shutdown
ctx context.Context // cancelled when Stop is called
shutdown context.CancelFunc // shut down Dendrite
}

func NewProcessContext() *ProcessContext {
ctx, shutdown := context.WithCancel(context.Background())
return &ProcessContext{
ctx: ctx,
shutdown: shutdown,
wg: &sync.WaitGroup{},
}
}

func (b *ProcessContext) Context() context.Context {
return context.WithValue(b.ctx, "scope", "process") // nolint:staticcheck
}

func (b *ProcessContext) ComponentStarted() {
b.wg.Add(1)
}

func (b *ProcessContext) ComponentFinished() {
b.wg.Done()
}

func (b *ProcessContext) ShutdownDendrite() {
b.shutdown()
}

func (b *ProcessContext) WaitForShutdown() <-chan struct{} {
return b.ctx.Done()
}

func (b *ProcessContext) WaitForComponentsToFinish() {
b.wg.Wait()
}
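Taken together, these methods give each long-running component the same lifecycle: call ComponentStarted before launching work, watch WaitForShutdown to know when to stop, and call ComponentFinished once done, while whoever drives the process calls ShutdownDendrite and then WaitForComponentsToFinish. A small self-contained sketch of that pattern (the worker loop and ticker are illustrative and not taken from Dendrite):

package main

import (
    "fmt"
    "time"

    "github.com/matrix-org/dendrite/setup/process"
)

// runWorker is an illustrative component: it registers itself with the
// ProcessContext, does periodic work, and returns cleanly on shutdown.
func runWorker(p *process.ProcessContext, name string) {
    p.ComponentStarted()
    go func() {
        defer p.ComponentFinished()
        ticker := time.NewTicker(100 * time.Millisecond)
        defer ticker.Stop()
        for {
            select {
            case <-p.WaitForShutdown():
                fmt.Println(name, "stopping")
                return
            case <-ticker.C:
                fmt.Println(name, "working")
            }
        }
    }()
}

func main() {
    p := process.NewProcessContext()
    runWorker(p, "consumer-a")
    runWorker(p, "consumer-b")

    time.Sleep(300 * time.Millisecond) // let the workers run briefly
    p.ShutdownDendrite()               // ask every component to stop
    p.WaitForComponentsToFinish()      // block until all of them have returned
    fmt.Println("all components finished")
}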
@@ -22,6 +22,7 @@ import (
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/eventutil"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"

@@ -38,14 +39,15 @@ type OutputClientDataConsumer struct {

// NewOutputClientDataConsumer creates a new OutputClientData consumer. Call Start() to begin consuming from room servers.
func NewOutputClientDataConsumer(
+process *process.ProcessContext,
cfg *config.SyncAPI,
kafkaConsumer sarama.Consumer,
store storage.Database,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputClientDataConsumer {

consumer := internal.ContinualConsumer{
+Process: process,
ComponentName: "syncapi/clientapi",
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData)),
Consumer: kafkaConsumer,
@@ -22,6 +22,7 @@ import (
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
+"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"

@@ -39,6 +40,7 @@ type OutputReceiptEventConsumer struct {
// NewOutputReceiptEventConsumer creates a new OutputReceiptEventConsumer.
// Call Start() to begin consuming from the EDU server.
func NewOutputReceiptEventConsumer(
+process *process.ProcessContext,
cfg *config.SyncAPI,
kafkaConsumer sarama.Consumer,
store storage.Database,

@@ -47,6 +49,7 @@ func NewOutputReceiptEventConsumer(
) *OutputReceiptEventConsumer {

consumer := internal.ContinualConsumer{
+Process: process,
ComponentName: "syncapi/eduserver/receipt",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
Consumer: kafkaConsumer,
@@ -22,6 +22,7 @@ import (
 	"github.com/matrix-org/dendrite/eduserver/api"
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/setup/config"
+	"github.com/matrix-org/dendrite/setup/process"
 	"github.com/matrix-org/dendrite/syncapi/notifier"
 	"github.com/matrix-org/dendrite/syncapi/storage"
 	"github.com/matrix-org/dendrite/syncapi/types"
@@ -42,6 +43,7 @@ type OutputSendToDeviceEventConsumer struct {
 // NewOutputSendToDeviceEventConsumer creates a new OutputSendToDeviceEventConsumer.
 // Call Start() to begin consuming from the EDU server.
 func NewOutputSendToDeviceEventConsumer(
+	process *process.ProcessContext,
 	cfg *config.SyncAPI,
 	kafkaConsumer sarama.Consumer,
 	store storage.Database,
@@ -50,6 +52,7 @@ func NewOutputSendToDeviceEventConsumer(
 ) *OutputSendToDeviceEventConsumer {
 
 	consumer := internal.ContinualConsumer{
+		Process:       process,
 		ComponentName: "syncapi/eduserver/sendtodevice",
 		Topic:         string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent)),
 		Consumer:      kafkaConsumer,
@@ -22,6 +22,7 @@ import (
 	"github.com/matrix-org/dendrite/eduserver/cache"
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/setup/config"
+	"github.com/matrix-org/dendrite/setup/process"
 	"github.com/matrix-org/dendrite/syncapi/notifier"
 	"github.com/matrix-org/dendrite/syncapi/storage"
 	"github.com/matrix-org/dendrite/syncapi/types"
@@ -39,6 +40,7 @@ type OutputTypingEventConsumer struct {
 // NewOutputTypingEventConsumer creates a new OutputTypingEventConsumer.
 // Call Start() to begin consuming from the EDU server.
 func NewOutputTypingEventConsumer(
+	process *process.ProcessContext,
 	cfg *config.SyncAPI,
 	kafkaConsumer sarama.Consumer,
 	store storage.Database,
@@ -48,6 +50,7 @@ func NewOutputTypingEventConsumer(
 ) *OutputTypingEventConsumer {
 
 	consumer := internal.ContinualConsumer{
+		Process:       process,
 		ComponentName: "syncapi/eduserver/typing",
 		Topic:         string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent)),
 		Consumer:      kafkaConsumer,
@@ -23,6 +23,7 @@ import (
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/keyserver/api"
 	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
+	"github.com/matrix-org/dendrite/setup/process"
 	"github.com/matrix-org/dendrite/syncapi/notifier"
 	"github.com/matrix-org/dendrite/syncapi/storage"
 	"github.com/matrix-org/dendrite/syncapi/types"
@@ -46,6 +47,7 @@ type OutputKeyChangeEventConsumer struct {
 // NewOutputKeyChangeEventConsumer creates a new OutputKeyChangeEventConsumer.
 // Call Start() to begin consuming from the key server.
 func NewOutputKeyChangeEventConsumer(
+	process *process.ProcessContext,
 	serverName gomatrixserverlib.ServerName,
 	topic string,
 	kafkaConsumer sarama.Consumer,
@@ -57,6 +59,7 @@ func NewOutputKeyChangeEventConsumer(
 ) *OutputKeyChangeEventConsumer {
 
 	consumer := internal.ContinualConsumer{
+		Process:       process,
 		ComponentName: "syncapi/keychange",
 		Topic:         topic,
 		Consumer:      kafkaConsumer,
@@ -23,6 +23,7 @@ import (
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
+	"github.com/matrix-org/dendrite/setup/process"
 	"github.com/matrix-org/dendrite/syncapi/notifier"
 	"github.com/matrix-org/dendrite/syncapi/storage"
 	"github.com/matrix-org/dendrite/syncapi/types"
@@ -43,6 +44,7 @@ type OutputRoomEventConsumer struct {
 
 // NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers.
 func NewOutputRoomEventConsumer(
+	process *process.ProcessContext,
 	cfg *config.SyncAPI,
 	kafkaConsumer sarama.Consumer,
 	store storage.Database,
@@ -53,6 +55,7 @@ func NewOutputRoomEventConsumer(
 ) *OutputRoomEventConsumer {
 
 	consumer := internal.ContinualConsumer{
+		Process:       process,
 		ComponentName: "syncapi/roomserver",
 		Topic:         string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
 		Consumer:      kafkaConsumer,
@@ -35,7 +35,7 @@ type Database interface {
 	MaxStreamPositionForAccountData(ctx context.Context) (types.StreamPosition, error)
 	MaxStreamPositionForSendToDeviceMessages(ctx context.Context) (types.StreamPosition, error)
 
-	CurrentState(ctx context.Context, roomID string, stateFilterPart *gomatrixserverlib.StateFilter) ([]*gomatrixserverlib.HeaderedEvent, error)
+	CurrentState(ctx context.Context, roomID string, stateFilterPart *gomatrixserverlib.StateFilter, excludeEventIDs []string) ([]*gomatrixserverlib.HeaderedEvent, error)
 	GetStateDeltasForFullStateSync(ctx context.Context, device *userapi.Device, r types.Range, userID string, stateFilter *gomatrixserverlib.StateFilter) ([]types.StateDelta, []string, error)
 	GetStateDeltas(ctx context.Context, device *userapi.Device, r types.Range, userID string, stateFilter *gomatrixserverlib.StateFilter) ([]types.StateDelta, []string, error)
 	RoomIDsWithMembership(ctx context.Context, userID string, membership string) ([]string, error)
@@ -53,7 +53,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS syncapi_account_data_id_idx ON syncapi_account
 const insertAccountDataSQL = "" +
 	"INSERT INTO syncapi_account_data_type (user_id, room_id, type) VALUES ($1, $2, $3)" +
 	" ON CONFLICT ON CONSTRAINT syncapi_account_data_unique" +
-	" DO UPDATE SET id = EXCLUDED.id" +
+	" DO UPDATE SET id = nextval('syncapi_stream_id')" +
 	" RETURNING id"
 
 const selectAccountDataInRangeSQL = "" +
@@ -84,7 +84,8 @@ const selectCurrentStateSQL = "" +
 	" AND ( $4::text[] IS NULL OR type LIKE ANY($4) )" +
 	" AND ( $5::text[] IS NULL OR NOT(type LIKE ANY($5)) )" +
 	" AND ( $6::bool IS NULL OR contains_url = $6 )" +
-	" LIMIT $7"
+	" AND (event_id = ANY($7)) IS NOT TRUE" +
+	" LIMIT $8"
 
 const selectJoinedUsersSQL = "" +
 	"SELECT room_id, state_key FROM syncapi_current_room_state WHERE type = 'm.room.member' AND membership = 'join'"
@@ -197,6 +198,7 @@ func (s *currentRoomStateStatements) SelectRoomIDsWithMembership(
 func (s *currentRoomStateStatements) SelectCurrentState(
 	ctx context.Context, txn *sql.Tx, roomID string,
 	stateFilter *gomatrixserverlib.StateFilter,
+	excludeEventIDs []string,
 ) ([]*gomatrixserverlib.HeaderedEvent, error) {
 	stmt := sqlutil.TxStmt(txn, s.selectCurrentStateStmt)
 	rows, err := stmt.QueryContext(ctx, roomID,
@@ -205,6 +207,7 @@ func (s *currentRoomStateStatements) SelectCurrentState(
 		pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.Types)),
 		pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.NotTypes)),
 		stateFilter.ContainsURL,
+		pq.StringArray(excludeEventIDs),
 		stateFilter.Limit,
 	)
 	if err != nil {
@@ -103,8 +103,8 @@ func (d *Database) MaxStreamPositionForAccountData(ctx context.Context) (types.S
 	return types.StreamPosition(id), nil
 }
 
-func (d *Database) CurrentState(ctx context.Context, roomID string, stateFilterPart *gomatrixserverlib.StateFilter) ([]*gomatrixserverlib.HeaderedEvent, error) {
-	return d.CurrentRoomState.SelectCurrentState(ctx, nil, roomID, stateFilterPart)
+func (d *Database) CurrentState(ctx context.Context, roomID string, stateFilterPart *gomatrixserverlib.StateFilter, excludeEventIDs []string) ([]*gomatrixserverlib.HeaderedEvent, error) {
+	return d.CurrentRoomState.SelectCurrentState(ctx, nil, roomID, stateFilterPart, excludeEventIDs)
 }
 
 func (d *Database) RoomIDsWithMembership(ctx context.Context, userID string, membership string) ([]string, error) {
@@ -195,7 +195,7 @@ func (d *Database) GetStateEvent(
 func (d *Database) GetStateEventsForRoom(
 	ctx context.Context, roomID string, stateFilter *gomatrixserverlib.StateFilter,
 ) (stateEvents []*gomatrixserverlib.HeaderedEvent, err error) {
-	stateEvents, err = d.CurrentRoomState.SelectCurrentState(ctx, nil, roomID, stateFilter)
+	stateEvents, err = d.CurrentRoomState.SelectCurrentState(ctx, nil, roomID, stateFilter, nil)
 	return
 }
 
@@ -870,7 +870,7 @@ func (d *Database) currentStateStreamEventsForRoom(
 	ctx context.Context, txn *sql.Tx, roomID string,
 	stateFilter *gomatrixserverlib.StateFilter,
 ) ([]types.StreamEvent, error) {
-	allState, err := d.CurrentRoomState.SelectCurrentState(ctx, txn, roomID, stateFilter)
+	allState, err := d.CurrentRoomState.SelectCurrentState(ctx, txn, roomID, stateFilter, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -39,7 +39,7 @@ CREATE TABLE IF NOT EXISTS syncapi_account_data_type (
 const insertAccountDataSQL = "" +
 	"INSERT INTO syncapi_account_data_type (id, user_id, room_id, type) VALUES ($1, $2, $3, $4)" +
 	" ON CONFLICT (user_id, room_id, type) DO UPDATE" +
-	" SET id = EXCLUDED.id"
+	" SET id = $5"
 
 const selectAccountDataInRangeSQL = "" +
 	"SELECT room_id, type FROM syncapi_account_data_type" +
@@ -86,7 +86,7 @@ func (s *accountDataStatements) InsertAccountData(
 	if err != nil {
 		return
 	}
-	_, err = sqlutil.TxStmt(txn, s.insertAccountDataStmt).ExecContext(ctx, pos, userID, roomID, dataType)
+	_, err = sqlutil.TxStmt(txn, s.insertAccountDataStmt).ExecContext(ctx, pos, userID, roomID, dataType, pos)
 	return
 }
 
@@ -178,6 +178,7 @@ func (s *currentRoomStateStatements) SelectRoomIDsWithMembership(
 func (s *currentRoomStateStatements) SelectCurrentState(
 	ctx context.Context, txn *sql.Tx, roomID string,
 	stateFilter *gomatrixserverlib.StateFilter,
+	excludeEventIDs []string,
 ) ([]*gomatrixserverlib.HeaderedEvent, error) {
 	stmt, params, err := prepareWithFilters(
 		s.db, txn, selectCurrentStateSQL,
@@ -186,7 +187,7 @@ func (s *currentRoomStateStatements) SelectCurrentState(
 		},
 		stateFilter.Senders, stateFilter.NotSenders,
 		stateFilter.Types, stateFilter.NotTypes,
-		stateFilter.Limit, FilterOrderNone,
+		excludeEventIDs, stateFilter.Limit, FilterOrderNone,
 	)
 	if err != nil {
 		return nil, fmt.Errorf("s.prepareWithFilters: %w", err)
@@ -23,9 +23,10 @@ const (
 // fields might come from either a StateFilter or an EventFilter,
 // and it's easier just to have the caller extract the relevant
 // parts.
+// nolint:gocyclo
 func prepareWithFilters(
 	db *sql.DB, txn *sql.Tx, query string, params []interface{},
-	senders, notsenders, types, nottypes []string,
+	senders, notsenders, types, nottypes []string, excludeEventIDs []string,
 	limit int, order FilterOrder,
 ) (*sql.Stmt, []interface{}, error) {
 	offset := len(params)
@@ -53,6 +54,12 @@ func prepareWithFilters(
 			params, offset = append(params, v), offset+1
 		}
 	}
+	if count := len(excludeEventIDs); count > 0 {
+		query += " AND event_id NOT IN " + sqlutil.QueryVariadicOffset(count, offset)
+		for _, v := range excludeEventIDs {
+			params, offset = append(params, v), offset+1
+		}
+	}
 	switch order {
 	case FilterOrderAsc:
 		query += " ORDER BY id ASC"
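For readers unfamiliar with the variadic placeholder helper used in the hunk above, the following sketch shows the general idea of appending an "event_id NOT IN (...)" clause with numbered placeholders that start after the parameters already bound. buildNotInClause is a hypothetical stand-in written for this illustration; it is not the actual sqlutil.QueryVariadicOffset implementation.

package main

import (
	"fmt"
	"strings"
)

// buildNotInClause produces a "($N, $N+1, ...)" placeholder group for `count`
// values, starting after `offset` parameters already present in the query.
func buildNotInClause(count, offset int) string {
	placeholders := make([]string, 0, count)
	for i := 0; i < count; i++ {
		placeholders = append(placeholders, fmt.Sprintf("$%d", offset+i+1))
	}
	return "(" + strings.Join(placeholders, ", ") + ")"
}

func main() {
	query := "SELECT event_id FROM syncapi_current_room_state WHERE room_id = $1"
	exclude := []string{"$event1:example.org", "$event2:example.org"}
	query += " AND event_id NOT IN " + buildNotInClause(len(exclude), 1)
	fmt.Println(query)
	// Prints: ... WHERE room_id = $1 AND event_id NOT IN ($2, $3)
}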
@@ -150,7 +150,7 @@ func (s *outputRoomEventsStatements) SelectStateInRange(
 		},
 		stateFilter.Senders, stateFilter.NotSenders,
 		stateFilter.Types, stateFilter.NotTypes,
-		stateFilter.Limit, FilterOrderAsc,
+		nil, stateFilter.Limit, FilterOrderAsc,
 	)
 	if err != nil {
 		return nil, nil, fmt.Errorf("s.prepareWithFilters: %w", err)
@@ -326,7 +326,7 @@ func (s *outputRoomEventsStatements) SelectRecentEvents(
 		},
 		eventFilter.Senders, eventFilter.NotSenders,
 		eventFilter.Types, eventFilter.NotTypes,
-		eventFilter.Limit+1, FilterOrderDesc,
+		nil, eventFilter.Limit+1, FilterOrderDesc,
 	)
 	if err != nil {
 		return nil, false, fmt.Errorf("s.prepareWithFilters: %w", err)
@@ -374,7 +374,7 @@ func (s *outputRoomEventsStatements) SelectEarlyEvents(
 		},
 		eventFilter.Senders, eventFilter.NotSenders,
 		eventFilter.Types, eventFilter.NotTypes,
-		eventFilter.Limit, FilterOrderAsc,
+		nil, eventFilter.Limit, FilterOrderAsc,
 	)
 	if err != nil {
 		return nil, fmt.Errorf("s.prepareWithFilters: %w", err)
@@ -91,7 +91,7 @@ type CurrentRoomState interface {
 	DeleteRoomStateByEventID(ctx context.Context, txn *sql.Tx, eventID string) error
 	DeleteRoomStateForRoom(ctx context.Context, txn *sql.Tx, roomID string) error
 	// SelectCurrentState returns all the current state events for the given room.
-	SelectCurrentState(ctx context.Context, txn *sql.Tx, roomID string, stateFilter *gomatrixserverlib.StateFilter) ([]*gomatrixserverlib.HeaderedEvent, error)
+	SelectCurrentState(ctx context.Context, txn *sql.Tx, roomID string, stateFilter *gomatrixserverlib.StateFilter, excludeEventIDs []string) ([]*gomatrixserverlib.HeaderedEvent, error)
 	// SelectRoomIDsWithMembership returns the list of room IDs which have the given user in the given membership state.
 	SelectRoomIDsWithMembership(ctx context.Context, txn *sql.Tx, userID string, membership string) ([]string, error)
 	// SelectJoinedUsers returns a map of room ID to a list of joined user IDs.
@@ -19,7 +19,7 @@ func (p *DeviceListStreamProvider) CompleteSync(
 	ctx context.Context,
 	req *types.SyncRequest,
 ) types.LogPosition {
-	return p.IncrementalSync(ctx, req, types.LogPosition{}, p.LatestPosition(ctx))
+	return p.LatestPosition(ctx)
 }
 
 func (p *DeviceListStreamProvider) IncrementalSync(
@@ -2,18 +2,54 @@ package streams
 
 import (
 	"context"
+	"sync"
+	"time"
 
 	"github.com/matrix-org/dendrite/syncapi/types"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/gomatrixserverlib"
+	"go.uber.org/atomic"
 )
 
+// The max number of per-room goroutines to have running.
+// Too high and this will consume lots of CPU, too low and complete
+// sync responses will take longer to process.
+const PDU_STREAM_WORKERS = 256
+
+// The maximum number of tasks that can be queued in total before
+// backpressure will build up and the rests will start to block.
+const PDU_STREAM_QUEUESIZE = PDU_STREAM_WORKERS * 8
+
 type PDUStreamProvider struct {
 	StreamProvider
+
+	tasks   chan func()
+	workers atomic.Int32
 }
 
+func (p *PDUStreamProvider) worker() {
+	defer p.workers.Dec()
+	for {
+		select {
+		case f := <-p.tasks:
+			f()
+		case <-time.After(time.Second * 10):
+			return
+		}
+	}
+}
+
+func (p *PDUStreamProvider) queue(f func()) {
+	if p.workers.Load() < PDU_STREAM_WORKERS {
+		p.workers.Inc()
+		go p.worker()
+	}
+	p.tasks <- f
+}
+
 func (p *PDUStreamProvider) Setup() {
 	p.StreamProvider.Setup()
+	p.tasks = make(chan func(), PDU_STREAM_QUEUESIZE)
 
 	p.latestMutex.Lock()
 	defer p.latestMutex.Unlock()
@@ -52,19 +88,32 @@ func (p *PDUStreamProvider) CompleteSync(
 	eventFilter := req.Filter.Room.Timeline
 
 	// Build up a /sync response. Add joined rooms.
-	for _, roomID := range joinedRoomIDs {
-		var jr *types.JoinResponse
-		jr, err = p.getJoinResponseForCompleteSync(
-			ctx, roomID, r, &stateFilter, &eventFilter, req.Device,
-		)
-		if err != nil {
-			req.Log.WithError(err).Error("p.getJoinResponseForCompleteSync failed")
-			return from
-		}
-		req.Response.Rooms.Join[roomID] = *jr
-		req.Rooms[roomID] = gomatrixserverlib.Join
+	var reqMutex sync.Mutex
+	var reqWaitGroup sync.WaitGroup
+	reqWaitGroup.Add(len(joinedRoomIDs))
+	for _, room := range joinedRoomIDs {
+		roomID := room
+		p.queue(func() {
+			defer reqWaitGroup.Done()
+
+			var jr *types.JoinResponse
+			jr, err = p.getJoinResponseForCompleteSync(
+				ctx, roomID, r, &stateFilter, &eventFilter, req.WantFullState, req.Device,
+			)
+			if err != nil {
+				req.Log.WithError(err).Error("p.getJoinResponseForCompleteSync failed")
+				return
+			}
+
+			reqMutex.Lock()
+			defer reqMutex.Unlock()
+			req.Response.Rooms.Join[roomID] = *jr
+			req.Rooms[roomID] = gomatrixserverlib.Join
+		})
 	}
 
+	reqWaitGroup.Wait()
+
 	// Add peeked rooms.
 	peeks, err := p.DB.PeeksInRange(ctx, req.Device.UserID, req.Device.ID, r)
 	if err != nil {
@@ -75,7 +124,7 @@ func (p *PDUStreamProvider) CompleteSync(
 		if !peek.Deleted {
 			var jr *types.JoinResponse
 			jr, err = p.getJoinResponseForCompleteSync(
-				ctx, peek.RoomID, r, &stateFilter, &eventFilter, req.Device,
+				ctx, peek.RoomID, r, &stateFilter, &eventFilter, req.WantFullState, req.Device,
 			)
 			if err != nil {
 				req.Log.WithError(err).Error("p.getJoinResponseForCompleteSync failed")
@@ -205,26 +254,37 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 	return nil
 }
 
+// nolint:gocyclo
 func (p *PDUStreamProvider) getJoinResponseForCompleteSync(
 	ctx context.Context,
 	roomID string,
 	r types.Range,
 	stateFilter *gomatrixserverlib.StateFilter,
 	eventFilter *gomatrixserverlib.RoomEventFilter,
+	wantFullState bool,
 	device *userapi.Device,
 ) (jr *types.JoinResponse, err error) {
-	var stateEvents []*gomatrixserverlib.HeaderedEvent
-	stateEvents, err = p.DB.CurrentState(ctx, roomID, stateFilter)
-	if err != nil {
-		return
-	}
 	// TODO: When filters are added, we may need to call this multiple times to get enough events.
 	// See: https://github.com/matrix-org/synapse/blob/v0.19.3/synapse/handlers/sync.py#L316
-	recentStreamEvents, limited, err := p.DB.RecentEvents(
+	var recentStreamEvents []types.StreamEvent
+	var limited bool
+	recentStreamEvents, limited, err = p.DB.RecentEvents(
 		ctx, roomID, r, eventFilter, true, true,
 	)
+	if err != nil {
+		return
+	}
+
+	// Get the event IDs of the stream events we fetched. There's no point in us
+	var excludingEventIDs []string
+	if !wantFullState {
+		excludingEventIDs = make([]string, 0, len(recentStreamEvents))
+		for _, event := range recentStreamEvents {
+			if event.StateKey() != nil {
+				excludingEventIDs = append(excludingEventIDs, event.EventID())
+			}
+		}
+	}
+
+	stateEvents, err := p.DB.CurrentState(ctx, roomID, stateFilter, excludingEventIDs)
+	if err != nil {
+		return
+	}
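The per-room concurrency introduced above follows a common bounded worker-pool pattern: tasks go onto a buffered channel, workers are spawned lazily up to a fixed cap, and idle workers exit after a timeout so the pool shrinks between syncs. The following is a minimal, self-contained sketch of that pattern under assumed names; it is not the Dendrite code itself and uses the standard sync/atomic package rather than go.uber.org/atomic.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const maxWorkers = 4             // cap on running workers (PDU_STREAM_WORKERS plays this role above)
const queueSize = maxWorkers * 8 // buffered queue; beyond this, queue() blocks and applies backpressure

type pool struct {
	tasks   chan func()
	workers int32
}

func newPool() *pool {
	return &pool{tasks: make(chan func(), queueSize)}
}

// worker drains tasks until it has been idle for a while, then exits,
// releasing its slot so the pool can shrink when load drops.
func (p *pool) worker() {
	defer atomic.AddInt32(&p.workers, -1)
	for {
		select {
		case f := <-p.tasks:
			f()
		case <-time.After(10 * time.Second):
			return
		}
	}
}

// queue lazily spins up workers (up to the cap) and enqueues the task.
func (p *pool) queue(f func()) {
	if atomic.LoadInt32(&p.workers) < maxWorkers {
		atomic.AddInt32(&p.workers, 1)
		go p.worker()
	}
	p.tasks <- f
}

func main() {
	p := newPool()
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		i := i
		wg.Add(1)
		p.queue(func() {
			defer wg.Done()
			fmt.Println("processed room", i)
		})
	}
	wg.Wait() // mirrors reqWaitGroup.Wait() in CompleteSync above
}

The idle timeout means the pool drops back to zero goroutines when no complete syncs are in flight, instead of holding a fixed set of workers alive for the lifetime of the process.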
@@ -25,6 +25,7 @@ import (
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/setup/kafka"
+	"github.com/matrix-org/dendrite/setup/process"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/gomatrixserverlib"
 
@@ -39,6 +40,7 @@ import (
 // AddPublicRoutes sets up and registers HTTP handlers for the SyncAPI
 // component.
 func AddPublicRoutes(
+	process *process.ProcessContext,
 	router *mux.Router,
 	userAPI userapi.UserInternalAPI,
 	rsAPI api.RoomserverInternalAPI,
@@ -63,7 +65,7 @@ func AddPublicRoutes(
 	requestPool := sync.NewRequestPool(syncDB, cfg, userAPI, keyAPI, rsAPI, streams, notifier)
 
 	keyChangeConsumer := consumers.NewOutputKeyChangeEventConsumer(
-		cfg.Matrix.ServerName, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
+		process, cfg.Matrix.ServerName, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
 		consumer, keyAPI, rsAPI, syncDB, notifier, streams.DeviceListStreamProvider,
 	)
 	if err = keyChangeConsumer.Start(); err != nil {
@@ -71,7 +73,7 @@ func AddPublicRoutes(
 	}
 
 	roomConsumer := consumers.NewOutputRoomEventConsumer(
-		cfg, consumer, syncDB, notifier, streams.PDUStreamProvider,
+		process, cfg, consumer, syncDB, notifier, streams.PDUStreamProvider,
 		streams.InviteStreamProvider, rsAPI,
 	)
 	if err = roomConsumer.Start(); err != nil {
@@ -79,28 +81,28 @@ func AddPublicRoutes(
 	}
 
 	clientConsumer := consumers.NewOutputClientDataConsumer(
-		cfg, consumer, syncDB, notifier, streams.AccountDataStreamProvider,
+		process, cfg, consumer, syncDB, notifier, streams.AccountDataStreamProvider,
 	)
 	if err = clientConsumer.Start(); err != nil {
 		logrus.WithError(err).Panicf("failed to start client data consumer")
 	}
 
 	typingConsumer := consumers.NewOutputTypingEventConsumer(
-		cfg, consumer, syncDB, eduCache, notifier, streams.TypingStreamProvider,
+		process, cfg, consumer, syncDB, eduCache, notifier, streams.TypingStreamProvider,
 	)
 	if err = typingConsumer.Start(); err != nil {
 		logrus.WithError(err).Panicf("failed to start typing consumer")
 	}
 
 	sendToDeviceConsumer := consumers.NewOutputSendToDeviceEventConsumer(
-		cfg, consumer, syncDB, notifier, streams.SendToDeviceStreamProvider,
+		process, cfg, consumer, syncDB, notifier, streams.SendToDeviceStreamProvider,
 	)
 	if err = sendToDeviceConsumer.Start(); err != nil {
 		logrus.WithError(err).Panicf("failed to start send-to-device consumer")
 	}
 
 	receiptConsumer := consumers.NewOutputReceiptEventConsumer(
-		cfg, consumer, syncDB, notifier, streams.ReceiptStreamProvider,
+		process, cfg, consumer, syncDB, notifier, streams.ReceiptStreamProvider,
 	)
 	if err = receiptConsumer.Start(); err != nil {
 		logrus.WithError(err).Panicf("failed to start receipts consumer")