mirror of
https://github.com/hoernschen/dendrite.git
synced 2024-12-27 23:48:27 +00:00
bcb89ada5e
* Fix conversion from int to string, which yields a string of one rune, not a string of digits
* Add receipts table to syncapi
* Use StreamingToken as the since value
* Add required method to testEDUProducer
* Make receipt json creation "easier" to read
* Add receipts api to the eduserver
* Add receipts endpoint
* Add eduserver kafka consumer
* Add missing kafka config
* Add passing tests to whitelist

Signed-off-by: Till Faelligen <tfaelligen@gmail.com>

* Fix copy & paste error
* Fix column count error
* Make outbound federation receipts pass
* Make "Inbound federation rejects receipts from wrong remote" pass
* Don't use errors package
* Add TODO for batching requests; rename variable
* Return a better error message
* Use OutputReceiptEvent instead of InputReceiptEvent as result; don't use the errors package for errors; defer CloseAndLogIfError to close rows; fix copyright
* Better creation/usage of JoinResponse
* Query all joined rooms instead of just one
* Update gomatrixserverlib
* Add sqlite3 migration
* Add postgres migration
* Ensure required sequence exists before running migrations
* Clarification on comment
* Fix a bug when creating client receipts; use concrete types instead of interface{}
* Remove dead code; use key for timestamp
* Fix postgres query...
* Remove single purpose struct
* Use key/value directly
* Only apply receipts on initial sync or if EDU positions differ, otherwise we'll be sending the same receipts over and over again
* Actually update the id, so it is correctly sent in syncs
* Set receipt on request to /read_markers
* Fix issue with receipts getting overwritten
* Use fmt.Errorf instead of pkg/errors
* Revert "Add postgres migration" (reverts commit 722fe5a04628882b787d096942459961db159b06)
* Revert "Add sqlite3 migration" (reverts commit d113b03f6495a4b8f8bcf158a3d00b510b4240cc)
* Fix selectRoomReceipts query
* Make golangci-lint happy

Co-authored-by: Neil Alexander <neilalexander@users.noreply.github.com>
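The first item above refers to a common Go pitfall: converting an integer to a string with a plain type conversion produces the rune with that code point rather than its decimal digits, and strconv is the usual remedy. A minimal sketch of the pitfall follows; the actual Dendrite call site is not shown in this file.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	n := 65
	fmt.Println(string(rune(n))) // "A": a single rune, not digits
	fmt.Println(strconv.Itoa(n)) // "65": the decimal representation
}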
61 lines
2.2 KiB
Go
package config

import "fmt"

// Defined Kafka topics.
const (
	TopicOutputTypingEvent       = "OutputTypingEvent"
	TopicOutputSendToDeviceEvent = "OutputSendToDeviceEvent"
	TopicOutputKeyChangeEvent    = "OutputKeyChangeEvent"
	TopicOutputRoomEvent         = "OutputRoomEvent"
	TopicOutputClientData        = "OutputClientData"
	TopicOutputReceiptEvent      = "OutputReceiptEvent"
)

type Kafka struct {
	// A list of kafka addresses to connect to.
	Addresses []string `yaml:"addresses"`
	// The prefix to use for Kafka topic names for this homeserver - really only
	// useful if running more than one Dendrite on the same Kafka deployment.
	TopicPrefix string `yaml:"topic_prefix"`
	// Whether to use naffka instead of kafka.
	// Naffka can only be used when running dendrite as a single monolithic server.
	// Kafka can be used both with a monolithic server and when running the
	// components as separate servers.
	UseNaffka bool `yaml:"use_naffka"`
	// The Naffka database is used internally by the naffka library, if used.
	Database DatabaseOptions `yaml:"naffka_database"`
	// The maximum size of a Kafka message passed between consumer and producer.
	// Roughly equivalent to max.message.bytes / fetch.message.max.bytes in Kafka.
	MaxMessageBytes *int `yaml:"max_message_bytes"`
}
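As a rough illustration of how the yaml tags above map onto a configuration fragment, here is a sketch written as if it lived in this config package. The use of gopkg.in/yaml.v2 and the omission of the naffka_database section are assumptions for the example, not something this file specifies.

package config

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed decoder; any YAML library honouring the tags works
)

func exampleDecodeKafkaSection() error {
	raw := []byte(`
addresses: ["localhost:9092"]
topic_prefix: Dendrite
use_naffka: false
max_message_bytes: 8388608
`)
	var k Kafka
	if err := yaml.Unmarshal(raw, &k); err != nil {
		return err
	}
	// The tags route each key into the matching field.
	fmt.Println(k.Addresses, k.TopicPrefix, k.UseNaffka, *k.MaxMessageBytes)
	return nil
}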

func (k *Kafka) TopicFor(name string) string {
	return fmt.Sprintf("%s%s", k.TopicPrefix, name)
}
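A small sketch of TopicFor, written as if it were a Go example test in the same package: the topic prefix is simply prepended to the topic constant, so with the defaults set further down the result is "DendriteOutputRoomEvent".

package config

import "fmt"

func ExampleKafka_TopicFor() {
	var k Kafka
	k.Defaults() // sets TopicPrefix to "Dendrite" (see Defaults below)

	fmt.Println(k.TopicFor(TopicOutputRoomEvent))
	// Output: DendriteOutputRoomEvent
}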

func (c *Kafka) Defaults() {
	c.UseNaffka = true
	c.Database.Defaults()
	c.Addresses = []string{"localhost:2181"}
	c.Database.ConnectionString = DataSource("file:naffka.db")
	c.TopicPrefix = "Dendrite"

	maxBytes := 1024 * 1024 * 8 // about 8MB
	c.MaxMessageBytes = &maxBytes
}

func (c *Kafka) Verify(configErrs *ConfigErrors, isMonolith bool) {
	if c.UseNaffka {
		if !isMonolith {
			configErrs.Add("naffka can only be used in a monolithic server")
		}
		checkNotEmpty(configErrs, "global.kafka.database.connection_string", string(c.Database.ConnectionString))
	} else {
		// If we aren't using naffka then we need to have at least one kafka
		// server to talk to.
		checkNotZero(configErrs, "global.kafka.addresses", int64(len(c.Addresses)))
	}
	checkNotEmpty(configErrs, "global.kafka.topic_prefix", string(c.TopicPrefix))
	checkPositive(configErrs, "global.kafka.max_message_bytes", int64(*c.MaxMessageBytes))
}
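Finally, a sketch of how a caller might combine Defaults and Verify, assuming ConfigErrors (defined elsewhere in this package) behaves like a slice of error strings with the Add method used above. Note that Verify dereferences MaxMessageBytes, so Defaults (or an explicit config value) must have populated it first.

package config

import "fmt"

func exampleVerifyKafka(isMonolith bool) {
	var k Kafka
	k.Defaults() // UseNaffka=true, naffka database connection string and MaxMessageBytes set

	var errs ConfigErrors // assumption: ConfigErrors ranges like a []string
	k.Verify(&errs, isMonolith)

	// With isMonolith=false and UseNaffka=true, Verify adds the
	// "naffka can only be used in a monolithic server" error.
	for _, e := range errs {
		fmt.Println(e)
	}
}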