Mirror of https://github.com/hoernschen/dendrite.git, synced 2025-08-02 14:12:47 +00:00
Support sqlite in addition to postgres (#869)
* Move current work into single branch
* Initial massaging of clientapi etc (not working yet)
* Interfaces for accounts/devices databases
* Duplicate postgres package for sqlite3 (no changes made to it yet)
* Some keydb, accountdb, devicedb, common partition fixes, some more syncapi tweaking
* Fix accounts DB, device DB
* Update naffka dependency for SQLite
* Naffka SQLite
* Update naffka to latest master
* SQLite support for federationsender
* Mostly not-bad support for SQLite in syncapi (although there are problems where lots of events get classed incorrectly as backward extremities, probably because of IN/ANY clauses that are badly supported)
* Update Dockerfile -> Go 1.13.7, add build-base (as gcc and friends are needed for SQLite)
* Implement GET endpoints for account_data in clientapi
* Nuke filtering for now...
* Revert "Implement GET endpoints for account_data in clientapi" (this reverts commit 4d80dff4583d278620d9b3ed437e9fcd8d4674ee)
* Implement GET endpoints for account_data in clientapi (#861)
* Implement GET endpoints for account_data in clientapi
* Fix accountDB parameter
* Remove fmt.Println
* Fix insertAccountData SQLite query
* Fix accountDB storage interfaces
* Add empty push rules into account data on account creation (#862)
* Put SaveAccountData into the right function this time
* Not sure if roomserver is better or worse now
* sqlite work
* Allow empty last sent ID for the first event
* sqlite: room creation works
* Support sending messages
* Nuke fmt.println
* Move QueryVariadic etc into common, other device fixes (a sketch of this helper follows below)
* Fix some linter issues
* Fix bugs
* Fix some linting errors
* Fix errcheck lint errors
* Make naffka use postgres as fallback, fix couple of compile errors
* What on earth happened to the /rooms/{roomID}/send/{eventType} routing

Co-authored-by: Neil Alexander <neilalexander@users.noreply.github.com>
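The "Move QueryVariadic etc into common" item refers to a helper for expanding a single IN ($1) placeholder into one bind parameter per value, which SQLite needs because it has no ANY/array binding. As a hedged illustration only (the actual helper lives in the common package and its exact name and signature may differ), such a function could look like this:

    package common

    import (
    	"fmt"
    	"strings"
    )

    // QueryVariadic builds a placeholder group such as "($1,$2,$3)" so that an
    // IN clause can bind a variable number of arguments. Hypothetical sketch of
    // the helper the commit message mentions; the real implementation may differ.
    func QueryVariadic(count int) string {
    	var b strings.Builder
    	b.WriteString("(")
    	for i := 0; i < count; i++ {
    		if i > 0 {
    			b.WriteString(",")
    		}
    		fmt.Fprintf(&b, "$%d", i+1)
    	}
    	b.WriteString(")")
    	return b.String()
    }

The new sqlite3 event JSON table below uses this pattern in bulkSelectEventJSON, rewriting "WHERE event_nid IN ($1)" into "WHERE event_nid IN ($1,$2,...,$n)" before binding the slice of event NIDs.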
parent 6942ee1de0
commit b6ea1bc67a
103 changed files with 9467 additions and 710 deletions
roomserver/storage/sqlite3/event_json_table.go (new file, 108 additions)
@@ -0,0 +1,108 @@
// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
	"context"
	"database/sql"
	"strings"

	"github.com/matrix-org/dendrite/common"
	"github.com/matrix-org/dendrite/roomserver/types"
)

const eventJSONSchema = `
  CREATE TABLE IF NOT EXISTS roomserver_event_json (
    event_nid INTEGER NOT NULL PRIMARY KEY,
    event_json TEXT NOT NULL
  );
`

const insertEventJSONSQL = `
	INSERT INTO roomserver_event_json (event_nid, event_json) VALUES ($1, $2)
	  ON CONFLICT DO NOTHING
`

// Bulk event JSON lookup by numeric event ID.
// Sort by the numeric event ID.
// This means that we can use binary search to lookup by numeric event ID.
const bulkSelectEventJSONSQL = `
	SELECT event_nid, event_json FROM roomserver_event_json
	  WHERE event_nid IN ($1)
	  ORDER BY event_nid ASC
`

type eventJSONStatements struct {
	db                      *sql.DB
	insertEventJSONStmt     *sql.Stmt
	bulkSelectEventJSONStmt *sql.Stmt
}

func (s *eventJSONStatements) prepare(db *sql.DB) (err error) {
	s.db = db
	_, err = db.Exec(eventJSONSchema)
	if err != nil {
		return
	}
	return statementList{
		{&s.insertEventJSONStmt, insertEventJSONSQL},
		{&s.bulkSelectEventJSONStmt, bulkSelectEventJSONSQL},
	}.prepare(db)
}

func (s *eventJSONStatements) insertEventJSON(
	ctx context.Context, txn *sql.Tx, eventNID types.EventNID, eventJSON []byte,
) error {
	_, err := common.TxStmt(txn, s.insertEventJSONStmt).ExecContext(ctx, int64(eventNID), eventJSON)
	return err
}

type eventJSONPair struct {
	EventNID  types.EventNID
	EventJSON []byte
}

func (s *eventJSONStatements) bulkSelectEventJSON(
	ctx context.Context, txn *sql.Tx, eventNIDs []types.EventNID,
) ([]eventJSONPair, error) {
	iEventNIDs := make([]interface{}, len(eventNIDs))
	for k, v := range eventNIDs {
		iEventNIDs[k] = v
	}
	selectOrig := strings.Replace(bulkSelectEventJSONSQL, "($1)", common.QueryVariadic(len(iEventNIDs)), 1)

	rows, err := txn.QueryContext(ctx, selectOrig, iEventNIDs...)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // nolint: errcheck

	// We know that we will only get as many results as event NIDs
	// because of the unique constraint on event NIDs.
	// So we can allocate an array of the correct size now.
	// We might get fewer results than NIDs so we adjust the length of the slice before returning it.
	results := make([]eventJSONPair, len(eventNIDs))
	i := 0
	for ; rows.Next(); i++ {
		result := &results[i]
		var eventNID int64
		if err := rows.Scan(&eventNID, &result.EventJSON); err != nil {
			return nil, err
		}
		result.EventNID = types.EventNID(eventNID)
	}
	return results[:i], nil
}
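The prepare method above hands its SQL off to a statementList helper that is defined elsewhere in the sqlite3 package rather than in this file. As a hedged sketch only (the definition actually used by the commit may differ), such a helper could be as simple as:

    package sqlite3

    import "database/sql"

    // statementList pairs a destination *sql.Stmt with the SQL text to prepare
    // into it, so a storage struct can prepare all of its statements in one loop.
    // Hypothetical sketch; the helper actually used by the commit may differ.
    type statementList []struct {
    	statement **sql.Stmt
    	sql       string
    }

    // prepare prepares each statement against db, stopping at the first error.
    func (s statementList) prepare(db *sql.DB) (err error) {
    	for _, entry := range s {
    		if *entry.statement, err = db.Prepare(entry.sql); err != nil {
    			return
    		}
    	}
    	return
    }

With a helper along these lines, the prepare call in the file above reduces to one db.Prepare per statement, with the first failure reported to the caller.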