2020-01-03 14:07:05 +00:00
|
|
|
// Copyright 2017-2018 New Vector Ltd
|
|
|
|
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
|
2017-04-20 22:40:52 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2020-01-03 14:07:05 +00:00
|
|
|
package postgres
|
2017-03-30 14:29:23 +00:00
|
|
|
|
|
|
|
import (
|
2017-09-18 15:52:22 +00:00
|
|
|
"context"
|
2017-03-30 14:29:23 +00:00
|
|
|
"database/sql"
|
2019-08-07 10:12:09 +00:00
|
|
|
"encoding/json"
|
2018-11-07 19:12:23 +00:00
|
|
|
"sort"
|
2017-03-30 14:29:23 +00:00
|
|
|
|
2020-06-12 13:55:57 +00:00
|
|
|
"github.com/matrix-org/dendrite/internal"
|
2017-12-06 09:37:18 +00:00
|
|
|
"github.com/matrix-org/dendrite/roomserver/api"
|
2020-05-14 08:53:55 +00:00
|
|
|
"github.com/matrix-org/dendrite/syncapi/storage/tables"
|
2020-01-23 17:51:10 +00:00
|
|
|
"github.com/matrix-org/dendrite/syncapi/types"
|
2017-12-06 09:37:18 +00:00
|
|
|
|
2017-03-30 14:29:23 +00:00
|
|
|
"github.com/lib/pq"
|
2020-06-12 13:55:57 +00:00
|
|
|
"github.com/matrix-org/dendrite/internal/sqlutil"
|
2017-04-05 09:30:13 +00:00
|
|
|
"github.com/matrix-org/gomatrixserverlib"
|
2017-11-16 10:12:02 +00:00
|
|
|
log "github.com/sirupsen/logrus"
|
2017-03-30 14:29:23 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// outputRoomEventsSchema creates the shared stream-ID sequence and the table
// that stores every event output by the roomserver, keyed by stream position.
const outputRoomEventsSchema = `
-- This sequence is shared between all the tables generated from kafka logs.
CREATE SEQUENCE IF NOT EXISTS syncapi_stream_id;

-- Stores output room events received from the roomserver.
CREATE TABLE IF NOT EXISTS syncapi_output_room_events (
  -- An incrementing ID which denotes the position in the log that this event resides at.
  -- NB: 'serial' makes no guarantees to increment by 1 every time, only that it increments.
  -- This isn't a problem for us since we just want to order by this field.
  id BIGINT PRIMARY KEY DEFAULT nextval('syncapi_stream_id'),
  -- The event ID for the event
  event_id TEXT NOT NULL CONSTRAINT syncapi_event_id_idx UNIQUE,
  -- The 'room_id' key for the event.
  room_id TEXT NOT NULL,
  -- The headered JSON for the event, containing potentially additional metadata such as
  -- the room version. Stored as TEXT because this should be valid UTF-8.
  headered_event_json TEXT NOT NULL,
  -- The event type e.g 'm.room.member'.
  type TEXT NOT NULL,
  -- The 'sender' property of the event.
  sender TEXT NOT NULL,
  -- true if the event content contains a url key.
  contains_url BOOL NOT NULL,
  -- A list of event IDs which represent a delta of added/removed room state. This can be NULL
  -- if there is no delta.
  add_state_ids TEXT[],
  remove_state_ids TEXT[],
  -- The client session that sent the event, if any
  session_id BIGINT,
  -- The transaction id used to send the event, if any
  transaction_id TEXT,
  -- Should the event be excluded from responses to /sync requests. Useful for
  -- events retrieved through backfilling that have a position in the stream
  -- that relates to the moment these were retrieved rather than the moment these
  -- were emitted.
  exclude_from_sync BOOL DEFAULT FALSE
);
`
|
|
|
|
|
|
|
|
// insertEventSQL inserts an event and returns its allocated stream position.
// On a duplicate event ID it upserts: exclude_from_sync is only kept true if
// both the stored row and the new insert ($11) want it excluded.
const insertEventSQL = "" +
	"INSERT INTO syncapi_output_room_events (" +
	"room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync" +
	") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) " +
	"ON CONFLICT ON CONSTRAINT syncapi_event_id_idx DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $11) " +
	"RETURNING id"
|
2017-04-05 09:30:13 +00:00
|
|
|
|
|
|
|
// selectEventsSQL fetches the rows for the given list of event IDs, in no
// guaranteed order.
const selectEventsSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events WHERE event_id = ANY($1)"
|
2017-04-10 14:12:18 +00:00
|
|
|
|
2017-04-13 15:56:46 +00:00
|
|
|
// selectRecentEventsSQL returns the newest events in a room within the stream
// ID range ($2, $3], newest first, applying optional sender/type filters
// (NULL array parameters disable the corresponding filter).
const selectRecentEventsSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" +
	" WHERE room_id = $1 AND id > $2 AND id <= $3" +
	" AND ( $4::text[] IS NULL OR sender = ANY($4) )" +
	" AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" +
	" AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" +
	" AND ( $7::text[] IS NULL OR NOT(type LIKE ANY($7)) )" +
	" ORDER BY id DESC LIMIT $8"
|
2017-04-13 15:56:46 +00:00
|
|
|
|
2020-01-23 17:51:10 +00:00
|
|
|
// selectRecentEventsForSyncSQL is identical to selectRecentEventsSQL except
// that it additionally skips rows flagged exclude_from_sync (e.g. backfilled
// events that should not appear in /sync responses).
const selectRecentEventsForSyncSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" +
	" WHERE room_id = $1 AND id > $2 AND id <= $3 AND exclude_from_sync = FALSE" +
	" AND ( $4::text[] IS NULL OR sender = ANY($4) )" +
	" AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" +
	" AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" +
	" AND ( $7::text[] IS NULL OR NOT(type LIKE ANY($7)) )" +
	" ORDER BY id DESC LIMIT $8"
|
2020-01-23 17:51:10 +00:00
|
|
|
|
|
|
|
// selectEarlyEventsSQL is the oldest-first counterpart of
// selectRecentEventsSQL: same range and filter parameters, but ordered by
// ascending stream position.
const selectEarlyEventsSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" +
	" WHERE room_id = $1 AND id > $2 AND id <= $3" +
	" AND ( $4::text[] IS NULL OR sender = ANY($4) )" +
	" AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" +
	" AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" +
	" AND ( $7::text[] IS NULL OR NOT(type LIKE ANY($7)) )" +
	" ORDER BY id ASC LIMIT $8"
|
2020-01-23 17:51:10 +00:00
|
|
|
|
2017-09-19 16:15:46 +00:00
|
|
|
// selectMaxEventIDSQL returns the highest stream position in the table, or
// NULL when the table is empty.
const selectMaxEventIDSQL = "" +
	"SELECT MAX(id) FROM syncapi_output_room_events"
|
2017-04-10 14:12:18 +00:00
|
|
|
|
2020-07-08 16:45:39 +00:00
|
|
|
// updateEventJSONSQL overwrites the stored headered JSON for a single event
// ID, leaving its stream position unchanged.
const updateEventJSONSQL = "" +
	"UPDATE syncapi_output_room_events SET headered_event_json=$1 WHERE event_id=$2"
|
|
|
|
|
2017-04-19 15:04:01 +00:00
|
|
|
// In order for us to apply the state updates correctly, rows need to be ordered in the order they were received (id).
// Only rows carrying a state delta (non-NULL add_state_ids/remove_state_ids)
// are returned, restricted to the given rooms and the state filter; $8 toggles
// an optional contains_url constraint and $9 caps the row count.
const selectStateInRangeSQL = "" +
	"SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids" +
	" FROM syncapi_output_room_events" +
	" WHERE (id > $1 AND id <= $2) AND (add_state_ids IS NOT NULL OR remove_state_ids IS NOT NULL)" +
	" AND room_id = ANY($3)" +
	" AND ( $4::text[] IS NULL OR sender = ANY($4) )" +
	" AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" +
	" AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" +
	" AND ( $7::text[] IS NULL OR NOT(type LIKE ANY($7)) )" +
	" AND ( $8::bool IS NULL OR contains_url = $8 )" +
	" ORDER BY id ASC" +
	" LIMIT $9"
|
2017-04-19 15:04:01 +00:00
|
|
|
|
2020-09-15 10:17:46 +00:00
|
|
|
// deleteEventsForRoomSQL removes every stored event for a room.
const deleteEventsForRoomSQL = "" +
	"DELETE FROM syncapi_output_room_events WHERE room_id = $1"
|
|
|
|
|
2022-02-21 16:12:22 +00:00
|
|
|
// selectContextEventSQL looks up a single event by room ID and event ID,
// used as the anchor event for /context requests.
const selectContextEventSQL = "" +
	"SELECT id, headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND event_id = $2"
|
|
|
|
|
|
|
|
// selectContextBeforeEventSQL returns events in the room with a stream
// position below $2, newest first, with sender/type filtering.
// NB: the placeholder numbering is non-sequential — $3 is the LIMIT and the
// filter arrays start at $4, so callers must bind arguments in that order.
const selectContextBeforeEventSQL = "" +
	"SELECT headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND id < $2" +
	" AND ( $4::text[] IS NULL OR sender = ANY($4) )" +
	" AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" +
	" AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" +
	" AND ( $7::text[] IS NULL OR NOT(type LIKE ANY($7)) )" +
	" ORDER BY id DESC LIMIT $3"
|
|
|
|
|
|
|
|
// selectContextAfterEventSQL returns events in the room with a stream
// position above $2, oldest first, with sender/type filtering.
// NB: as with selectContextBeforeEventSQL, $3 is the LIMIT and the filter
// arrays start at $4.
const selectContextAfterEventSQL = "" +
	"SELECT id, headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND id > $2" +
	" AND ( $4::text[] IS NULL OR sender = ANY($4) )" +
	" AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" +
	" AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" +
	" AND ( $7::text[] IS NULL OR NOT(type LIKE ANY($7)) )" +
	" ORDER BY id ASC LIMIT $3"
|
|
|
|
|
2017-03-30 14:29:23 +00:00
|
|
|
// outputRoomEventsStatements holds the prepared statements used to access
// the syncapi_output_room_events table. Populated by NewPostgresEventsTable.
type outputRoomEventsStatements struct {
	insertEventStmt               *sql.Stmt
	selectEventsStmt              *sql.Stmt
	selectMaxEventIDStmt          *sql.Stmt
	selectRecentEventsStmt        *sql.Stmt
	selectRecentEventsForSyncStmt *sql.Stmt
	selectEarlyEventsStmt         *sql.Stmt
	selectStateInRangeStmt        *sql.Stmt
	updateEventJSONStmt           *sql.Stmt
	deleteEventsForRoomStmt       *sql.Stmt
	selectContextEventStmt        *sql.Stmt
	selectContextBeforeEventStmt  *sql.Stmt
	selectContextAfterEventStmt   *sql.Stmt
}
|
|
|
|
|
2020-05-14 08:53:55 +00:00
|
|
|
func NewPostgresEventsTable(db *sql.DB) (tables.Events, error) {
|
|
|
|
s := &outputRoomEventsStatements{}
|
|
|
|
_, err := db.Exec(outputRoomEventsSchema)
|
2017-03-30 14:29:23 +00:00
|
|
|
if err != nil {
|
2020-05-14 08:53:55 +00:00
|
|
|
return nil, err
|
2017-03-30 14:29:23 +00:00
|
|
|
}
|
2022-02-21 16:12:22 +00:00
|
|
|
return s, sqlutil.StatementList{
|
|
|
|
{&s.insertEventStmt, insertEventSQL},
|
|
|
|
{&s.selectEventsStmt, selectEventsSQL},
|
|
|
|
{&s.selectMaxEventIDStmt, selectMaxEventIDSQL},
|
|
|
|
{&s.selectRecentEventsStmt, selectRecentEventsSQL},
|
|
|
|
{&s.selectRecentEventsForSyncStmt, selectRecentEventsForSyncSQL},
|
|
|
|
{&s.selectEarlyEventsStmt, selectEarlyEventsSQL},
|
|
|
|
{&s.selectStateInRangeStmt, selectStateInRangeSQL},
|
|
|
|
{&s.updateEventJSONStmt, updateEventJSONSQL},
|
|
|
|
{&s.deleteEventsForRoomStmt, deleteEventsForRoomSQL},
|
|
|
|
{&s.selectContextEventStmt, selectContextEventSQL},
|
|
|
|
{&s.selectContextBeforeEventStmt, selectContextBeforeEventSQL},
|
|
|
|
{&s.selectContextAfterEventStmt, selectContextAfterEventSQL},
|
|
|
|
}.Prepare(db)
|
2017-04-10 14:12:18 +00:00
|
|
|
}
|
|
|
|
|
2020-07-08 16:45:39 +00:00
|
|
|
func (s *outputRoomEventsStatements) UpdateEventJSON(ctx context.Context, event *gomatrixserverlib.HeaderedEvent) error {
|
|
|
|
headeredJSON, err := json.Marshal(event)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = s.updateEventJSONStmt.ExecContext(ctx, headeredJSON, event.EventID())
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-07-12 14:59:53 +00:00
|
|
|
// SelectStateInRange returns the state events between the two given PDU stream positions, exclusive of oldPos, inclusive of newPos.
// Results are bucketed based on the room ID. If the same state is overwritten multiple times between the
// two positions, only the most recent state is returned.
// It returns (roomID -> set of needed state event IDs, eventID -> stream event, error).
func (s *outputRoomEventsStatements) SelectStateInRange(
	ctx context.Context, txn *sql.Tx, r types.Range,
	stateFilter *gomatrixserverlib.StateFilter, roomIDs []string,
) (map[string]map[string]bool, map[string]types.StreamEvent, error) {
	stmt := sqlutil.TxStmt(txn, s.selectStateInRangeStmt)

	rows, err := stmt.QueryContext(
		ctx, r.Low(), r.High(), pq.StringArray(roomIDs),
		pq.StringArray(stateFilter.Senders),
		pq.StringArray(stateFilter.NotSenders),
		pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.Types)),
		pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.NotTypes)),
		stateFilter.ContainsURL,
		stateFilter.Limit,
	)
	if err != nil {
		return nil, nil, err
	}
	defer internal.CloseAndLogIfError(ctx, rows, "selectStateInRange: rows.close() failed")
	// Fetch all the state change events for all rooms between the two positions then loop each event and:
	//  - Keep a cache of the event by ID (99% of state change events are for the event itself)
	//  - For each room ID, build up an array of event IDs which represents cumulative adds/removes
	// For each room, map cumulative event IDs to events and return. This may need to a batch SELECT based on event ID
	// if they aren't in the event ID cache. We don't handle state deletion yet.
	eventIDToEvent := make(map[string]types.StreamEvent)

	// RoomID => A set (map[string]bool) of state event IDs which are between the two positions
	stateNeeded := make(map[string]map[string]bool)

	for rows.Next() {
		var (
			eventID         string
			streamPos       types.StreamPosition
			eventBytes      []byte
			excludeFromSync bool
			addIDs          pq.StringArray
			delIDs          pq.StringArray
		)
		if err := rows.Scan(&eventID, &streamPos, &eventBytes, &excludeFromSync, &addIDs, &delIDs); err != nil {
			return nil, nil, err
		}
		// Sanity check for deleted state and whine if we see it. We don't need to do anything
		// since it'll just mark the event as not being needed.
		if len(addIDs) < len(delIDs) {
			log.WithFields(log.Fields{
				"since":   r.From,
				"current": r.To,
				"adds":    addIDs,
				"dels":    delIDs,
			}).Warn("StateBetween: ignoring deleted state")
		}

		// TODO: Handle redacted events
		var ev gomatrixserverlib.HeaderedEvent
		if err := ev.UnmarshalJSONWithEventID(eventBytes, eventID); err != nil {
			return nil, nil, err
		}
		needSet := stateNeeded[ev.RoomID()]
		if needSet == nil { // make set if required
			needSet = make(map[string]bool)
		}
		// Deletions are recorded as false, additions as true; a later add for
		// the same event ID overrides an earlier delete within the range.
		for _, id := range delIDs {
			needSet[id] = false
		}
		for _, id := range addIDs {
			needSet[id] = true
		}
		stateNeeded[ev.RoomID()] = needSet

		eventIDToEvent[eventID] = types.StreamEvent{
			HeaderedEvent:   &ev,
			StreamPosition:  streamPos,
			ExcludeFromSync: excludeFromSync,
		}
	}

	return stateNeeded, eventIDToEvent, rows.Err()
}
|
|
|
|
|
2017-04-13 15:56:46 +00:00
|
|
|
// MaxID returns the ID of the last inserted event in this table. 'txn' is optional. If it is not supplied,
|
|
|
|
// then this function should only ever be used at startup, as it will race with inserting events if it is
|
|
|
|
// done afterwards. If there are no inserted events, 0 is returned.
|
2020-05-14 08:53:55 +00:00
|
|
|
func (s *outputRoomEventsStatements) SelectMaxEventID(
|
2017-09-18 15:52:22 +00:00
|
|
|
ctx context.Context, txn *sql.Tx,
|
|
|
|
) (id int64, err error) {
|
2017-04-10 14:12:18 +00:00
|
|
|
var nullableID sql.NullInt64
|
2020-06-12 13:55:57 +00:00
|
|
|
stmt := sqlutil.TxStmt(txn, s.selectMaxEventIDStmt)
|
2017-09-18 15:52:22 +00:00
|
|
|
err = stmt.QueryRowContext(ctx).Scan(&nullableID)
|
2017-04-10 14:12:18 +00:00
|
|
|
if nullableID.Valid {
|
|
|
|
id = nullableID.Int64
|
|
|
|
}
|
2017-03-30 14:29:23 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-10 14:12:18 +00:00
|
|
|
// InsertEvent into the output_room_events table. addState and removeState are an optional list of state event IDs. Returns the position
|
|
|
|
// of the inserted event.
|
2020-05-14 08:53:55 +00:00
|
|
|
func (s *outputRoomEventsStatements) InsertEvent(
|
2017-09-18 15:52:22 +00:00
|
|
|
ctx context.Context, txn *sql.Tx,
|
2020-03-19 12:07:01 +00:00
|
|
|
event *gomatrixserverlib.HeaderedEvent, addState, removeState []string,
|
2020-01-23 17:51:10 +00:00
|
|
|
transactionID *api.TransactionID, excludeFromSync bool,
|
|
|
|
) (streamPos types.StreamPosition, err error) {
|
2019-08-23 16:55:40 +00:00
|
|
|
var txnID *string
|
|
|
|
var sessionID *int64
|
2017-12-06 09:37:18 +00:00
|
|
|
if transactionID != nil {
|
2019-08-23 16:55:40 +00:00
|
|
|
sessionID = &transactionID.SessionID
|
2017-12-06 09:37:18 +00:00
|
|
|
txnID = &transactionID.TransactionID
|
|
|
|
}
|
|
|
|
|
2019-08-07 10:12:09 +00:00
|
|
|
// Parse content as JSON and search for an "url" key
|
|
|
|
containsURL := false
|
|
|
|
var content map[string]interface{}
|
|
|
|
if json.Unmarshal(event.Content(), &content) != nil {
|
|
|
|
// Set containsURL to true if url is present
|
|
|
|
_, containsURL = content["url"]
|
|
|
|
}
|
|
|
|
|
2020-03-19 12:07:01 +00:00
|
|
|
var headeredJSON []byte
|
|
|
|
headeredJSON, err = json.Marshal(event)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-06-12 13:55:57 +00:00
|
|
|
stmt := sqlutil.TxStmt(txn, s.insertEventStmt)
|
2017-09-18 15:52:22 +00:00
|
|
|
err = stmt.QueryRowContext(
|
|
|
|
ctx,
|
|
|
|
event.RoomID(),
|
|
|
|
event.EventID(),
|
2020-03-19 12:07:01 +00:00
|
|
|
headeredJSON,
|
2019-08-07 10:12:09 +00:00
|
|
|
event.Type(),
|
|
|
|
event.Sender(),
|
|
|
|
containsURL,
|
2017-09-18 15:52:22 +00:00
|
|
|
pq.StringArray(addState),
|
|
|
|
pq.StringArray(removeState),
|
2019-08-23 16:55:40 +00:00
|
|
|
sessionID,
|
2017-12-06 09:37:18 +00:00
|
|
|
txnID,
|
2020-01-23 17:51:10 +00:00
|
|
|
excludeFromSync,
|
2017-04-10 14:12:18 +00:00
|
|
|
).Scan(&streamPos)
|
|
|
|
return
|
2017-03-30 14:29:23 +00:00
|
|
|
}
|
2017-04-05 09:30:13 +00:00
|
|
|
|
2020-01-23 17:51:10 +00:00
|
|
|
// selectRecentEvents returns the most recent events in the given room, up to a maximum of 'limit'.
|
|
|
|
// If onlySyncEvents has a value of true, only returns the events that aren't marked as to exclude
|
|
|
|
// from sync.
|
2020-05-14 08:53:55 +00:00
|
|
|
func (s *outputRoomEventsStatements) SelectRecentEvents(
|
2017-09-18 15:52:22 +00:00
|
|
|
ctx context.Context, txn *sql.Tx,
|
2021-01-19 18:00:42 +00:00
|
|
|
roomID string, r types.Range, eventFilter *gomatrixserverlib.RoomEventFilter,
|
2020-01-23 17:51:10 +00:00
|
|
|
chronologicalOrder bool, onlySyncEvents bool,
|
2020-06-26 14:34:41 +00:00
|
|
|
) ([]types.StreamEvent, bool, error) {
|
2020-01-23 17:51:10 +00:00
|
|
|
var stmt *sql.Stmt
|
|
|
|
if onlySyncEvents {
|
2020-06-12 13:55:57 +00:00
|
|
|
stmt = sqlutil.TxStmt(txn, s.selectRecentEventsForSyncStmt)
|
2020-01-23 17:51:10 +00:00
|
|
|
} else {
|
2020-06-12 13:55:57 +00:00
|
|
|
stmt = sqlutil.TxStmt(txn, s.selectRecentEventsStmt)
|
2020-01-23 17:51:10 +00:00
|
|
|
}
|
2021-01-19 18:00:42 +00:00
|
|
|
rows, err := stmt.QueryContext(
|
|
|
|
ctx, roomID, r.Low(), r.High(),
|
|
|
|
pq.StringArray(eventFilter.Senders),
|
|
|
|
pq.StringArray(eventFilter.NotSenders),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(eventFilter.Types)),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(eventFilter.NotTypes)),
|
|
|
|
eventFilter.Limit+1,
|
|
|
|
)
|
2020-01-23 17:51:10 +00:00
|
|
|
if err != nil {
|
2020-06-26 14:34:41 +00:00
|
|
|
return nil, false, err
|
2020-01-23 17:51:10 +00:00
|
|
|
}
|
2020-05-21 13:40:13 +00:00
|
|
|
defer internal.CloseAndLogIfError(ctx, rows, "selectRecentEvents: rows.close() failed")
|
2020-01-23 17:51:10 +00:00
|
|
|
events, err := rowsToStreamEvents(rows)
|
|
|
|
if err != nil {
|
2020-06-26 14:34:41 +00:00
|
|
|
return nil, false, err
|
2020-01-23 17:51:10 +00:00
|
|
|
}
|
|
|
|
if chronologicalOrder {
|
|
|
|
// The events need to be returned from oldest to latest, which isn't
|
|
|
|
// necessary the way the SQL query returns them, so a sort is necessary to
|
|
|
|
// ensure the events are in the right order in the slice.
|
|
|
|
sort.SliceStable(events, func(i int, j int) bool {
|
|
|
|
return events[i].StreamPosition < events[j].StreamPosition
|
|
|
|
})
|
|
|
|
}
|
2020-06-26 14:34:41 +00:00
|
|
|
// we queried for 1 more than the limit, so if we returned one more mark limited=true
|
|
|
|
limited := false
|
2021-01-19 18:00:42 +00:00
|
|
|
if len(events) > eventFilter.Limit {
|
2020-06-26 14:34:41 +00:00
|
|
|
limited = true
|
|
|
|
// re-slice the extra (oldest) event out: in chronological order this is the first entry, else the last.
|
|
|
|
if chronologicalOrder {
|
|
|
|
events = events[1:]
|
|
|
|
} else {
|
|
|
|
events = events[:len(events)-1]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return events, limited, nil
|
2020-01-23 17:51:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// selectEarlyEvents returns the earliest events in the given room, starting
|
|
|
|
// from a given position, up to a maximum of 'limit'.
|
2020-05-14 08:53:55 +00:00
|
|
|
func (s *outputRoomEventsStatements) SelectEarlyEvents(
|
2020-01-23 17:51:10 +00:00
|
|
|
ctx context.Context, txn *sql.Tx,
|
2021-01-19 18:00:42 +00:00
|
|
|
roomID string, r types.Range, eventFilter *gomatrixserverlib.RoomEventFilter,
|
2020-01-23 17:51:10 +00:00
|
|
|
) ([]types.StreamEvent, error) {
|
2020-06-12 13:55:57 +00:00
|
|
|
stmt := sqlutil.TxStmt(txn, s.selectEarlyEventsStmt)
|
2021-01-19 18:00:42 +00:00
|
|
|
rows, err := stmt.QueryContext(
|
|
|
|
ctx, roomID, r.Low(), r.High(),
|
|
|
|
pq.StringArray(eventFilter.Senders),
|
|
|
|
pq.StringArray(eventFilter.NotSenders),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(eventFilter.Types)),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(eventFilter.NotTypes)),
|
|
|
|
eventFilter.Limit,
|
|
|
|
)
|
2017-04-13 15:56:46 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-05-21 13:40:13 +00:00
|
|
|
defer internal.CloseAndLogIfError(ctx, rows, "selectEarlyEvents: rows.close() failed")
|
2017-06-07 15:35:41 +00:00
|
|
|
events, err := rowsToStreamEvents(rows)
|
2017-04-20 10:18:26 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-11-07 19:12:23 +00:00
|
|
|
// The events need to be returned from oldest to latest, which isn't
|
2019-07-12 15:43:01 +00:00
|
|
|
// necessarily the way the SQL query returns them, so a sort is necessary to
|
2018-11-07 19:12:23 +00:00
|
|
|
// ensure the events are in the right order in the slice.
|
|
|
|
sort.SliceStable(events, func(i int, j int) bool {
|
2020-01-23 17:51:10 +00:00
|
|
|
return events[i].StreamPosition < events[j].StreamPosition
|
2018-11-07 19:12:23 +00:00
|
|
|
})
|
2018-01-02 10:33:25 +00:00
|
|
|
return events, nil
|
2017-04-13 15:56:46 +00:00
|
|
|
}
|
|
|
|
|
2020-01-23 17:51:10 +00:00
|
|
|
// selectEvents returns the events for the given event IDs. If an event is
|
|
|
|
// missing from the database, it will be omitted.
|
2020-05-14 08:53:55 +00:00
|
|
|
func (s *outputRoomEventsStatements) SelectEvents(
|
2022-04-08 16:53:24 +00:00
|
|
|
ctx context.Context, txn *sql.Tx, eventIDs []string, preserveOrder bool,
|
2020-01-23 17:51:10 +00:00
|
|
|
) ([]types.StreamEvent, error) {
|
2020-06-12 13:55:57 +00:00
|
|
|
stmt := sqlutil.TxStmt(txn, s.selectEventsStmt)
|
2017-09-18 15:52:22 +00:00
|
|
|
rows, err := stmt.QueryContext(ctx, pq.StringArray(eventIDs))
|
2017-04-13 15:56:46 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-05-21 13:40:13 +00:00
|
|
|
defer internal.CloseAndLogIfError(ctx, rows, "selectEvents: rows.close() failed")
|
2022-04-08 16:53:24 +00:00
|
|
|
streamEvents, err := rowsToStreamEvents(rows)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if preserveOrder {
|
|
|
|
eventMap := make(map[string]types.StreamEvent)
|
|
|
|
for _, ev := range streamEvents {
|
|
|
|
eventMap[ev.EventID()] = ev
|
|
|
|
}
|
|
|
|
var returnEvents []types.StreamEvent
|
|
|
|
for _, eventID := range eventIDs {
|
|
|
|
ev, ok := eventMap[eventID]
|
|
|
|
if ok {
|
|
|
|
returnEvents = append(returnEvents, ev)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return returnEvents, nil
|
|
|
|
}
|
|
|
|
return streamEvents, nil
|
2017-04-13 15:56:46 +00:00
|
|
|
}
|
|
|
|
|
2020-09-15 10:17:46 +00:00
|
|
|
func (s *outputRoomEventsStatements) DeleteEventsForRoom(
|
|
|
|
ctx context.Context, txn *sql.Tx, roomID string,
|
|
|
|
) (err error) {
|
|
|
|
_, err = sqlutil.TxStmt(txn, s.deleteEventsForRoomStmt).ExecContext(ctx, roomID)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-02-21 16:12:22 +00:00
|
|
|
func (s *outputRoomEventsStatements) SelectContextEvent(ctx context.Context, txn *sql.Tx, roomID, eventID string) (id int, evt gomatrixserverlib.HeaderedEvent, err error) {
|
|
|
|
row := sqlutil.TxStmt(txn, s.selectContextEventStmt).QueryRowContext(ctx, roomID, eventID)
|
|
|
|
|
|
|
|
var eventAsString string
|
|
|
|
if err = row.Scan(&id, &eventAsString); err != nil {
|
|
|
|
return 0, evt, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = json.Unmarshal([]byte(eventAsString), &evt); err != nil {
|
|
|
|
return 0, evt, err
|
|
|
|
}
|
|
|
|
return id, evt, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *outputRoomEventsStatements) SelectContextBeforeEvent(
|
|
|
|
ctx context.Context, txn *sql.Tx, id int, roomID string, filter *gomatrixserverlib.RoomEventFilter,
|
|
|
|
) (evts []*gomatrixserverlib.HeaderedEvent, err error) {
|
|
|
|
rows, err := sqlutil.TxStmt(txn, s.selectContextBeforeEventStmt).QueryContext(
|
|
|
|
ctx, roomID, id, filter.Limit,
|
|
|
|
pq.StringArray(filter.Senders),
|
|
|
|
pq.StringArray(filter.NotSenders),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(filter.Types)),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(filter.NotTypes)),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2022-03-24 10:03:22 +00:00
|
|
|
defer internal.CloseAndLogIfError(ctx, rows, "rows.close() failed")
|
2022-02-21 16:12:22 +00:00
|
|
|
|
|
|
|
for rows.Next() {
|
|
|
|
var (
|
|
|
|
eventBytes []byte
|
|
|
|
evt *gomatrixserverlib.HeaderedEvent
|
|
|
|
)
|
|
|
|
if err = rows.Scan(&eventBytes); err != nil {
|
|
|
|
return evts, err
|
|
|
|
}
|
|
|
|
if err = json.Unmarshal(eventBytes, &evt); err != nil {
|
|
|
|
return evts, err
|
|
|
|
}
|
|
|
|
evts = append(evts, evt)
|
|
|
|
}
|
|
|
|
|
|
|
|
return evts, rows.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *outputRoomEventsStatements) SelectContextAfterEvent(
|
|
|
|
ctx context.Context, txn *sql.Tx, id int, roomID string, filter *gomatrixserverlib.RoomEventFilter,
|
|
|
|
) (lastID int, evts []*gomatrixserverlib.HeaderedEvent, err error) {
|
|
|
|
rows, err := sqlutil.TxStmt(txn, s.selectContextAfterEventStmt).QueryContext(
|
|
|
|
ctx, roomID, id, filter.Limit,
|
|
|
|
pq.StringArray(filter.Senders),
|
|
|
|
pq.StringArray(filter.NotSenders),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(filter.Types)),
|
|
|
|
pq.StringArray(filterConvertTypeWildcardToSQL(filter.NotTypes)),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2022-03-24 10:03:22 +00:00
|
|
|
defer internal.CloseAndLogIfError(ctx, rows, "rows.close() failed")
|
2022-02-21 16:12:22 +00:00
|
|
|
|
|
|
|
for rows.Next() {
|
|
|
|
var (
|
|
|
|
eventBytes []byte
|
|
|
|
evt *gomatrixserverlib.HeaderedEvent
|
|
|
|
)
|
|
|
|
if err = rows.Scan(&lastID, &eventBytes); err != nil {
|
|
|
|
return 0, evts, err
|
|
|
|
}
|
|
|
|
if err = json.Unmarshal(eventBytes, &evt); err != nil {
|
|
|
|
return 0, evts, err
|
|
|
|
}
|
|
|
|
evts = append(evts, evt)
|
|
|
|
}
|
|
|
|
|
|
|
|
return lastID, evts, rows.Err()
|
|
|
|
}
|
|
|
|
|
2020-01-23 17:51:10 +00:00
|
|
|
func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) {
|
|
|
|
var result []types.StreamEvent
|
2017-04-13 15:56:46 +00:00
|
|
|
for rows.Next() {
|
2017-05-17 15:21:27 +00:00
|
|
|
var (
|
2020-12-09 18:07:17 +00:00
|
|
|
eventID string
|
2020-01-23 17:51:10 +00:00
|
|
|
streamPos types.StreamPosition
|
|
|
|
eventBytes []byte
|
|
|
|
excludeFromSync bool
|
|
|
|
sessionID *int64
|
|
|
|
txnID *string
|
|
|
|
transactionID *api.TransactionID
|
2017-05-17 15:21:27 +00:00
|
|
|
)
|
2020-12-09 18:07:17 +00:00
|
|
|
if err := rows.Scan(&eventID, &streamPos, &eventBytes, &sessionID, &excludeFromSync, &txnID); err != nil {
|
2017-04-05 09:30:13 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2017-04-13 15:56:46 +00:00
|
|
|
// TODO: Handle redacted events
|
2020-03-19 12:07:01 +00:00
|
|
|
var ev gomatrixserverlib.HeaderedEvent
|
2020-12-09 18:07:17 +00:00
|
|
|
if err := ev.UnmarshalJSONWithEventID(eventBytes, eventID); err != nil {
|
2017-04-05 09:30:13 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2017-12-06 09:37:18 +00:00
|
|
|
|
2019-08-23 16:55:40 +00:00
|
|
|
if sessionID != nil && txnID != nil {
|
2017-12-06 09:37:18 +00:00
|
|
|
transactionID = &api.TransactionID{
|
2019-08-23 16:55:40 +00:00
|
|
|
SessionID: *sessionID,
|
2017-12-06 09:37:18 +00:00
|
|
|
TransactionID: *txnID,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-23 17:51:10 +00:00
|
|
|
result = append(result, types.StreamEvent{
|
2020-11-16 15:44:53 +00:00
|
|
|
HeaderedEvent: &ev,
|
2020-01-23 17:51:10 +00:00
|
|
|
StreamPosition: streamPos,
|
|
|
|
TransactionID: transactionID,
|
|
|
|
ExcludeFromSync: excludeFromSync,
|
2017-12-06 09:37:18 +00:00
|
|
|
})
|
2017-04-05 09:30:13 +00:00
|
|
|
}
|
2020-02-11 14:12:21 +00:00
|
|
|
return result, rows.Err()
|
2017-04-05 09:30:13 +00:00
|
|
|
}
|