Mirror of https://github.com/hoernschen/dendrite.git (synced 2024-12-26 15:08:28 +00:00)
sytest: Add remaining backfill tests (#1052)
One failed because of `null` instead of `[]` in HTTP responses. One failed because we hadn't implemented in-line filter limits!
parent 1414922026
commit 6091bf044f
4 changed files with 25 additions and 2 deletions
@@ -102,7 +102,7 @@ func Backfill(
 		}
 	}
 
-	var eventJSONs []json.RawMessage
+	eventJSONs := []json.RawMessage{}
 	for _, e := range gomatrixserverlib.ReverseTopologicalOrdering(
 		evs,
 		gomatrixserverlib.TopologicalOrderByPrevEvents,
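The one-line change above is the fix for the first failing test: Go's encoding/json marshals a nil slice as `null`, while an explicitly initialised empty slice marshals as `[]`, so a backfill response with no events now serialises to the empty JSON array sytest expects. A minimal standalone illustration (not dendrite code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A nil slice (the old `var eventJSONs []json.RawMessage`) marshals to null...
	var nilSlice []json.RawMessage
	out, _ := json.Marshal(nilSlice)
	fmt.Println(string(out)) // null

	// ...while an initialised empty slice marshals to an empty array.
	emptySlice := []json.RawMessage{}
	out, _ = json.Marshal(emptySlice)
	fmt.Println(string(out)) // []
}
```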
@@ -16,6 +16,7 @@ package sync
 
 import (
 	"context"
+	"encoding/json"
 	"net/http"
 	"strconv"
 	"time"
@@ -30,6 +31,14 @@ import (
 const defaultSyncTimeout = time.Duration(0)
 const defaultTimelineLimit = 20
 
+type filter struct {
+	Room struct {
+		Timeline struct {
+			Limit *int `json:"limit"`
+		} `json:"timeline"`
+	} `json:"room"`
+}
+
 // syncRequest represents a /sync request, with sensible defaults/sanity checks applied.
 type syncRequest struct {
 	ctx context.Context
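The new `filter` struct only models the part of a Matrix filter that matters here, and `Limit` is a `*int` rather than an `int` so a filter that omits the limit (nil pointer) can be told apart from one that supplies a value; the `!= nil` check in the next hunk only overrides `defaultTimelineLimit` when a value was actually given. A small standalone sketch of that distinction (not dendrite code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the filter struct added above: the pointer lets us tell
// "limit omitted" apart from "limit supplied".
type filter struct {
	Room struct {
		Timeline struct {
			Limit *int `json:"limit"`
		} `json:"timeline"`
	} `json:"room"`
}

func main() {
	var withLimit, withoutLimit filter
	_ = json.Unmarshal([]byte(`{"room":{"timeline":{"limit":5}}}`), &withLimit)
	_ = json.Unmarshal([]byte(`{"room":{}}`), &withoutLimit)

	fmt.Println(*withLimit.Room.Timeline.Limit)          // 5
	fmt.Println(withoutLimit.Room.Timeline.Limit == nil) // true: keep the default
}
```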
@@ -54,6 +63,17 @@ func newSyncRequest(req *http.Request, device authtypes.Device) (*syncRequest, e
 		}
 		since = &tok
 	}
+	timelineLimit := defaultTimelineLimit
+	// TODO: read from stored filters too
+	filterQuery := req.URL.Query().Get("filter")
+	if filterQuery != "" && filterQuery[0] == '{' {
+		// attempt to parse the timeline limit at least
+		var f filter
+		err := json.Unmarshal([]byte(filterQuery), &f)
+		if err == nil && f.Room.Timeline.Limit != nil {
+			timelineLimit = *f.Room.Timeline.Limit
+		}
+	}
 	// TODO: Additional query params: set_presence, filter
 	return &syncRequest{
 		ctx: req.Context(),
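Putting the two hunks together: the client may pass the filter inline as JSON in the `filter` query parameter, and anything that does not start with `{` (for example a stored filter ID, which the TODO notes is not read yet) falls back to the default limit. A self-contained sketch of that parsing path, using a hypothetical helper name rather than dendrite's actual function:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

const defaultTimelineLimit = 20

type filter struct {
	Room struct {
		Timeline struct {
			Limit *int `json:"limit"`
		} `json:"timeline"`
	} `json:"room"`
}

// timelineLimitFromQuery mirrors the logic added above: an inline JSON
// filter (starting with '{') may override the default timeline limit.
func timelineLimitFromQuery(query url.Values) int {
	limit := defaultTimelineLimit
	filterQuery := query.Get("filter")
	if filterQuery != "" && filterQuery[0] == '{' {
		var f filter
		if err := json.Unmarshal([]byte(filterQuery), &f); err == nil && f.Room.Timeline.Limit != nil {
			limit = *f.Room.Timeline.Limit
		}
	}
	return limit
}

func main() {
	u, _ := url.Parse(`/sync?filter={"room":{"timeline":{"limit":5}}}`)
	fmt.Println(timelineLimitFromQuery(u.Query())) // 5

	u, _ = url.Parse(`/sync?filter=my_stored_filter_id`)
	fmt.Println(timelineLimitFromQuery(u.Query())) // 20 (stored filters not read yet)
}
```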
@@ -61,7 +81,7 @@ func newSyncRequest(req *http.Request, device authtypes.Device) (*syncRequest, e
 		timeout: timeout,
 		since: since,
 		wantFullState: wantFullState,
-		limit: defaultTimelineLimit, // TODO: read from filter
+		limit: timelineLimit,
 		log: util.GetLogger(req.Context()),
 	}, nil
 }
@@ -59,6 +59,7 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *authtype
 		"userID": userID,
 		"since": syncReq.since,
 		"timeout": syncReq.timeout,
+		"limit": syncReq.limit,
 	})
 
 	currPos := rp.notifier.CurrentPosition()
@@ -281,3 +281,5 @@ An event which redacts itself should be ignored
 A pair of events which redact each other should be ignored
 Outbound federation can backfill events
 Inbound federation can backfill events
+Backfill checks the events requested belong to the room
+Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination