Modify sync transaction behaviour (#2758)

This now uses a transaction per stream, so that errors in one stream
don't propagate to another, and we therefore no longer need hacks to
reopen a new transaction after aborting a failed one.
Neil Alexander 2022-10-03 11:38:20 +01:00 committed by GitHub
parent d4710217f8
commit d32f60249d
11 changed files with 155 additions and 91 deletions
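
The pattern the commit message describes looks roughly like the sketch below. This is a hypothetical illustration over plain database/sql, not Dendrite's actual API: the StreamProvider interface and syncStreams helper are simplified stand-ins for the syncapi stream providers. The point is that each stream gets its own read-only transaction, so a failing stream is rolled back in isolation and merely reports its old position.

package streams

import (
	"context"
	"database/sql"
)

// StreamProvider is a hypothetical stand-in for a syncapi stream
// provider: advance one stream from `from`, reading through the
// given snapshot, and return the new stream position.
type StreamProvider interface {
	Name() string
	IncrementalSync(ctx context.Context, snapshot *sql.Tx, from int64) (int64, error)
}

// syncStreams gives every provider its own transaction, so a failure
// in one stream is rolled back in isolation and the other streams
// still run; there is no shared transaction to "reset" after an abort.
func syncStreams(ctx context.Context, db *sql.DB, providers []StreamProvider, from int64) map[string]int64 {
	positions := make(map[string]int64, len(providers))
	for _, p := range providers {
		positions[p.Name()] = from // default: report no progress
		tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
		if err != nil {
			continue // couldn't open a snapshot for this stream
		}
		to, err := p.IncrementalSync(ctx, tx, from)
		if err != nil {
			_ = tx.Rollback() // aborts only this stream's transaction
			continue
		}
		_ = tx.Commit() // read-only snapshot: commit just releases it
		positions[p.Name()] = to
	}
	return positions
}

With a single shared transaction, the first failed query leaves the transaction aborted (later queries fail, e.g. with sql.ErrTxDone), which is why the old code had to call snapshot.Reset() after every error; with one transaction per stream that recovery step disappears, as the diffs below show.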

@@ -54,7 +54,6 @@ func (p *AccountDataStreamProvider) IncrementalSync(
 	)
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.GetAccountDataInRange failed")
-		_ = snapshot.Reset()
 		return from
 	}

@@ -34,13 +34,11 @@ func (p *DeviceListStreamProvider) IncrementalSync(
 	to, _, err = internal.DeviceListCatchup(context.Background(), snapshot, p.keyAPI, p.rsAPI, req.Device.UserID, req.Response, from, to)
 	if err != nil {
 		req.Log.WithError(err).Error("internal.DeviceListCatchup failed")
-		_ = snapshot.Reset()
 		return from
 	}
 	err = internal.DeviceOTKCounts(req.Context, p.keyAPI, req.Device.UserID, req.Device.ID, req.Response)
 	if err != nil {
 		req.Log.WithError(err).Error("internal.DeviceListCatchup failed")
-		_ = snapshot.Reset()
 		return from
 	}

@@ -56,7 +56,6 @@ func (p *InviteStreamProvider) IncrementalSync(
 	)
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.InviteEventsInRange failed")
-		_ = snapshot.Reset()
 		return from
 	}

@@ -46,7 +46,6 @@ func (p *NotificationDataStreamProvider) IncrementalSync(
 	countsByRoom, err := snapshot.GetUserUnreadNotificationCountsForRooms(ctx, req.Device.UserID, req.Rooms)
 	if err != nil {
 		req.Log.WithError(err).Error("GetUserUnreadNotificationCountsForRooms failed")
-		_ = snapshot.Reset()
 		return from
 	}

@@ -75,7 +75,6 @@ func (p *PDUStreamProvider) CompleteSync(
 	joinedRoomIDs, err := snapshot.RoomIDsWithMembership(ctx, req.Device.UserID, gomatrixserverlib.Join)
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.RoomIDsWithMembership failed")
-		_ = snapshot.Reset()
 		return from
 	}
@@ -102,10 +101,10 @@ func (p *PDUStreamProvider) CompleteSync(
 		)
 		if jerr != nil {
 			req.Log.WithError(jerr).Error("p.getJoinResponseForCompleteSync failed")
-			if err = snapshot.Reset(); err != nil {
+			if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone {
 				return from
 			}
-			continue // return from
+			continue
 		}
 		req.Response.Rooms.Join[roomID] = *jr
 		req.Rooms[roomID] = gomatrixserverlib.Join
@@ -115,7 +114,6 @@ func (p *PDUStreamProvider) CompleteSync(
 	peeks, err := snapshot.PeeksInRange(ctx, req.Device.UserID, req.Device.ID, r)
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.PeeksInRange failed")
-		_ = snapshot.Reset()
 		return from
 	}
 	for _, peek := range peeks {
@@ -126,10 +124,10 @@ func (p *PDUStreamProvider) CompleteSync(
 			)
 			if err != nil {
 				req.Log.WithError(err).Error("p.getJoinResponseForCompleteSync failed")
-				if err = snapshot.Reset(); err != nil {
+				if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone {
 					return from
 				}
-				continue // return from
+				continue
 			}
 			req.Response.Rooms.Peek[peek.RoomID] = *jr
 		}
@@ -160,14 +158,12 @@ func (p *PDUStreamProvider) IncrementalSync(
 	if req.WantFullState {
 		if stateDeltas, syncJoinedRooms, err = snapshot.GetStateDeltasForFullStateSync(ctx, req.Device, r, req.Device.UserID, &stateFilter); err != nil {
 			req.Log.WithError(err).Error("p.DB.GetStateDeltasForFullStateSync failed")
-			_ = snapshot.Reset()
-			return
+			return from
 		}
 	} else {
 		if stateDeltas, syncJoinedRooms, err = snapshot.GetStateDeltas(ctx, req.Device, r, req.Device.UserID, &stateFilter); err != nil {
 			req.Log.WithError(err).Error("p.DB.GetStateDeltas failed")
-			_ = snapshot.Reset()
-			return
+			return from
 		}
 	}
@@ -181,7 +177,6 @@ func (p *PDUStreamProvider) IncrementalSync(
 	if err = p.addIgnoredUsersToFilter(ctx, snapshot, req, &eventFilter); err != nil {
 		req.Log.WithError(err).Error("unable to update event filter with ignored users")
-		_ = snapshot.Reset()
 	}
 
 	newPos = from
@@ -201,13 +196,10 @@ func (p *PDUStreamProvider) IncrementalSync(
 		var pos types.StreamPosition
 		if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req.Response); err != nil {
 			req.Log.WithError(err).Error("d.addRoomDeltaToResponse failed")
-			if err == context.DeadlineExceeded || err == context.Canceled {
+			if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone {
 				return newPos
 			}
-			if err = snapshot.Reset(); err != nil {
-				return from
-			}
-			continue // return to
+			continue
 		}
 		// Reset the position, as it is only for the special case of newly joined rooms
 		if delta.NewlyJoined {
@@ -307,7 +299,6 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 	events, err := applyHistoryVisibilityFilter(ctx, snapshot, p.rsAPI, delta.RoomID, device.UserID, eventFilter.Limit, recentEvents)
 	if err != nil {
 		logrus.WithError(err).Error("unable to apply history visibility filter")
-		_ = snapshot.Reset()
 	}
 
 	if len(delta.StateEvents) > 0 {

@@ -67,7 +67,6 @@ func (p *PresenceStreamProvider) IncrementalSync(
 	presences, err := snapshot.PresenceAfter(ctx, from, gomatrixserverlib.EventFilter{Limit: 1000})
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.PresenceAfter failed")
-		_ = snapshot.Reset()
 		return from
 	}

@@ -52,7 +52,6 @@ func (p *ReceiptStreamProvider) IncrementalSync(
 	lastPos, receipts, err := snapshot.RoomReceiptsAfter(ctx, joinedRooms, from)
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.RoomReceiptsAfter failed")
-		_ = snapshot.Reset()
 		return from
 	}

@@ -44,7 +44,6 @@ func (p *SendToDeviceStreamProvider) IncrementalSync(
 	lastPos, events, err := snapshot.SendToDeviceUpdatesForSync(req.Context, req.Device.UserID, req.Device.ID, from, to)
 	if err != nil {
 		req.Log.WithError(err).Error("p.DB.SendToDeviceUpdatesForSync failed")
-		_ = snapshot.Reset()
 		return from
 	}