sliding-sync/tests-integration/notifications_test.go

package syncv3

import (
    "encoding/json"
    "fmt"
    "testing"
    "time"

    "github.com/matrix-org/sliding-sync/sync2"
    "github.com/matrix-org/sliding-sync/sync3"
    "github.com/matrix-org/sliding-sync/testutils"
    "github.com/matrix-org/sliding-sync/testutils/m"
)
// Test that sort operations that favour notif counts always appear at the start of the list.
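// The test drives the proxy with two rooms sorted by notification level then recency:
// the room which receives a highlight ("bing") must move to, and stay at, the top of
// the list, while plain activity in the other room must not reorder it. Finally, the
// ordering must survive a proxy restart.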
func TestNotificationsOnTop(t *testing.T) {
    pqString := testutils.PrepareDBConnectionString()
    // setup code
    v2 := runTestV2Server(t)
    v3 := runTestServer(t, v2, pqString)
    defer v2.close()
    defer v3.close()
    bob := "@TestNotificationsOnTop_bob:localhost"
    bingRoomID := "!TestNotificationsOnTop_bing:localhost"
    noBingRoomID := "!TestNotificationsOnTop_nobing:localhost"
    latestTimestamp := time.Now()
    allRooms := []roomEvents{
        // this room on top when sorted by recency
        {
            roomID: noBingRoomID,
            events: append(createRoomState(t, alice, latestTimestamp), []json.RawMessage{
                testutils.NewStateEvent(
                    t, "m.room.member", bob, bob, map[string]interface{}{"membership": "join", "displayname": "Bob"},
                    testutils.WithTimestamp(latestTimestamp.Add(5*time.Second)),
                ),
            }...),
        },
        {
            roomID: bingRoomID,
            events: append(createRoomState(t, alice, latestTimestamp), []json.RawMessage{
                testutils.NewStateEvent(
                    t, "m.room.member", bob, bob, map[string]interface{}{"membership": "join", "displayname": "Bob"},
                    testutils.WithTimestamp(latestTimestamp),
                ),
            }...),
        },
    }
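    // NB: the nobing room has the newer membership event (latestTimestamp+5s), so under
    // a pure recency sort it sits above the bing room until a highlight arrives.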
    v2.addAccount(t, alice, aliceToken)
    v2.queueResponse(alice, sync2.SyncResponse{
        Rooms: sync2.SyncRoomsResponse{
            Join: v2JoinTimeline(allRooms...),
        },
    })
    // connect and make sure we get nobing, bing
    syncRequestBody := sync3.Request{
        Lists: map[string]sync3.RequestList{
            "a": {
                Ranges: sync3.SliceRanges{
                    [2]int64{0, int64(len(allRooms) - 1)}, // all rooms
                },
                RoomSubscription: sync3.RoomSubscription{
                    TimelineLimit: int64(100),
                },
                // prefer highlights/notifs/rest, and group them by recency not counts
                Sort: []string{sync3.SortByNotificationLevel, sync3.SortByRecency},
            },
        },
    }
    res := v3.mustDoV3Request(t, aliceToken, syncRequestBody)
    m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(len(allRooms)), m.MatchV3Ops(
        m.MatchV3SyncOp(0, int64(len(allRooms)-1), []string{noBingRoomID, bingRoomID}),
    )))
    // send a bing message into the bing room, make sure it comes through and is on top
    bingEvent := testutils.NewEvent(t, "m.room.message", bob, map[string]interface{}{"body": "BING!"}, testutils.WithTimestamp(latestTimestamp.Add(1*time.Minute)))
    v2.queueResponse(alice, sync2.SyncResponse{
        Rooms: sync2.SyncRoomsResponse{
            Join: map[string]sync2.SyncV2JoinResponse{
                bingRoomID: {
                    UnreadNotifications: sync2.UnreadNotifications{
                        HighlightCount: ptr(1),
                    },
                    Timeline: sync2.TimelineResponse{
                        Events: []json.RawMessage{
                            bingEvent,
                        },
                    },
                },
            },
        },
    })
    v2.waitUntilEmpty(t, alice)
    res = v3.mustDoV3RequestWithPos(t, aliceToken, res.Pos, syncRequestBody)
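    // The bing room was previously at index 1; now that it has a highlight it must sort
    // above the nobing room, which the API expresses as DELETE(1) followed by INSERT(0).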
    m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(len(allRooms)),
        m.MatchV3Ops(m.MatchV3DeleteOp(1), m.MatchV3InsertOp(0, bingRoomID)),
    ), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
        bingRoomID: {
            m.MatchRoomHighlightCount(1),
        },
    }))
    // send a message into the nobing room; its position must not change due to our sort order
    noBingEvent := testutils.NewEvent(t, "m.room.message", bob, map[string]interface{}{"body": "no bing"}, testutils.WithTimestamp(latestTimestamp.Add(2*time.Minute)))
    v2.queueResponse(alice, sync2.SyncResponse{
        Rooms: sync2.SyncRoomsResponse{
            Join: map[string]sync2.SyncV2JoinResponse{
                noBingRoomID: {
                    Timeline: sync2.TimelineResponse{
                        Events: []json.RawMessage{
                            noBingEvent,
                        },
                    },
                },
            },
        },
    })
    v2.waitUntilEmpty(t, alice)
    res = v3.mustDoV3RequestWithPos(t, aliceToken, res.Pos, syncRequestBody)
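    // Notification level outranks recency, so the newer message in the nobing room
    // must not reorder the list: no list ops at all should be emitted.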
    m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(len(allRooms))),
        m.MatchNoV3Ops(),
    )
    // restart the server and sync from fresh again; it should still have the bing room on top
    v3.restart(t, v2, pqString)
    res = v3.mustDoV3Request(t, aliceToken, sync3.Request{
        Lists: map[string]sync3.RequestList{
            "a": {
                Ranges: sync3.SliceRanges{
                    [2]int64{0, int64(len(allRooms) - 1)}, // all rooms
                },
                RoomSubscription: sync3.RoomSubscription{
                    TimelineLimit: int64(100),
                },
                // prefer highlight count first, THEN eventually recency
                Sort: []string{sync3.SortByNotificationLevel, sync3.SortByRecency},
            },
        },
    })
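    // A fresh connection gets a single SYNC op covering the whole range; verify the room
    // order plus the counts and most recent timeline event for each room.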
    m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(len(allRooms)), m.MatchV3Ops(
        m.MatchV3SyncOpFn(func(op *sync3.ResponseOpRange) error {
            if len(op.RoomIDs) != len(allRooms) {
                return fmt.Errorf("want %d rooms, got %d", len(allRooms), len(op.RoomIDs))
            }
            err := allRooms[1].MatchRoom(op.RoomIDs[0],
                res.Rooms[op.RoomIDs[0]], // bing room is first
                m.MatchRoomHighlightCount(1),
                m.MatchRoomNotificationCount(0),
                m.MatchRoomTimelineMostRecent(1, []json.RawMessage{bingEvent}),
            )
            if err != nil {
                return err
            }
            err = allRooms[0].MatchRoom(op.RoomIDs[1],
                res.Rooms[op.RoomIDs[1]], // no bing room is second
                m.MatchRoomHighlightCount(0),
                m.MatchRoomNotificationCount(0),
                m.MatchRoomTimelineMostRecent(1, []json.RawMessage{noBingEvent}),
            )
            if err != nil {
                return err
            }
            return nil
        }),
    )))
}
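
// printSyncRequestJSON is a hypothetical debugging helper (not used by the test above)
// showing how one might inspect the wire form of the request this test builds. It is a
// minimal sketch assuming sync3.Request marshals via its JSON struct tags; per MSC3575
// the two sort constants are expected to serialize as "by_notification_level" and
// "by_recency", but that is an assumption, not something this test asserts.
func printSyncRequestJSON(t *testing.T, req sync3.Request) {
    // Pretty-print the request exactly as it would be sent to the proxy.
    b, err := json.MarshalIndent(req, "", "  ")
    if err != nil {
        t.Fatalf("failed to marshal sliding sync request: %s", err)
    }
    t.Logf("sliding sync request body:\n%s", b)
}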