package syncv3

import (
	"encoding/json"
	"strings"
	"testing"
	"time"

	"github.com/matrix-org/sliding-sync/sync2"
	"github.com/matrix-org/sliding-sync/sync3"
	"github.com/matrix-org/sliding-sync/testutils"
	"github.com/matrix-org/sliding-sync/testutils/m"
	"github.com/tidwall/sjson"
)

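// TestListsAsKeys checks that lists are keyed by name and keep their filters across requests:
// after registering an encrypted-only list, bumping both rooms and sending a request with no
// list changes should still only return the encrypted room.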
func TestListsAsKeys(t *testing.T) {
	boolTrue := true
	rig := NewTestRig(t)
	defer rig.Finish()
	encryptedRoomID := "!TestListsAsKeys_encrypted:localhost"
	unencryptedRoomID := "!TestListsAsKeys_unencrypted:localhost"
	rig.SetupV2RoomsForUser(t, alice, NoFlush, map[string]RoomDescriptor{
		encryptedRoomID: {
			IsEncrypted: true,
		},
		unencryptedRoomID: {
			IsEncrypted: false,
		},
	})
	aliceToken := rig.Token(alice)
	// make an encrypted room list, then bump both rooms and send a 2nd request with zero data
	// and make sure that we see the encrypted room message only (so the filter is still active)
	res := rig.V3.mustDoV3Request(t, aliceToken, sync3.Request{
		Lists: map[string]sync3.RequestList{
			"enc": {
				Ranges: sync3.SliceRanges{{0, 20}},
				Filters: &sync3.RequestFilters{
					IsEncrypted: &boolTrue,
				},
			},
		},
	})
	m.MatchResponse(t, res, m.MatchLists(map[string][]m.ListMatcher{
		"enc": {
			m.MatchV3Count(1),
		},
	}), m.MatchRoomSubscription(encryptedRoomID))

	rig.FlushText(t, alice, encryptedRoomID, "bump encrypted")
	rig.FlushText(t, alice, unencryptedRoomID, "bump unencrypted")

	res = rig.V3.mustDoV3RequestWithPos(t, aliceToken, res.Pos, sync3.Request{})
	m.MatchResponse(t, res, m.MatchLists(map[string][]m.ListMatcher{
		"enc": {
			m.MatchV3Count(1),
		},
	}), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
		encryptedRoomID: {},
	}))
}

// Regression test for a cause of duplicate rooms in the room list.
// The pollers process unread counts _before_ events. They do this so that if counts bump from 0->1 we don't
// tell clients; instead we wait for the event and then tell them both at the same time atomically.
// This is desirable as it means we don't have phantom notifications. However, it doesn't work reliably
// in the proxy because one device poller can return the event in one /sync response and then the unread count
// arrives later on a different poller's sync response.
// This manifests itself as confusing, invalid DELETE/INSERT operations, causing rooms to be duplicated.
func TestUnreadCountMisordering(t *testing.T) {
	pqString := testutils.PrepareDBConnectionString()
	one := 1
	zero := 0
	// setup code
	v2 := runTestV2Server(t)
	v3 := runTestServer(t, v2, pqString)
	defer v2.close()
	defer v3.close()
	// Create 3 rooms with the following order, sorted by notification level
	// - A [1 unread count] most recent
	// - B [0 unread count]
	// - C [0 unread count]
	// Then send a new event in C -> [A,C,B] DELETE 2, INSERT 1 C
	// Then send unread count in C++ -> [C,A,B] DELETE 1, INSERT 0 C // this might be suppressed
	// Then send something unrelated which will cause a resort. This will cause a desync in lists between client/server.
	// Then send unread count in C-- -> [A,C,B] DELETE 0, INSERT 1 C <-- this makes no sense if the prev ops were suppressed.
	roomA := "!a:localhost"
	roomB := "!b:localhost"
	roomC := "!c:localhost"
	data := map[string]struct {
		latestTimestamp time.Time
		notifCount      int
	}{
		roomA: {
			latestTimestamp: time.Now().Add(20 * time.Second),
			notifCount:      1,
		},
		roomB: {
			latestTimestamp: time.Now().Add(30 * time.Second),
			notifCount:      0,
		},
		roomC: {
			latestTimestamp: time.Now().Add(10 * time.Second),
			notifCount:      0,
		},
	}
	var re []roomEvents
	for roomID, info := range data {
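		// Shadow the loop variable so that &info.notifCount below refers to this iteration's copy.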
		info := info
		re = append(re, roomEvents{
			roomID: roomID,
			state:  createRoomState(t, alice, time.Now()),
			events: []json.RawMessage{
				testutils.NewEvent(t, "m.room.message", alice, map[string]interface{}{"body": "latest msg"}, testutils.WithTimestamp(info.latestTimestamp)),
			},
			notifCount: &info.notifCount,
		})
	}
	v2.addAccount(t, alice, aliceToken)
	v2.queueResponse(alice, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(re...),
		},
	})

	res := v3.mustDoV3Request(t, aliceToken, sync3.Request{
		Lists: map[string]sync3.RequestList{
			"a": {
				Ranges: [][2]int64{{0, 5}},
				Sort:   []string{sync3.SortByNotificationLevel, sync3.SortByRecency},
			},
		},
	})
	m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(3), m.MatchV3Ops(
		m.MatchV3SyncOp(0, 2, []string{roomA, roomB, roomC}),
	))) // A,B,C SYNC

	// Then send a new event in C -> [A,C,B] DELETE 2, INSERT 1 C
	v2.queueResponse(alice, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(roomEvents{
				roomID: roomC,
				events: []json.RawMessage{
					testutils.NewEvent(t, "m.room.message", alice, map[string]interface{}{"body": "bump"}, testutils.WithTimestamp(time.Now().Add(time.Second*40))),
				},
			}),
		},
	})
	v2.waitUntilEmpty(t, aliceToken)
	res = v3.mustDoV3RequestWithPos(t, aliceToken, res.Pos, sync3.Request{})
	m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(3), m.MatchV3Ops(
		m.MatchV3DeleteOp(2), m.MatchV3InsertOp(1, roomC),
	)))

	// Then send unread count in C++ -> [C,A,B] DELETE 1, INSERT 0 C // this might be suppressed
	v2.queueResponse(alice, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(roomEvents{
				roomID:     roomC,
				notifCount: &one,
			}),
		},
	})
	v2.waitUntilEmpty(t, aliceToken)

	// Then send something unrelated which will cause a resort. This will cause a desync in lists between client/server.
	// This is unrelated because it doesn't affect sort position: B has same timestamp
	v2.queueResponse(alice, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(roomEvents{
				roomID: roomB,
				events: []json.RawMessage{
					testutils.NewEvent(t, "m.room.message", alice, map[string]interface{}{"body": "bump 2"}, testutils.WithTimestamp(data[roomB].latestTimestamp)),
				},
			}),
		},
	})
	v2.waitUntilEmpty(t, aliceToken)

	// Then send unread count in C-- -> [A,C,B] <-- this ends up being DEL 0, INS 1 C which is just wrong if we suppressed earlier.
	v2.queueResponse(alice, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(roomEvents{
				roomID:     roomC,
				notifCount: &zero,
			}),
		},
	})
	v2.waitUntilEmpty(t, aliceToken)

	res = v3.mustDoV3RequestWithPos(t, aliceToken, res.Pos, sync3.Request{})
	m.MatchResponse(t, res, m.MatchList("a", m.MatchV3Count(3), m.MatchV3Ops(
		m.MatchV3DeleteOp(1), m.MatchV3InsertOp(0, roomC), // the unread count coming through
		m.MatchV3DeleteOp(0), m.MatchV3InsertOp(1, roomC), // the unread count decrease coming through
	)))
}

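// TestBumpEventTypesOnStartup checks that bump event types are honoured when the proxy has to
// load its state from the database after a restart, by comparing the room orderings returned
// for several different bump event type filters.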
func TestBumpEventTypesOnStartup(t *testing.T) {
	const room1ID = "!room1:localhost"
	const room2ID = "!room2:localhost"
	const room3ID = "!room3:localhost"

	// Create three rooms, with a one-second pause between each creation.
	ts := time.Now()
	state := createRoomState(t, alice, ts)
	r2State := createRoomState(t, alice, ts.Add(time.Second))
	r3State := createRoomState(t, alice, ts.Add(2*time.Second))
	ts = ts.Add(2 * time.Second)

	r1Timeline := []json.RawMessage{}
	r2Timeline := []json.RawMessage{}
	r3Timeline := []json.RawMessage{}

	steps := []struct {
		timeline *[]json.RawMessage
		event    json.RawMessage
	}{
		{
			timeline: &r1Timeline,
			event:    testutils.NewStateEvent(t, "m.room.topic", "", alice, map[string]interface{}{"topic": "potato"}, testutils.WithTimestamp(ts)),
		},
		{
			timeline: &r1Timeline,
			event:    testutils.NewMessageEvent(t, alice, "message in room 1", testutils.WithTimestamp(ts)),
		},
		{
			timeline: &r2Timeline,
			event:    testutils.NewMessageEvent(t, alice, "message in room 2", testutils.WithTimestamp(ts)),
		},
		{
			timeline: &r3Timeline,
			event:    testutils.NewMessageEvent(t, alice, "message in room 3", testutils.WithTimestamp(ts)),
		},
		{
			timeline: &r2Timeline,
			event:    testutils.NewStateEvent(t, "m.room.topic", "", alice, map[string]interface{}{"topic": "bananas"}, testutils.WithTimestamp(ts)),
		},
		{
			timeline: &r1Timeline,
			event:    testutils.NewStateEvent(t, "m.room.member", alice, alice, map[string]interface{}{"membership": "join", "displayname": "all ice"}, testutils.WithTimestamp(ts)),
		},
	}

	// Append events to the correct timeline. Add at least a second between
	// significant events, to ensure there aren't any timestamp clashes.
	for _, step := range steps {
		ts = ts.Add(time.Second)
		step.event = testutils.SetTimestamp(t, step.event, ts)
		*step.timeline = append(*step.timeline, step.event)
	}

	r1 := roomEvents{
		roomID: room1ID,
		name:   "room 1",
		state:  state,
		events: r1Timeline,
	}
	r2 := roomEvents{
		roomID: room2ID,
		name:   "room 2",
		state:  r2State,
		events: r2Timeline,
	}
	r3 := roomEvents{
		roomID: room3ID,
		name:   "room 3",
		state:  r3State,
		events: r3Timeline,
	}

	pqString := testutils.PrepareDBConnectionString()
	v2 := runTestV2Server(t)
	v3 := runTestServer(t, v2, pqString)
	defer v2.close()
	defer v3.close()

	t.Log("Prepare to tell the proxy about three rooms and events in them.")
	v2.addAccount(t, alice, aliceToken)
	v2.queueResponse(aliceToken, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(r1, r2, r3),
		},
	})

	t.Log("Alice requests a new sliding sync connection.")
	v3.mustDoV3Request(t, aliceToken, sync3.Request{
		Lists: map[string]sync3.RequestList{
			"a": {
				RoomSubscription: sync3.RoomSubscription{
					TimelineLimit: 20,
				},
				Ranges: sync3.SliceRanges{{0, 2}},
			},
		},
	})

	// Confirm that the poller polled.
	v2.waitUntilEmpty(t, aliceToken)

	t.Log("The proxy restarts.")
	v3.restart(t, v2, pqString)

	// Vary the bump event types, and compare the room order we get to what we expect.
	// The pertinent events are:
	// (1) create and join r1
	// (2) create and join r2
	// (3) create and join r3
	// (4) r1: topic set
	// (5) r1: message
	// (6) r2: message
	// (7) r3: message
	// (8) r2: topic
	// (9) r1: profile change

	cases := []struct {
		BumpEventTypes []string
		RoomIDs        []string
	}{
		{
			BumpEventTypes: []string{"m.room.message"},
			// r3 message (7), r2 message (6), r1 message (5).
			RoomIDs: []string{room3ID, room2ID, room1ID},
		},
		{
			BumpEventTypes: []string{"m.room.topic"},
			// r2 topic (8), r1 topic (4), r3 join (3).
			RoomIDs: []string{room2ID, room1ID, room3ID},
		},
		{
			BumpEventTypes: []string{},
			// r1 profile (9), r2 topic (8), r3 message (7)
			RoomIDs: []string{room1ID, room2ID, room3ID},
		},
		{
			BumpEventTypes: []string{"m.room.topic", "m.room.message"},
			// r2 topic (8), r3 message (7), r1 message (5)
			RoomIDs: []string{room2ID, room3ID, room1ID},
		},
		{
			// r1 profile (9), r3 join (3), r2 join (2)
			BumpEventTypes: []string{"m.room.member"},
			RoomIDs:        []string{room1ID, room3ID, room2ID},
		},
		{
			BumpEventTypes: []string{"com.example.doesnotexist"},
			// r3 join (3), r2 join (2), r1 join (1)
			RoomIDs: []string{room3ID, room2ID, room1ID},
		},
	}
	for _, testCase := range cases {
		t.Logf("Alice makes a new sync connection with bump events %v", testCase.BumpEventTypes)
		res := v3.mustDoV3Request(t, aliceToken, sync3.Request{
			Lists: map[string]sync3.RequestList{
				"list": {
					Ranges:         sync3.SliceRanges{{0, 2}},
					BumpEventTypes: testCase.BumpEventTypes,
				},
			},
		})
		t.Logf("Alice should see the three rooms in the order %v", testCase.RoomIDs)
		m.MatchResponse(t, res, m.MatchList("list",
			m.MatchV3Ops(m.MatchV3SyncOp(0, 2, testCase.RoomIDs)),
			m.MatchV3Count(3),
		))
	}
}

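// TestDeleteMSC4115Field checks that the proxy strips the MSC4115 membership field from the
// unsigned section of events, for both the stable and unstable field names.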
func TestDeleteMSC4115Field(t *testing.T) {
	t.Run("stable prefix", func(t *testing.T) {
		testDeleteMSC4115Field(t, "membership")
	})
	t.Run("unstable prefix", func(t *testing.T) {
		testDeleteMSC4115Field(t, "io.element.msc4115.membership")
	})
}

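// testDeleteMSC4115Field asserts that the given unsigned field is removed from both live
// timeline events and required_state events before they are returned to the client.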
func testDeleteMSC4115Field(t *testing.T, fieldName string) {
	rig := NewTestRig(t)
	defer rig.Finish()
	roomID := "!TestDeleteMSC4115Field:localhost"
	rig.SetupV2RoomsForUser(t, alice, NoFlush, map[string]RoomDescriptor{
		roomID: {},
	})
	aliceToken := rig.Token(alice)
	res := rig.V3.mustDoV3Request(t, aliceToken, sync3.Request{
		Lists: map[string]sync3.RequestList{
			"a": {
				Ranges: sync3.SliceRanges{{0, 20}},
				RoomSubscription: sync3.RoomSubscription{
					TimelineLimit: 10,
					RequiredState: [][2]string{{"m.room.name", "*"}},
				},
			},
		},
	})
	m.MatchResponse(t, res, m.MatchLists(map[string][]m.ListMatcher{
		"a": {
			m.MatchV3Count(1),
		},
	}), m.MatchRoomSubscription(roomID))

	// ensure live events remove the field.
	liveEvent := testutils.NewMessageEvent(t, alice, "live event", testutils.WithUnsigned(map[string]interface{}{
		fieldName: "join",
	}))
	liveEventWithoutMembership := make(json.RawMessage, len(liveEvent))
	copy(liveEventWithoutMembership, liveEvent)
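	// sjson treats "." as a path separator, so escape any dots in the field name to address
	// the literal key (e.g. the unstable prefix "io.element.msc4115.membership").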
	liveEventWithoutMembership, err := sjson.DeleteBytes(liveEventWithoutMembership, "unsigned."+strings.ReplaceAll(fieldName, ".", `\.`))
	if err != nil {
		t.Fatalf("failed to delete unsigned.membership field")
	}
	rig.FlushEvent(t, alice, roomID, liveEvent)

	res = rig.V3.mustDoV3RequestWithPos(t, aliceToken, res.Pos, sync3.Request{})
	m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
		roomID: {
			m.MatchRoomTimelineMostRecent(1, []json.RawMessage{liveEventWithoutMembership}),
		},
	}))

	// ensure state events remove the field.
	stateEvent := testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{
		"name": "Room Name",
	}, testutils.WithUnsigned(map[string]interface{}{
		fieldName: "join",
	}))
	stateEventWithoutMembership := make(json.RawMessage, len(stateEvent))
	copy(stateEventWithoutMembership, stateEvent)
	stateEventWithoutMembership, err = sjson.DeleteBytes(stateEventWithoutMembership, "unsigned."+strings.ReplaceAll(fieldName, ".", `\.`))
	if err != nil {
		t.Fatalf("failed to delete unsigned.membership field")
	}
	rig.V2.queueResponse(alice, sync2.SyncResponse{
		Rooms: sync2.SyncRoomsResponse{
			Join: v2JoinTimeline(roomEvents{
				roomID: roomID,
				state:  []json.RawMessage{stateEvent},
				events: []json.RawMessage{testutils.NewMessageEvent(t, alice, "dummy")},
			}),
		},
	})
	rig.V2.waitUntilEmpty(t, alice)

	// sending v2 state invalidates the SS connection so start again pre-emptively.
	res = rig.V3.mustDoV3Request(t, aliceToken, sync3.Request{
		Lists: map[string]sync3.RequestList{
			"a": {
				Ranges: sync3.SliceRanges{{0, 20}},
				RoomSubscription: sync3.RoomSubscription{
					TimelineLimit: 10,
					RequiredState: [][2]string{{"m.room.name", "*"}},
				},
			},
		},
	})
	m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
		roomID: {
			m.MatchRoomRequiredState([]json.RawMessage{stateEventWithoutMembership}),
		},
	}))
}