Add a sensible timeline_limit cap
To avoid pathological cases where large timeline limits are requested.
commit 6d6a2d6c08 (parent aa3ea8fe2e)
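In short, the commit clamps the client-supplied timeline_limit to a server-side maximum before any events are loaded. A minimal standalone sketch of that pattern, assuming only the check added in the diff below; the helper name is illustrative and not part of the commit:

package main

import "fmt"

// clampTimelineLimit mirrors the check this commit adds to LatestEventsInRooms:
// a maximum of 0 means "no cap", otherwise the requested limit is clamped.
// The function name is illustrative, not part of the commit.
func clampTimelineLimit(requested, maxLimit int) int {
	if maxLimit != 0 && requested > maxLimit {
		return maxLimit
	}
	return requested
}

func main() {
	fmt.Println(clampTimelineLimit(99999, 50)) // 50
	fmt.Println(clampTimelineLimit(10, 50))    // 10
	fmt.Println(clampTimelineLimit(99999, 0))  // 99999 (cap disabled)
}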
@@ -68,6 +68,7 @@ type Storage struct {
 	DeviceDataTable *DeviceDataTable
 	ReceiptTable *ReceiptTable
 	DB *sqlx.DB
+	MaxTimelineLimit int
 }
 
 func NewStorage(postgresURI string) *Storage {
@@ -102,6 +103,7 @@ func NewStorageWithDB(db *sqlx.DB, addPrometheusMetrics bool) *Storage {
 		DeviceDataTable: NewDeviceDataTable(db),
 		ReceiptTable: NewReceiptTable(db),
 		DB: db,
+		MaxTimelineLimit: 50,
 	}
 }
 
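NewStorageWithDB above wires in a default cap of 50. A brief sketch of how code elsewhere in the proxy could tune that cap after construction; only the constructor signature and the exported MaxTimelineLimit field come from this diff, while the package name, import path, and helper are assumptions for illustration:

// A minimal sketch, not part of the commit.
package storageconfig

import (
	"github.com/jmoiron/sqlx"

	"github.com/matrix-org/sliding-sync/state"
)

// newStorageWithCap builds a Storage and tunes the new cap after construction.
func newStorageWithCap(db *sqlx.DB, maxTimelineLimit int) *state.Storage {
	store := state.NewStorageWithDB(db, false) // constructor defaults the cap to 50
	store.MaxTimelineLimit = maxTimelineLimit  // 0 disables the cap entirely
	return store
}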
@@ -705,12 +707,15 @@ func (s *Storage) RoomStateAfterEventPosition(ctx context.Context, roomIDs []str
 // - in the given rooms
 // - that the user has permission to see
 // - with NIDs <= `to`.
-// Up to `limit` events are chosen per room.
+// Up to `limit` events are chosen per room. This limit may itself be capped according to MaxTimelineLimit.
 func (s *Storage) LatestEventsInRooms(userID string, roomIDs []string, to int64, limit int) (map[string]*LatestEvents, error) {
 	roomIDToRange, err := s.visibleEventNIDsBetweenForRooms(userID, roomIDs, 0, to)
 	if err != nil {
 		return nil, err
 	}
+	if s.MaxTimelineLimit != 0 && limit > s.MaxTimelineLimit {
+		limit = s.MaxTimelineLimit
+	}
 	result := make(map[string]*LatestEvents, len(roomIDs))
 	err = sqlutil.WithTransaction(s.Accumulator.db, func(txn *sqlx.Tx) error {
 		for roomID, r := range roomIDToRange {
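With the clamp in place, each room's timeline contains at most MaxTimelineLimit of its newest events. A small sketch of that selection, which is what the test below asserts via hundredEvents[50:]; the helper is illustrative, not part of the commit:

package main

import "fmt"

// newestN keeps the newest n items of a chronologically ordered timeline,
// matching what a capped `limit` means for a room's returned events.
func newestN(timeline []string, n int) []string {
	if n >= len(timeline) {
		return timeline
	}
	return timeline[len(timeline)-n:]
}

func main() {
	// A room with 100 events and an effective limit of 50 keeps events[50:].
	timeline := make([]string, 100)
	for i := range timeline {
		timeline[i] = fmt.Sprintf("msg %d", i)
	}
	capped := newestN(timeline, 50)
	fmt.Println(len(capped), capped[0]) // 50 msg 50
}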
@@ -1339,6 +1339,49 @@ func TestNumLiveBulk(t *testing.T) {
 	))
 }
 
+// Ensure that clients cannot just set timeline_limit: 99999 and DoS the server
+func TestSensibleLimitToTimelineLimit(t *testing.T) {
+	pqString := testutils.PrepareDBConnectionString()
+	// setup code
+	v2 := runTestV2Server(t)
+	v3 := runTestServer(t, v2, pqString)
+	defer v2.close()
+	defer v3.close()
+	roomID := "!a:localhost"
+
+	var hundredEvents = make([]json.RawMessage, 100)
+	for i := 0; i < 100; i++ {
+		hundredEvents[i] = testutils.NewEvent(t, "m.room.message", alice, map[string]any{
+			"msgtype": "m.text",
+			"body":    fmt.Sprintf("msg %d", i),
+		}, testutils.WithTimestamp(time.Now().Add(time.Second)))
+	}
+
+	v2.addAccount(t, alice, aliceToken)
+	v2.queueResponse(alice, sync2.SyncResponse{
+		Rooms: sync2.SyncRoomsResponse{
+			Join: v2JoinTimeline(roomEvents{
+				roomID: roomID,
+				state:  createRoomState(t, alice, time.Now()),
+				events: hundredEvents,
+			}),
+		},
+	})
+	res := v3.mustDoV3Request(t, aliceToken, sync3.Request{
+		Lists: map[string]sync3.RequestList{"a": {
+			Ranges: sync3.SliceRanges{
+				[2]int64{0, 10},
+			},
+			RoomSubscription: sync3.RoomSubscription{
+				TimelineLimit: 99999,
+			},
+		}},
+	})
+	m.MatchResponse(t, res, m.MatchList("a",
+		m.MatchV3Ops(m.MatchV3SyncOp(0, 0, []string{roomID})),
+	), m.MatchRoomSubscription(roomID, m.MatchRoomTimeline(hundredEvents[50:]))) // caps at 50
+}
+
 // Regression test for a thing which Synapse can sometimes send down sync v2.
 // See https://github.com/matrix-org/sliding-sync/issues/367
 // This would cause this room to not be processed at all, which is bad.