add extensions for typing and receipts; bugfixes and additional perf improvements

Features:
 - Add `typing` extension.
 - Add `receipts` extension.
 - Add comprehensive Prometheus metrics at `/metrics`, activated via `SYNCV3_PROM`.
 - Add `SYNCV3_PPROF` support.
 - Add `by_notification_level` sort order.
 - Add `include_old_rooms` support.
 - Add support for the `$ME` and `$LAZY` state keys.
 - Add correct filtering when `*,*` is used as `required_state` (see the example request after this list).
 - Add `num_live` to each room response to indicate how many timeline entries are live.
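
For illustration, here is a minimal sketch of a sliding sync request exercising the new options above: the `by_notification_level` sort, a `*,*` `required_state` entry, and the `$LAZY`/`$ME` state keys. The JSON shape and the unstable endpoint path follow MSC3575 as implemented around this release, so treat the exact values as an example rather than a reference.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Example values only: point this at your proxy and use a real access token.
	proxyURL := "http://localhost:8008"
	accessToken := "ACCESS_TOKEN"

	// One list sorted by notification level, requesting all current room state
	// (`*,*`), lazy-loaded membership ($LAZY) and the syncing user's own
	// membership event ($ME).
	body := `{
	  "lists": [{
	    "ranges": [[0, 10]],
	    "sort": ["by_notification_level", "by_recency"],
	    "timeline_limit": 10,
	    "required_state": [
	      ["*", "*"],
	      ["m.room.member", "$LAZY"],
	      ["m.room.member", "$ME"]
	    ]
	  }]
	}`

	// Unstable MSC3575 endpoint path; adjust if your deployment differs.
	req, err := http.NewRequest("POST",
		proxyURL+"/_matrix/client/unstable/org.matrix.msc3575/sync",
		bytes.NewReader([]byte(body)))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	preview, _ := io.ReadAll(io.LimitReader(resp.Body, 512))
	fmt.Println(resp.Status)
	fmt.Println(string(preview))
}
```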

Bug fixes:
 - Use a stricter comparison function on ranges: fixes an issue whereby unit tests fail on go1.19 due to a change in the sorting algorithm.
 - Send back an `errcode` on HTTP errors (e.g. expired sessions).
 - Remove `unsigned.txn_id` on insertion into the DB, otherwise users would see other users' txn IDs :(
 - Improve the range delta algorithm: previously it didn't handle cases like `[0,20] -> [20,30]` and would panic (see the sketch after this list).
 - Send HTTP 400 for invalid range requests.
 - Don't publish no-op unread counts, which just add extra noise.
 - Fix leaking DB connections which could eventually consume all available connections.
 - Ensure we always unblock `WaitUntilInitialSync`, even on invalid access tokens. Other code relies on `WaitUntilInitialSync()` actually returning at _some_ point: e.g. on startup we have N workers which bound the number of concurrent pollers made at any one time, and we must not hog a worker forever.
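
The range delta fix above is easiest to see with a small sketch. This is not the proxy's actual implementation, just a minimal illustration of computing which indexes leave and enter the window when a range moves, handling overlapping moves such as `[0,20] -> [20,30]` as well as fully disjoint ones without panicking:

```go
package main

import "fmt"

// Range is an inclusive [start, end] window of list indexes.
type Range [2]int64

func contains(r Range, i int64) bool { return i >= r[0] && i <= r[1] }

// delta returns the indexes that fall out of view and the indexes that come
// into view when the window moves from prev to next. It makes no assumption
// that the two ranges overlap.
func delta(prev, next Range) (removed, added []int64) {
	for i := prev[0]; i <= prev[1]; i++ {
		if !contains(next, i) {
			removed = append(removed, i)
		}
	}
	for i := next[0]; i <= next[1]; i++ {
		if !contains(prev, i) {
			added = append(added, i)
		}
	}
	return
}

func main() {
	removed, added := delta(Range{0, 20}, Range{20, 30})
	fmt.Println("removed:", removed) // 0..19
	fmt.Println("added:  ", added)   // 21..30
}
```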

Improvements:
 - Greatly improve startup times of sync3 handlers by improving `JoinedRoomsTracker`: a modest amount of data previously took ~28s to create the handler; it now takes 4s.
 - Massively improve initial v3 sync times by refactoring `JoinedRoomsTracker`: from ~47s to <1s.
 - Add `SlidingSyncUntil...` in tests to reduce races.
 - Tweak the API shape of `JoinedUsersForRoom` to reduce state block processing time for large rooms from 63s to 39s.
 - Add a trace task for initial syncs.
 - Include the proxy version in UA strings.
 - HTTP errors now wait 1s before returning to stop clients tight-looping on error.
 - Pending event buffer is now 2000.
 - Index the room ID first to cull the most events when returning timeline entries. Speeds up `SelectLatestEventsBetween` by a factor of 8.
 - Remove cancelled `m.room_key_request` events from the to-device inbox (see the matching sketch after this list). Cuts down the number of events in the inbox by ~94% for very large (20k+) inboxes and ~50% for moderately sized (200 event) inboxes. Adds book-keeping to remember the unacked to-device position for each client.
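
The `m.room_key_request` cleanup in the last item relies on the fact that a cancellation (`action: request_cancellation`) refers back to an earlier request with the same `request_id` and `requesting_device_id`. The sketch below shows that matching in memory using `gjson` (already a dependency); the proxy does this as part of its to-device inbox handling, so this is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

// cancelledKey identifies a key request by the fields a cancellation refers to.
type cancelledKey struct {
	requestingDevice string
	requestID        string
}

// removeCancelledKeyRequests drops every m.room_key_request whose request was
// later cancelled, along with the cancellation itself, and keeps everything else.
// events are raw to-device event JSON blobs.
func removeCancelledKeyRequests(events []string) []string {
	cancelled := map[cancelledKey]bool{}
	for _, ev := range events {
		if gjson.Get(ev, "type").Str != "m.room_key_request" {
			continue
		}
		if gjson.Get(ev, "content.action").Str == "request_cancellation" {
			cancelled[cancelledKey{
				requestingDevice: gjson.Get(ev, "content.requesting_device_id").Str,
				requestID:        gjson.Get(ev, "content.request_id").Str,
			}] = true
		}
	}
	var kept []string
	for _, ev := range events {
		if gjson.Get(ev, "type").Str == "m.room_key_request" {
			k := cancelledKey{
				requestingDevice: gjson.Get(ev, "content.requesting_device_id").Str,
				requestID:        gjson.Get(ev, "content.request_id").Str,
			}
			if cancelled[k] {
				continue // request was cancelled (or this is the cancellation itself): drop it
			}
		}
		kept = append(kept, ev)
	}
	return kept
}

func main() {
	inbox := []string{
		`{"type":"m.room_key_request","content":{"action":"request","request_id":"1","requesting_device_id":"DEV"}}`,
		`{"type":"m.room_key_request","content":{"action":"request_cancellation","request_id":"1","requesting_device_id":"DEV"}}`,
		`{"type":"m.room.encrypted","content":{}}`,
	}
	fmt.Println(len(removeCancelledKeyRequests(inbox))) // 1: only the encrypted event survives
}
```
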
Kegan Dougal 2022-12-14 18:53:55 +00:00
parent b90a18a62a
commit be8543a21a
91 changed files with 7183 additions and 935 deletions


@@ -104,3 +104,36 @@ jobs:
        SYNCV3_DB: user=postgres dbname=syncv3 sslmode=disable password=postgres host=localhost
        SYNCV3_SERVER: http://localhost:8008
        SYNCV3_SECRET: itsasecret
  element_web:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: "Build docker image"
        run: docker build -f Dockerfile -t ghcr.io/matrix-org/sliding-sync-proxy:ci .
      - uses: actions/checkout@v2
        with:
          repository: matrix-org/matrix-react-sdk
      - uses: actions/setup-node@v3
        with:
          cache: 'yarn'
      - name: Fetch layered build
        run: scripts/ci/layered.sh
      - name: Copy config
        run: cp element.io/develop/config.json config.json
        working-directory: ./element-web
      - name: Build
        env:
          CI_PACKAGE: true
        run: yarn build
        working-directory: ./element-web
      - name: "Run cypress tests"
        uses: cypress-io/github-action@v4.1.1
        with:
          browser: chrome
          start: npx serve -p 8080 ./element-web/webapp
          wait-on: 'http://localhost:8080'
          spec: cypress/e2e/sliding-sync/*.ts
        env:
          PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: true
          TMPDIR: ${{ runner.temp }}
          CYPRESS_SLIDING_SYNC_PROXY_TAG: 'ci'


@@ -1,6 +1,6 @@
## Architecture
_Current as of August 2022_
_Current as of December 2022_
At a high-level, clients (like Element) talk directly to their own homeserver (like Synapse) for every
single CS API endpoint as usual. Clients which opt-in to sliding sync will no longer call `/sync`, and


@@ -5,22 +5,28 @@ import (
"net/http"
_ "net/http/pprof"
"os"
"time"
"strings"
syncv3 "github.com/matrix-org/sync-v3"
"github.com/matrix-org/sync-v3/sync2"
"github.com/matrix-org/sync-v3/sync3/handler"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var GitCommit string
const version = "0.4.1"
const version = "0.98.0"
const (
// Possibly required fields depending on the mode
EnvServer = "SYNCV3_SERVER"
EnvDB = "SYNCV3_DB"
EnvBindAddr = "SYNCV3_BINDADDR"
EnvSecret = "SYNCV3_SECRET"
// Optional fields
EnvPPROF = "SYNCV3_PPROF"
EnvPrometheus = "SYNCV3_PROM"
EnvDebug = "SYNCV3_DEBUG"
)
var helpMsg = fmt.Sprintf(`
@@ -28,8 +34,10 @@ Environment var
%s Required. The destination homeserver to talk to (CS API HTTPS URL) e.g 'https://matrix-client.matrix.org'
%s Required. The postgres connection string: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
%s (Default: 0.0.0.0:8008) The interface and port to listen on.
%s Required. A secret to use to encrypt access tokens. Must remain the same for the lifetime of the database.
`, EnvServer, EnvDB, EnvBindAddr, EnvSecret)
%s Required. A secret to use to encrypt access tokens. Must remain the same for the lifetime of the database.
%s Default: unset. The bind addr for pprof debugging e.g ':6060'. If not set, does not listen.
%s Default: unset. The bind addr for Prometheus metrics, which will be accessible at /metrics at this address.
`, EnvServer, EnvDB, EnvBindAddr, EnvSecret, EnvPPROF, EnvPrometheus)
func defaulting(in, dft string) string {
if in == "" {
@@ -40,31 +48,50 @@ func defaulting(in, dft string) string {
func main() {
fmt.Printf("Sync v3 [%s] (%s)\n", version, GitCommit)
sync2.ProxyVersion = version
syncv3.Version = fmt.Sprintf("%s (%s)", version, GitCommit)
flagDestinationServer := os.Getenv(EnvServer)
flagPostgres := os.Getenv(EnvDB)
flagSecret := os.Getenv(EnvSecret)
flagBindAddr := defaulting(os.Getenv(EnvBindAddr), "0.0.0.0:8008")
if flagDestinationServer == "" || flagPostgres == "" || flagSecret == "" {
fmt.Print(helpMsg)
fmt.Printf("\n%s and %s and %s must be set\n", EnvServer, EnvBindAddr, EnvSecret)
os.Exit(1)
args := map[string]string{
EnvServer: os.Getenv(EnvServer),
EnvDB: os.Getenv(EnvDB),
EnvSecret: os.Getenv(EnvSecret),
EnvBindAddr: defaulting(os.Getenv(EnvBindAddr), "0.0.0.0:8008"),
EnvPPROF: os.Getenv(EnvPPROF),
EnvPrometheus: os.Getenv(EnvPrometheus),
EnvDebug: os.Getenv(EnvDebug),
}
requiredEnvVars := []string{EnvServer, EnvDB, EnvSecret, EnvBindAddr}
for _, requiredEnvVar := range requiredEnvVars {
if args[requiredEnvVar] == "" {
fmt.Print(helpMsg)
fmt.Printf("\n%s is not set", requiredEnvVar)
fmt.Printf("\n%s must be set\n", strings.Join(requiredEnvVars, ", "))
os.Exit(1)
}
}
// pprof
go func() {
if err := http.ListenAndServe(":6060", nil); err != nil {
panic(err)
}
}()
h, err := handler.NewSync3Handler(&sync2.HTTPClient{
Client: &http.Client{
Timeout: 5 * time.Minute,
},
DestinationServer: flagDestinationServer,
}, flagPostgres, flagSecret, os.Getenv("SYNCV3_DEBUG") == "1")
if err != nil {
panic(err)
if args[EnvPPROF] != "" {
go func() {
fmt.Printf("Starting pprof listener on %s\n", args[EnvPPROF])
if err := http.ListenAndServe(args[EnvPPROF], nil); err != nil {
panic(err)
}
}()
}
go h.StartV2Pollers()
syncv3.RunSyncV3Server(h, flagBindAddr, flagDestinationServer)
if args[EnvPrometheus] != "" {
go func() {
fmt.Printf("Starting prometheus listener on %s\n", args[EnvPrometheus])
http.Handle("/metrics", promhttp.Handler())
if err := http.ListenAndServe(args[EnvPrometheus], nil); err != nil {
panic(err)
}
}()
}
h2, h3 := syncv3.Setup(args[EnvServer], args[EnvDB], args[EnvSecret], syncv3.Opts{
Debug: args[EnvDebug] == "1",
AddPrometheusMetrics: args[EnvPrometheus] != "",
})
go h2.StartV2Pollers()
syncv3.RunSyncV3Server(h3, args[EnvBindAddr], args[EnvServer])
select {} // block forever
}
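
With the wiring above, both optional listeners serve Go's default mux: the blank `net/http/pprof` import registers the `/debug/pprof/` handlers and `promhttp.Handler()` is mounted at `/metrics`, and each listener only starts when its env var is set. A quick smoke test might look like the following sketch (the bind addresses are examples, not defaults):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// Assumes the proxy was started with, e.g., SYNCV3_PPROF=":6060" and
// SYNCV3_PROM=":2112"; both are unset by default.
func main() {
	for _, url := range []string{
		"http://localhost:6060/debug/pprof/", // pprof index, registered by the net/http/pprof import
		"http://localhost:2112/metrics",      // Prometheus exposition endpoint
	} {
		resp, err := http.Get(url)
		if err != nil {
			fmt.Println(url, "unreachable:", err)
			continue
		}
		preview, _ := io.ReadAll(io.LimitReader(resp.Body, 120))
		resp.Body.Close()
		fmt.Printf("%s -> %d\n%s\n", url, resp.StatusCode, preview)
	}
}
```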

go.mod (2 lines changed)

@@ -11,10 +11,10 @@ require (
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 // indirect
github.com/matrix-org/gomatrixserverlib v0.0.0-20211026114500-ddecab880266
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/rs/zerolog v1.21.0
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/tidwall/gjson v1.10.2
github.com/tidwall/sjson v1.2.3
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 // indirect
)

go.sum (429 lines changed)

@@ -1,26 +1,161 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ReneKroon/ttlcache/v2 v2.8.1 h1:0Exdyt5+vEsdRoFO1T7qDIYM3gq/ETbeYV+vjgcPxZk=
github.com/ReneKroon/ttlcache/v2 v2.8.1/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk=
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -40,23 +175,63 @@ github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335M
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg=
github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.21.0 h1:Q3vdXlfLNT+OftyBHsU0Y445MD+8m8axjKgf2si0QcM=
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -70,59 +245,219 @@ github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso
github.com/tidwall/sjson v1.0.3/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
github.com/tidwall/sjson v1.2.3 h1:5+deguEhHSEjmuICXZ21uSSsXotWMA0orU783+Z7Cp8=
github.com/tidwall/sjson v1.2.3/go.mod h1:5WdjKx3AQMvCJ4RG6/2UYT7dLrGvJUV1x4jdTAyGvZs=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@@ -131,15 +466,109 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=


@ -21,6 +21,8 @@ type data struct {
txnID string
numToDeviceEvents int
numGlobalAccountData int
numChangedDevices int
numLeftDevices int
}
// prepare a request context so it can contain syncv3 info
@ -43,7 +45,10 @@ func SetRequestContextUserID(ctx context.Context, userID string) {
da.userID = userID
}
func SetRequestContextResponseInfo(ctx context.Context, since, next int64, numRooms int, txnID string, numToDeviceEvents, numGlobalAccountData int) {
func SetRequestContextResponseInfo(
ctx context.Context, since, next int64, numRooms int, txnID string, numToDeviceEvents, numGlobalAccountData int,
numChangedDevices, numLeftDevices int,
) {
d := ctx.Value(ctxData)
if d == nil {
return
@ -55,6 +60,8 @@ func SetRequestContextResponseInfo(ctx context.Context, since, next int64, numRo
da.txnID = txnID
da.numToDeviceEvents = numToDeviceEvents
da.numGlobalAccountData = numGlobalAccountData
da.numChangedDevices = numChangedDevices
da.numLeftDevices = numLeftDevices
}
func DecorateLogger(ctx context.Context, l *zerolog.Event) *zerolog.Event {
@ -84,5 +91,11 @@ func DecorateLogger(ctx context.Context, l *zerolog.Event) *zerolog.Event {
if da.numGlobalAccountData > 0 {
l = l.Int("ag", da.numGlobalAccountData)
}
if da.numChangedDevices > 0 {
l = l.Int("dl-c", da.numChangedDevices)
}
if da.numLeftDevices > 0 {
l = l.Int("dl-l", da.numLeftDevices)
}
return l
}

internal/device_data.go (new file, 88 lines)

@ -0,0 +1,88 @@
package internal
import (
"sync"
)
// DeviceData contains useful data for this user's device. This list can be expanded without prompting
// schema changes. These values are upserted into the database and persisted forever.
type DeviceData struct {
// Contains the latest device_one_time_keys_count values.
// Set whenever this field arrives down the v2 poller, and it replaces what was previously there.
OTKCounts map[string]int `json:"otk"`
// Contains the latest device_unused_fallback_key_types value
// Set whenever this field arrives down the v2 poller, and it replaces what was previously there.
FallbackKeyTypes []string `json:"fallback"`
DeviceLists DeviceLists `json:"dl"`
UserID string
DeviceID string
}
type UserDeviceKey struct {
UserID string
DeviceID string
}
type DeviceDataMap struct {
deviceDataMu *sync.Mutex
deviceDataMap map[UserDeviceKey]*DeviceData
Pos int64
}
func NewDeviceDataMap(startPos int64, devices []DeviceData) *DeviceDataMap {
ddm := &DeviceDataMap{
deviceDataMu: &sync.Mutex{},
deviceDataMap: make(map[UserDeviceKey]*DeviceData),
Pos: startPos,
}
for i, dd := range devices {
ddm.deviceDataMap[UserDeviceKey{
UserID: dd.UserID,
DeviceID: dd.DeviceID,
}] = &devices[i]
}
return ddm
}
func (d *DeviceDataMap) Get(userID, deviceID string) *DeviceData {
key := UserDeviceKey{
UserID: userID,
DeviceID: deviceID,
}
d.deviceDataMu.Lock()
defer d.deviceDataMu.Unlock()
dd, ok := d.deviceDataMap[key]
if !ok {
return nil
}
return dd
}
func (d *DeviceDataMap) Update(dd DeviceData) DeviceData {
key := UserDeviceKey{
UserID: dd.UserID,
DeviceID: dd.DeviceID,
}
d.deviceDataMu.Lock()
defer d.deviceDataMu.Unlock()
existing, ok := d.deviceDataMap[key]
if !ok {
existing = &DeviceData{
UserID: dd.UserID,
DeviceID: dd.DeviceID,
}
}
if dd.OTKCounts != nil {
existing.OTKCounts = dd.OTKCounts
}
if dd.FallbackKeyTypes != nil {
existing.FallbackKeyTypes = dd.FallbackKeyTypes
}
existing.DeviceLists = existing.DeviceLists.Combine(dd.DeviceLists)
d.deviceDataMap[key] = existing
return *existing
}
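A minimal usage sketch of DeviceDataMap, assuming the github.com/matrix-org/sync-v3/internal import path used by the other new files in this commit; the user IDs and device names are illustrative only. Partial updates are merged field by field, so an update carrying only fallback key types does not clobber previously stored OTK counts:

package main

import (
	"fmt"

	"github.com/matrix-org/sync-v3/internal"
)

func main() {
	// start with an empty in-memory map at stream position 0
	ddm := internal.NewDeviceDataMap(0, nil)

	// a poller delivers fresh OTK counts for alice's device
	ddm.Update(internal.DeviceData{
		UserID:    "@alice:example.org",
		DeviceID:  "ALICEDEVICE",
		OTKCounts: map[string]int{"signed_curve25519": 50},
	})

	// a later partial update only touches fallback key types; the OTK counts
	// from the previous update are preserved by Update
	merged := ddm.Update(internal.DeviceData{
		UserID:           "@alice:example.org",
		DeviceID:         "ALICEDEVICE",
		FallbackKeyTypes: []string{"signed_curve25519"},
	})
	fmt.Println(merged.OTKCounts, merged.FallbackKeyTypes)

	// Get returns nil for unknown user|device pairs
	if ddm.Get("@bob:example.org", "BOBDEVICE") == nil {
		fmt.Println("no data for bob yet")
	}
}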

internal/device_lists.go (new file, 61 lines)

@ -0,0 +1,61 @@
package internal
const (
DeviceListChanged = 1
DeviceListLeft = 2
)
type DeviceLists struct {
// map user_id -> DeviceList enum
New map[string]int `json:"n"`
Sent map[string]int `json:"s"`
}
func (dl DeviceLists) Combine(newer DeviceLists) DeviceLists {
n := dl.New
if n == nil {
n = make(map[string]int)
}
for k, v := range newer.New {
n[k] = v
}
s := dl.Sent
if s == nil {
s = make(map[string]int)
}
for k, v := range newer.Sent {
s[k] = v
}
return DeviceLists{
New: n,
Sent: s,
}
}
func ToDeviceListChangesMap(changed, left []string) map[string]int {
if len(changed) == 0 && len(left) == 0 {
return nil
}
m := make(map[string]int)
for _, userID := range changed {
m[userID] = DeviceListChanged
}
for _, userID := range left {
m[userID] = DeviceListLeft
}
return m
}
func DeviceListChangesArrays(m map[string]int) (changed, left []string) {
changed = make([]string, 0)
left = make([]string, 0)
for userID, state := range m {
switch state {
case DeviceListChanged:
changed = append(changed, userID)
case DeviceListLeft:
left = append(left, userID)
}
}
return
}
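A short sketch of how these helpers compose, under the same assumed import path and with placeholder user IDs: two partial DeviceLists deltas are merged with Combine and then converted back into the changed/left arrays that a sync response needs:

package main

import (
	"fmt"

	"github.com/matrix-org/sync-v3/internal"
)

func main() {
	// first delta: alice's device list changed
	first := internal.DeviceLists{
		New: internal.ToDeviceListChangesMap([]string{"@alice:example.org"}, nil),
	}
	// second delta: bob left a shared encrypted room
	second := internal.DeviceLists{
		New: internal.ToDeviceListChangesMap(nil, []string{"@bob:example.org"}),
	}

	// Combine merges the newer map into the older one, keyed by user ID
	merged := first.Combine(second)

	// convert back into the changed/left arrays for the response
	changed, left := internal.DeviceListChangesArrays(merged.New)
	fmt.Println(changed, left) // [@alice:example.org] [@bob:example.org]
}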


@ -17,6 +17,7 @@ var logger = zerolog.New(os.Stdout).With().Timestamp().Logger().Output(zerolog.C
type HandlerError struct {
StatusCode int
Err error
ErrCode string
}
func (e *HandlerError) Error() string {
@ -24,15 +25,27 @@ func (e *HandlerError) Error() string {
}
type jsonError struct {
Err string `json:"error"`
Err string `json:"error"`
Code string `json:"errcode,omitempty"`
}
func (e HandlerError) JSON() []byte {
je := jsonError{e.Error()}
je := jsonError{
Err: e.Error(),
Code: e.ErrCode,
}
b, _ := json.Marshal(je)
return b
}
func ExpiredSessionError() *HandlerError {
return &HandlerError{
StatusCode: 400,
Err: fmt.Errorf("session expired"),
ErrCode: "M_UNKNOWN_POS",
}
}
// Assert that the expression is true, similar to assert() in C. If expr is false, print or panic.
//
// If expr is false and SYNCV3_DEBUG=1 then the program panics.
@ -44,9 +57,12 @@ func (e HandlerError) JSON() []byte {
// whenever a programming or logic error occurs.
//
// The msg provided should be the expectation of the assert e.g:
// Assert("list is not empty", len(list) > 0)
//
// Assert("list is not empty", len(list) > 0)
//
// Which then produces:
// assertion failed: list is not empty
//
// assertion failed: list is not empty
func Assert(msg string, expr bool) {
if expr {
return
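A sketch of what the new errcode plumbing produces on the wire, assuming HandlerError and ExpiredSessionError are exported from the internal package like the neighbouring files in this commit; the exact error string depends on HandlerError.Error():

package main

import (
	"fmt"

	"github.com/matrix-org/sync-v3/internal"
)

func main() {
	// an expired sliding sync session now carries a machine-readable errcode
	// alongside the human-readable error string
	herr := internal.ExpiredSessionError()
	fmt.Println(herr.StatusCode)     // 400
	fmt.Println(string(herr.JSON())) // e.g. {"error":"...","errcode":"M_UNKNOWN_POS"}
}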


@ -8,66 +8,6 @@ import (
"strings"
)
type RequiredStateMap struct {
eventTypesWithWildcardStateKeys map[string]struct{}
stateKeysForWildcardEventType []string
eventTypeToStateKeys map[string][]string
allState bool
}
func NewRequiredStateMap(eventTypesWithWildcardStateKeys map[string]struct{},
stateKeysForWildcardEventType []string,
eventTypeToStateKeys map[string][]string,
allState bool) *RequiredStateMap {
return &RequiredStateMap{
eventTypesWithWildcardStateKeys: eventTypesWithWildcardStateKeys,
stateKeysForWildcardEventType: stateKeysForWildcardEventType,
eventTypeToStateKeys: eventTypeToStateKeys,
allState: allState,
}
}
func (rsm *RequiredStateMap) Include(evType, stateKey string) bool {
if rsm.allState {
return true
}
// check if we should include this event due to wildcard event types
for _, sk := range rsm.stateKeysForWildcardEventType {
if sk == stateKey || sk == "*" {
return true
}
}
// check if we should include this event due to wildcard state keys
for et := range rsm.eventTypesWithWildcardStateKeys {
if et == evType {
return true
}
}
// check if we should include this event due to exact type/state key match
for _, sk := range rsm.eventTypeToStateKeys[evType] {
if sk == stateKey {
return true
}
}
return false
}
// work out what to ask the storage layer: if we have wildcard event types we need to pull all
// room state and cannot only pull out certain event types. If we have wildcard state keys we
// need to use an empty list for state keys.
func (rsm *RequiredStateMap) QueryStateMap() map[string][]string {
queryStateMap := make(map[string][]string)
if len(rsm.stateKeysForWildcardEventType) == 0 { // no wildcard event types
for evType, stateKeys := range rsm.eventTypeToStateKeys {
queryStateMap[evType] = stateKeys
}
for evType := range rsm.eventTypesWithWildcardStateKeys {
queryStateMap[evType] = nil
}
}
return queryStateMap
}
func HashedTokenFromRequest(req *http.Request) (hashAccessToken string, accessToken string, err error) {
// return a hash of the access token
ah := req.Header.Get("Authorization")


@ -0,0 +1,86 @@
package internal
const StateKeyLazy = "$LAZY"
type RequiredStateMap struct {
eventTypesWithWildcardStateKeys map[string]struct{}
stateKeysForWildcardEventType []string
eventTypeToStateKeys map[string][]string
allState bool
lazyLoading bool
}
func NewRequiredStateMap(eventTypesWithWildcardStateKeys map[string]struct{},
stateKeysForWildcardEventType []string,
eventTypeToStateKeys map[string][]string,
allState, lazyLoading bool) *RequiredStateMap {
return &RequiredStateMap{
eventTypesWithWildcardStateKeys: eventTypesWithWildcardStateKeys,
stateKeysForWildcardEventType: stateKeysForWildcardEventType,
eventTypeToStateKeys: eventTypeToStateKeys,
allState: allState,
lazyLoading: lazyLoading,
}
}
func (rsm *RequiredStateMap) IsLazyLoading() bool {
return rsm.lazyLoading
}
func (rsm *RequiredStateMap) Include(evType, stateKey string) bool {
if rsm.allState {
// "additional entries FILTER OUT the returned set of state events. These additional entries cannot use '*' themselves."
includedStateKeys := rsm.eventTypeToStateKeys[evType]
if len(includedStateKeys) > 0 {
for _, sk := range includedStateKeys {
if sk == stateKey {
return true
}
}
return false
}
return true
}
// check if we should include this event due to wildcard event types
for _, sk := range rsm.stateKeysForWildcardEventType {
if sk == stateKey || sk == "*" {
return true
}
}
// check if we should include this event due to wildcard state keys
for et := range rsm.eventTypesWithWildcardStateKeys {
if et == evType {
return true
}
}
// check if we should include this event due to exact type/state key match
for _, sk := range rsm.eventTypeToStateKeys[evType] {
if sk == stateKey {
return true
}
}
return false
}
// work out what to ask the storage layer: if we have wildcard event types we need to pull all
// room state and cannot only pull out certain event types. If we have wildcard state keys we
// need to use an empty list for state keys.
func (rsm *RequiredStateMap) QueryStateMap() map[string][]string {
queryStateMap := make(map[string][]string)
if rsm.allState {
return queryStateMap
}
if len(rsm.stateKeysForWildcardEventType) == 0 { // no wildcard event types
for evType, stateKeys := range rsm.eventTypeToStateKeys {
if evType == "m.room.member" && rsm.lazyLoading {
queryStateMap[evType] = nil
} else {
queryStateMap[evType] = stateKeys
}
}
for evType := range rsm.eventTypesWithWildcardStateKeys {
queryStateMap[evType] = nil
}
}
return queryStateMap
}
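A sketch of the *,* filtering behaviour, assuming the same internal package path; the arguments mirror a required_state of [["*","*"], ["m.room.member","@alice:example.org"]]. With allState set, the extra exact entry filters the returned member events rather than widening the set:

package main

import (
	"fmt"

	"github.com/matrix-org/sync-v3/internal"
)

func main() {
	rsm := internal.NewRequiredStateMap(
		nil, // event types with wildcard state keys
		nil, // state keys for the wildcard event type
		map[string][]string{"m.room.member": {"@alice:example.org"}},
		true,  // allState: the request contained the *,* pair
		false, // lazyLoading
	)

	fmt.Println(rsm.Include("m.room.topic", ""))                    // true: all other state is included
	fmt.Println(rsm.Include("m.room.member", "@alice:example.org")) // true: explicitly listed
	fmt.Println(rsm.Include("m.room.member", "@bob:example.org"))   // false: filtered out
	fmt.Println(rsm.QueryStateMap())                                // map[]: pull all room state from storage
}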


@ -1,6 +1,7 @@
package internal
import (
"encoding/json"
"fmt"
"strings"
)
@ -15,10 +16,13 @@ type RoomMetadata struct {
InviteCount int
LastMessageTimestamp uint64
Encrypted bool
PredecessorRoomID *string
UpgradedRoomID *string
RoomType *string
// if this room is a space, which rooms are m.space.child state events. This is the same for all users hence is global.
ChildSpaceRooms map[string]struct{}
// The latest m.typing ephemeral event for this room.
TypingEvent json.RawMessage
}
// SameRoomName checks if the fields relevant for room names have changed between the two metadatas.

internal/types.go (new file, 10 lines)

@ -0,0 +1,10 @@
package internal
type Receipt struct {
RoomID string `db:"room_id"`
EventID string `db:"event_id"`
UserID string `db:"user_id"`
TS int64 `db:"ts"`
ThreadID string `db:"thread_id"`
IsPrivate bool
}

pubsub/pubsub.go (new file, 119 lines)

@ -0,0 +1,119 @@
package pubsub
import (
"fmt"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// Every payload needs a type to distinguish what kind of update it is.
type Payload interface {
Type() string
}
// Listener represents the common functions required by all subscription listeners
type Listener interface {
// Begin listening on this channel with this callback starting from this position. Blocks until Close() is called.
Listen(chanName string, fn func(p Payload)) error
// Close the listener. No more callbacks should fire.
Close() error
}
// Notifier represents the common functions required by all notifiers
type Notifier interface {
// Notify chanName that there is a new payload p. Return an error if we failed to send the notification.
Notify(chanName string, p Payload) error
// Close is called when we should stop listening.
Close() error
}
type PubSub struct {
chans map[string]chan Payload
mu *sync.Mutex
closed bool
bufferSize int
}
func NewPubSub(bufferSize int) *PubSub {
return &PubSub{
chans: make(map[string]chan Payload),
mu: &sync.Mutex{},
bufferSize: bufferSize,
}
}
func (ps *PubSub) getChan(chanName string) chan Payload {
ps.mu.Lock()
defer ps.mu.Unlock()
ch := ps.chans[chanName]
if ch == nil {
ch = make(chan Payload, ps.bufferSize)
ps.chans[chanName] = ch
}
return ch
}
func (ps *PubSub) Notify(chanName string, p Payload) error {
ch := ps.getChan(chanName)
select {
case ch <- p:
break
case <-time.After(5 * time.Second):
return fmt.Errorf("notify with payload %v timed out", p.Type())
}
return nil
}
func (ps *PubSub) Close() error {
if ps.closed {
return nil
}
ps.closed = true
ps.mu.Lock()
defer ps.mu.Unlock()
for _, ch := range ps.chans {
close(ch)
}
return nil
}
func (ps *PubSub) Listen(chanName string, fn func(p Payload)) error {
ch := ps.getChan(chanName)
for payload := range ch {
fn(payload)
}
return nil
}
// Wrapper around a Notifier which adds Prometheus metrics
type PromNotifier struct {
Notifier
msgCounter *prometheus.CounterVec
}
func (p *PromNotifier) Notify(chanName string, payload Payload) error {
p.msgCounter.WithLabelValues(payload.Type()).Inc()
return p.Notifier.Notify(chanName, payload)
}
func (p *PromNotifier) Close() error {
prometheus.Unregister(p.msgCounter)
return p.Notifier.Close()
}
// Wrap a notifier for prometheus metrics
func NewPromNotifier(n Notifier, subsystem string) Notifier {
p := &PromNotifier{
Notifier: n,
msgCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "sliding_sync",
Subsystem: subsystem,
Name: "num_payloads",
Help: "Number of payloads published",
}, []string{"payload_type"}),
}
prometheus.MustRegister(p.msgCounter)
return p
}
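A small usage sketch of the in-process PubSub; the channel name and payload type here are illustrative, real code publishes the V2*/V3* payloads defined below. Listen blocks until Close is called, so it runs on its own goroutine; Notify fails if the listener cannot drain the buffered channel within 5 seconds:

package main

import (
	"fmt"
	"sync"

	"github.com/matrix-org/sync-v3/pubsub"
)

// ping is a stand-in payload type for this example
type ping struct{ msg string }

func (p ping) Type() string { return "ping" }

func main() {
	ps := pubsub.NewPubSub(16) // buffer up to 16 payloads per channel

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Listen blocks until the channel is closed by Close()
		ps.Listen("demo", func(p pubsub.Payload) {
			fmt.Println("got payload:", p.Type())
		})
	}()

	if err := ps.Notify("demo", ping{msg: "hello"}); err != nil {
		fmt.Println("notify failed:", err)
	}

	ps.Close()
	wg.Wait()
}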

pubsub/v2.go (new file, 141 lines)

@ -0,0 +1,141 @@
package pubsub
import (
"encoding/json"
"github.com/matrix-org/sync-v3/internal"
)
// The channel which has V2* payloads
const ChanV2 = "v2ch"
type V2Listener interface {
Initialise(p *V2Initialise)
Accumulate(p *V2Accumulate)
OnAccountData(p *V2AccountData)
OnInvite(p *V2InviteRoom)
OnLeftRoom(p *V2LeaveRoom)
OnUnreadCounts(p *V2UnreadCounts)
OnInitialSyncComplete(p *V2InitialSyncComplete)
OnDeviceData(p *V2DeviceData)
OnTyping(p *V2Typing)
OnReceipt(p *V2Receipt)
}
type V2Initialise struct {
RoomID string
SnapshotNID int64
}
func (v V2Initialise) Type() string { return "s" }
type V2Accumulate struct {
RoomID string
PrevBatch string
EventNIDs []int64
}
func (v V2Accumulate) Type() string { return "a" }
type V2UnreadCounts struct {
UserID string
RoomID string
HighlightCount *int
NotificationCount *int
}
func (v V2UnreadCounts) Type() string { return "u" }
type V2AccountData struct {
UserID string
RoomID string
Types []string
}
func (v V2AccountData) Type() string { return "c" }
type V2LeaveRoom struct {
UserID string
RoomID string
}
func (v V2LeaveRoom) Type() string { return "l" }
type V2InviteRoom struct {
UserID string
RoomID string
}
func (v V2InviteRoom) Type() string { return "i" }
type V2InitialSyncComplete struct {
UserID string
DeviceID string
}
func (v V2InitialSyncComplete) Type() string { return "x" }
type V2DeviceData struct {
Pos int64
}
func (v V2DeviceData) Type() string { return "d" }
type V2Typing struct {
RoomID string
EphemeralEvent json.RawMessage
}
func (v V2Typing) Type() string { return "t" }
type V2Receipt struct {
RoomID string
Receipts []internal.Receipt
}
func (v V2Receipt) Type() string { return "r" }
type V2Sub struct {
listener Listener
receiver V2Listener
}
func NewV2Sub(l Listener, recv V2Listener) *V2Sub {
return &V2Sub{
listener: l,
receiver: recv,
}
}
func (v *V2Sub) Teardown() {
v.listener.Close()
}
func (v *V2Sub) onMessage(p Payload) {
switch p.Type() {
case V2Receipt{}.Type():
v.receiver.OnReceipt(p.(*V2Receipt))
case V2Initialise{}.Type():
v.receiver.Initialise(p.(*V2Initialise))
case V2Accumulate{}.Type():
v.receiver.Accumulate(p.(*V2Accumulate))
case V2AccountData{}.Type():
v.receiver.OnAccountData(p.(*V2AccountData))
case V2InviteRoom{}.Type():
v.receiver.OnInvite(p.(*V2InviteRoom))
case V2LeaveRoom{}.Type():
v.receiver.OnLeftRoom(p.(*V2LeaveRoom))
case V2UnreadCounts{}.Type():
v.receiver.OnUnreadCounts(p.(*V2UnreadCounts))
case V2InitialSyncComplete{}.Type():
v.receiver.OnInitialSyncComplete(p.(*V2InitialSyncComplete))
case V2DeviceData{}.Type():
v.receiver.OnDeviceData(p.(*V2DeviceData))
case V2Typing{}.Type():
v.receiver.OnTyping(p.(*V2Typing))
}
}
func (v *V2Sub) Listen() error {
return v.listener.Listen(ChanV2, v.onMessage)
}
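A publishing sketch for the poller side, with a placeholder room ID and event body. Note that onMessage type-asserts to pointer types such as *V2Typing, so payloads must be published as pointers:

package main

import (
	"encoding/json"

	"github.com/matrix-org/sync-v3/pubsub"
)

func main() {
	ps := pubsub.NewPubSub(100)

	// the v2 poller publishes pointers to V2* payloads on ChanV2; the API
	// side wraps its receiver in NewV2Sub(ps, receiver) and calls Listen()
	_ = ps.Notify(pubsub.ChanV2, &pubsub.V2Typing{
		RoomID:         "!room:example.org",
		EphemeralEvent: json.RawMessage(`{"type":"m.typing","content":{"user_ids":[]}}`),
	})
}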

pubsub/v3.go (new file, 42 lines)

@ -0,0 +1,42 @@
package pubsub
// The channel which has V3* payloads
const ChanV3 = "v3ch"
type V3Listener interface {
EnsurePolling(p *V3EnsurePolling)
}
type V3EnsurePolling struct {
UserID string
DeviceID string
}
func (v V3EnsurePolling) Type() string { return "p" }
type V3Sub struct {
listener Listener
receiver V3Listener
}
func NewV3Sub(l Listener, recv V3Listener) *V3Sub {
return &V3Sub{
listener: l,
receiver: recv,
}
}
func (v *V3Sub) Teardown() {
v.listener.Close()
}
func (v *V3Sub) onMessage(p Payload) {
switch p.Type() {
case V3EnsurePolling{}.Type():
v.receiver.EnsurePolling(p.(*V3EnsurePolling))
}
}
func (v *V3Sub) Listen() error {
return v.listener.Listen(ChanV3, v.onMessage)
}
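The equivalent sketch for the v3-to-v2 direction, with placeholder IDs: the API process publishes V3EnsurePolling, again as a pointer because onMessage casts to *V3EnsurePolling, and the poller side listens via NewV3Sub:

package main

import (
	"github.com/matrix-org/sync-v3/pubsub"
)

func main() {
	ps := pubsub.NewPubSub(100)

	// ask the poller process to make sure a v2 poller exists for this device
	_ = ps.Notify(pubsub.ChanV3, &pubsub.V3EnsurePolling{
		UserID:   "@alice:example.org",
		DeviceID: "ALICEDEVICE",
	})
}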


@ -1,7 +1,6 @@
package state
import (
"database/sql"
"fmt"
"github.com/jmoiron/sqlx"
@ -63,14 +62,10 @@ func (t *AccountDataTable) Insert(txn *sqlx.Tx, accDatas []AccountData) ([]Accou
return dedupedAccountData, nil
}
func (t *AccountDataTable) Select(txn *sqlx.Tx, userID, eventType, roomID string) (*AccountData, error) {
var acc AccountData
err := txn.Get(&acc, `SELECT user_id, room_id, type, data FROM syncv3_account_data
WHERE user_id=$1 AND type=$2 AND room_id=$3`, userID, eventType, roomID)
if err == sql.ErrNoRows {
return nil, nil
}
return &acc, err
func (t *AccountDataTable) Select(txn *sqlx.Tx, userID string, eventTypes []string, roomID string) (datas []AccountData, err error) {
err = txn.Select(&datas, `SELECT user_id, room_id, type, data FROM syncv3_account_data
WHERE user_id=$1 AND type=ANY($2) AND room_id=$3`, userID, pq.StringArray(eventTypes), roomID)
return
}
func (t *AccountDataTable) SelectWithType(txn *sqlx.Tx, userID, evType string) (datas []AccountData, err error) {


@ -90,20 +90,20 @@ func TestAccountData(t *testing.T) {
}
// select the updated event
gotData, err := table.Select(txn, alice, eventType, roomA)
gotData, err := table.Select(txn, alice, []string{eventType}, roomA)
if err != nil {
t.Fatalf("Select: %s", err)
}
if !reflect.DeepEqual(*gotData, accountData[len(accountData)-1]) {
if !reflect.DeepEqual(gotData[0], accountData[len(accountData)-1]) {
t.Fatalf("Select: expected updated event to be returned but wasn't. Got %+v want %+v", gotData, accountData[len(accountData)-1])
}
// Select the global event
gotData, err = table.Select(txn, alice, eventType, sync2.AccountDataGlobalRoom)
gotData, err = table.Select(txn, alice, []string{eventType}, sync2.AccountDataGlobalRoom)
if err != nil {
t.Fatalf("Select: %s", err)
}
if !reflect.DeepEqual(*gotData, accountData[len(accountData)-3]) {
if !reflect.DeepEqual(gotData[0], accountData[len(accountData)-3]) {
t.Fatalf("Select: expected global event to be returned but wasn't. Got %+v want %+v", gotData, accountData[len(accountData)-3])
}
@ -152,4 +152,16 @@ func TestAccountData(t *testing.T) {
t.Fatalf("SelectWithType: got %v want %v", gotDatas, wantDatas)
}
// Select all types in this room
gotDatas, err = table.Select(txn, alice, []string{eventType, "dummy"}, roomB)
if err != nil {
t.Fatalf("SelectWithType: %v", err)
}
wantDatas = []AccountData{
accountData[1], accountData[2],
}
if !accountDatasEqual(gotDatas, wantDatas) {
t.Fatalf("Select(multi-types): got %v want %v", gotDatas, wantDatas)
}
}


@ -140,16 +140,17 @@ func (a *Accumulator) roomInfoDelta(roomID string, events []Event) RoomInfo {
// Initialise starts a new sync accumulator for the given room using the given state as a baseline.
// This will only take effect if this is the first time the v3 server has seen this room, and it wasn't
// possible to get all events up to the create event (e.g Matrix HQ). Returns true if this call actually
// added new events
// added new events, along with the snapshot NID.
//
// This function:
// - Stores these events
// - Sets up the current snapshot based on the state list given.
func (a *Accumulator) Initialise(roomID string, state []json.RawMessage) (bool, error) {
func (a *Accumulator) Initialise(roomID string, state []json.RawMessage) (bool, int64, error) {
if len(state) == 0 {
return false, nil
return false, 0, nil
}
addedEvents := false
var snapID int64
err := sqlutil.WithTransaction(a.db, func(txn *sqlx.Tx) error {
// Attempt to short-circuit. This has to be done inside a transaction to make sure
// we don't race with multiple calls to Initialise with the same room ID.
@ -234,13 +235,15 @@ func (a *Accumulator) Initialise(roomID string, state []json.RawMessage) (bool,
// will have an associated state snapshot ID on the event.
// Set the snapshot ID as the current state
snapID = snapshot.SnapshotID
return a.roomsTable.Upsert(txn, info, snapshot.SnapshotID, latestNID)
})
return addedEvents, err
return addedEvents, snapID, err
}
// Accumulate internal state from a user's sync response. The timeline order MUST be in the order
// received from the server. Returns the number of new events in the timeline.
// received from the server. Returns the number of new events in the timeline, the new timeline event NIDs
// or an error.
//
// This function does several things:
// - It ensures all events are persisted in the database. This is shared amongst users.
@ -249,9 +252,9 @@ func (a *Accumulator) Initialise(roomID string, state []json.RawMessage) (bool,
// to exist in the database, and the sync stream is already linearised for us.
// - Else it creates a new room state snapshot if the timeline contains state events (as this now represents the current state)
// - It adds entries to the membership log for membership events.
func (a *Accumulator) Accumulate(roomID string, prevBatch string, timeline []json.RawMessage) (numNew int, latestNID int64, err error) {
func (a *Accumulator) Accumulate(roomID string, prevBatch string, timeline []json.RawMessage) (numNew int, timelineNIDs []int64, err error) {
if len(timeline) == 0 {
return 0, 0, nil
return 0, nil, nil
}
err = sqlutil.WithTransaction(a.db, func(txn *sqlx.Tx) error {
// Insert the events. Check for duplicates which can happen in the real world when joining
@ -292,6 +295,7 @@ func (a *Accumulator) Accumulate(roomID string, prevBatch string, timeline []jso
}
numNew = len(eventIDToNID)
var latestNID int64
newEvents := make([]Event, 0, len(eventIDToNID))
for _, ev := range dedupedEvents {
nid, ok := eventIDToNID[ev.ID]
@ -308,6 +312,7 @@ func (a *Accumulator) Accumulate(roomID string, prevBatch string, timeline []jso
latestNID = ev.NID
}
newEvents = append(newEvents, ev)
timelineNIDs = append(timelineNIDs, ev.NID)
}
}
@ -369,7 +374,7 @@ func (a *Accumulator) Accumulate(roomID string, prevBatch string, timeline []jso
}
return nil
})
return numNew, latestNID, err
return numNew, timelineNIDs, err
}
// Delta returns a list of events of at most `limit` for the room not including `lastEventNID`.


@ -26,7 +26,7 @@ func TestAccumulatorInitialise(t *testing.T) {
t.Fatalf("failed to open SQL db: %s", err)
}
accumulator := NewAccumulator(db)
added, err := accumulator.Initialise(roomID, roomEvents)
added, initSnapID, err := accumulator.Initialise(roomID, roomEvents)
if err != nil {
t.Fatalf("failed to Initialise accumulator: %s", err)
}
@ -48,6 +48,9 @@ func TestAccumulatorInitialise(t *testing.T) {
if snapID == 0 {
t.Fatalf("Initialise did not store a current snapshot")
}
if snapID != initSnapID {
t.Fatalf("Initialise returned wrong snapshot ID, got %v want %v", initSnapID, snapID)
}
// this snapshot should have 3 events in it
row, err := accumulator.snapshotTable.Select(txn, snapID)
@ -73,7 +76,7 @@ func TestAccumulatorInitialise(t *testing.T) {
}
// Subsequent calls do nothing and are not an error
added, err = accumulator.Initialise(roomID, roomEvents)
added, _, err = accumulator.Initialise(roomID, roomEvents)
if err != nil {
t.Fatalf("failed to Initialise accumulator: %s", err)
}
@ -94,7 +97,7 @@ func TestAccumulatorAccumulate(t *testing.T) {
t.Fatalf("failed to open SQL db: %s", err)
}
accumulator := NewAccumulator(db)
_, err = accumulator.Initialise(roomID, roomEvents)
_, _, err = accumulator.Initialise(roomID, roomEvents)
if err != nil {
t.Fatalf("failed to Initialise accumulator: %s", err)
}
@ -109,8 +112,8 @@ func TestAccumulatorAccumulate(t *testing.T) {
[]byte(`{"event_id":"I", "type":"m.room.history_visibility", "state_key":"", "content":{"visibility":"public"}}`),
}
var numNew int
var gotLatestNID int64
if numNew, gotLatestNID, err = accumulator.Accumulate(roomID, "", newEvents); err != nil {
var latestNIDs []int64
if numNew, latestNIDs, err = accumulator.Accumulate(roomID, "", newEvents); err != nil {
t.Fatalf("failed to Accumulate: %s", err)
}
if numNew != len(newEvents) {
@ -121,8 +124,8 @@ func TestAccumulatorAccumulate(t *testing.T) {
if err != nil {
t.Fatalf("failed to check latest NID from Accumulate: %s", err)
}
if gotLatestNID != wantLatestNID {
t.Errorf("Accumulator.Accumulate returned latest nid %d, want %d", gotLatestNID, wantLatestNID)
if latestNIDs[len(latestNIDs)-1] != wantLatestNID {
t.Errorf("Accumulator.Accumulate returned latest nid %d, want %d", latestNIDs[len(latestNIDs)-1], wantLatestNID)
}
// Begin assertions
@ -183,7 +186,7 @@ func TestAccumulatorDelta(t *testing.T) {
t.Fatalf("failed to open SQL db: %s", err)
}
accumulator := NewAccumulator(db)
_, err = accumulator.Initialise(roomID, nil)
_, _, err = accumulator.Initialise(roomID, nil)
if err != nil {
t.Fatalf("failed to Initialise accumulator: %s", err)
}
@ -234,7 +237,7 @@ func TestAccumulatorMembershipLogs(t *testing.T) {
t.Fatalf("failed to open SQL db: %s", err)
}
accumulator := NewAccumulator(db)
_, err = accumulator.Initialise(roomID, nil)
_, _, err = accumulator.Initialise(roomID, nil)
if err != nil {
t.Fatalf("failed to Initialise accumulator: %s", err)
}
@ -376,7 +379,7 @@ func TestAccumulatorDupeEvents(t *testing.T) {
}
accumulator := NewAccumulator(db)
roomID := "!buggy:localhost"
_, err = accumulator.Initialise(roomID, joinRoom.State.Events)
_, _, err = accumulator.Initialise(roomID, joinRoom.State.Events)
if err != nil {
t.Fatalf("failed to Initialise accumulator: %s", err)
}
@ -418,7 +421,7 @@ func TestAccumulatorMisorderedGraceful(t *testing.T) {
accumulator := NewAccumulator(db)
roomID := "!TestAccumulatorStateReset:localhost"
// Create a room with initial state A,C
_, err = accumulator.Initialise(roomID, []json.RawMessage{
_, _, err = accumulator.Initialise(roomID, []json.RawMessage{
eventA, eventC,
})
if err != nil {
@ -593,7 +596,9 @@ func TestCalculateNewSnapshotDupe(t *testing.T) {
}
func currentSnapshotNIDs(t *testing.T, snapshotTable *SnapshotTable, roomID string) []int64 {
roomToSnapshotEvents, err := snapshotTable.CurrentSnapshots()
txn := snapshotTable.db.MustBeginTx(context.Background(), nil)
defer txn.Commit()
roomToSnapshotEvents, err := snapshotTable.CurrentSnapshots(txn)
if err != nil {
t.Errorf("currentSnapshotNIDs: %s", err)
}

state/device_data_table.go (new file, 139 lines)

@ -0,0 +1,139 @@
package state
import (
"database/sql"
"encoding/json"
"github.com/jmoiron/sqlx"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/sqlutil"
)
type DeviceDataRow struct {
ID int64 `db:"id"`
UserID string `db:"user_id"`
DeviceID string `db:"device_id"`
// This will contain internal.DeviceData serialised as JSON. It's stored in a single column as we don't
// need to perform searches on this data.
Data []byte `db:"data"`
}
type DeviceDataTable struct {
db *sqlx.DB
}
func NewDeviceDataTable(db *sqlx.DB) *DeviceDataTable {
db.MustExec(`
CREATE SEQUENCE IF NOT EXISTS syncv3_device_data_seq;
CREATE TABLE IF NOT EXISTS syncv3_device_data (
id BIGINT PRIMARY KEY NOT NULL DEFAULT nextval('syncv3_device_data_seq'),
user_id TEXT NOT NULL,
device_id TEXT NOT NULL,
data BYTEA NOT NULL,
UNIQUE(user_id, device_id)
);
`)
return &DeviceDataTable{
db: db,
}
}
// Atomically select the device data for this user|device and then swap DeviceLists around if set.
// This should only be called by the v3 HTTP APIs when servicing an E2EE extension request.
func (t *DeviceDataTable) Select(userID, deviceID string, swap bool) (dd *internal.DeviceData, err error) {
err = sqlutil.WithTransaction(t.db, func(txn *sqlx.Tx) error {
var row DeviceDataRow
err = t.db.Get(&row, `SELECT data FROM syncv3_device_data WHERE user_id=$1 AND device_id=$2`, userID, deviceID)
if err != nil {
if err == sql.ErrNoRows {
// if there is no device data for this user, it's not an error.
return nil
}
return err
}
// unmarshal to swap
var tempDD internal.DeviceData
if err = json.Unmarshal(row.Data, &tempDD); err != nil {
return err
}
tempDD.UserID = userID
tempDD.DeviceID = deviceID
if !swap {
dd = &tempDD
return nil // don't swap
}
// swap over the fields
n := tempDD.DeviceLists.New
tempDD.DeviceLists.Sent = n
tempDD.DeviceLists.New = make(map[string]int)
// re-marshal and write
data, err := json.Marshal(tempDD)
if err != nil {
return err
}
_, err = t.db.Exec(`UPDATE syncv3_device_data SET data=$1 WHERE user_id=$2 AND device_id=$3`, data, userID, deviceID)
dd = &tempDD
return err
})
return
}
func (t *DeviceDataTable) SelectFrom(pos int64) (results []internal.DeviceData, nextPos int64, err error) {
nextPos = pos
var rows []DeviceDataRow
err = t.db.Select(&rows, `SELECT id, user_id, device_id, data FROM syncv3_device_data WHERE id > $1 ORDER BY id ASC`, pos)
if err != nil {
return
}
results = make([]internal.DeviceData, len(rows))
for i := range rows {
var dd internal.DeviceData
if err = json.Unmarshal(rows[i].Data, &dd); err != nil {
return
}
dd.UserID = rows[i].UserID
dd.DeviceID = rows[i].DeviceID
results[i] = dd
nextPos = rows[i].ID
}
return
}
// Upsert combines what is in the database for this user|device with the partial entry `dd`
func (t *DeviceDataTable) Upsert(dd *internal.DeviceData) (pos int64, err error) {
err = sqlutil.WithTransaction(t.db, func(txn *sqlx.Tx) error {
// select what already exists
var row DeviceDataRow
err = t.db.Get(&row, `SELECT data FROM syncv3_device_data WHERE user_id=$1 AND device_id=$2`, dd.UserID, dd.DeviceID)
if err != nil && err != sql.ErrNoRows {
return err
}
// unmarshal and combine
var tempDD internal.DeviceData
if len(row.Data) > 0 {
if err = json.Unmarshal(row.Data, &tempDD); err != nil {
return err
}
}
if dd.FallbackKeyTypes != nil {
tempDD.FallbackKeyTypes = dd.FallbackKeyTypes
}
if dd.OTKCounts != nil {
tempDD.OTKCounts = dd.OTKCounts
}
tempDD.DeviceLists = tempDD.DeviceLists.Combine(dd.DeviceLists)
data, err := json.Marshal(tempDD)
if err != nil {
return err
}
err = t.db.QueryRow(
`INSERT INTO syncv3_device_data(user_id, device_id, data) VALUES($1,$2,$3)
ON CONFLICT (user_id, device_id) DO UPDATE SET data=$3, id=nextval('syncv3_device_data_seq') RETURNING id`,
dd.UserID, dd.DeviceID, data,
).Scan(&pos)
return err
})
return
}
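A sketch of how the poller and API sides use this table together; the DSN is a placeholder and the import paths assume the module layout used elsewhere in this commit. The swap semantics mean a client that fails to ack keeps seeing the same device list changes under Sent until the next swap:

package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"

	"github.com/matrix-org/sync-v3/internal"
	"github.com/matrix-org/sync-v3/state"
)

func main() {
	// placeholder DSN: point this at a real postgres database
	db, err := sqlx.Open("postgres", "user=postgres dbname=syncv3 sslmode=disable")
	if err != nil {
		panic(err)
	}
	table := state.NewDeviceDataTable(db)

	// the v2 poller upserts partial updates as they arrive
	pos, err := table.Upsert(&internal.DeviceData{
		UserID:   "@alice:example.org",
		DeviceID: "ALICEDEVICE",
		DeviceLists: internal.DeviceLists{
			New: internal.ToDeviceListChangesMap([]string{"@bob:example.org"}, nil),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("stream position:", pos)

	// the API process reads with swap=true when servicing an E2EE extension
	// request: New moves to Sent, so retrying the same request replays the
	// same changes rather than losing them
	dd, err := table.Select("@alice:example.org", "ALICEDEVICE", true)
	if err != nil {
		panic(err)
	}
	changed, left := internal.DeviceListChangesArrays(dd.DeviceLists.Sent)
	fmt.Println(changed, left)
}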


@ -0,0 +1,237 @@
package state
import (
"reflect"
"testing"
"github.com/jmoiron/sqlx"
"github.com/matrix-org/sync-v3/internal"
)
func assertVal(t *testing.T, msg string, got, want interface{}) {
if !reflect.DeepEqual(got, want) {
t.Errorf("%s: got %v want %v", msg, got, want)
}
}
func assertDeviceDatas(t *testing.T, got, want []internal.DeviceData) {
t.Helper()
if len(got) != len(want) {
t.Fatalf("got %d devices, want %d : %+v", len(got), len(want), got)
}
for i := range want {
g := got[i]
w := want[i]
assertVal(t, "device id", g.DeviceID, w.DeviceID)
assertVal(t, "user id", g.UserID, w.UserID)
assertVal(t, "FallbackKeyTypes", g.FallbackKeyTypes, w.FallbackKeyTypes)
assertVal(t, "OTKCounts", g.OTKCounts, w.OTKCounts)
}
}
func TestDeviceDataTable(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
table := NewDeviceDataTable(db)
userID := "@alice"
deviceID := "ALICE"
dd := &internal.DeviceData{
UserID: userID,
DeviceID: deviceID,
OTKCounts: map[string]int{
"foo": 100,
},
FallbackKeyTypes: []string{"foo", "bar"},
}
// test basic insert -> select
pos, err := table.Upsert(dd)
assertNoError(t, err)
results, nextPos, err := table.SelectFrom(-1)
assertNoError(t, err)
if pos != nextPos {
t.Fatalf("Upsert returned pos %v but SelectFrom returned pos %v", pos, nextPos)
}
assertDeviceDatas(t, results, []internal.DeviceData{*dd})
// at latest -> no results
results, nextPos, err = table.SelectFrom(nextPos)
assertNoError(t, err)
if pos != nextPos {
t.Fatalf("Upsert returned pos %v but SelectFrom returned pos %v", pos, nextPos)
}
assertDeviceDatas(t, results, nil)
// multiple insert -> replace on user|device
dd2 := *dd
dd2.OTKCounts = map[string]int{"foo": 99}
_, err = table.Upsert(&dd2)
assertNoError(t, err)
dd3 := *dd
dd3.OTKCounts = map[string]int{"foo": 98}
pos, err = table.Upsert(&dd3)
assertNoError(t, err)
results, nextPos, err = table.SelectFrom(nextPos)
assertNoError(t, err)
if pos != nextPos {
t.Fatalf("Upsert returned pos %v but SelectFrom returned pos %v", pos, nextPos)
}
assertDeviceDatas(t, results, []internal.DeviceData{dd3})
// multiple insert -> different user, same device + same user, different device
dd4 := *dd
dd4.UserID = "@bob"
_, err = table.Upsert(&dd4)
assertNoError(t, err)
dd5 := *dd
dd5.DeviceID = "ANOTHER"
pos, err = table.Upsert(&dd5)
assertNoError(t, err)
results, nextPos, err = table.SelectFrom(nextPos)
assertNoError(t, err)
if pos != nextPos {
t.Fatalf("Upsert returned pos %v but SelectFrom returned pos %v", pos, nextPos)
}
assertDeviceDatas(t, results, []internal.DeviceData{dd4, dd5})
}
func TestDeviceDataTableSwaps(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
table := NewDeviceDataTable(db)
userID := "@bob"
deviceID := "BOB"
// test accumulating deltas
deltas := []internal.DeviceData{
{
UserID: userID,
DeviceID: deviceID,
OTKCounts: map[string]int{
"foo": 100,
"bar": 92,
},
},
{
UserID: userID,
DeviceID: deviceID,
FallbackKeyTypes: []string{"foobar"},
DeviceLists: internal.DeviceLists{
New: internal.ToDeviceListChangesMap([]string{"alice"}, nil),
},
},
{
UserID: userID,
DeviceID: deviceID,
OTKCounts: map[string]int{
"foo": 99,
},
},
{
UserID: userID,
DeviceID: deviceID,
DeviceLists: internal.DeviceLists{
New: internal.ToDeviceListChangesMap([]string{"bob"}, nil),
},
},
}
for _, dd := range deltas {
_, err = table.Upsert(&dd)
assertNoError(t, err)
}
want := internal.DeviceData{
UserID: userID,
DeviceID: deviceID,
OTKCounts: map[string]int{
"foo": 99,
},
FallbackKeyTypes: []string{"foobar"},
DeviceLists: internal.DeviceLists{
New: internal.ToDeviceListChangesMap([]string{"alice", "bob"}, nil),
},
}
// check we can read-only select
for i := 0; i < 3; i++ {
got, err := table.Select(userID, deviceID, false)
assertNoError(t, err)
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want})
}
// now swap-er-roo
got, err := table.Select(userID, deviceID, true)
assertNoError(t, err)
want2 := want
want2.DeviceLists = internal.DeviceLists{
Sent: internal.ToDeviceListChangesMap([]string{"alice"}, nil),
New: nil,
}
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want2})
// this is permanent, read-only views show this too
got, err = table.Select(userID, deviceID, false)
assertNoError(t, err)
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want2})
// another swap causes sent to be cleared out
got, err = table.Select(userID, deviceID, true)
assertNoError(t, err)
want3 := want2
want3.DeviceLists = internal.DeviceLists{
Sent: nil,
New: nil,
}
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want3})
// get back the original state
for _, dd := range deltas {
_, err = table.Upsert(&dd)
assertNoError(t, err)
}
got, err = table.Select(userID, deviceID, false)
assertNoError(t, err)
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want})
// swap once then add once so both sent and new are populated
_, err = table.Select(userID, deviceID, true)
assertNoError(t, err)
_, err = table.Upsert(&internal.DeviceData{
UserID: userID,
DeviceID: deviceID,
DeviceLists: internal.DeviceLists{
New: internal.ToDeviceListChangesMap([]string{"bob"}, []string{"charlie"}),
},
})
assertNoError(t, err)
want4 := want
want4.DeviceLists = internal.DeviceLists{
Sent: internal.ToDeviceListChangesMap([]string{"alice"}, nil),
New: internal.ToDeviceListChangesMap([]string{"bob"}, []string{"charlie"}),
}
got, err = table.Select(userID, deviceID, false)
assertNoError(t, err)
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want4})
// another append then consume
_, err = table.Upsert(&internal.DeviceData{
UserID: userID,
DeviceID: deviceID,
DeviceLists: internal.DeviceLists{
New: internal.ToDeviceListChangesMap([]string{"dave"}, []string{"dave"}),
},
})
assertNoError(t, err)
got, err = table.Select(userID, deviceID, true)
assertNoError(t, err)
want5 := want4
want5.DeviceLists = internal.DeviceLists{
Sent: internal.ToDeviceListChangesMap([]string{"bob", "dave"}, []string{"charlie", "dave"}),
New: nil,
}
assertDeviceDatas(t, []internal.DeviceData{*got}, []internal.DeviceData{want5})
}


@ -7,9 +7,11 @@ import (
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/sqlutil"
"github.com/tidwall/gjson"
)
const (
@ -124,7 +126,7 @@ func NewEventTable(db *sqlx.DB) *EventTable {
-- index for querying membership deltas in particular rooms
CREATE INDEX IF NOT EXISTS syncv3_events_type_room_nid_idx ON syncv3_events(event_type, room_id, event_nid);
-- index for querying events in a given room
CREATE INDEX IF NOT EXISTS syncv3_nid_room_idx ON syncv3_events(event_nid, room_id, is_state);
CREATE INDEX IF NOT EXISTS syncv3_nid_room_state_idx ON syncv3_events(room_id, event_nid, is_state);
`)
return &EventTable{db}
}
@ -146,6 +148,16 @@ func (t *EventTable) Insert(txn *sqlx.Tx, events []Event, checkFields bool) (map
ensureFieldsSet(events)
}
result := make(map[string]int)
for i := range events {
if !gjson.GetBytes(events[i].JSON, "unsigned.txn_id").Exists() {
continue
}
js, err := sjson.DeleteBytes(events[i].JSON, "unsigned.txn_id")
if err != nil {
return nil, err
}
events[i].JSON = js
}
chunks := sqlutil.Chunkify(8, MaxPostgresParameters, EventChunker(events))
var eventID string
var eventNID int
@ -281,9 +293,9 @@ func (t *EventTable) SelectLatestEventsBetween(txn *sqlx.Tx, roomID string, lowe
return events, err
}
func (t *EventTable) selectLatestEventInAllRooms() ([]Event, error) {
func (t *EventTable) selectLatestEventInAllRooms(txn *sqlx.Tx) ([]Event, error) {
result := []Event{}
rows, err := t.db.Query(
rows, err := txn.Query(
`SELECT room_id, event FROM syncv3_events WHERE event_nid in (SELECT MAX(event_nid) FROM syncv3_events GROUP BY room_id)`,
)
if err != nil {


@ -7,6 +7,8 @@ import (
"testing"
"github.com/jmoiron/sqlx"
"github.com/tidwall/gjson"
"github.com/matrix-org/sync-v3/sqlutil"
"github.com/matrix-org/sync-v3/testutils"
)
@ -823,3 +825,65 @@ func TestEventTablePrevBatch(t *testing.T) {
// 4: SelectClosestPrevBatch with an event without a prev_batch returns nothing if there are no newer events with a prev_batch
assertPrevBatch(roomID1, 8, "") // query event I, returns nothing
}
func TestRemoveUnsignedTXNID(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
txn, err := db.Beginx()
if err != nil {
t.Fatalf("failed to start txn: %s", err)
}
defer txn.Rollback()
alice := "@TestRemoveUnsignedTXNID_alice:localhost"
roomID1 := "!1:localhost"
events := []Event{
{ // field should be removed
ID: "$A",
RoomID: roomID1,
JSON: testutils.NewJoinEvent(t, alice,
testutils.WithUnsigned(map[string]interface{}{
"prev_content": map[string]interface{}{
"membership": "invite",
},
"txn_id": "randomTxnID",
}),
),
},
{ // non-existent field should not result in an error
ID: "$B",
RoomID: roomID1,
JSON: testutils.NewJoinEvent(t, alice,
testutils.WithUnsigned(map[string]interface{}{
"prev_content": map[string]interface{}{
"membership": "join",
},
}),
),
},
}
table := NewEventTable(db)
// Insert the events
_, err = table.Insert(txn, events, false)
if err != nil {
t.Errorf("failed to insert event: %s", err)
}
// Get the inserted events
gotEvents, err := table.SelectByIDs(txn, false, []string{"$A", "$B"})
if err != nil {
t.Fatalf("failed to select events: %s", err)
}
// None of the events should have a `unsigned.txn_id` field
for _, ev := range gotEvents {
jsonTXNId := gjson.GetBytes(ev.JSON, "unsigned.txn_id")
if jsonTXNId.Exists() {
t.Fatalf("expected unsigned.txn_id to be removed, got '%s'", jsonTXNId.String())
}
}
}


@ -1,6 +1,7 @@
package state
import (
"database/sql"
"encoding/json"
"github.com/jmoiron/sqlx"
@ -59,6 +60,20 @@ func (t *InvitesTable) InsertInvite(userID, roomID string, inviteRoomState []jso
return err
}
func (t *InvitesTable) SelectInviteState(userID, roomID string) (inviteState []json.RawMessage, err error) {
var blob json.RawMessage
if err := t.db.QueryRow(`SELECT invite_state FROM syncv3_invites WHERE user_id=$1 AND room_id=$2`, userID, roomID).Scan(&blob); err != nil && err != sql.ErrNoRows {
return nil, err
}
if blob == nil {
return
}
if err := json.Unmarshal(blob, &inviteState); err != nil {
return nil, err
}
return inviteState, nil
}
// Select all invites for this user. Returns a map of room ID to invite_state (json array).
func (t *InvitesTable) SelectAllInvitesForUser(userID string) (map[string][]json.RawMessage, error) {
rows, err := t.db.Query(`SELECT room_id, invite_state FROM syncv3_invites WHERE user_id = $1`, userID)


@ -58,6 +58,13 @@ func TestInviteTable(t *testing.T) {
if !reflect.DeepEqual(invites[roomA], inviteStateB) {
t.Errorf("room %s got %s want %s", roomA, jsonArrStr(invites[roomA]), jsonArrStr(inviteStateB))
}
bobInvite, err := table.SelectInviteState(bob, roomA)
if err != nil {
t.Fatalf("failed to SelectInviteState: %s", err)
}
if !reflect.DeepEqual(bobInvite, inviteStateB) {
t.Errorf("SelectInviteState: got %v want %v", bobInvite, inviteStateB)
}
// Assert no-ones invites
invites, err = table.SelectAllInvitesForUser("no one")

state/receipt_table.go (new file, 245 lines)

@ -0,0 +1,245 @@
package state
import (
"encoding/json"
"fmt"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/sqlutil"
)
type receiptEDU struct {
Type string `json:"type"`
Content map[string]struct {
Read map[string]receiptInfo `json:"m.read,omitempty"`
ReadPrivate map[string]receiptInfo `json:"m.read.private,omitempty"`
} `json:"content"`
}
type receiptInfo struct {
TS int64 `json:"ts"`
ThreadID string `json:"thread_id,omitempty"`
}
type ReceiptTable struct {
db *sqlx.DB
}
func NewReceiptTable(db *sqlx.DB) *ReceiptTable {
// we make 2 tables here to reduce the compound key size to be just room/user/thread and not
// room/user/thread/receipt_type. This should help performance somewhat when querying. Other than
// that, the tables are identical.
tableNames := []string{
"syncv3_receipts", "syncv3_receipts_private",
}
schema := `
CREATE TABLE IF NOT EXISTS %s (
room_id TEXT NOT NULL,
user_id TEXT NOT NULL,
thread_id TEXT NOT NULL,
event_id TEXT NOT NULL,
ts BIGINT NOT NULL,
UNIQUE(room_id, user_id, thread_id)
);
-- for querying by events in the timeline, need to search by event id
CREATE INDEX IF NOT EXISTS %s_by_event_idx ON %s(room_id, event_id);
-- for querying all receipts for a user in a room, need to search by user id
CREATE INDEX IF NOT EXISTS %s_by_user_idx ON %s(room_id, user_id);
`
for _, tableName := range tableNames {
db.MustExec(fmt.Sprintf(schema, tableName, tableName, tableName, tableName, tableName))
}
return &ReceiptTable{db}
}
// Insert new receipts based on a receipt EDU
// Returns newly inserted receipts, or nil if there are no new receipts.
// These newly inserted receipts can then be sent to the API processes for live updates.
func (t *ReceiptTable) Insert(roomID string, ephEvent json.RawMessage) (receipts []internal.Receipt, err error) {
readReceipts, privateReceipts, err := unpackReceiptsFromEDU(roomID, ephEvent)
if err != nil {
return nil, err
}
if len(readReceipts) == 0 && len(privateReceipts) == 0 {
return nil, nil
}
err = sqlutil.WithTransaction(t.db, func(txn *sqlx.Tx) error {
readReceipts, err = t.bulkInsert("syncv3_receipts", txn, readReceipts)
if err != nil {
return err
}
privateReceipts, err = t.bulkInsert("syncv3_receipts_private", txn, privateReceipts)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to insert receipts: %s", err)
}
// no new receipts
if len(readReceipts) == 0 && len(privateReceipts) == 0 {
return nil, nil
}
// combine together new receipts
return append(readReceipts, privateReceipts...), nil
}
// Select all non-private receipts for the event IDs given. Events must be in the room ID given.
// The parsed receipts are returned so callers can use information in the receipts in further queries
// e.g to pull out profile information for users read receipts. Call PackReceiptsIntoEDU when sending to clients.
func (t *ReceiptTable) SelectReceiptsForEvents(roomID string, eventIDs []string) (receipts []internal.Receipt, err error) {
err = t.db.Select(&receipts, `SELECT room_id, event_id, user_id, ts, thread_id FROM syncv3_receipts
WHERE room_id=$1 AND event_id = ANY($2)`, roomID, pq.StringArray(eventIDs))
return
}
// Select all (including private) receipts for this user in this room.
func (t *ReceiptTable) SelectReceiptsForUser(roomID, userID string) (receipts []internal.Receipt, err error) {
err = t.db.Select(&receipts, `SELECT room_id, event_id, user_id, ts, thread_id FROM syncv3_receipts
WHERE room_id=$1 AND user_id = $2`, roomID, userID)
if err != nil {
return nil, err
}
var privReceipts []internal.Receipt
err = t.db.Select(&privReceipts, `SELECT room_id, event_id, user_id, ts, thread_id FROM syncv3_receipts_private
WHERE room_id=$1 AND user_id = $2`, roomID, userID)
for i := range privReceipts {
privReceipts[i].IsPrivate = true
}
receipts = append(receipts, privReceipts...)
return
}
func (t *ReceiptTable) bulkInsert(tableName string, txn *sqlx.Tx, receipts []internal.Receipt) (newReceipts []internal.Receipt, err error) {
if len(receipts) == 0 {
return
}
chunks := sqlutil.Chunkify(5, MaxPostgresParameters, ReceiptChunker(receipts))
var eventID string
var roomID string
var threadID string
var userID string
var ts int64
for _, chunk := range chunks {
rows, err := txn.NamedQuery(`
INSERT INTO `+tableName+` AS old (room_id, event_id, user_id, ts, thread_id)
VALUES (:room_id, :event_id, :user_id, :ts, :thread_id) ON CONFLICT (room_id, user_id, thread_id) DO UPDATE SET event_id=excluded.event_id, ts=excluded.ts WHERE old.event_id <> excluded.event_id
RETURNING room_id, user_id, thread_id, event_id, ts`, chunk)
if err != nil {
return nil, err
}
for rows.Next() {
if err := rows.Scan(&roomID, &userID, &threadID, &eventID, &ts); err != nil {
rows.Close()
return nil, err
}
newReceipts = append(newReceipts, internal.Receipt{
RoomID: roomID,
EventID: eventID,
UserID: userID,
TS: ts,
ThreadID: threadID,
IsPrivate: tableName == "syncv3_receipts_private",
})
}
rows.Close()
}
return
}
// PackReceiptsIntoEDU bundles all the receipts into a single m.receipt EDU, suitable for sending down
// client connections.
func PackReceiptsIntoEDU(receipts []internal.Receipt) (json.RawMessage, error) {
newReceiptEDU := receiptEDU{
Type: "m.receipt",
Content: make(map[string]struct {
Read map[string]receiptInfo `json:"m.read,omitempty"`
ReadPrivate map[string]receiptInfo `json:"m.read.private,omitempty"`
}),
}
for _, r := range receipts {
receiptsForEvent := newReceiptEDU.Content[r.EventID]
if r.IsPrivate {
if receiptsForEvent.ReadPrivate == nil {
receiptsForEvent.ReadPrivate = make(map[string]receiptInfo)
}
receiptsForEvent.ReadPrivate[r.UserID] = receiptInfo{
TS: r.TS,
ThreadID: r.ThreadID,
}
} else {
if receiptsForEvent.Read == nil {
receiptsForEvent.Read = make(map[string]receiptInfo)
}
receiptsForEvent.Read[r.UserID] = receiptInfo{
TS: r.TS,
ThreadID: r.ThreadID,
}
}
newReceiptEDU.Content[r.EventID] = receiptsForEvent
}
return json.Marshal(newReceiptEDU)
}
func unpackReceiptsFromEDU(roomID string, ephEvent json.RawMessage) (readReceipts, privateReceipts []internal.Receipt, err error) {
// unpack the receipts, of the form:
// {
// "content": {
// "$1435641916114394fHBLK:matrix.org": {
// "m.read": {
// "@rikj:jki.re": {
// "ts": 1436451550453,
// "thread_id": "$aaabbbccc"
// }
// },
// "m.read.private": {
// "@self:example.org": {
// "ts": 1661384801651
// }
// }
// }
// },
// "type": "m.receipt"
// }
var edu receiptEDU
if err := json.Unmarshal(ephEvent, &edu); err != nil {
return nil, nil, err
}
if edu.Type != "m.receipt" {
return
}
for eventID, content := range edu.Content {
for userID, val := range content.Read {
readReceipts = append(readReceipts, internal.Receipt{
UserID: userID,
RoomID: roomID,
EventID: eventID,
TS: val.TS,
ThreadID: val.ThreadID,
})
}
for userID, val := range content.ReadPrivate {
privateReceipts = append(privateReceipts, internal.Receipt{
UserID: userID,
RoomID: roomID,
EventID: eventID,
TS: val.TS,
ThreadID: val.ThreadID,
IsPrivate: true,
})
}
}
return readReceipts, privateReceipts, nil
}
type ReceiptChunker []internal.Receipt
func (c ReceiptChunker) Len() int {
return len(c)
}
func (c ReceiptChunker) Subslice(i, j int) sqlutil.Chunker {
return c[i:j]
}
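// Hedged note (illustrative only): each receipt binds 5 SQL parameters (room_id, event_id,
// user_id, ts, thread_id), so Chunkify(5, MaxPostgresParameters, ...) in bulkInsert caps every
// batch at 65535/5 = 13107 receipts, keeping each NamedQuery under the Postgres placeholder limit.
// A hypothetical helper expressing that arithmetic:
func maxReceiptsPerChunk() int {
    return MaxPostgresParameters / 5 // 13107
}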

state/receipt_table_test.go (new file, 217 lines)

@ -0,0 +1,217 @@
package state
import (
"encoding/json"
"reflect"
"sort"
"testing"
"github.com/jmoiron/sqlx"
"github.com/matrix-org/sync-v3/internal"
)
func sortReceipts(receipts []internal.Receipt) {
sort.Slice(receipts, func(i, j int) bool {
keyi := receipts[i].EventID + receipts[i].RoomID + receipts[i].UserID + receipts[i].ThreadID
keyj := receipts[j].EventID + receipts[j].RoomID + receipts[j].UserID + receipts[j].ThreadID
return keyi < keyj
})
}
func parsedReceiptsEqual(t *testing.T, got, want []internal.Receipt) {
t.Helper()
sortReceipts(got)
sortReceipts(want)
if len(got) != len(want) {
t.Fatalf("got %d, want %d, got: %+v want %+v", len(got), len(want), got, want)
}
for i := range want {
if !reflect.DeepEqual(got[i], want[i]) {
t.Errorf("i=%d got %+v want %+v", i, got[i], want[i])
}
}
}
func TestReceiptTable(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
roomA := "!A:ReceiptTable"
roomB := "!B:ReceiptTable"
edu := json.RawMessage(`{
"content": {
"$1435641916114394fHBLK:matrix.org": {
"m.read": {
"@rikj:jki.re": {
"ts": 1436451550453
}
},
"m.read.private": {
"@self:example.org": {
"ts": 1661384801651
}
}
}
},
"type": "m.receipt"
}`)
table := NewReceiptTable(db)
// inserting same receipts for different rooms should work - compound key should include the room ID
for _, roomID := range []string{roomA, roomB} {
newReceipts, err := table.Insert(roomID, edu)
if err != nil {
t.Fatalf("Insert: %s", err)
}
parsedReceiptsEqual(t, newReceipts, []internal.Receipt{
{
RoomID: roomID,
EventID: "$1435641916114394fHBLK:matrix.org",
UserID: "@rikj:jki.re",
TS: 1436451550453,
ThreadID: "",
},
{
RoomID: roomID,
EventID: "$1435641916114394fHBLK:matrix.org",
UserID: "@self:example.org",
TS: 1661384801651,
ThreadID: "",
IsPrivate: true,
},
})
}
// dupe receipts = no delta
newReceipts, err := table.Insert(roomA, edu)
assertNoError(t, err)
parsedReceiptsEqual(t, newReceipts, nil)
// selecting receipts -> ignores private receipt
got, err := table.SelectReceiptsForEvents(roomA, []string{"$1435641916114394fHBLK:matrix.org"})
assertNoError(t, err)
parsedReceiptsEqual(t, got, []internal.Receipt{
{
RoomID: roomA,
EventID: "$1435641916114394fHBLK:matrix.org",
UserID: "@rikj:jki.re",
TS: 1436451550453,
ThreadID: "",
},
})
// new receipt with old receipt -> 1 delta, also check thread_id is saved.
newReceipts, err = table.Insert(roomA, json.RawMessage(`{
"content": {
"$1435641916114394fHBLK:matrix.org": {
"m.read": {
"@rikj:jki.re": {
"ts": 1436451550453
},
"@alice:bar": {
"ts": 123456,
"thread_id": "yep"
}
}
}
},
"type": "m.receipt"
}`))
assertNoError(t, err)
parsedReceiptsEqual(t, newReceipts, []internal.Receipt{
{
RoomID: roomA,
EventID: "$1435641916114394fHBLK:matrix.org",
UserID: "@alice:bar",
TS: 123456,
ThreadID: "yep",
},
})
// updated receipt for user -> 1 delta
newReceipts, err = table.Insert(roomA, json.RawMessage(`{
"content": {
"$aaaaaaaa:matrix.org": {
"m.read": {
"@rikj:jki.re": {
"ts": 1436499990453
}
}
}
},
"type": "m.receipt"
}`))
assertNoError(t, err)
parsedReceiptsEqual(t, newReceipts, []internal.Receipt{
{
RoomID: roomA,
EventID: "$aaaaaaaa:matrix.org",
UserID: "@rikj:jki.re",
TS: 1436499990453,
ThreadID: "",
},
})
// selecting multiple receipts
table.Insert(roomA, json.RawMessage(`{
"content": {
"$aaaaaaaa:matrix.org": {
"m.read": {
"@bob:bar": {
"ts": 5555
},
"@self:example.org": {
"ts": 6666,
"thread_id": "yup"
}
}
}
},
"type": "m.receipt"
}`))
got, err = table.SelectReceiptsForEvents(roomA, []string{"$aaaaaaaa:matrix.org"})
assertNoError(t, err)
parsedReceiptsEqual(t, got, []internal.Receipt{
{
RoomID: roomA,
EventID: "$aaaaaaaa:matrix.org",
UserID: "@rikj:jki.re",
TS: 1436499990453,
ThreadID: "",
},
{
RoomID: roomA,
EventID: "$aaaaaaaa:matrix.org",
UserID: "@bob:bar",
TS: 5555,
ThreadID: "",
},
{
RoomID: roomA,
EventID: "$aaaaaaaa:matrix.org",
UserID: "@self:example.org",
TS: 6666,
ThreadID: "yup",
},
})
got, err = table.SelectReceiptsForUser(roomA, "@self:example.org")
assertNoError(t, err)
parsedReceiptsEqual(t, got, []internal.Receipt{
{
RoomID: roomA,
EventID: "$1435641916114394fHBLK:matrix.org",
UserID: "@self:example.org",
TS: 1661384801651,
ThreadID: "",
IsPrivate: true,
},
{
RoomID: roomA,
EventID: "$aaaaaaaa:matrix.org",
UserID: "@self:example.org",
TS: 6666,
ThreadID: "yup",
},
})
}


@ -33,8 +33,8 @@ func NewSnapshotsTable(db *sqlx.DB) *SnapshotTable {
return &SnapshotTable{db}
}
func (t *SnapshotTable) CurrentSnapshots() (map[string][]int64, error) {
rows, err := t.db.Query(
func (t *SnapshotTable) CurrentSnapshots(txn *sqlx.Tx) (map[string][]int64, error) {
rows, err := txn.Query(
`SELECT syncv3_rooms.room_id, events FROM syncv3_snapshots JOIN syncv3_rooms ON syncv3_snapshots.snapshot_id = syncv3_rooms.current_snapshot_id`,
)
if err != nil {


@ -25,14 +25,25 @@ var logger = zerolog.New(os.Stdout).With().Timestamp().Logger().Output(zerolog.C
// Max number of parameters in a single SQL command
const MaxPostgresParameters = 65535
// StartupSnapshot represents a snapshot of startup data for the sliding sync HTTP API instances
type StartupSnapshot struct {
GlobalMetadata map[string]internal.RoomMetadata // room_id -> metadata
AllJoinedMembers map[string][]string // room_id -> [user_id]
}
type Storage struct {
accumulator *Accumulator
EventsTable *EventTable
TypingTable *TypingTable
ToDeviceTable *ToDeviceTable
UnreadTable *UnreadTable
AccountDataTable *AccountDataTable
InvitesTable *InvitesTable
accumulator *Accumulator
EventsTable *EventTable
ToDeviceTable *ToDeviceTable
UnreadTable *UnreadTable
AccountDataTable *AccountDataTable
InvitesTable *InvitesTable
TransactionsTable *TransactionsTable
DeviceDataTable *DeviceDataTable
ReceiptTable *ReceiptTable
DB *sqlx.DB
shutdownCh chan struct{}
shutdown bool
}
func NewStorage(postgresURI string) *Storage {
@ -49,13 +60,17 @@ func NewStorage(postgresURI string) *Storage {
entityName: "server",
}
return &Storage{
accumulator: acc,
TypingTable: NewTypingTable(db),
ToDeviceTable: NewToDeviceTable(db),
UnreadTable: NewUnreadTable(db),
EventsTable: acc.eventsTable,
AccountDataTable: NewAccountDataTable(db),
InvitesTable: NewInvitesTable(db),
accumulator: acc,
ToDeviceTable: NewToDeviceTable(db),
UnreadTable: NewUnreadTable(db),
EventsTable: acc.eventsTable,
AccountDataTable: NewAccountDataTable(db),
InvitesTable: NewInvitesTable(db),
TransactionsTable: NewTransactionsTable(db),
DeviceDataTable: NewDeviceDataTable(db),
ReceiptTable: NewReceiptTable(db),
DB: db,
shutdownCh: make(chan struct{}),
}
}
@ -63,13 +78,9 @@ func (s *Storage) LatestEventNID() (int64, error) {
return s.accumulator.eventsTable.SelectHighestNID()
}
func (s *Storage) LatestTypingID() (int64, error) {
return s.TypingTable.SelectHighestID()
}
func (s *Storage) AccountData(userID, roomID, eventType string) (data *AccountData, err error) {
func (s *Storage) AccountData(userID, roomID string, eventTypes []string) (data []AccountData, err error) {
err = sqlutil.WithTransaction(s.accumulator.db, func(txn *sqlx.Tx) error {
data, err = s.AccountDataTable.Select(txn, userID, eventType, roomID)
data, err = s.AccountDataTable.Select(txn, userID, eventTypes, roomID)
return err
})
return
@ -110,11 +121,29 @@ func (s *Storage) InsertAccountData(userID, roomID string, events []json.RawMess
return data, err
}
// Extract hero info for all rooms. MUST BE CALLED AT STARTUP ONLY AS THIS WILL RACE WITH LIVE TRAFFIC.
func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error) {
// GlobalSnapshot snapshots the entire database for the purposes of initialising
// a sliding sync HTTP API instance. It will atomically grab metadata for all rooms, all joined members
// and the latest pubsub position in a single transaction.
func (s *Storage) GlobalSnapshot() (ss StartupSnapshot, err error) {
err = sqlutil.WithTransaction(s.accumulator.db, func(txn *sqlx.Tx) error {
ss.GlobalMetadata, err = s.MetadataForAllRooms(txn)
if err != nil {
return err
}
ss.AllJoinedMembers, err = s.AllJoinedMembers(txn)
if err != nil {
return err
}
return err
})
return
}
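// Hypothetical startup sketch (illustrative only): an API instance loads one consistent snapshot
// before it starts consuming live pubsub updates, so its caches begin from a single atomic view
// of the database.
func exampleLoadStartupSnapshot(store *Storage) (StartupSnapshot, error) {
    snapshot, err := store.GlobalSnapshot()
    if err != nil {
        return StartupSnapshot{}, err
    }
    // snapshot.GlobalMetadata and snapshot.AllJoinedMembers are both keyed by room ID.
    return snapshot, nil
}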
// Extract hero info for all rooms.
func (s *Storage) MetadataForAllRooms(txn *sqlx.Tx) (map[string]internal.RoomMetadata, error) {
// Select the joined member counts
// sub-select all current state, filter on m.room.member and then join membership
rows, err := s.accumulator.db.Query(`
rows, err := txn.Query(`
SELECT room_id, count(state_key) FROM syncv3_events
WHERE (membership='_join' OR membership = 'join') AND event_type='m.room.member' AND event_nid IN (
SELECT unnest(events) FROM syncv3_snapshots WHERE syncv3_snapshots.snapshot_id IN (
@ -134,7 +163,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
result[metadata.RoomID] = metadata
}
// Select the invited member counts using the same style of query
rows, err = s.accumulator.db.Query(`
rows, err = txn.Query(`
SELECT room_id, count(state_key) FROM syncv3_events
WHERE (membership='_invite' OR membership = 'invite') AND event_type='m.room.member' AND event_nid IN (
SELECT unnest(events) FROM syncv3_snapshots WHERE syncv3_snapshots.snapshot_id IN (
@ -157,7 +186,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
}
// work out latest timestamps
events, err := s.accumulator.eventsTable.selectLatestEventInAllRooms()
events, err := s.accumulator.eventsTable.selectLatestEventInAllRooms(txn)
if err != nil {
return nil, err
}
@ -171,7 +200,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
}
// Select the name / canonical alias for all rooms
roomIDToStateEvents, err := s.currentStateEventsInAllRooms([]string{
roomIDToStateEvents, err := s.currentStateEventsInAllRooms(txn, []string{
"m.room.name", "m.room.canonical_alias",
})
if err != nil {
@ -193,7 +222,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
// "This should be the first 5 members of the room, ordered by stream ordering, which are joined or invited."
// Unclear if this is the first 5 *most recent* (backwards) or forwards. For now we'll use the most recent
// ones, and select 6 of them so we can always use 5 no matter who is requesting the room name.
rows, err = s.accumulator.db.Query(`
rows, err = txn.Query(`
SELECT rf.* FROM (
SELECT room_id, event, rank() OVER (
PARTITION BY room_id ORDER BY event_nid DESC
@ -231,10 +260,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
})
result[roomID] = metadata
}
tx := s.accumulator.db.MustBegin()
defer tx.Commit()
roomInfos, err := s.accumulator.roomsTable.SelectRoomInfos(tx)
roomInfos, err := s.accumulator.roomsTable.SelectRoomInfos(txn)
if err != nil {
return nil, fmt.Errorf("failed to select room infos: %s", err)
}
@ -243,6 +269,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
metadata := result[info.ID]
metadata.Encrypted = info.IsEncrypted
metadata.UpgradedRoomID = info.UpgradedRoomID
metadata.PredecessorRoomID = info.PredecessorRoomID
metadata.RoomType = info.Type
result[info.ID] = metadata
if metadata.IsSpace() {
@ -251,7 +278,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
}
// select space children
spaceRoomToRelations, err := s.accumulator.spacesTable.SelectChildren(tx, spaceRoomIDs)
spaceRoomToRelations, err := s.accumulator.spacesTable.SelectChildren(txn, spaceRoomIDs)
if err != nil {
return nil, fmt.Errorf("failed to select space children: %s", err)
}
@ -271,7 +298,7 @@ func (s *Storage) MetadataForAllRooms() (map[string]internal.RoomMetadata, error
// Returns all current state events matching the event types given in all rooms. Returns a map of
// room ID to events in that room.
func (s *Storage) currentStateEventsInAllRooms(eventTypes []string) (map[string][]Event, error) {
func (s *Storage) currentStateEventsInAllRooms(txn *sqlx.Tx, eventTypes []string) (map[string][]Event, error) {
query, args, err := sqlx.In(
`SELECT syncv3_events.room_id, syncv3_events.event_type, syncv3_events.state_key, syncv3_events.event FROM syncv3_events
WHERE syncv3_events.event_type IN (?)
@ -283,7 +310,7 @@ func (s *Storage) currentStateEventsInAllRooms(eventTypes []string) (map[string]
if err != nil {
return nil, err
}
rows, err := s.accumulator.db.Query(s.accumulator.db.Rebind(query), args...)
rows, err := txn.Query(txn.Rebind(query), args...)
if err != nil {
return nil, err
}
@ -299,14 +326,45 @@ func (s *Storage) currentStateEventsInAllRooms(eventTypes []string) (map[string]
return result, nil
}
func (s *Storage) Accumulate(roomID, prevBatch string, timeline []json.RawMessage) (numNew int, latestNID int64, err error) {
func (s *Storage) Accumulate(roomID, prevBatch string, timeline []json.RawMessage) (numNew int, timelineNIDs []int64, err error) {
return s.accumulator.Accumulate(roomID, prevBatch, timeline)
}
func (s *Storage) Initialise(roomID string, state []json.RawMessage) (bool, error) {
func (s *Storage) Initialise(roomID string, state []json.RawMessage) (bool, int64, error) {
return s.accumulator.Initialise(roomID, state)
}
func (s *Storage) EventNIDs(eventNIDs []int64) ([]json.RawMessage, error) {
events, err := s.EventsTable.SelectByNIDs(nil, true, eventNIDs)
if err != nil {
return nil, err
}
e := make([]json.RawMessage, len(events))
for i := range events {
e[i] = events[i].JSON
}
return e, nil
}
func (s *Storage) StateSnapshot(snapID int64) (state []json.RawMessage, err error) {
err = sqlutil.WithTransaction(s.accumulator.db, func(txn *sqlx.Tx) error {
snapshotRow, err := s.accumulator.snapshotTable.Select(txn, snapID)
if err != nil {
return err
}
events, err := s.accumulator.eventsTable.SelectByNIDs(txn, true, snapshotRow.Events)
if err != nil {
return fmt.Errorf("failed to select state snapshot %v: %s", snapID, err)
}
state = make([]json.RawMessage, len(events))
for i := range events {
state[i] = events[i].JSON
}
return nil
})
return
}
// Look up room state after the given event position and no further. eventTypesToStateKeys is a map of event type to a list of state keys for that event type.
// If the list of state keys is empty then all events matching that event type will be returned. If the map is entirely empty, then all room state
// will be returned.
@ -549,27 +607,27 @@ func (s *Storage) visibleEventNIDsBetweenForRooms(userID string, roomIDs []strin
// this function returns a map of room ID to a slice of 2-element from|to positions. These positions are
// all INCLUSIVE, and the client should be informed of these events at some point. For example:
//
// Stream Positions
// 1 2 3 4 5 6 7 8 9 10
// Room A Maj E E E
// Room B E Maj E
// Room C E Mal E (a already joined to this room at position 0)
// Stream Positions
// 1 2 3 4 5 6 7 8 9 10
// Room A Maj E E E
// Room B E Maj E
// Room C E Mal E (a already joined to this room at position 0)
//
// E=message event, M=membership event, followed by user letter, followed by 'i' or 'j' or 'l' for invite|join|leave
// E=message event, M=membership event, followed by user letter, followed by 'i' or 'j' or 'l' for invite|join|leave
//
// - For Room A: from=1, to=10, returns { RoomA: [ [1,10] ]} (tests events in joined room)
// - For Room B: from=1, to=10, returns { RoomB: [ [5,10] ]} (tests joining a room starts events)
// - For Room C: from=1, to=10, returns { RoomC: [ [0,9] ]} (tests leaving a room stops events)
// - For Room A: from=1, to=10, returns { RoomA: [ [1,10] ]} (tests events in joined room)
// - For Room B: from=1, to=10, returns { RoomB: [ [5,10] ]} (tests joining a room starts events)
// - For Room C: from=1, to=10, returns { RoomC: [ [0,9] ]} (tests leaving a room stops events)
//
// Multiple slices can occur when a user leaves and re-joins the same room, and invites are same-element positions:
//
// Stream Positions
// 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// Room D Maj E Mal E Maj E Mal E
// Room E E Mai E E Maj E E
// Stream Positions
// 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// Room D Maj E Mal E Maj E Mal E
// Room E E Mai E E Maj E E
//
// - For Room D: from=1, to=15 returns { RoomD: [ [1,6], [8,10] ] } (tests multi-join/leave)
// - For Room E: from=1, to=15 returns { RoomE: [ [3,3], [13,15] ] } (tests invites)
// - For Room D: from=1, to=15 returns { RoomD: [ [1,6], [8,10] ] } (tests multi-join/leave)
// - For Room E: from=1, to=15 returns { RoomE: [ [3,3], [13,15] ] } (tests invites)
func (s *Storage) VisibleEventNIDsBetween(userID string, from, to int64) (map[string][][2]int64, error) {
// load *ALL* joined rooms for this user at from (inclusive)
joinedRoomIDs, err := s.JoinedRoomsAfterPosition(userID, from)
@ -686,14 +744,14 @@ func (s *Storage) RoomMembershipDelta(roomID string, from, to int64, limit int)
return
}
func (s *Storage) AllJoinedMembers() (map[string][]string, error) {
roomIDToEventNIDs, err := s.accumulator.snapshotTable.CurrentSnapshots()
func (s *Storage) AllJoinedMembers(txn *sqlx.Tx) (map[string][]string, error) {
roomIDToEventNIDs, err := s.accumulator.snapshotTable.CurrentSnapshots(txn)
if err != nil {
return nil, err
}
result := make(map[string][]string)
for roomID, eventNIDs := range roomIDToEventNIDs {
events, err := s.accumulator.eventsTable.SelectByNIDs(nil, true, eventNIDs)
events, err := s.accumulator.eventsTable.SelectByNIDs(txn, true, eventNIDs)
if err != nil {
return nil, fmt.Errorf("failed to select events in room %s: %s", roomID, err)
}
@ -746,5 +804,12 @@ func (s *Storage) joinedRoomsAfterPositionWithEvents(membershipEvents []Event, u
}
func (s *Storage) Teardown() {
s.accumulator.db.Close()
err := s.accumulator.db.Close()
if err != nil {
panic("Storage.Teardown: " + err.Error())
}
if !s.shutdown {
s.shutdown = true
close(s.shutdownCh)
}
}


@ -28,10 +28,11 @@ func TestStorageRoomStateBeforeAndAfterEventPosition(t *testing.T) {
testutils.NewStateEvent(t, "m.room.join_rules", "", alice, map[string]interface{}{"join_rule": "invite"}),
testutils.NewStateEvent(t, "m.room.member", bob, alice, map[string]interface{}{"membership": "invite"}),
}
_, latest, err := store.Accumulate(roomID, "", events)
_, latestNIDs, err := store.Accumulate(roomID, "", events)
if err != nil {
t.Fatalf("Accumulate returned error: %s", err)
}
latest := latestNIDs[len(latestNIDs)-1]
testCases := []struct {
name string
@ -147,12 +148,14 @@ func TestStorageJoinedRoomsAfterPosition(t *testing.T) {
},
}
var latestPos int64
var latestNIDs []int64
var err error
for roomID, eventMap := range roomIDToEventMap {
_, latestPos, err = store.Accumulate(roomID, "", eventMap)
_, latestNIDs, err = store.Accumulate(roomID, "", eventMap)
if err != nil {
t.Fatalf("Accumulate on %s failed: %s", roomID, err)
}
latestPos = latestNIDs[len(latestNIDs)-1]
}
aliceJoinedRooms, err := store.JoinedRoomsAfterPosition(alice, latestPos)
if err != nil {
@ -170,7 +173,8 @@ func TestStorageJoinedRoomsAfterPosition(t *testing.T) {
}
// also test currentStateEventsInAllRooms
roomIDToCreateEvents, err := store.currentStateEventsInAllRooms([]string{"m.room.create"})
txn := store.DB.MustBeginTx(context.Background(), nil)
roomIDToCreateEvents, err := store.currentStateEventsInAllRooms(txn, []string{"m.room.create"})
if err != nil {
t.Fatalf("CurrentStateEventsInAllRooms returned error: %s", err)
}
@ -193,7 +197,8 @@ func TestStorageJoinedRoomsAfterPosition(t *testing.T) {
}
// also test MetadataForAllRooms
roomIDToMetadata, err := store.MetadataForAllRooms()
roomIDToMetadata, err := store.MetadataForAllRooms(txn)
txn.Commit()
if err != nil {
t.Fatalf("MetadataForAllRooms: %s", err)
}
@ -259,7 +264,7 @@ func TestVisibleEventNIDsBetween(t *testing.T) {
},
}
for roomID, eventMap := range roomIDToEventMap {
_, err := store.Initialise(roomID, eventMap)
_, _, err := store.Initialise(roomID, eventMap)
if err != nil {
t.Fatalf("Initialise on %s failed: %s", roomID, err)
}
@ -488,7 +493,7 @@ func TestStorageLatestEventsInRoomsPrevBatch(t *testing.T) {
},
}
_, err := store.Initialise(roomID, stateEvents)
_, _, err := store.Initialise(roomID, stateEvents)
if err != nil {
t.Fatalf("failed to initialise: %s", err)
}


@ -3,24 +3,32 @@ package state
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/matrix-org/sync-v3/sqlutil"
"github.com/tidwall/gjson"
)
const (
ActionRequest = 1
ActionCancel = 2
)
// ToDeviceTable stores to_device messages for devices.
type ToDeviceTable struct {
db *sqlx.DB
latestPos int64
db *sqlx.DB
}
type ToDeviceRow struct {
Position int64 `db:"position"`
DeviceID string `db:"device_id"`
Message string `db:"message"`
Type string `db:"event_type"`
Sender string `db:"sender"`
Position int64 `db:"position"`
DeviceID string `db:"device_id"`
Message string `db:"message"`
Type string `db:"event_type"`
Sender string `db:"sender"`
UniqueKey *string `db:"unique_key"`
Action int `db:"action"`
}
type ToDeviceRowChunker []ToDeviceRow
@ -41,15 +49,25 @@ func NewToDeviceTable(db *sqlx.DB) *ToDeviceTable {
device_id TEXT NOT NULL,
event_type TEXT NOT NULL,
sender TEXT NOT NULL,
message TEXT NOT NULL
message TEXT NOT NULL,
-- nullable as these fields are not on all to-device events
unique_key TEXT,
action SMALLINT DEFAULT 0 -- 0 means unknown
);
CREATE TABLE IF NOT EXISTS syncv3_to_device_ack_pos (
device_id TEXT NOT NULL PRIMARY KEY,
unack_pos BIGINT NOT NULL
);
CREATE INDEX IF NOT EXISTS syncv3_to_device_messages_device_idx ON syncv3_to_device_messages(device_id);
CREATE INDEX IF NOT EXISTS syncv3_to_device_messages_ukey_idx ON syncv3_to_device_messages(unique_key, device_id);
`)
var latestPos int64
if err := db.QueryRow(`SELECT coalesce(MAX(position),0) FROM syncv3_to_device_messages`).Scan(&latestPos); err != nil && err != sql.ErrNoRows {
panic(err)
}
return &ToDeviceTable{db, latestPos}
return &ToDeviceTable{db}
}
func (t *ToDeviceTable) SetUnackedPosition(deviceID string, pos int64) error {
_, err := t.db.Exec(`INSERT INTO syncv3_to_device_ack_pos(device_id, unack_pos) VALUES($1,$2) ON CONFLICT (device_id)
DO UPDATE SET unack_pos=$2`, deviceID, pos)
return err
}
func (t *ToDeviceTable) DeleteMessagesUpToAndIncluding(deviceID string, toIncl int64) error {
@ -58,15 +76,12 @@ func (t *ToDeviceTable) DeleteMessagesUpToAndIncluding(deviceID string, toIncl i
}
// Query to-device messages for this device, exclusive of from, returning at most limit messages.
func (t *ToDeviceTable) Messages(deviceID string, from, to, limit int64) (msgs []json.RawMessage, upTo int64, err error) {
if to == -1 {
to = t.latestPos
}
upTo = to
func (t *ToDeviceTable) Messages(deviceID string, from, limit int64) (msgs []json.RawMessage, upTo int64, err error) {
upTo = from
var rows []ToDeviceRow
err = t.db.Select(&rows,
`SELECT position, message FROM syncv3_to_device_messages WHERE device_id = $1 AND position > $2 AND position <= $3 ORDER BY position ASC LIMIT $4`,
deviceID, from, to, limit,
`SELECT position, message FROM syncv3_to_device_messages WHERE device_id = $1 AND position > $2 ORDER BY position ASC LIMIT $3`,
deviceID, from, limit,
)
if len(rows) == 0 {
return
@ -75,7 +90,6 @@ func (t *ToDeviceTable) Messages(deviceID string, from, to, limit int64) (msgs [
for i := range rows {
msgs[i] = json.RawMessage(rows[i].Message)
}
// if a limit was applied, we may not get up to 'to'
upTo = rows[len(rows)-1].Position
return
}
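// Hypothetical paging sketch (illustrative only): drain a device's inbox in batches by advancing
// `from` to the returned upTo; the loop stops once a page comes back empty.
func exampleDrainInbox(t *ToDeviceTable, deviceID string) error {
    var from int64
    for {
        msgs, upTo, err := t.Messages(deviceID, from, 100)
        if err != nil {
            return err
        }
        if len(msgs) == 0 {
            return nil
        }
        from = upTo
    }
}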
@ -83,6 +97,18 @@ func (t *ToDeviceTable) Messages(deviceID string, from, to, limit int64) (msgs [
func (t *ToDeviceTable) InsertMessages(deviceID string, msgs []json.RawMessage) (pos int64, err error) {
var lastPos int64
err = sqlutil.WithTransaction(t.db, func(txn *sqlx.Tx) error {
var unackPos int64
err = txn.QueryRow(`SELECT unack_pos FROM syncv3_to_device_ack_pos WHERE device_id=$1`, deviceID).Scan(&unackPos)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("unable to select unacked pos: %s", err)
}
// Some of these events may be "cancel" actions. If we find stored request events with the same
// unique key as a cancel, delete those requests and drop the "cancel" action instead of inserting it.
cancels := []string{}
allRequests := make(map[string]struct{})
allCancels := make(map[string]struct{})
rows := make([]ToDeviceRow, len(msgs))
for i := range msgs {
m := gjson.ParseBytes(msgs[i])
@ -92,17 +118,72 @@ func (t *ToDeviceTable) InsertMessages(deviceID string, msgs []json.RawMessage)
Type: m.Get("type").Str,
Sender: m.Get("sender").Str,
}
switch rows[i].Type {
case "m.room_key_request":
action := m.Get("content.action").Str
if action == "request" {
rows[i].Action = ActionRequest
} else if action == "request_cancellation" {
rows[i].Action = ActionCancel
}
// "the same request_id and requesting_device_id fields, sent by the same user."
key := fmt.Sprintf("%s-%s-%s-%s", rows[i].Type, rows[i].Sender, m.Get("content.requesting_device_id").Str, m.Get("content.request_id").Str)
rows[i].UniqueKey = &key
}
if rows[i].Action == ActionCancel && rows[i].UniqueKey != nil {
cancels = append(cancels, *rows[i].UniqueKey)
allCancels[*rows[i].UniqueKey] = struct{}{}
} else if rows[i].Action == ActionRequest && rows[i].UniqueKey != nil {
allRequests[*rows[i].UniqueKey] = struct{}{}
}
}
if len(cancels) > 0 {
var cancelled []string
// delete 'action: request' events with the same unique key in this device's inbox, but only if they have not already been sent to the client (i.e. they sit above the unacked position)
err = txn.Select(&cancelled, `DELETE FROM syncv3_to_device_messages WHERE unique_key = ANY($1) AND device_id = $2 AND position > $3 RETURNING unique_key`,
pq.StringArray(cancels), deviceID, unackPos)
if err != nil {
return fmt.Errorf("failed to delete cancelled events: %s", err)
}
cancelledInDBSet := make(map[string]struct{}, len(cancelled))
for _, ukey := range cancelled {
cancelledInDBSet[ukey] = struct{}{}
}
// do not insert the cancelled unique keys
newRows := make([]ToDeviceRow, 0, len(rows))
for i := range rows {
if rows[i].UniqueKey != nil {
ukey := *rows[i].UniqueKey
_, exists := cancelledInDBSet[ukey]
if exists {
continue // the request was deleted so don't insert the cancel
}
// we may be requesting and cancelling in one go, check it and ignore if so
_, reqExists := allRequests[ukey]
_, cancelExists := allCancels[ukey]
if reqExists && cancelExists {
continue
}
}
newRows = append(newRows, rows[i])
}
rows = newRows
}
// we may have nothing to do if the entire set of events were cancellations
if len(rows) == 0 {
return nil
}
chunks := sqlutil.Chunkify(4, MaxPostgresParameters, ToDeviceRowChunker(rows))
chunks := sqlutil.Chunkify(6, MaxPostgresParameters, ToDeviceRowChunker(rows))
for _, chunk := range chunks {
result, err := t.db.NamedQuery(`INSERT INTO syncv3_to_device_messages (device_id, message, event_type, sender)
VALUES (:device_id, :message, :event_type, :sender) RETURNING position`, chunk)
result, err := txn.NamedQuery(`INSERT INTO syncv3_to_device_messages (device_id, message, event_type, sender, action, unique_key)
VALUES (:device_id, :message, :event_type, :sender, :action, :unique_key) RETURNING position`, chunk)
if err != nil {
return err
}
for result.Next() {
if err = result.Scan(&lastPos); err != nil {
result.Close()
return err
}
}
@ -110,8 +191,5 @@ func (t *ToDeviceTable) InsertMessages(deviceID string, msgs []json.RawMessage)
}
return nil
})
if lastPos > t.latestPos {
t.latestPos = lastPos
}
return lastPos, err
}
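// Summary of the cancellation handling in InsertMessages above: a request_cancellation matching a
// stored, still-unacked m.room_key_request deletes that request and is itself dropped; a request
// and its cancellation arriving in the same batch cancel each other out and neither is inserted;
// if the matching request sits at or below the device's unacked position (the client may already
// have seen it), both the request and the cancellation are kept.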


@ -27,23 +27,7 @@ func TestToDeviceTable(t *testing.T) {
if lastPos != 2 {
t.Fatalf("InsertMessages: bad pos returned, got %d want 2", lastPos)
}
gotMsgs, upTo, err := table.Messages(deviceID, 0, lastPos, limit)
if err != nil {
t.Fatalf("Messages: %s", err)
}
if upTo != lastPos {
t.Errorf("Message: got up to %d want %d", upTo, lastPos)
}
if len(gotMsgs) != len(msgs) {
t.Fatalf("Messages: got %d messages, want %d", len(gotMsgs), len(msgs))
}
for i := range msgs {
if !bytes.Equal(msgs[i], gotMsgs[i]) {
t.Fatalf("Messages: got %+v want %+v", gotMsgs[i], msgs[i])
}
}
// -1 to value means latest position
gotMsgs, upTo, err = table.Messages(deviceID, 0, -1, limit)
gotMsgs, upTo, err := table.Messages(deviceID, 0, limit)
if err != nil {
t.Fatalf("Messages: %s", err)
}
@ -60,7 +44,7 @@ func TestToDeviceTable(t *testing.T) {
}
// from the latest position, no messages
gotMsgs, upTo, err = table.Messages(deviceID, lastPos, lastPos, limit)
gotMsgs, upTo, err = table.Messages(deviceID, lastPos, limit)
if err != nil {
t.Fatalf("Messages: %s", err)
}
@ -72,24 +56,24 @@ func TestToDeviceTable(t *testing.T) {
}
// different device ID, no messages
gotMsgs, upTo, err = table.Messages("OTHER_DEVICE", 0, lastPos, limit)
gotMsgs, upTo, err = table.Messages("OTHER_DEVICE", 0, limit)
if err != nil {
t.Fatalf("Messages: %s", err)
}
if upTo != lastPos {
t.Errorf("Message: got up to %d want %d", upTo, lastPos)
if upTo != 0 {
t.Errorf("Message: got up to %d want %d", upTo, 0)
}
if len(gotMsgs) > 0 {
t.Fatalf("Messages: got %d messages, want none", len(gotMsgs))
}
// zero limit, no messages
gotMsgs, upTo, err = table.Messages(deviceID, 0, lastPos, 0)
gotMsgs, upTo, err = table.Messages(deviceID, 0, 0)
if err != nil {
t.Fatalf("Messages: %s", err)
}
if upTo != lastPos {
t.Errorf("Message: got up to %d want %d", upTo, lastPos)
if upTo != 0 {
t.Errorf("Message: got up to %d want %d", upTo, 0)
}
if len(gotMsgs) > 0 {
t.Fatalf("Messages: got %d messages, want none", len(gotMsgs))
@ -97,7 +81,7 @@ func TestToDeviceTable(t *testing.T) {
// lower limit, cap out
var wantLimit int64 = 1
gotMsgs, upTo, err = table.Messages(deviceID, 0, lastPos, wantLimit)
gotMsgs, upTo, err = table.Messages(deviceID, 0, wantLimit)
if err != nil {
t.Fatalf("Messages: %s", err)
}
@ -113,7 +97,7 @@ func TestToDeviceTable(t *testing.T) {
if err := table.DeleteMessagesUpToAndIncluding(deviceID, lastPos-1); err != nil {
t.Fatalf("DeleteMessagesUpTo: %s", err)
}
gotMsgs, upTo, err = table.Messages(deviceID, 0, lastPos, limit)
gotMsgs, upTo, err = table.Messages(deviceID, 0, limit)
if err != nil {
t.Fatalf("Messages: %s", err)
}
@ -131,3 +115,159 @@ func TestToDeviceTable(t *testing.T) {
t.Fatalf("Messages: deleted message but unexpected message left: got %s want %s", string(gotMsgs[0]), string(want))
}
}
// Test that https://github.com/uhoreg/matrix-doc/blob/drop-stale-to-device/proposals/3944-drop-stale-to-device.md works for m.room_key_request
func TestToDeviceTableDeleteCancels(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
sender := "SENDER"
destination := "DEST"
table := NewToDeviceTable(db)
// insert 2 requests
reqEv1 := newRoomKeyEvent(t, "request", "1", sender, map[string]interface{}{
"foo": "bar",
})
_, err = table.InsertMessages(destination, []json.RawMessage{reqEv1})
assertNoError(t, err)
gotMsgs, _, err := table.Messages(destination, 0, 10)
assertNoError(t, err)
bytesEqual(t, gotMsgs[0], reqEv1)
reqEv2 := newRoomKeyEvent(t, "request", "2", sender, map[string]interface{}{
"foo": "baz",
})
_, err = table.InsertMessages(destination, []json.RawMessage{reqEv2})
assertNoError(t, err)
gotMsgs, _, err = table.Messages(destination, 0, 10)
assertNoError(t, err)
bytesEqual(t, gotMsgs[1], reqEv2)
// now delete 1
cancelEv1 := newRoomKeyEvent(t, "request_cancellation", "1", sender, nil)
_, err = table.InsertMessages(destination, []json.RawMessage{cancelEv1})
assertNoError(t, err)
// selecting messages now returns only reqEv2
gotMsgs, _, err = table.Messages(destination, 0, 10)
assertNoError(t, err)
bytesEqual(t, gotMsgs[0], reqEv2)
// now do lots of close-but-not-quite cancellation requests that should not match reqEv2
_, err = table.InsertMessages(destination, []json.RawMessage{
newRoomKeyEvent(t, "cancellation", "2", sender, nil), // wrong action
newRoomKeyEvent(t, "request_cancellation", "22", sender, nil), // wrong request ID
newRoomKeyEvent(t, "request_cancellation", "2", "not_who_you_think", nil), // wrong req device id
})
assertNoError(t, err)
_, err = table.InsertMessages("wrong_destination", []json.RawMessage{ // wrong destination
newRoomKeyEvent(t, "request_cancellation", "2", sender, nil),
})
assertNoError(t, err)
gotMsgs, _, err = table.Messages(destination, 0, 10)
assertNoError(t, err)
bytesEqual(t, gotMsgs[0], reqEv2) // the request lives on
if len(gotMsgs) != 4 { // the cancellations live on too, but not the one sent to the wrong dest
t.Errorf("got %d msgs, want 4", len(gotMsgs))
}
// request + cancel in one go => nothing inserted
destination2 := "DEST2"
_, err = table.InsertMessages(destination2, []json.RawMessage{
newRoomKeyEvent(t, "request", "A", sender, map[string]interface{}{
"foo": "baz",
}),
newRoomKeyEvent(t, "request_cancellation", "A", sender, nil),
})
assertNoError(t, err)
gotMsgs, _, err = table.Messages(destination2, 0, 10)
assertNoError(t, err)
if len(gotMsgs) > 0 {
t.Errorf("Got %+v want nothing", jsonArrStr(gotMsgs))
}
}
// Test that unacked events are safe from deletion
func TestToDeviceTableNoDeleteUnacks(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
sender := "SENDER2"
destination := "DEST2"
table := NewToDeviceTable(db)
// insert request
reqEv := newRoomKeyEvent(t, "request", "1", sender, map[string]interface{}{
"foo": "bar",
})
pos, err := table.InsertMessages(destination, []json.RawMessage{reqEv})
assertNoError(t, err)
// mark this position as unacked: this means the client MAY know about this request so it isn't
// safe to delete it
err = table.SetUnackedPosition(destination, pos)
assertNoError(t, err)
// now issue a cancellation: this should NOT delete the request, due to the protection for unacked events
cancelEv := newRoomKeyEvent(t, "request_cancellation", "1", sender, nil)
_, err = table.InsertMessages(destination, []json.RawMessage{cancelEv})
assertNoError(t, err)
// selecting messages returns both events
gotMsgs, _, err := table.Messages(destination, 0, 10)
assertNoError(t, err)
if len(gotMsgs) != 2 {
t.Fatalf("got %d msgs, want 2: %v", len(gotMsgs), jsonArrStr(gotMsgs))
}
bytesEqual(t, gotMsgs[0], reqEv)
bytesEqual(t, gotMsgs[1], cancelEv)
// test that injecting another req/cancel does cause them to be deleted
_, err = table.InsertMessages(destination, []json.RawMessage{newRoomKeyEvent(t, "request", "2", sender, map[string]interface{}{
"foo": "bar",
})})
assertNoError(t, err)
_, err = table.InsertMessages(destination, []json.RawMessage{newRoomKeyEvent(t, "request_cancellation", "2", sender, nil)})
assertNoError(t, err)
// selecting messages returns the same as before
gotMsgs, _, err = table.Messages(destination, 0, 10)
assertNoError(t, err)
if len(gotMsgs) != 2 {
t.Fatalf("got %d msgs, want 2: %v", len(gotMsgs), jsonArrStr(gotMsgs))
}
bytesEqual(t, gotMsgs[0], reqEv)
bytesEqual(t, gotMsgs[1], cancelEv)
}
func bytesEqual(t *testing.T, got, want json.RawMessage) {
t.Helper()
if !bytes.Equal(got, want) {
t.Fatalf("bytesEqual: \ngot %s\n want %s", string(got), string(want))
}
}
type roomKeyRequest struct {
Type string `json:"type"`
Content roomKeyRequestContent `json:"content"`
}
type roomKeyRequestContent struct {
Action string `json:"action"`
RequestID string `json:"request_id"`
RequestingDeviceID string `json:"requesting_device_id"`
Body map[string]interface{} `json:"body,omitempty"`
}
func newRoomKeyEvent(t *testing.T, action, reqID, reqDeviceID string, body map[string]interface{}) json.RawMessage {
rkr := roomKeyRequest{
Type: "m.room_key_request",
Content: roomKeyRequestContent{
Action: action,
RequestID: reqID,
RequestingDeviceID: reqDeviceID,
Body: body,
},
}
b, err := json.Marshal(rkr)
if err != nil {
t.Fatalf("newRoomKeyEvent: %s", err)
}
return json.RawMessage(b)
}

state/txn_table.go (new file, 71 lines)

@ -0,0 +1,71 @@
package state
import (
"time"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
)
type txnRow struct {
UserID string `db:"user_id"`
EventID string `db:"event_id"`
TxnID string `db:"txn_id"`
Timestamp int64 `db:"ts"`
}
type TransactionsTable struct {
db *sqlx.DB
}
func NewTransactionsTable(db *sqlx.DB) *TransactionsTable {
// make sure tables are made
db.MustExec(`
CREATE TABLE IF NOT EXISTS syncv3_txns (
user_id TEXT NOT NULL,
event_id TEXT NOT NULL,
txn_id TEXT NOT NULL,
ts BIGINT NOT NULL,
UNIQUE(user_id, event_id)
);
`)
return &TransactionsTable{db}
}
func (t *TransactionsTable) Insert(userID string, eventIDToTxnID map[string]string) error {
ts := time.Now()
rows := make([]txnRow, 0, len(eventIDToTxnID))
for eventID, txnID := range eventIDToTxnID {
rows = append(rows, txnRow{
EventID: eventID,
TxnID: txnID,
UserID: userID,
Timestamp: ts.UnixMilli(),
})
}
result, err := t.db.NamedQuery(`
INSERT INTO syncv3_txns (user_id, event_id, txn_id, ts)
VALUES (:user_id, :event_id, :txn_id, :ts)`, rows)
if err == nil {
result.Close()
}
return err
}
func (t *TransactionsTable) Clean(boundaryTime time.Time) error {
_, err := t.db.Exec(`DELETE FROM syncv3_txns WHERE ts <= $1`, boundaryTime.UnixMilli())
return err
}
func (t *TransactionsTable) Select(userID string, eventIDs []string) (map[string]string, error) {
result := make(map[string]string, len(eventIDs))
var rows []txnRow
err := t.db.Select(&rows, `SELECT event_id, txn_id FROM syncv3_txns WHERE user_id=$1 and event_id=ANY($2)`, userID, pq.StringArray(eventIDs))
if err != nil {
return nil, err
}
for _, row := range rows {
result[row.EventID] = row.TxnID
}
return result, nil
}
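// Hypothetical usage sketch (illustrative only): remember the sender's transaction ID when an
// event is first seen, then recover it when serving that event back to the same user so their
// client can match the remote echo to the original send. The literal values are made up.
func exampleRememberTxnID(table *TransactionsTable) (map[string]string, error) {
    if err := table.Insert("@alice:example.org", map[string]string{"$eventid": "client-txn-1"}); err != nil {
        return nil, err
    }
    return table.Select("@alice:example.org", []string{"$eventid"})
}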

state/txn_table_test.go (new file, 88 lines)

@ -0,0 +1,88 @@
package state
import (
"testing"
"time"
"github.com/jmoiron/sqlx"
)
func assertTxns(t *testing.T, gotEventToTxn map[string]string, wantEventToTxn map[string]string) {
t.Helper()
if len(gotEventToTxn) != len(wantEventToTxn) {
t.Errorf("got %d results, want %d", len(gotEventToTxn), len(wantEventToTxn))
}
for wantEventID, wantTxnID := range wantEventToTxn {
gotTxnID, ok := gotEventToTxn[wantEventID]
if !ok {
t.Errorf("txn ID for event %v is missing", wantEventID)
continue
}
if gotTxnID != wantTxnID {
t.Errorf("event %v got txn ID %v want %v", wantEventID, gotTxnID, wantTxnID)
}
}
}
func TestTransactionTable(t *testing.T) {
db, err := sqlx.Open("postgres", postgresConnectionString)
if err != nil {
t.Fatalf("failed to open SQL db: %s", err)
}
userID := "@alice:txns"
eventA := "$A"
eventB := "$B"
txnIDA := "txn_A"
txnIDB := "txn_B"
table := NewTransactionsTable(db)
// empty table select
gotTxns, err := table.Select(userID, []string{eventA})
assertNoError(t, err)
assertTxns(t, gotTxns, nil)
// basic insert and select
err = table.Insert(userID, map[string]string{
eventA: txnIDA,
})
assertNoError(t, err)
gotTxns, err = table.Select(userID, []string{eventA})
assertNoError(t, err)
assertTxns(t, gotTxns, map[string]string{
eventA: txnIDA,
})
// multiple txns
err = table.Insert(userID, map[string]string{
eventB: txnIDB,
})
assertNoError(t, err)
gotTxns, err = table.Select(userID, []string{eventA, eventB})
assertNoError(t, err)
assertTxns(t, gotTxns, map[string]string{
eventA: txnIDA,
eventB: txnIDB,
})
// different user select
gotTxns, err = table.Select("@another", []string{eventA, eventB})
assertNoError(t, err)
assertTxns(t, gotTxns, nil)
// no-op cleanup
err = table.Clean(time.Now().Add(-1 * time.Minute))
assertNoError(t, err)
gotTxns, err = table.Select(userID, []string{eventA, eventB})
assertNoError(t, err)
assertTxns(t, gotTxns, map[string]string{
eventA: txnIDA,
eventB: txnIDB,
})
// real cleanup
err = table.Clean(time.Now())
assertNoError(t, err)
gotTxns, err = table.Select(userID, []string{eventA, eventB})
assertNoError(t, err)
assertTxns(t, gotTxns, nil)
}


@ -1,6 +1,7 @@
package sync2
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -13,9 +14,11 @@ import (
const AccountDataGlobalRoom = ""
var ProxyVersion = ""
type Client interface {
WhoAmI(accessToken string) (string, error)
DoSyncV2(accessToken, since string, isFirst bool) (*SyncResponse, int, error)
DoSyncV2(ctx context.Context, accessToken, since string, isFirst bool) (*SyncResponse, int, error)
}
// HTTPClient represents a Sync v2 Client.
@ -30,7 +33,7 @@ func (v *HTTPClient) WhoAmI(accessToken string) (string, error) {
if err != nil {
return "", err
}
req.Header.Set("User-Agent", "sync-v3-proxy")
req.Header.Set("User-Agent", "sync-v3-proxy-"+ProxyVersion)
req.Header.Set("Authorization", "Bearer "+accessToken)
res, err := v.Client.Do(req)
if err != nil {
@ -49,7 +52,7 @@ func (v *HTTPClient) WhoAmI(accessToken string) (string, error) {
// DoSyncV2 performs a sync v2 request. Returns the sync response and the response status code
// or an error. Set isFirst=true on the first sync to force a timeout=0 sync to ensure snapiness.
func (v *HTTPClient) DoSyncV2(accessToken, since string, isFirst bool) (*SyncResponse, int, error) {
func (v *HTTPClient) DoSyncV2(ctx context.Context, accessToken, since string, isFirst bool) (*SyncResponse, int, error) {
qps := "?"
if isFirst { // first time syncing in this process
qps += "timeout=0"
@ -66,7 +69,7 @@ func (v *HTTPClient) DoSyncV2(accessToken, since string, isFirst bool) (*SyncRes
req, err := http.NewRequest(
"GET", v.DestinationServer+"/_matrix/client/r0/sync"+qps, nil,
)
req.Header.Set("User-Agent", "sync-v3-proxy")
req.Header.Set("User-Agent", "sync-v3-proxy-"+ProxyVersion)
req.Header.Set("Authorization", "Bearer "+accessToken)
if err != nil {
return nil, 0, fmt.Errorf("DoSyncV2: NewRequest failed: %w", err)

sync2/handler2/handler.go (new file, 371 lines)

@ -0,0 +1,371 @@
package handler2
import (
"encoding/json"
"os"
"sync"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/pubsub"
"github.com/matrix-org/sync-v3/state"
"github.com/matrix-org/sync-v3/sync2"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
"github.com/tidwall/gjson"
)
var logger = zerolog.New(os.Stdout).With().Timestamp().Logger().Output(zerolog.ConsoleWriter{
Out: os.Stderr,
TimeFormat: "15:04:05",
})
// Handler is responsible for starting v2 pollers at startup,
// processing v2 data and publishing updates, and receiving and processing EnsurePolling events.
type Handler struct {
pMap *sync2.PollerMap
v2Store *sync2.Storage
Store *state.Storage
v2Pub pubsub.Notifier
v3Sub *pubsub.V3Sub
client sync2.Client
unreadMap map[string]struct {
Highlight int
Notif int
}
numPollers prometheus.Gauge
subSystem string
}
func NewHandler(
connStr string, pMap *sync2.PollerMap, v2Store *sync2.Storage, store *state.Storage, client sync2.Client,
pub pubsub.Notifier, sub pubsub.Listener, enablePrometheus bool,
) (*Handler, error) {
h := &Handler{
pMap: pMap,
v2Store: v2Store,
client: client,
Store: store,
subSystem: "poller",
unreadMap: make(map[string]struct {
Highlight int
Notif int
}),
}
pMap.SetCallbacks(h)
if enablePrometheus {
h.addPrometheusMetrics()
pub = pubsub.NewPromNotifier(pub, h.subSystem)
}
h.v2Pub = pub
// listen for v3 requests like requests to start polling
v3Sub := pubsub.NewV3Sub(sub, h)
h.v3Sub = v3Sub
return h, nil
}
// Listen starts all consumers
func (h *Handler) Listen() {
go func() {
err := h.v3Sub.Listen()
if err != nil {
logger.Err(err).Msg("Failed to listen for v3 messages")
}
}()
}
func (h *Handler) Teardown() {
// stop polling and tear down DB conns
h.v3Sub.Teardown()
h.v2Pub.Close()
h.Store.Teardown()
h.v2Store.Teardown()
h.pMap.Terminate()
if h.numPollers != nil {
prometheus.Unregister(h.numPollers)
}
}
func (h *Handler) StartV2Pollers() {
devices, err := h.v2Store.AllDevices()
if err != nil {
logger.Err(err).Msg("StartV2Pollers: failed to query devices")
return
}
// how many concurrent pollers to make at startup.
// Too high and this will flood the upstream server with sync requests at startup.
// Too low and this will take ages for the v2 pollers to start up.
numWorkers := 16
numFails := 0
ch := make(chan sync2.Device, len(devices))
for _, d := range devices {
// if we fail to decrypt the access token, skip it.
if d.AccessToken == "" {
numFails++
continue
}
ch <- d
}
close(ch)
logger.Info().Int("num_devices", len(devices)).Int("num_fail_decrypt", numFails).Msg("StartV2Pollers")
var wg sync.WaitGroup
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
defer wg.Done()
for d := range ch {
h.pMap.EnsurePolling(
d.AccessToken, d.UserID, d.DeviceID, d.Since,
logger.With().Str("user_id", d.UserID).Logger(),
)
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2InitialSyncComplete{
UserID: d.UserID,
DeviceID: d.DeviceID,
})
}
}()
}
wg.Wait()
logger.Info().Msg("StartV2Pollers finished")
h.updateMetrics()
}
func (h *Handler) updateMetrics() {
if h.numPollers == nil {
return
}
h.numPollers.Set(float64(h.pMap.NumPollers()))
}
func (h *Handler) OnTerminated(userID, deviceID string) {
h.updateMetrics()
}
func (h *Handler) addPrometheusMetrics() {
h.numPollers = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "sliding_sync",
Subsystem: h.subSystem,
Name: "num_pollers",
Help: "Number of active sync v2 pollers.",
})
prometheus.MustRegister(h.numPollers)
}
// Emits nothing as no downstream components need it.
func (h *Handler) UpdateDeviceSince(deviceID, since string) {
err := h.v2Store.UpdateDeviceSince(deviceID, since)
if err != nil {
logger.Err(err).Str("device", deviceID).Str("since", since).Msg("V2: failed to persist since token")
}
}
func (h *Handler) OnE2EEData(userID, deviceID string, otkCounts map[string]int, fallbackKeyTypes []string, deviceListChanges map[string]int) {
// some of these fields may be set
partialDD := internal.DeviceData{
UserID: userID,
DeviceID: deviceID,
OTKCounts: otkCounts,
FallbackKeyTypes: fallbackKeyTypes,
DeviceLists: internal.DeviceLists{
New: deviceListChanges,
},
}
nextPos, err := h.Store.DeviceDataTable.Upsert(&partialDD)
if err != nil {
logger.Err(err).Str("user", userID).Msg("failed to upsert device data")
return
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2DeviceData{
Pos: nextPos,
})
}
func (h *Handler) Accumulate(userID, roomID, prevBatch string, timeline []json.RawMessage) {
// Remember any transaction IDs that may be unique to this user
eventIDToTxnID := make(map[string]string, len(timeline)) // event_id -> txn_id
for _, e := range timeline {
txnID := gjson.GetBytes(e, "unsigned.transaction_id")
if !txnID.Exists() {
continue
}
eventID := gjson.GetBytes(e, "event_id").Str
eventIDToTxnID[eventID] = txnID.Str
}
if len(eventIDToTxnID) > 0 {
// persist the txn IDs
err := h.Store.TransactionsTable.Insert(userID, eventIDToTxnID)
if err != nil {
logger.Err(err).Str("user", userID).Int("num_txns", len(eventIDToTxnID)).Msg("failed to persist txn IDs for user")
}
}
// Insert new events
numNew, latestNIDs, err := h.Store.Accumulate(roomID, prevBatch, timeline)
if err != nil {
logger.Err(err).Int("timeline", len(timeline)).Str("room", roomID).Msg("V2: failed to accumulate room")
return
}
if numNew == 0 {
// no new events
return
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2Accumulate{
RoomID: roomID,
PrevBatch: prevBatch,
EventNIDs: latestNIDs,
})
}
func (h *Handler) Initialise(roomID string, state []json.RawMessage) {
added, snapID, err := h.Store.Initialise(roomID, state)
if err != nil {
logger.Err(err).Int("state", len(state)).Str("room", roomID).Msg("V2: failed to initialise room")
return
}
if !added {
// no new events
return
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2Initialise{
RoomID: roomID,
SnapshotNID: snapID,
})
}
func (h *Handler) SetTyping(roomID string, ephEvent json.RawMessage) {
// we don't persist this for long-term storage as typing notifs are inherently ephemeral.
// So rather than maintaining them forever, they will naturally expire when we terminate.
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2Typing{
RoomID: roomID,
EphemeralEvent: ephEvent,
})
}
func (h *Handler) OnReceipt(userID, roomID, ephEventType string, ephEvent json.RawMessage) {
// update our records - we make artificial new RR events if there are genuine changes,
// else it returns nil
newReceipts, err := h.Store.ReceiptTable.Insert(roomID, ephEvent)
if err != nil {
logger.Err(err).Str("room", roomID).Msg("failed to store receipts")
return
}
if len(newReceipts) == 0 {
return
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2Receipt{
RoomID: roomID,
Receipts: newReceipts,
})
}
// Send nothing, the v3 API will pull from the DB directly; no in-memory shenanigans
func (h *Handler) AddToDeviceMessages(userID, deviceID string, msgs []json.RawMessage) {
_, err := h.Store.ToDeviceTable.InsertMessages(deviceID, msgs)
if err != nil {
logger.Err(err).Str("user", userID).Str("device", deviceID).Int("msgs", len(msgs)).Msg("V2: failed to store to-device messages")
}
}
func (h *Handler) UpdateUnreadCounts(roomID, userID string, highlightCount, notifCount *int) {
// only touch the DB and notify if they have changed. sync v2 will always include the counts
// even if they haven't changed :(
key := roomID + userID
entry, ok := h.unreadMap[key]
hc := 0
if highlightCount != nil {
hc = *highlightCount
}
nc := 0
if notifCount != nil {
nc = *notifCount
}
if ok && entry.Highlight == hc && entry.Notif == nc {
return // dupe
}
h.unreadMap[key] = struct {
Highlight int
Notif int
}{
Highlight: hc,
Notif: nc,
}
err := h.Store.UnreadTable.UpdateUnreadCounters(userID, roomID, highlightCount, notifCount)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to update unread counters")
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2UnreadCounts{
RoomID: roomID,
UserID: userID,
HighlightCount: highlightCount,
NotificationCount: notifCount,
})
}
func (h *Handler) OnAccountData(userID, roomID string, events []json.RawMessage) {
data, err := h.Store.InsertAccountData(userID, roomID, events)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to update account data")
return
}
var types []string
for _, d := range data {
types = append(types, d.Type)
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2AccountData{
UserID: userID,
RoomID: roomID,
Types: types,
})
}
func (h *Handler) OnInvite(userID, roomID string, inviteState []json.RawMessage) {
err := h.Store.InvitesTable.InsertInvite(userID, roomID, inviteState)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to insert invite")
return
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2InviteRoom{
UserID: userID,
RoomID: roomID,
})
}
func (h *Handler) OnLeftRoom(userID, roomID string) {
// remove any invites for this user if they are rejecting an invite
err := h.Store.InvitesTable.RemoveInvite(userID, roomID)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to retire invite")
}
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2LeaveRoom{
UserID: userID,
RoomID: roomID,
})
}
func (h *Handler) EnsurePolling(p *pubsub.V3EnsurePolling) {
logger.Info().Str("user", p.UserID).Msg("EnsurePolling: new request")
defer func() {
logger.Info().Str("user", p.UserID).Msg("EnsurePolling: request finished")
}()
dev, err := h.v2Store.Device(p.DeviceID)
if err != nil {
logger.Err(err).Str("user", p.UserID).Str("device", p.DeviceID).Msg("V3Sub: EnsurePolling unknown device")
return
}
// don't block us from consuming more pubsub messages just because someone wants to sync
go func() {
// blocks until an initial sync is done
h.pMap.EnsurePolling(
dev.AccessToken, dev.UserID, dev.DeviceID, dev.Since,
logger.With().Str("user_id", dev.UserID).Logger(),
)
h.updateMetrics()
h.v2Pub.Notify(pubsub.ChanV2, &pubsub.V2InitialSyncComplete{
UserID: p.UserID,
DeviceID: p.DeviceID,
})
}()
}


@ -1,10 +1,13 @@
package sync2
import (
"context"
"encoding/json"
"sync"
"time"
"github.com/matrix-org/sync-v3/internal"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
"github.com/tidwall/gjson"
)
@ -14,39 +17,50 @@ var timeSleep = time.Sleep
// V2DataReceiver is the receiver for all the v2 sync data the poller gets
type V2DataReceiver interface {
// Update the since token for this device. Called AFTER all other data in this sync response has been processed.
UpdateDeviceSince(deviceID, since string)
Accumulate(roomID, prevBatch string, timeline []json.RawMessage)
Initialise(roomID string, state []json.RawMessage)
SetTyping(roomID string, userIDs []string)
// Add messages for this device. If an error is returned, the poll loop is terminated as continuing
// would implicitly acknowledge these messages.
AddToDeviceMessages(userID, deviceID string, msgs []json.RawMessage)
// Accumulate data for this room. This means the timeline section of the v2 response.
Accumulate(userID, roomID, prevBatch string, timeline []json.RawMessage) // latest pos with event nids of timeline entries
// Initialise the room, if it hasn't been already. This means the state section of the v2 response.
Initialise(roomID string, state []json.RawMessage) // snapshot ID?
// SetTyping indicates which users are typing.
SetTyping(roomID string, ephEvent json.RawMessage)
// Sent when there is a new receipt
OnReceipt(userID, roomID, ephEventType string, ephEvent json.RawMessage)
// AddToDeviceMessages adds this chunk of to_device messages. Preserve the ordering.
AddToDeviceMessages(userID, deviceID string, msgs []json.RawMessage) // start/end stream pos
// UpdateUnreadCounts sets the highlight_count and notification_count for this user in this room.
UpdateUnreadCounts(roomID, userID string, highlightCount, notifCount *int)
OnAccountData(userID, roomID string, events []json.RawMessage)
OnInvite(userID, roomID string, inviteState []json.RawMessage)
// Set the latest account data for this user.
OnAccountData(userID, roomID string, events []json.RawMessage) // ping update with types? Can you race when re-querying?
// Sent when there is a room in the `invite` section of the v2 response.
OnInvite(userID, roomID string, inviteState []json.RawMessage) // invitestate in db
// Sent when there is a room in the `leave` section of the v2 response.
OnLeftRoom(userID, roomID string)
// Sent when there is a _change_ in E2EE data, not all the time
OnE2EEData(userID, deviceID string, otkCounts map[string]int, fallbackKeyTypes []string, deviceListChanges map[string]int)
// Sent when the upstream homeserver sends back a 401 invalidating the token
OnTerminated(userID, deviceID string)
}
// Fetcher which PollerMap satisfies used by the E2EE extension
// Fetcher used by the E2EE extension
type E2EEFetcher interface {
LatestE2EEData(deviceID string) (otkCounts map[string]int, fallbackKeyTypes, changed, left []string)
DeviceData(userID, deviceID string, isInitial bool) *internal.DeviceData
}
type TransactionIDFetcher interface {
TransactionIDForEvent(userID, eventID string) (txnID string)
TransactionIDForEvents(userID string, eventIDs []string) (eventIDToTxnID map[string]string)
}
// PollerMap is a map of device ID to Poller
type PollerMap struct {
v2Client Client
callbacks V2DataReceiver
pollerMu *sync.Mutex
Pollers map[string]*Poller // device_id -> poller
executor chan func()
executorRunning bool
txnCache *TransactionIDCache
v2Client Client
callbacks V2DataReceiver
pollerMu *sync.Mutex
Pollers map[string]*poller // device_id -> poller
executor chan func()
executorRunning bool
processHistogramVec *prometheus.HistogramVec
}
// NewPollerMap makes a new PollerMap. Guarantees that the V2DataReceiver will be called on the same
@ -56,11 +70,13 @@ type PollerMap struct {
// which, if we assert NIDs only increment, will result in missed events.
//
// Consider these events in the same room, with 3 different pollers getting the data:
// 1 2 3 4 5 6 7 eventual DB event NID
// A B C D E F G
// ----- poll loop 1 = A,B,C new events = A,B,C latest=3
// --------- poll loop 2 = A,B,C,D,E new events = D,E latest=5
// ------------- poll loop 3 = A,B,C,D,E,F,G new events = F,G latest=7
//
// 1 2 3 4 5 6 7 eventual DB event NID
// A B C D E F G
// ----- poll loop 1 = A,B,C new events = A,B,C latest=3
// --------- poll loop 2 = A,B,C,D,E new events = D,E latest=5
// ------------- poll loop 3 = A,B,C,D,E,F,G new events = F,G latest=7
//
// The DB layer will correctly assign NIDs and stop duplicates, resulting in a set of new events which
// do not overlap. However, there is a gap between this point and updating the cache, where variable
// delays can be introduced, so F,G latest=7 could be injected first. If we then never walk back to
@ -69,37 +85,53 @@ type PollerMap struct {
// This only affects resources which are shared across multiple DEVICES such as:
// - room resources: events, EDUs
// - user resources: notif counts, account data
//
// NOT to-device messages, or since tokens.
func NewPollerMap(v2Client Client, callbacks V2DataReceiver) *PollerMap {
return &PollerMap{
v2Client: v2Client,
callbacks: callbacks,
pollerMu: &sync.Mutex{},
Pollers: make(map[string]*Poller),
executor: make(chan func(), 0),
txnCache: NewTransactionIDCache(),
func NewPollerMap(v2Client Client, enablePrometheus bool) *PollerMap {
pm := &PollerMap{
v2Client: v2Client,
pollerMu: &sync.Mutex{},
Pollers: make(map[string]*poller),
executor: make(chan func(), 0),
}
if enablePrometheus {
pm.processHistogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "sliding_sync",
Subsystem: "poller",
Name: "process_duration_secs",
Help: "Time taken in seconds for the sync v2 response to be processed fully",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}, []string{"initial", "first"})
prometheus.MustRegister(pm.processHistogramVec)
}
return pm
}
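// Illustrative usage only (not part of this change): wiring up a PollerMap at startup,
// assuming `client` implements Client and `receiver` implements V2DataReceiver.
// SetCallbacks must be called before EnsurePolling so the executor has a receiver to
// deliver data to; EnsurePolling blocks until the initial /sync for the device has been
// processed (see the tests further down).
//
//	pm := NewPollerMap(client, true) // true => register the prometheus histogram
//	pm.SetCallbacks(receiver)
//	pm.EnsurePolling(accessToken, "@alice:localhost", "DEVICEID", "", logger)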
// TransactionIDForEvent returns the transaction ID for this event for this user, if one exists.
func (h *PollerMap) TransactionIDForEvent(userID, eventID string) string {
return h.txnCache.Get(userID, eventID)
func (h *PollerMap) SetCallbacks(callbacks V2DataReceiver) {
h.callbacks = callbacks
}
// LatestE2EEData pulls the latest device_lists and device_one_time_keys_count values from the poller.
// These bits of data are ephemeral and do not need to be persisted.
func (h *PollerMap) LatestE2EEData(deviceID string) (otkCounts map[string]int, fallbackKeyTypes, changed, left []string) {
// Terminate all pollers. Useful in tests.
func (h *PollerMap) Terminate() {
h.pollerMu.Lock()
poller := h.Pollers[deviceID]
h.pollerMu.Unlock()
if poller == nil || poller.Terminated {
// possible if we have 2 devices for the same user, we just need to
// wait a bit for the 2nd device's v2 /sync to return
return
defer h.pollerMu.Unlock()
for _, p := range h.Pollers {
p.Terminate()
}
if h.processHistogramVec != nil {
prometheus.Unregister(h.processHistogramVec)
}
close(h.executor)
}
func (h *PollerMap) NumPollers() (count int) {
h.pollerMu.Lock()
defer h.pollerMu.Unlock()
for _, p := range h.Pollers {
if !p.Terminated {
count++
}
}
otkCounts = poller.OTKCounts()
fallbackKeyTypes = poller.FallbackKeyTypes()
changed, left = poller.DeviceListChanges()
return
}
@ -112,6 +144,7 @@ func (h *PollerMap) LatestE2EEData(deviceID string) (otkCounts map[string]int, f
// to-device msgs to decrypt E2EE rooms.
func (h *PollerMap) EnsurePolling(accessToken, userID, deviceID, v2since string, logger zerolog.Logger) {
h.pollerMu.Lock()
logger.Info().Str("device", deviceID).Msg("EnsurePolling lock acquired")
if !h.executorRunning {
h.executorRunning = true
go h.execute()
@ -126,7 +159,8 @@ func (h *PollerMap) EnsurePolling(accessToken, userID, deviceID, v2since string,
return
}
// replace the poller
poller = NewPoller(userID, accessToken, deviceID, h.v2Client, h, h.txnCache, logger)
poller = newPoller(userID, accessToken, deviceID, h.v2Client, h, logger)
poller.processHistogramVec = h.processHistogramVec
go poller.Poll(v2since)
h.Pollers[deviceID] = poller
@ -159,11 +193,11 @@ func (h *PollerMap) execute() {
func (h *PollerMap) UpdateDeviceSince(deviceID, since string) {
h.callbacks.UpdateDeviceSince(deviceID, since)
}
func (h *PollerMap) Accumulate(roomID, prevBatch string, timeline []json.RawMessage) {
func (h *PollerMap) Accumulate(userID, roomID, prevBatch string, timeline []json.RawMessage) {
var wg sync.WaitGroup
wg.Add(1)
h.executor <- func() {
h.callbacks.Accumulate(roomID, prevBatch, timeline)
h.callbacks.Accumulate(userID, roomID, prevBatch, timeline)
wg.Done()
}
wg.Wait()
@ -177,11 +211,11 @@ func (h *PollerMap) Initialise(roomID string, state []json.RawMessage) {
}
wg.Wait()
}
func (h *PollerMap) SetTyping(roomID string, userIDs []string) {
func (h *PollerMap) SetTyping(roomID string, ephEvent json.RawMessage) {
var wg sync.WaitGroup
wg.Add(1)
h.executor <- func() {
h.callbacks.SetTyping(roomID, userIDs)
h.callbacks.SetTyping(roomID, ephEvent)
wg.Done()
}
wg.Wait()
@ -212,6 +246,10 @@ func (h *PollerMap) AddToDeviceMessages(userID, deviceID string, msgs []json.Raw
h.callbacks.AddToDeviceMessages(userID, deviceID, msgs)
}
func (h *PollerMap) OnTerminated(userID, deviceID string) {
h.callbacks.OnTerminated(userID, deviceID)
}
func (h *PollerMap) UpdateUnreadCounts(roomID, userID string, highlightCount, notifCount *int) {
var wg sync.WaitGroup
wg.Add(1)
@ -232,8 +270,28 @@ func (h *PollerMap) OnAccountData(userID, roomID string, events []json.RawMessag
wg.Wait()
}
func (h *PollerMap) OnReceipt(userID, roomID, ephEventType string, ephEvent json.RawMessage) {
var wg sync.WaitGroup
wg.Add(1)
h.executor <- func() {
h.callbacks.OnReceipt(userID, roomID, ephEventType, ephEvent)
wg.Done()
}
wg.Wait()
}
func (h *PollerMap) OnE2EEData(userID, deviceID string, otkCounts map[string]int, fallbackKeyTypes []string, deviceListChanges map[string]int) {
var wg sync.WaitGroup
wg.Add(1)
h.executor <- func() {
h.callbacks.OnE2EEData(userID, deviceID, otkCounts, fallbackKeyTypes, deviceListChanges)
wg.Done()
}
wg.Wait()
}
// Poller can automatically poll the sync v2 endpoint and accumulate the responses in storage
type Poller struct {
type poller struct {
userID string
accessToken string
deviceID string
@ -241,51 +299,70 @@ type Poller struct {
receiver V2DataReceiver
logger zerolog.Logger
// remember txn ids
txnCache *TransactionIDCache
// E2EE fields
e2eeMu *sync.Mutex
fallbackKeyTypes []string
otkCounts map[string]int
deviceListChanges map[string]string // latest user_id -> state e.g "@alice" -> "left"
// E2EE fields: we keep them so we only send callbacks on deltas not all the time
fallbackKeyTypes []string
otkCounts map[string]int
// flag set to true when poll() returns due to expired access tokens
Terminated bool
wg *sync.WaitGroup
Terminated bool
terminateCh chan struct{}
wg *sync.WaitGroup
pollHistogramVec *prometheus.HistogramVec
processHistogramVec *prometheus.HistogramVec
}
func NewPoller(userID, accessToken, deviceID string, client Client, receiver V2DataReceiver, txnCache *TransactionIDCache, logger zerolog.Logger) *Poller {
func newPoller(userID, accessToken, deviceID string, client Client, receiver V2DataReceiver, logger zerolog.Logger) *poller {
var wg sync.WaitGroup
wg.Add(1)
return &Poller{
accessToken: accessToken,
userID: userID,
deviceID: deviceID,
client: client,
receiver: receiver,
Terminated: false,
logger: logger,
e2eeMu: &sync.Mutex{},
deviceListChanges: make(map[string]string),
wg: &wg,
txnCache: txnCache,
return &poller{
accessToken: accessToken,
userID: userID,
deviceID: deviceID,
client: client,
receiver: receiver,
Terminated: false,
terminateCh: make(chan struct{}),
logger: logger,
wg: &wg,
}
}
// Blocks until the initial sync has been done on this poller.
func (p *Poller) WaitUntilInitialSync() {
func (p *poller) WaitUntilInitialSync() {
p.wg.Wait()
}
func (p *poller) Terminate() {
if p.Terminated {
return
}
p.Terminated = true
close(p.terminateCh)
}
func (p *poller) isTerminated() bool {
select {
case <-p.terminateCh:
p.Terminated = true
return true
default:
// not yet terminated
}
return false
}
// Poll will block forever, repeatedly calling v2 sync. Do this in a goroutine.
// Returns if the access token gets invalidated or if there was a fatal error processing v2 responses.
// Use WaitUntilInitialSync() to wait until the first poll has been processed.
func (p *Poller) Poll(since string) {
func (p *poller) Poll(since string) {
p.logger.Info().Str("since", since).Msg("Poller: v2 poll loop started")
defer func() {
p.receiver.OnTerminated(p.userID, p.deviceID)
}()
failCount := 0
firstTime := true
for {
for !p.Terminated {
if failCount > 0 {
// don't backoff when doing v2 syncs because the response is only in the cache for a short
// period of time (on massive accounts on matrix.org) such that if you wait 2,4,8min between
@ -294,7 +371,15 @@ func (p *Poller) Poll(since string) {
p.logger.Warn().Str("duration", waitTime.String()).Int("fail-count", failCount).Msg("Poller: waiting before next poll")
timeSleep(waitTime)
}
resp, statusCode, err := p.client.DoSyncV2(p.accessToken, since, firstTime)
if p.isTerminated() {
break
}
start := time.Now()
resp, statusCode, err := p.client.DoSyncV2(context.Background(), p.accessToken, since, firstTime)
p.trackRequestDuration(time.Since(start), since == "", firstTime)
if p.isTerminated() {
break
}
if err != nil {
// check if temporary
if statusCode != 401 {
@ -304,18 +389,22 @@ func (p *Poller) Poll(since string) {
} else {
p.logger.Warn().Msg("Poller: access token has been invalidated, terminating loop")
p.Terminated = true
return
break
}
}
if since == "" {
p.logger.Info().Msg("Poller: valid initial sync response received")
}
start = time.Now()
failCount = 0
p.parseE2EEData(resp)
p.parseGlobalAccountData(resp)
p.parseRoomsResponse(resp)
p.parseToDeviceMessages(resp)
wasInitial := since == ""
wasFirst := firstTime
since = resp.NextBatch
// persist the since token (TODO: this could get slow if we hammer the DB too much)
p.receiver.UpdateDeviceSince(p.deviceID, since)
@ -324,86 +413,105 @@ func (p *Poller) Poll(since string) {
firstTime = false
p.wg.Done()
}
p.trackProcessDuration(time.Since(start), wasInitial, wasFirst)
}
// always unblock EnsurePolling else we can end up head-of-line blocking other pollers!
if firstTime {
firstTime = false
p.wg.Done()
}
}
func (p *Poller) OTKCounts() map[string]int {
p.e2eeMu.Lock()
defer p.e2eeMu.Unlock()
return p.otkCounts
}
func (p *Poller) FallbackKeyTypes() []string {
return p.fallbackKeyTypes
}
func (p *Poller) DeviceListChanges() (changed, left []string) {
p.e2eeMu.Lock()
defer p.e2eeMu.Unlock()
changed = make([]string, 0)
left = make([]string, 0)
for userID, state := range p.deviceListChanges {
switch state {
case "changed":
changed = append(changed, userID)
case "left":
left = append(left, userID)
default:
p.logger.Warn().Str("state", state).Msg("DeviceListChanges: unknown state")
}
func (p *poller) trackRequestDuration(dur time.Duration, isInitial, isFirst bool) {
if p.pollHistogramVec == nil {
return
}
// forget them so we don't send them more than once to v3 loops
p.deviceListChanges = map[string]string{}
return
p.pollHistogramVec.WithLabelValues(labels(isInitial, isFirst)...).Observe(float64(dur.Milliseconds()))
}
func (p *Poller) parseToDeviceMessages(res *SyncResponse) {
func (p *poller) trackProcessDuration(dur time.Duration, isInitial, isFirst bool) {
if p.processHistogramVec == nil {
return
}
p.processHistogramVec.WithLabelValues(labels(isInitial, isFirst)...).Observe(float64(dur.Seconds()))
}
func labels(isInitial, isFirst bool) []string {
l := make([]string, 2)
if isInitial {
l[0] = "1"
} else {
l[0] = "0"
}
if isFirst {
l[1] = "1"
} else {
l[1] = "0"
}
return l
}
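// e.g. labels(false, true) returns []string{"0", "1"}: an incremental sync (non-empty since
// token) which is nevertheless the first poll made by this poller, such as after a proxy restart.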
func (p *poller) parseToDeviceMessages(res *SyncResponse) {
if len(res.ToDevice.Events) == 0 {
return
}
p.receiver.AddToDeviceMessages(p.userID, p.deviceID, res.ToDevice.Events)
}
func (p *Poller) parseE2EEData(res *SyncResponse) {
p.e2eeMu.Lock()
defer p.e2eeMu.Unlock()
// we don't actively push this to v3 loops, we let them lazily fetch it via calls to
// Poller.DeviceListChanges() and Poller.OTKCounts()
if res.DeviceListsOTKCount != nil {
func (p *poller) parseE2EEData(res *SyncResponse) {
hasE2EEChanges := false
if res.DeviceListsOTKCount != nil && len(res.DeviceListsOTKCount) > 0 {
if len(p.otkCounts) != len(res.DeviceListsOTKCount) {
hasE2EEChanges = true
}
if !hasE2EEChanges && p.otkCounts != nil {
for k := range res.DeviceListsOTKCount {
if res.DeviceListsOTKCount[k] != p.otkCounts[k] {
hasE2EEChanges = true
break
}
}
}
p.otkCounts = res.DeviceListsOTKCount
}
if len(res.DeviceUnusedFallbackKeyTypes) > 0 {
if !hasE2EEChanges {
if len(p.fallbackKeyTypes) != len(res.DeviceUnusedFallbackKeyTypes) {
hasE2EEChanges = true
} else {
for i := range res.DeviceUnusedFallbackKeyTypes {
if res.DeviceUnusedFallbackKeyTypes[i] != p.fallbackKeyTypes[i] {
hasE2EEChanges = true
break
}
}
}
}
p.fallbackKeyTypes = res.DeviceUnusedFallbackKeyTypes
}
for _, userID := range res.DeviceLists.Changed {
p.deviceListChanges[userID] = "changed"
deviceListChanges := internal.ToDeviceListChangesMap(res.DeviceLists.Changed, res.DeviceLists.Left)
if deviceListChanges != nil {
hasE2EEChanges = true
}
for _, userID := range res.DeviceLists.Left {
p.deviceListChanges[userID] = "left"
if hasE2EEChanges {
p.receiver.OnE2EEData(p.userID, p.deviceID, p.otkCounts, p.fallbackKeyTypes, deviceListChanges)
}
}
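// Illustrative delta behaviour (not part of this change): if a previous response reported
// otk counts of {"signed_curve25519": 50} and the next response carries the same counts,
// identical fallback key types and no device list changes, OnE2EEData is not invoked at all;
// if the count drops to 49, or any user appears in device_lists.changed/left, the callback
// fires with the full latest snapshot rather than just the delta.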
func (p *Poller) parseGlobalAccountData(res *SyncResponse) {
func (p *poller) parseGlobalAccountData(res *SyncResponse) {
if len(res.AccountData.Events) == 0 {
return
}
p.receiver.OnAccountData(p.userID, AccountDataGlobalRoom, res.AccountData.Events)
}
func (p *Poller) updateTxnIDCache(timeline []json.RawMessage) {
for _, e := range timeline {
txnID := gjson.GetBytes(e, "unsigned.transaction_id")
if !txnID.Exists() {
continue
}
eventID := gjson.GetBytes(e, "event_id").Str
p.txnCache.Store(p.userID, eventID, txnID.Str)
}
}
func (p *Poller) parseRoomsResponse(res *SyncResponse) {
func (p *poller) parseRoomsResponse(res *SyncResponse) {
stateCalls := 0
timelineCalls := 0
typingCalls := 0
receiptCalls := 0
for roomID, roomData := range res.Rooms.Join {
if len(roomData.State.Events) > 0 {
stateCalls++
@ -415,36 +523,32 @@ func (p *Poller) parseRoomsResponse(res *SyncResponse) {
roomID, p.userID, roomData.UnreadNotifications.HighlightCount, roomData.UnreadNotifications.NotificationCount,
)
}
// process typing/receipts before events so we seed the caches correctly for when we return the room
for _, ephEvent := range roomData.Ephemeral.Events {
ephEventType := gjson.GetBytes(ephEvent, "type").Str
switch ephEventType {
case "m.typing":
typingCalls++
p.receiver.SetTyping(roomID, ephEvent)
case "m.receipt":
receiptCalls++
p.receiver.OnReceipt(p.userID, roomID, ephEventType, ephEvent)
}
}
// process account data
if len(roomData.AccountData.Events) > 0 {
p.receiver.OnAccountData(p.userID, roomID, roomData.AccountData.Events)
}
if len(roomData.Timeline.Events) > 0 {
timelineCalls++
p.updateTxnIDCache(roomData.Timeline.Events)
p.receiver.Accumulate(roomID, roomData.Timeline.PrevBatch, roomData.Timeline.Events)
}
for _, ephEvent := range roomData.Ephemeral.Events {
if gjson.GetBytes(ephEvent, "type").Str == "m.typing" {
users := gjson.GetBytes(ephEvent, "content.user_ids")
if !users.IsArray() {
continue // malformed event
}
var userIDs []string
for _, u := range users.Array() {
if u.Str != "" {
userIDs = append(userIDs, u.Str)
}
}
typingCalls++
p.receiver.SetTyping(roomID, userIDs)
}
p.receiver.Accumulate(p.userID, roomID, roomData.Timeline.PrevBatch, roomData.Timeline.Events)
}
}
for roomID, roomData := range res.Rooms.Leave {
// TODO: do we care about state?
if len(roomData.Timeline.Events) > 0 {
p.receiver.Accumulate(roomID, roomData.Timeline.PrevBatch, roomData.Timeline.Events)
p.receiver.Accumulate(p.userID, roomID, roomData.Timeline.PrevBatch, roomData.Timeline.Events)
}
p.receiver.OnLeftRoom(p.userID, roomID)
}
@ -452,7 +556,7 @@ func (p *Poller) parseRoomsResponse(res *SyncResponse) {
p.receiver.OnInvite(p.userID, roomID, roomData.InviteState.Events)
}
var l *zerolog.Event
if len(res.Rooms.Invite) > 1 || len(res.Rooms.Join) > 1 {
if len(res.Rooms.Invite) > 0 || len(res.Rooms.Join) > 0 {
l = p.logger.Info()
} else {
l = p.logger.Debug()
@ -460,6 +564,6 @@ func (p *Poller) parseRoomsResponse(res *SyncResponse) {
l.Ints(
"rooms [invite,join,leave]", []int{len(res.Rooms.Invite), len(res.Rooms.Join), len(res.Rooms.Leave)},
).Ints(
"storage [states,timelines,typing]", []int{stateCalls, timelineCalls, typingCalls},
"storage [states,timelines,typing,receipts]", []int{stateCalls, timelineCalls, typingCalls, receiptCalls},
).Int("to_device", len(res.ToDevice.Events)).Msg("Poller: accumulated data")
}

View File

@ -1,6 +1,7 @@
package sync2
import (
"context"
"encoding/json"
"fmt"
"os"
@ -12,7 +13,184 @@ import (
"github.com/rs/zerolog"
)
var txnIDCache = NewTransactionIDCache()
// Tests that EnsurePolling works in the happy case
func TestPollerMapEnsurePolling(t *testing.T) {
nextSince := "next"
roomID := "!foo:bar"
roomState := []json.RawMessage{
json.RawMessage(`{"event":1}`),
json.RawMessage(`{"event":2}`),
json.RawMessage(`{"event":3}`),
}
initialResponse := &SyncResponse{
NextBatch: nextSince,
Rooms: struct {
Join map[string]SyncV2JoinResponse `json:"join"`
Invite map[string]SyncV2InviteResponse `json:"invite"`
Leave map[string]SyncV2LeaveResponse `json:"leave"`
}{
Join: map[string]SyncV2JoinResponse{
roomID: {
State: EventsResponse{
Events: roomState,
},
},
},
},
}
syncRequests := make(chan string)
syncResponses := make(chan *SyncResponse)
accumulator, client := newMocks(func(authHeader, since string) (*SyncResponse, int, error) {
syncRequests <- since
return <-syncResponses, 200, nil
})
accumulator.incomingProcess = make(chan struct{})
accumulator.unblockProcess = make(chan struct{})
pm := NewPollerMap(client, false)
pm.SetCallbacks(accumulator)
ensurePollingUnblocked := make(chan struct{})
go func() {
pm.EnsurePolling("access_token", "@alice:localhost", "FOOBAR", "", zerolog.New(os.Stderr))
close(ensurePollingUnblocked)
}()
ensureBlocking := func() {
select {
case <-ensurePollingUnblocked:
t.Fatalf("EnsurePolling unblocked")
default:
}
}
// wait until we get a /sync request
since := <-syncRequests
if since != "" {
t.Fatalf("/sync not made with empty since token, got %v", since)
}
// make sure we're still blocking
ensureBlocking()
// respond to the /sync request
syncResponses <- initialResponse
// make sure we're still blocking
ensureBlocking()
// wait until we are processing the state response
<-accumulator.incomingProcess
// make sure we're still blocking
ensureBlocking()
// finish processing
accumulator.unblockProcess <- struct{}{}
// make sure we unblock
select {
case <-ensurePollingUnblocked:
case <-time.After(time.Second):
t.Fatalf("EnsurePolling did not unblock after 1s")
}
}
func TestPollerMapEnsurePollingIdempotent(t *testing.T) {
nextSince := "next"
roomID := "!foo:bar"
roomState := []json.RawMessage{
json.RawMessage(`{"event":1}`),
json.RawMessage(`{"event":2}`),
json.RawMessage(`{"event":3}`),
}
initialResponse := &SyncResponse{
NextBatch: nextSince,
Rooms: struct {
Join map[string]SyncV2JoinResponse `json:"join"`
Invite map[string]SyncV2InviteResponse `json:"invite"`
Leave map[string]SyncV2LeaveResponse `json:"leave"`
}{
Join: map[string]SyncV2JoinResponse{
roomID: {
State: EventsResponse{
Events: roomState,
},
},
},
},
}
syncRequests := make(chan string)
syncResponses := make(chan *SyncResponse)
accumulator, client := newMocks(func(authHeader, since string) (*SyncResponse, int, error) {
syncRequests <- since
return <-syncResponses, 200, nil
})
accumulator.incomingProcess = make(chan struct{})
accumulator.unblockProcess = make(chan struct{})
pm := NewPollerMap(client, false)
pm.SetCallbacks(accumulator)
ensurePollingUnblocked := make(chan struct{})
var wg sync.WaitGroup
n := 3
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
t.Logf("EnsurePolling")
pm.EnsurePolling("access_token", "@alice:localhost", "FOOBAR", "", zerolog.New(os.Stderr))
wg.Done()
t.Logf("EnsurePolling unblocked")
}()
}
go func() {
wg.Wait()
close(ensurePollingUnblocked)
t.Logf("EnsurePolling all unblocked")
}()
ensureBlocking := func() {
select {
case <-ensurePollingUnblocked:
t.Fatalf("EnsurePolling unblocked")
default:
t.Logf("EnsurePolling still blocking")
}
}
// wait until we get a /sync request
since := <-syncRequests
if since != "" {
t.Fatalf("/sync not made with empty since token, got %v", since)
}
t.Logf("Recv /sync request")
// make sure we're still blocking
ensureBlocking()
// respond to the /sync request
syncResponses <- initialResponse
t.Logf("Responded to /sync request")
// make sure we're still blocking
ensureBlocking()
// wait until we are processing the state response
<-accumulator.incomingProcess
t.Logf("Processing response...")
// make sure we're still blocking
ensureBlocking()
// finish processing
accumulator.unblockProcess <- struct{}{}
t.Logf("Processed response.")
// make sure we unblock
select {
case <-ensurePollingUnblocked:
case <-time.After(time.Second):
t.Fatalf("EnsurePolling did not unblock after 1s")
}
t.Logf("EnsurePolling unblocked")
}
// Check that a call to Poll starts polling and accumulating, and terminates on 401s.
func TestPollerPollFromNothing(t *testing.T) {
@ -46,7 +224,7 @@ func TestPollerPollFromNothing(t *testing.T) {
})
var wg sync.WaitGroup
wg.Add(1)
poller := NewPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, txnIDCache, zerolog.New(os.Stderr))
poller := newPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, zerolog.New(os.Stderr))
go func() {
defer wg.Done()
poller.Poll("")
@ -129,7 +307,7 @@ func TestPollerPollFromExisting(t *testing.T) {
})
var wg sync.WaitGroup
wg.Add(1)
poller := NewPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, txnIDCache, zerolog.New(os.Stderr))
poller := newPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, zerolog.New(os.Stderr))
go func() {
defer wg.Done()
poller.Poll(since)
@ -205,7 +383,7 @@ func TestPollerBackoff(t *testing.T) {
}
var wg sync.WaitGroup
wg.Add(1)
poller := NewPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, txnIDCache, zerolog.New(os.Stderr))
poller := newPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, zerolog.New(os.Stderr))
go func() {
defer wg.Done()
poller.Poll("some_since_value")
@ -217,7 +395,6 @@ func TestPollerBackoff(t *testing.T) {
wg.Wait()
select {
case <-hasPolledSuccessfully:
t.Errorf("WaitUntilInitialSync fired incorrectly")
case <-time.After(100 * time.Millisecond):
break
}
@ -226,11 +403,46 @@ func TestPollerBackoff(t *testing.T) {
}
}
// Regression test to make sure that if you start polling with an invalid token, we do end up unblocking WaitUntilInitialSync
// and don't end up blocking forever.
func TestPollerUnblocksIfTerminatedInitially(t *testing.T) {
deviceID := "FOOBAR"
accumulator, client := newMocks(func(authHeader, since string) (*SyncResponse, int, error) {
return nil, 401, fmt.Errorf("terminated")
})
pollUnblocked := make(chan struct{})
waitUntilInitialSyncUnblocked := make(chan struct{})
poller := newPoller("@alice:localhost", "Authorization: hello world", deviceID, client, accumulator, zerolog.New(os.Stderr))
go func() {
poller.Poll("")
close(pollUnblocked)
}()
go func() {
poller.WaitUntilInitialSync()
close(waitUntilInitialSyncUnblocked)
}()
select {
case <-pollUnblocked:
break
case <-time.After(time.Second):
t.Errorf("Poll() did not unblock")
}
select {
case <-waitUntilInitialSyncUnblocked:
break
case <-time.After(time.Second):
t.Errorf("WaitUntilInitialSync() did not unblock")
}
}
type mockClient struct {
fn func(authHeader, since string) (*SyncResponse, int, error)
}
func (c *mockClient) DoSyncV2(authHeader, since string, isFirst bool) (*SyncResponse, int, error) {
func (c *mockClient) DoSyncV2(ctx context.Context, authHeader, since string, isFirst bool) (*SyncResponse, int, error) {
return c.fn(authHeader, since)
}
func (c *mockClient) WhoAmI(authHeader string) (string, error) {
@ -241,15 +453,23 @@ type mockDataReceiver struct {
states map[string][]json.RawMessage
timelines map[string][]json.RawMessage
deviceIDToSince map[string]string
incomingProcess chan struct{}
unblockProcess chan struct{}
}
func (a *mockDataReceiver) Accumulate(roomID, prevBatch string, timeline []json.RawMessage) {
func (a *mockDataReceiver) Accumulate(userID, roomID, prevBatch string, timeline []json.RawMessage) {
a.timelines[roomID] = append(a.timelines[roomID], timeline...)
}
func (a *mockDataReceiver) Initialise(roomID string, state []json.RawMessage) {
a.states[roomID] = state
if a.incomingProcess != nil {
a.incomingProcess <- struct{}{}
}
if a.unblockProcess != nil {
<-a.unblockProcess
}
}
func (a *mockDataReceiver) SetTyping(roomID string, userIDs []string) {
func (a *mockDataReceiver) SetTyping(roomID string, ephEvent json.RawMessage) {
}
func (s *mockDataReceiver) UpdateDeviceSince(deviceID, since string) {
s.deviceIDToSince[deviceID] = since
@ -259,9 +479,13 @@ func (s *mockDataReceiver) AddToDeviceMessages(userID, deviceID string, msgs []j
func (s *mockDataReceiver) UpdateUnreadCounts(roomID, userID string, highlightCount, notifCount *int) {
}
func (s *mockDataReceiver) OnAccountData(userID, roomID string, events []json.RawMessage) {}
func (s *mockDataReceiver) OnInvite(userID, roomID string, inviteState []json.RawMessage) {}
func (s *mockDataReceiver) OnLeftRoom(userID, roomID string) {}
func (s *mockDataReceiver) OnAccountData(userID, roomID string, events []json.RawMessage) {}
func (s *mockDataReceiver) OnReceipt(userID, roomID, ephEventType string, ephEvent json.RawMessage) {}
func (s *mockDataReceiver) OnInvite(userID, roomID string, inviteState []json.RawMessage) {}
func (s *mockDataReceiver) OnLeftRoom(userID, roomID string) {}
func (s *mockDataReceiver) OnE2EEData(userID, deviceID string, otkCounts map[string]int, fallbackKeyTypes []string, deviceListChanges map[string]int) {
}
func (s *mockDataReceiver) OnTerminated(userID, deviceID string) {}
func newMocks(doSyncV2 func(authHeader, since string) (*SyncResponse, int, error)) (*mockDataReceiver, *mockClient) {
client := &mockClient{

View File

@ -64,6 +64,13 @@ func NewStore(postgresURI, secret string) *Storage {
}
}
func (s *Storage) Teardown() {
err := s.db.Close()
if err != nil {
panic("V2Storage.Teardown: " + err.Error())
}
}
func (s *Storage) encrypt(token string) string {
block, err := aes.NewCipher(s.key256)
if err != nil {

View File

@ -24,6 +24,7 @@ type EventData struct {
StateKey *string
Content gjson.Result
Timestamp uint64
Sender string
// the number of joined users in this room. Use this value and don't try to work it out as you
// may get it wrong due to Synapse sending duplicate join events(!). This value has them de-duped.
@ -120,8 +121,23 @@ func (c *GlobalCache) LoadJoinedRooms(userID string) (pos int64, joinedRooms map
return initialLoadPosition, rooms, nil
}
func (c *GlobalCache) LoadStateEvent(ctx context.Context, roomID string, loadPosition int64, evType, stateKey string) json.RawMessage {
roomIDToStateEvents, err := c.store.RoomStateAfterEventPosition(ctx, []string{roomID}, loadPosition, map[string][]string{
evType: {stateKey},
})
if err != nil {
logger.Err(err).Str("room", roomID).Int64("pos", loadPosition).Msg("failed to load room state")
return nil
}
events := roomIDToStateEvents[roomID]
if len(events) > 0 {
return events[0].JSON
}
return nil
}
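// Illustrative usage only (not part of this change): fetching a single piece of current
// room state at a known load position, e.g. the room name, assuming the caller already
// holds a valid position from LoadJoinedRooms:
//
//	nameEvent := c.LoadStateEvent(ctx, roomID, loadPos, "m.room.name", "")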
// TODO: remove? Doesn't touch global cache fields
func (c *GlobalCache) LoadRoomState(ctx context.Context, roomIDs []string, loadPosition int64, requiredStateMap *internal.RequiredStateMap) map[string][]json.RawMessage {
func (c *GlobalCache) LoadRoomState(ctx context.Context, roomIDs []string, loadPosition int64, requiredStateMap *internal.RequiredStateMap, roomToUsersInTimeline map[string][]string) map[string][]json.RawMessage {
if c.store == nil {
return nil
}
@ -136,6 +152,13 @@ func (c *GlobalCache) LoadRoomState(ctx context.Context, roomIDs []string, loadP
for _, ev := range stateEvents {
if requiredStateMap.Include(ev.Type, ev.StateKey) {
result = append(result, ev.JSON)
} else if requiredStateMap.IsLazyLoading() {
usersInTimeline := roomToUsersInTimeline[roomID]
for _, userID := range usersInTimeline {
if ev.StateKey == userID {
result = append(result, ev.JSON)
}
}
}
}
resultMap[roomID] = result
@ -184,6 +207,25 @@ func (c *GlobalCache) Startup(roomIDToMetadata map[string]internal.RoomMetadata)
// Listener function called by dispatcher below
// =================================================
func (c *GlobalCache) OnEphemeralEvent(roomID string, ephEvent json.RawMessage) {
evType := gjson.ParseBytes(ephEvent).Get("type").Str
c.roomIDToMetadataMu.Lock()
defer c.roomIDToMetadataMu.Unlock()
metadata := c.roomIDToMetadata[roomID]
if metadata == nil {
metadata = &internal.RoomMetadata{
RoomID: roomID,
ChildSpaceRooms: make(map[string]struct{}),
}
}
switch evType {
case "m.typing":
metadata.TypingEvent = ephEvent
}
c.roomIDToMetadata[roomID] = metadata
}
func (c *GlobalCache) OnNewEvent(
ed *EventData,
) {
@ -225,6 +267,10 @@ func (c *GlobalCache) OnNewEvent(
if roomType.Exists() && roomType.Type == gjson.String {
metadata.RoomType = &roomType.Str
}
predecessorRoomID := ed.Content.Get("predecessor.room_id").Str
if predecessorRoomID != "" {
metadata.PredecessorRoomID = &predecessorRoomID
}
}
case "m.space.child": // only track space child changes for now, not parents
if ed.StateKey != nil {

View File

@ -29,7 +29,7 @@ func TestGlobalCacheLoadState(t *testing.T) {
testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{"name": "The Room Name"}),
testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{"name": "The Updated Room Name"}),
}
moreEvents := []json.RawMessage{
eventsRoom2 := []json.RawMessage{
testutils.NewStateEvent(t, "m.room.create", "", alice, map[string]interface{}{"creator": alice}),
testutils.NewJoinEvent(t, alice),
testutils.NewStateEvent(t, "m.room.join_rules", "", alice, map[string]interface{}{"join_rule": "public"}),
@ -38,20 +38,23 @@ func TestGlobalCacheLoadState(t *testing.T) {
testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{"name": "The Room Name"}),
testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{"name": "The Updated Room Name"}),
}
_, _, err := store.Accumulate(roomID2, "", moreEvents)
_, _, err := store.Accumulate(roomID2, "", eventsRoom2)
if err != nil {
t.Fatalf("Accumulate: %s", err)
}
_, latest, err := store.Accumulate(roomID, "", events)
_, latestNIDs, err := store.Accumulate(roomID, "", events)
if err != nil {
t.Fatalf("Accumulate: %s", err)
}
latest := latestNIDs[len(latestNIDs)-1]
globalCache := caches.NewGlobalCache(store)
testCases := []struct {
name string
requiredState [][2]string
wantEvents map[string][]json.RawMessage
name string
me string
requiredState [][2]string
wantEvents map[string][]json.RawMessage
roomToUsersInTimeline map[string][]string
}{
{
name: "single required state returns a single event",
@ -150,7 +153,57 @@ func TestGlobalCacheLoadState(t *testing.T) {
},
wantEvents: map[string][]json.RawMessage{
roomID: {events[3]},
roomID2: {moreEvents[3]},
roomID2: {eventsRoom2[3]},
},
},
{
name: "using $ME works",
me: alice,
requiredState: [][2]string{
{"m.room.member", sync3.StateKeyMe},
},
wantEvents: map[string][]json.RawMessage{
roomID: {events[1]},
roomID2: {eventsRoom2[1]},
},
},
{
name: "using $ME ignores other member events",
me: "@bogus-user:example.com",
requiredState: [][2]string{
{"m.room.member", sync3.StateKeyMe},
},
wantEvents: nil,
},
{
name: "using $LAZY works",
me: alice,
requiredState: [][2]string{
{"m.room.member", sync3.StateKeyLazy},
},
wantEvents: map[string][]json.RawMessage{
roomID: {events[1], events[4]},
roomID2: {eventsRoom2[1], eventsRoom2[4]},
},
roomToUsersInTimeline: map[string][]string{
roomID: {alice, charlie},
roomID2: {alice, charlie},
},
},
{
name: "using $LAZY and $ME works",
me: alice,
requiredState: [][2]string{
{"m.room.member", sync3.StateKeyLazy},
{"m.room.member", sync3.StateKeyMe},
},
wantEvents: map[string][]json.RawMessage{
roomID: {events[1], events[4]},
roomID2: {eventsRoom2[1], eventsRoom2[4]},
},
roomToUsersInTimeline: map[string][]string{
roomID: {charlie},
roomID2: {charlie},
},
},
}
@ -163,7 +216,7 @@ func TestGlobalCacheLoadState(t *testing.T) {
rs := sync3.RoomSubscription{
RequiredState: tc.requiredState,
}
gotMap := globalCache.LoadRoomState(ctx, roomIDs, latest, rs.RequiredStateMap())
gotMap := globalCache.LoadRoomState(ctx, roomIDs, latest, rs.RequiredStateMap(tc.me), tc.roomToUsersInTimeline)
for _, roomID := range roomIDs {
got := gotMap[roomID]
wantEvents := tc.wantEvents[roomID]

View File

@ -1,6 +1,8 @@
package caches
import (
"encoding/json"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/state"
)
@ -29,6 +31,15 @@ type LeftRoomUpdate struct {
RoomUpdate
}
type TypingUpdate struct {
RoomUpdate
}
type ReceiptUpdate struct {
RoomUpdate
EphemeralEvent json.RawMessage
}
type UnreadCountUpdate struct {
RoomUpdate
HasCountDecreased bool

View File

@ -17,6 +17,10 @@ const (
InvitesAreHighlightsValue = 1 // invite -> highlight count = 1
)
type CacheFinder interface {
CacheForUser(userID string) *UserCache
}
type UserRoomData struct {
IsDM bool
IsInvite bool
@ -36,6 +40,8 @@ type UserRoomData struct {
// Map of tag to order float.
// See https://spec.matrix.org/latest/client-server-api/#room-tagging
Tags map[string]float64
// the load position of the timeline
LoadPos int64
}
func NewUserRoomData() UserRoomData {
@ -268,7 +274,7 @@ func (c *UserCache) LazyLoadTimelines(loadPos int64, roomIDs []string, maxTimeli
var lazyRoomIDs []string
for _, roomID := range roomIDs {
urd := c.LoadRoomData(roomID)
if len(urd.Timeline) > 0 {
if len(urd.Timeline) > 0 && urd.LoadPos <= loadPos {
timeline := urd.Timeline
if len(timeline) > maxTimelineEvents {
timeline = timeline[len(timeline)-maxTimelineEvents:]
@ -334,6 +340,7 @@ func (c *UserCache) LazyLoadTimelines(loadPos int64, roomIDs []string, maxTimeli
urd = NewUserRoomData()
}
urd.Timeline = events
urd.LoadPos = loadPos
if len(events) > 0 {
eventID := gjson.ParseBytes(events[0]).Get("event_id").Str
urd.SetPrevBatch(eventID, roomIDToPrevBatch[roomID])
@ -414,16 +421,20 @@ func (c *UserCache) Invites() map[string]UserRoomData {
// which would cause the transaction ID to be missing from the event. Instead, we always look for txn
// IDs in the v2 poller, and then set them appropriately at request time.
func (c *UserCache) AnnotateWithTransactionIDs(events []json.RawMessage) []json.RawMessage {
eventIDs := make([]string, len(events))
eventIDIndex := make(map[string]int, len(events))
for i := range events {
eventID := gjson.GetBytes(events[i], "event_id")
txnID := c.txnIDs.TransactionIDForEvent(c.UserID, eventID.Str)
if txnID != "" {
newJSON, err := sjson.SetBytes(events[i], "unsigned.transaction_id", txnID)
if err != nil {
logger.Err(err).Str("user", c.UserID).Msg("AnnotateWithTransactionIDs: sjson failed")
} else {
events[i] = newJSON
}
eventIDs[i] = gjson.GetBytes(events[i], "event_id").Str
eventIDIndex[eventIDs[i]] = i
}
eventIDToTxnID := c.txnIDs.TransactionIDForEvents(c.UserID, eventIDs)
for eventID, txnID := range eventIDToTxnID {
i := eventIDIndex[eventID]
newJSON, err := sjson.SetBytes(events[i], "unsigned.transaction_id", txnID)
if err != nil {
logger.Err(err).Str("user", c.UserID).Msg("AnnotateWithTransactionIDs: sjson failed")
} else {
events[i] = newJSON
}
}
return events
@ -433,6 +444,29 @@ func (c *UserCache) AnnotateWithTransactionIDs(events []json.RawMessage) []json.
// Listener functions called by v2 pollers are below
// =================================================
func (c *UserCache) OnEphemeralEvent(roomID string, ephEvent json.RawMessage) {
var update RoomUpdate
evType := gjson.GetBytes(ephEvent, "type").Str
switch evType {
case "m.typing":
update = &TypingUpdate{
RoomUpdate: c.newRoomUpdate(roomID),
}
case "m.receipt":
update = &ReceiptUpdate{
RoomUpdate: c.newRoomUpdate(roomID),
EphemeralEvent: ephEvent,
}
}
if update == nil {
return
}
for _, l := range c.listeners {
l.OnRoomUpdate(update)
}
}
func (c *UserCache) OnUnreadCounts(roomID string, highlightCount, notifCount *int) {
data := c.LoadRoomData(roomID)
hasCountDecreased := false
@ -492,6 +526,7 @@ func (c *UserCache) OnNewEvent(eventData *EventData) {
if len(urd.Timeline) > 0 {
// we're tracking timelines, add this message too
urd.Timeline = append(urd.Timeline, eventData.Event)
urd.LoadPos = eventData.LatestPos
}
// reset the IsInvite field when the user actually joins/rejects the invite
if urd.IsInvite && eventData.EventType == "m.room.member" && eventData.StateKey != nil && *eventData.StateKey == c.UserID {

View File

@ -110,10 +110,7 @@ func (c *Conn) OnIncomingRequest(ctx context.Context, req *Request) (resp *Respo
if !isFirstRequest && !isRetransmit && !c.isOutstanding(req.pos) {
// the client made up a position, reject them
logger.Trace().Int64("pos", req.pos).Msg("unknown pos")
return nil, &internal.HandlerError{
StatusCode: 400,
Err: fmt.Errorf("unknown position: %d", req.pos),
}
return nil, internal.ExpiredSessionError()
}
// purge the response buffer based on the client's new position. Higher pos values are later.

View File

@ -30,6 +30,16 @@ func NewConnMap() *ConnMap {
return cm
}
func (m *ConnMap) Teardown() {
m.cache.Close()
}
func (m *ConnMap) Len() int {
m.mu.Lock()
defer m.mu.Unlock()
return len(m.connIDToConn)
}
// Conn returns a connection with this ConnID. Returns nil if no connection exists.
func (m *ConnMap) Conn(cid ConnID) *Conn {
cint, _ := m.cache.Get(cid.String())

View File

@ -19,6 +19,7 @@ const DispatcherAllUsers = "-"
type Receiver interface {
OnNewEvent(event *caches.EventData)
OnEphemeralEvent(roomID string, ephEvent json.RawMessage)
OnRegistered(latestPos int64) error
}
@ -47,11 +48,7 @@ func (d *Dispatcher) IsUserJoined(userID, roomID string) bool {
// MUST BE CALLED BEFORE V2 POLL LOOPS START.
func (d *Dispatcher) Startup(roomToJoinedUsers map[string][]string) error {
// populate joined rooms tracker
for roomID, userIDs := range roomToJoinedUsers {
for _, userID := range userIDs {
d.jrt.UserJoinedRoom(userID, roomID)
}
}
d.jrt.Startup(roomToJoinedUsers)
return nil
}
@ -71,6 +68,76 @@ func (d *Dispatcher) Register(userID string, r Receiver) error {
return r.OnRegistered(d.latestPos)
}
func (d *Dispatcher) newEventData(event json.RawMessage, roomID string, latestPos int64) *caches.EventData {
// parse the event to pull out fields we care about
var stateKey *string
ev := gjson.ParseBytes(event)
if sk := ev.Get("state_key"); sk.Exists() {
stateKey = &sk.Str
}
eventType := ev.Get("type").Str
return &caches.EventData{
Event: event,
RoomID: roomID,
EventType: eventType,
StateKey: stateKey,
Content: ev.Get("content"),
LatestPos: latestPos,
Timestamp: ev.Get("origin_server_ts").Uint(),
Sender: ev.Get("sender").Str,
}
}
// Called by v2 pollers when we receive an initial state block. Very similar to OnNewEvents but
// done in bulk for speed.
func (d *Dispatcher) OnNewInitialRoomState(roomID string, state []json.RawMessage) {
// sanity check
if _, jc := d.jrt.JoinedUsersForRoom(roomID, nil); jc > 0 {
logger.Warn().Int("join_count", jc).Str("room", roomID).Int("num_state", len(state)).Msg(
"OnNewInitialRoomState but have entries in JoinedRoomsTracker already, this should be impossible. Degrading to live events",
)
d.OnNewEvents(roomID, state, 0)
return
}
// create event datas for state
eventDatas := make([]*caches.EventData, len(state))
var joined, invited []string
for i, event := range state {
ed := d.newEventData(event, roomID, 0)
eventDatas[i] = ed
if ed.EventType == "m.room.member" && ed.StateKey != nil {
membership := ed.Content.Get("membership").Str
switch membership {
case "invite":
invited = append(invited, *ed.StateKey)
case "join":
joined = append(joined, *ed.StateKey)
}
}
}
// bulk update joined room tracker
forceInitial := d.jrt.UsersJoinedRoom(joined, roomID)
d.jrt.UsersInvitedToRoom(invited, roomID)
inviteCount := d.jrt.NumInvitedUsersForRoom(roomID)
// work out who to notify
userIDs, joinCount := d.jrt.JoinedUsersForRoom(roomID, func(userID string) bool {
if userID == DispatcherAllUsers {
return false // safety guard to prevent dupe global callbacks
}
_, exists := d.userToReceiver[userID]
return exists
})
// notify listeners
for _, ed := range eventDatas {
ed.InviteCount = inviteCount
ed.JoinCount = joinCount
d.notifyListeners(ed, userIDs, "", forceInitial, "")
}
}
// Called by v2 pollers when we receive new events
func (d *Dispatcher) OnNewEvents(
roomID string, events []json.RawMessage, latestPos int64,
@ -88,23 +155,7 @@ func (d *Dispatcher) onNewEvent(
if latestPos > d.latestPos {
d.latestPos = latestPos
}
// parse the event to pull out fields we care about
var stateKey *string
ev := gjson.ParseBytes(event)
if sk := ev.Get("state_key"); sk.Exists() {
stateKey = &sk.Str
}
eventType := ev.Get("type").Str
ed := &caches.EventData{
Event: event,
RoomID: roomID,
EventType: eventType,
StateKey: stateKey,
Content: ev.Get("content"),
LatestPos: latestPos,
Timestamp: ev.Get("origin_server_ts").Uint(),
}
ed := d.newEventData(event, roomID, latestPos)
// update the tracker
targetUser := ""
@ -116,7 +167,7 @@ func (d *Dispatcher) onNewEvent(
switch membership {
case "invite":
// we only do this to track invite counts correctly.
d.jrt.UserInvitedToRoom(targetUser, ed.RoomID)
d.jrt.UsersInvitedToRoom([]string{targetUser}, ed.RoomID)
case "join":
if d.jrt.UserJoinedRoom(targetUser, ed.RoomID) {
shouldForceInitial = true
@ -130,9 +181,46 @@ func (d *Dispatcher) onNewEvent(
}
// notify all people in this room
userIDs := d.jrt.JoinedUsersForRoom(ed.RoomID)
ed.JoinCount = len(userIDs)
userIDs, joinCount := d.jrt.JoinedUsersForRoom(ed.RoomID, func(userID string) bool {
if userID == DispatcherAllUsers {
return false // safety guard to prevent dupe global callbacks
}
_, exists := d.userToReceiver[userID]
return exists
})
ed.JoinCount = joinCount
d.notifyListeners(ed, userIDs, targetUser, shouldForceInitial, membership)
}
func (d *Dispatcher) OnEphemeralEvent(roomID string, ephEvent json.RawMessage) {
notifyUserIDs, _ := d.jrt.JoinedUsersForRoom(roomID, func(userID string) bool {
if userID == DispatcherAllUsers {
return false // safety guard to prevent dupe global callbacks
}
_, exists := d.userToReceiver[userID]
return exists
})
d.userToReceiverMu.RLock()
defer d.userToReceiverMu.RUnlock()
// global listeners (invoke before per-user listeners so caches can update)
listener := d.userToReceiver[DispatcherAllUsers]
if listener != nil {
listener.OnEphemeralEvent(roomID, ephEvent)
}
// poke user caches OnEphemeralEvent which then pokes ConnState
for _, userID := range notifyUserIDs {
l := d.userToReceiver[userID]
if l == nil {
continue
}
l.OnEphemeralEvent(roomID, ephEvent)
}
}
func (d *Dispatcher) notifyListeners(ed *caches.EventData, userIDs []string, targetUser string, shouldForceInitial bool, membership string) {
// invoke listeners
d.userToReceiverMu.RLock()
defer d.userToReceiverMu.RUnlock()

View File

@ -69,10 +69,10 @@ func ProcessLiveAccountData(up caches.Update, store *state.Storage, updateWillRe
return nil
}
func ProcessAccountData(store *state.Storage, listRoomIDs map[string]struct{}, userID string, isInitial bool, req *AccountDataRequest) (res *AccountDataResponse) {
roomIDs := make([]string, len(listRoomIDs))
func ProcessAccountData(store *state.Storage, roomIDToTimeline map[string][]string, userID string, isInitial bool, req *AccountDataRequest) (res *AccountDataResponse) {
roomIDs := make([]string, len(roomIDToTimeline))
i := 0
for roomID := range listRoomIDs {
for roomID := range roomIDToTimeline {
roomIDs[i] = roomID
i++
}

View File

@ -1,6 +1,7 @@
package extensions
import (
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/sync2"
)
@ -16,7 +17,7 @@ func (r E2EERequest) ApplyDelta(next *E2EERequest) *E2EERequest {
// Server response
type E2EEResponse struct {
OTKCounts map[string]int `json:"device_one_time_keys_count"`
OTKCounts map[string]int `json:"device_one_time_keys_count,omitempty"`
DeviceLists *E2EEDeviceList `json:"device_lists,omitempty"`
FallbackKeyTypes []string `json:"device_unused_fallback_key_types,omitempty"`
}
@ -34,19 +35,25 @@ func (r *E2EEResponse) HasData(isInitial bool) bool {
return r.DeviceLists != nil
}
func ProcessE2EE(fetcher sync2.E2EEFetcher, userID, deviceID string, req *E2EERequest) (res *E2EEResponse) {
// pull OTK counts and changed/left from v2 poller
otkCounts, fallbackKeyTypes, changed, left := fetcher.LatestE2EEData(deviceID)
res = &E2EEResponse{
OTKCounts: otkCounts,
FallbackKeyTypes: fallbackKeyTypes,
func ProcessE2EE(fetcher sync2.E2EEFetcher, userID, deviceID string, req *E2EERequest, isInitial bool) (res *E2EEResponse) {
// pull OTK counts and changed/left from device data
dd := fetcher.DeviceData(userID, deviceID, isInitial)
res = &E2EEResponse{}
if dd == nil {
return res // unknown device?
}
if dd.FallbackKeyTypes != nil {
res.FallbackKeyTypes = dd.FallbackKeyTypes
}
if dd.OTKCounts != nil {
res.OTKCounts = dd.OTKCounts
}
changed, left := internal.DeviceListChangesArrays(dd.DeviceLists.Sent)
if len(changed) > 0 || len(left) > 0 {
res.DeviceLists = &E2EEDeviceList{
Changed: changed,
Left: left,
}
logger.Info().Int("changed", len(changed)).Int("left", len(left)).Str("user", userID).Msg("E2EE extension: new data")
}
return
}

View File

@ -20,6 +20,8 @@ type Request struct {
ToDevice *ToDeviceRequest `json:"to_device"`
E2EE *E2EERequest `json:"e2ee"`
AccountData *AccountDataRequest `json:"account_data"`
Typing *TypingRequest `json:"typing"`
Receipts *ReceiptsRequest `json:"receipts"`
}
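// Illustrative request fragment (not part of this change): a client opts in to the new
// extensions by setting enabled flags in its extensions block, matching the JSON tags above:
//
//	{
//	  "typing":   { "enabled": true },
//	  "receipts": { "enabled": true }
//	}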
func (r Request) ApplyDelta(next *Request) Request {
@ -32,6 +34,12 @@ func (r Request) ApplyDelta(next *Request) Request {
if next.AccountData != nil {
r.AccountData = r.AccountData.ApplyDelta(next.AccountData)
}
if next.Typing != nil {
r.Typing = r.Typing.ApplyDelta(next.Typing)
}
if next.Receipts != nil {
r.Receipts = r.Receipts.ApplyDelta(next.Receipts)
}
return r
}
@ -39,6 +47,8 @@ type Response struct {
ToDevice *ToDeviceResponse `json:"to_device,omitempty"`
E2EE *E2EEResponse `json:"e2ee,omitempty"`
AccountData *AccountDataResponse `json:"account_data,omitempty"`
Typing *TypingResponse `json:"typing,omitempty"`
Receipts *ReceiptsResponse `json:"receipts,omitempty"`
}
func (e Response) HasData(isInitial bool) bool {
@ -48,30 +58,43 @@ func (e Response) HasData(isInitial bool) bool {
}
type HandlerInterface interface {
Handle(req Request, listRoomIDs map[string]struct{}, isInitial bool) (res Response)
Handle(req Request, roomIDToTimeline map[string][]string, isInitial bool) (res Response)
HandleLiveUpdate(update caches.Update, req Request, res *Response, updateWillReturnResponse, isInitial bool)
}
type Handler struct {
Store *state.Storage
E2EEFetcher sync2.E2EEFetcher
GlobalCache *caches.GlobalCache
}
func (h *Handler) HandleLiveUpdate(update caches.Update, req Request, res *Response, updateWillReturnResponse, isInitial bool) {
if req.AccountData != nil && req.AccountData.Enabled {
res.AccountData = ProcessLiveAccountData(update, h.Store, updateWillReturnResponse, req.UserID, req.AccountData)
}
if req.Typing != nil && req.Typing.Enabled {
res.Typing = ProcessLiveTyping(update, updateWillReturnResponse, req.UserID, req.Typing)
}
if req.Receipts != nil && req.Receipts.Enabled {
res.Receipts = ProcessLiveReceipts(update, updateWillReturnResponse, req.UserID, req.Receipts)
}
}
func (h *Handler) Handle(req Request, listRoomIDs map[string]struct{}, isInitial bool) (res Response) {
func (h *Handler) Handle(req Request, roomIDToTimeline map[string][]string, isInitial bool) (res Response) {
if req.ToDevice != nil && req.ToDevice.Enabled != nil && *req.ToDevice.Enabled {
res.ToDevice = ProcessToDevice(h.Store, req.UserID, req.DeviceID, req.ToDevice)
}
if req.E2EE != nil && req.E2EE.Enabled {
res.E2EE = ProcessE2EE(h.E2EEFetcher, req.UserID, req.DeviceID, req.E2EE)
res.E2EE = ProcessE2EE(h.E2EEFetcher, req.UserID, req.DeviceID, req.E2EE, isInitial)
}
if req.AccountData != nil && req.AccountData.Enabled {
res.AccountData = ProcessAccountData(h.Store, listRoomIDs, req.UserID, isInitial, req.AccountData)
res.AccountData = ProcessAccountData(h.Store, roomIDToTimeline, req.UserID, isInitial, req.AccountData)
}
if req.Typing != nil && req.Typing.Enabled {
res.Typing = ProcessTyping(h.GlobalCache, roomIDToTimeline, req.UserID, isInitial, req.Typing)
}
if req.Receipts != nil && req.Receipts.Enabled {
res.Receipts = ProcessReceipts(h.Store, roomIDToTimeline, req.UserID, isInitial, req.Receipts)
}
return
}

View File

@ -0,0 +1,69 @@
package extensions
import (
"encoding/json"
"github.com/matrix-org/sync-v3/state"
"github.com/matrix-org/sync-v3/sync3/caches"
)
// Client created request params
type ReceiptsRequest struct {
Enabled bool `json:"enabled"`
}
func (r ReceiptsRequest) ApplyDelta(next *ReceiptsRequest) *ReceiptsRequest {
r.Enabled = next.Enabled
return &r
}
// Server response
type ReceiptsResponse struct {
// room_id -> m.receipt ephemeral event
Rooms map[string]json.RawMessage `json:"rooms,omitempty"`
}
func (r *ReceiptsResponse) HasData(isInitial bool) bool {
if isInitial {
return true
}
return len(r.Rooms) > 0
}
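// Illustrative response shape (not part of this change), assuming a standard m.receipt
// ephemeral event as packed by PackReceiptsIntoEDU below; the field names inside content
// follow the Matrix spec rather than anything defined in this file:
//
//	{
//	  "rooms": {
//	    "!foo:bar": {
//	      "type": "m.receipt",
//	      "content": {
//	        "$event_id": { "m.read": { "@alice:localhost": { "ts": 1670000000000 } } }
//	      }
//	    }
//	  }
//	}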
func ProcessLiveReceipts(up caches.Update, updateWillReturnResponse bool, userID string, req *ReceiptsRequest) (res *ReceiptsResponse) {
switch update := up.(type) {
case *caches.ReceiptUpdate:
// a live receipt event happened, send this back
return &ReceiptsResponse{
Rooms: map[string]json.RawMessage{
update.RoomID(): update.EphemeralEvent,
},
}
}
return nil
}
func ProcessReceipts(store *state.Storage, roomIDToTimeline map[string][]string, userID string, isInitial bool, req *ReceiptsRequest) (res *ReceiptsResponse) {
// grab receipts for all timelines for all the rooms we're going to return
res = &ReceiptsResponse{
Rooms: make(map[string]json.RawMessage),
}
for roomID, timeline := range roomIDToTimeline {
receipts, err := store.ReceiptTable.SelectReceiptsForEvents(roomID, timeline)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to SelectReceiptsForEvents")
continue
}
// always include your own receipts
ownReceipts, err := store.ReceiptTable.SelectReceiptsForUser(roomID, userID)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to SelectReceiptsForUser")
continue
}
if len(receipts) == 0 && len(ownReceipts) == 0 {
continue
}
res.Rooms[roomID], _ = state.PackReceiptsIntoEDU(append(receipts, ownReceipts...))
}
return
}

View File

@ -58,11 +58,16 @@ func ProcessToDevice(store *state.Storage, userID, deviceID string, req *ToDevic
}
}
msgs, upTo, err := store.ToDeviceTable.Messages(deviceID, from, -1, int64(req.Limit))
msgs, upTo, err := store.ToDeviceTable.Messages(deviceID, from, int64(req.Limit))
if err != nil {
l.Err(err).Int64("from", from).Msg("cannot query to-device messages")
return nil
}
err = store.ToDeviceTable.SetUnackedPosition(deviceID, upTo)
if err != nil {
l.Err(err).Msg("cannot set unacked position")
return nil
}
res = &ToDeviceResponse{
NextBatch: fmt.Sprintf("%d", upTo),
Events: msgs,

View File

@ -0,0 +1,77 @@
package extensions
import (
"encoding/json"
"github.com/matrix-org/sync-v3/sync3/caches"
)
// Client created request params
type TypingRequest struct {
Enabled bool `json:"enabled"`
}
func (r TypingRequest) ApplyDelta(next *TypingRequest) *TypingRequest {
r.Enabled = next.Enabled
return &r
}
// Server response
type TypingResponse struct {
Rooms map[string]json.RawMessage `json:"rooms,omitempty"`
}
func (r *TypingResponse) HasData(isInitial bool) bool {
if isInitial {
return true
}
return len(r.Rooms) > 0
}
func ProcessLiveTyping(up caches.Update, updateWillReturnResponse bool, userID string, req *TypingRequest) (res *TypingResponse) {
switch update := up.(type) {
case caches.TypingUpdate:
// a live typing event happened, send this back
return &TypingResponse{
Rooms: map[string]json.RawMessage{
update.RoomID(): update.GlobalRoomMetadata().TypingEvent,
},
}
case caches.RoomUpdate:
// this is a room update which is causing us to return, meaning we are interested in this room.
// send typing for this room.
if !updateWillReturnResponse {
return nil
}
ev := update.GlobalRoomMetadata().TypingEvent
if ev == nil {
return nil
}
return &TypingResponse{
Rooms: map[string]json.RawMessage{
update.RoomID(): ev,
},
}
}
return nil
}
func ProcessTyping(globalCache *caches.GlobalCache, roomIDToTimeline map[string][]string, userID string, isInitial bool, req *TypingRequest) (res *TypingResponse) {
// grab typing users for all the rooms we're going to return
res = &TypingResponse{
Rooms: make(map[string]json.RawMessage),
}
roomIDs := make([]string, 0, len(roomIDToTimeline))
for roomID := range roomIDToTimeline {
roomIDs = append(roomIDs, roomID)
}
roomToGlobalMetadata := globalCache.LoadRooms(roomIDs...)
for roomID := range roomIDToTimeline {
meta := roomToGlobalMetadata[roomID]
if meta == nil || meta.TypingEvent == nil {
continue
}
res.Rooms[roomID] = meta.TypingEvent
}
return
}

View File

@ -4,11 +4,14 @@ import (
"context"
"encoding/json"
"runtime/trace"
"time"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/sync3/caches"
"github.com/matrix-org/sync-v3/sync3/extensions"
"github.com/prometheus/client_golang/prometheus"
"github.com/tidwall/gjson"
)
type JoinChecker interface {
@ -35,29 +38,34 @@ type ConnState struct {
globalCache *caches.GlobalCache
userCache *caches.UserCache
userCacheID int
lazyCache *LazyCache
joinChecker JoinChecker
extensionsHandler extensions.HandlerInterface
extensionsHandler extensions.HandlerInterface
processHistogramVec *prometheus.HistogramVec
}
func NewConnState(
userID, deviceID string, userCache *caches.UserCache, globalCache *caches.GlobalCache,
ex extensions.HandlerInterface, joinChecker JoinChecker,
ex extensions.HandlerInterface, joinChecker JoinChecker, histVec *prometheus.HistogramVec,
) *ConnState {
cs := &ConnState{
globalCache: globalCache,
userCache: userCache,
userID: userID,
deviceID: deviceID,
roomSubscriptions: make(map[string]sync3.RoomSubscription),
lists: sync3.NewInternalRequestLists(),
extensionsHandler: ex,
joinChecker: joinChecker,
globalCache: globalCache,
userCache: userCache,
userID: userID,
deviceID: deviceID,
roomSubscriptions: make(map[string]sync3.RoomSubscription),
lists: sync3.NewInternalRequestLists(),
extensionsHandler: ex,
joinChecker: joinChecker,
lazyCache: NewLazyCache(),
processHistogramVec: histVec,
}
cs.live = &connStateLive{
ConnState: cs,
updates: make(chan caches.Update, MaxPendingEventUpdates), // TODO: customisable
ConnState: cs,
loadPositions: make(map[string]int64),
updates: make(chan caches.Update, MaxPendingEventUpdates), // TODO: customisable
}
cs.userCacheID = cs.userCache.Subsribe(cs)
return cs
@ -108,7 +116,11 @@ func (s *ConnState) load() error {
// OnIncomingRequest is guaranteed to be called sequentially (it's protected by a mutex in conn.go)
func (s *ConnState) OnIncomingRequest(ctx context.Context, cid sync3.ConnID, req *sync3.Request, isInitial bool) (*sync3.Response, error) {
ctx, task := trace.NewTask(ctx, "OnIncomingRequest")
taskType := "OnIncomingRequest"
if isInitial {
taskType = "OnIncomingRequestInitial"
}
ctx, task := trace.NewTask(ctx, taskType)
defer task.End()
if s.loadPosition == 0 {
region := trace.StartRegion(ctx, "load")
@ -122,6 +134,7 @@ func (s *ConnState) OnIncomingRequest(ctx context.Context, cid sync3.ConnID, req
// be on their own goroutine, the requests are linearised for us by Conn so it is safe to modify ConnState without
// additional locking mechanisms.
func (s *ConnState) onIncomingRequest(ctx context.Context, req *sync3.Request, isInitial bool) (*sync3.Response, error) {
start := time.Now()
// ApplyDelta works fine if s.muxedReq is nil
var delta *sync3.RequestDelta
s.muxedReq, delta = s.muxedReq.ApplyDelta(req)
@ -145,9 +158,13 @@ func (s *ConnState) onIncomingRequest(ctx context.Context, req *sync3.Request, i
Lists: respLists,
}
includedRoomIDs := make(map[string]struct{})
includedRoomIDs := make(map[string][]string)
for roomID := range response.Rooms {
includedRoomIDs[roomID] = struct{}{}
eventIDs := make([]string, len(response.Rooms[roomID].Timeline))
for i := range eventIDs {
eventIDs[i] = gjson.ParseBytes(response.Rooms[roomID].Timeline[i]).Get("event_id").Str
}
includedRoomIDs[roomID] = eventIDs
}
// Handle extensions AFTER processing lists as extensions may need to know which rooms the client
// is being notified about (e.g. for room account data)
@ -155,6 +172,15 @@ func (s *ConnState) onIncomingRequest(ctx context.Context, req *sync3.Request, i
response.Extensions = s.extensionsHandler.Handle(ex, includedRoomIDs, isInitial)
region.End()
if response.ListOps() > 0 || len(response.Rooms) > 0 || response.Extensions.HasData(isInitial) {
// we're going to immediately return, so track how long this took. We don't do this for long
// polling requests as high numbers mean nothing. We need to check if we will block as otherwise
// we will have tons of fast requests logged (as they get tracked and then hit live streaming)
// In other words, this metric tracks the time it takes to process _changes_ in the client
// requests (initial connection, modifying index positions, etc) which should always be fast.
s.trackProcessDuration(time.Since(start), isInitial)
}
// do live tracking if we have nothing to tell the client yet
region = trace.StartRegion(ctx, "liveUpdate")
s.live.liveUpdate(ctx, req, ex, isInitial, response)
@ -316,7 +342,37 @@ func (s *ConnState) buildRooms(ctx context.Context, builtSubs []BuiltSubscriptio
defer trace.StartRegion(ctx, "buildRooms").End()
result := make(map[string]sync3.Room)
for _, bs := range builtSubs {
rooms := s.getInitialRoomData(ctx, bs.RoomSubscription, bs.RoomIDs...)
roomIDs := bs.RoomIDs
if bs.RoomSubscription.IncludeOldRooms != nil {
var oldRoomIDs []string
for _, currRoomID := range bs.RoomIDs { // <- the list of subs we definitely are including
// append old rooms if we are joined to them
currRoom := s.lists.Room(currRoomID)
var prevRoomID *string
if currRoom != nil {
prevRoomID = currRoom.PredecessorRoomID
}
for prevRoomID != nil { // <- the chain of old rooms
// if not joined, bail
if !s.joinChecker.IsUserJoined(s.userID, *prevRoomID) {
break
}
oldRoomIDs = append(oldRoomIDs, *prevRoomID)
// keep checking
prevRoom := s.lists.Room(*prevRoomID)
if prevRoom != nil {
prevRoomID = prevRoom.PredecessorRoomID
}
}
}
// old rooms use a different subscription
oldRooms := s.getInitialRoomData(ctx, *bs.RoomSubscription.IncludeOldRooms, oldRoomIDs...)
for oldRoomID, oldRoom := range oldRooms {
result[oldRoomID] = oldRoom
}
}
rooms := s.getInitialRoomData(ctx, bs.RoomSubscription, roomIDs...)
for roomID, room := range rooms {
result[roomID] = room
}
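For illustration, a minimal sketch of the predecessor walk above, assuming hypothetical room IDs and using plain maps in place of the RoomFinder and joinChecker:

// oldRoomChain returns the predecessors of roomID which the user is still joined to,
// nearest first, mirroring the walk in buildRooms above.
func oldRoomChain(roomID string, predecessor map[string]string, joined map[string]bool) []string {
	var out []string
	for prev, ok := predecessor[roomID]; ok; prev, ok = predecessor[prev] {
		if !joined[prev] {
			break // not joined to this old room, so stop walking the chain
		}
		out = append(out, prev)
	}
	return out
}

// e.g !v3 was upgraded from !v2, which was upgraded from !v1; the user is joined
// to !v2 but left !v1, so only !v2 is pulled in via include_old_rooms:
//   oldRoomChain("!v3:hs",
//     map[string]string{"!v3:hs": "!v2:hs", "!v2:hs": "!v1:hs"},
//     map[string]bool{"!v3:hs": true, "!v2:hs": true}) == []string{"!v2:hs"}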
@ -329,7 +385,23 @@ func (s *ConnState) getInitialRoomData(ctx context.Context, roomSub sync3.RoomSu
// We want to grab the user room data and the room metadata for each room ID.
roomIDToUserRoomData := s.userCache.LazyLoadTimelines(s.loadPosition, roomIDs, int(roomSub.TimelineLimit))
roomMetadatas := s.globalCache.LoadRooms(roomIDs...)
roomIDToState := s.globalCache.LoadRoomState(ctx, roomIDs, s.loadPosition, roomSub.RequiredStateMap())
// prepare lazy loading data structures
roomToUsersInTimeline := make(map[string][]string, len(roomIDToUserRoomData))
for roomID, urd := range roomIDToUserRoomData {
set := make(map[string]struct{})
for _, ev := range urd.Timeline {
set[gjson.GetBytes(ev, "sender").Str] = struct{}{}
}
userIDs := make([]string, len(set))
i := 0
for userID := range set {
userIDs[i] = userID
i++
}
roomToUsersInTimeline[roomID] = userIDs
}
rsm := roomSub.RequiredStateMap(s.userID)
roomIDToState := s.globalCache.LoadRoomState(ctx, roomIDs, s.loadPosition, rsm, roomToUsersInTimeline)
for _, roomID := range roomIDs {
userRoomData, ok := roomIDToUserRoomData[roomID]
@ -364,9 +436,26 @@ func (s *ConnState) getInitialRoomData(ctx context.Context, roomSub sync3.RoomSu
PrevBatch: prevBatch,
}
}
if rsm.IsLazyLoading() {
for roomID, userIDs := range roomToUsersInTimeline {
s.lazyCache.Add(roomID, userIDs...)
}
}
return rooms
}
func (s *ConnState) trackProcessDuration(dur time.Duration, isInitial bool) {
if s.processHistogramVec == nil {
return
}
val := "0"
if isInitial {
val = "1"
}
s.processHistogramVec.WithLabelValues(val).Observe(float64(dur.Seconds()))
}
// Called when the connection is torn down
func (s *ConnState) Destroy() {
s.userCache.Unsubscribe(s.userCacheID)
View File
@ -16,7 +16,7 @@ var (
// The max number of events the client is eligible to read (unfiltered) which we are willing to
// buffer on this connection. Too large and we consume lots of memory. Too small and busy accounts
// will trip the connection knifing.
MaxPendingEventUpdates = 200
MaxPendingEventUpdates = 2000
)
// Contains code for processing live updates. Split out from connstate because they concern different
@ -24,6 +24,9 @@ var (
type connStateLive struct {
*ConnState
// roomID -> latest load pos
loadPositions map[string]int64
// A channel which the dispatcher uses to send updates to the conn goroutine
// Consumed when the conn is read. There is a limit to how many updates we will store before
// saying the client is dead and clean up the conn.
@ -124,30 +127,59 @@ func (s *connStateLive) processLiveUpdate(ctx context.Context, up caches.Update,
}
}
// add in initial rooms FIRST as we replace whatever is in the rooms key for these rooms.
// If we do it after appending live updates then we can lose updates because we replace what
// we accumulated.
rooms := s.buildRooms(ctx, builder.BuildSubscriptions())
for roomID, room := range rooms {
response.Rooms[roomID] = room
// remember what point we snapshotted this room, in case we see live events which we have
// already snapshotted here.
s.loadPositions[roomID] = s.loadPosition
}
roomUpdate, _ := up.(caches.RoomUpdate)
roomEventUpdate, _ := up.(*caches.RoomEventUpdate)
// TODO: find a better way to determine if the triggering event should be included e.g ask the lists?
if hasUpdates && roomUpdate != nil {
if hasUpdates && roomEventUpdate != nil {
// include this update in the rooms response TODO: filters on event type?
userRoomData := roomUpdate.UserRoomMetadata()
r := response.Rooms[roomUpdate.RoomID()]
r.HighlightCount = int64(userRoomData.HighlightCount)
r.NotificationCount = int64(userRoomData.NotificationCount)
roomEventUpdate, _ := up.(*caches.RoomEventUpdate)
if roomEventUpdate != nil && roomEventUpdate.EventData.Event != nil {
r.Timeline = append(r.Timeline, s.userCache.AnnotateWithTransactionIDs([]json.RawMessage{
roomEventUpdate.EventData.Event,
})...)
r.NumLive++
advancedPastEvent := false
if roomEventUpdate.EventData.LatestPos <= s.loadPositions[roomEventUpdate.RoomID()] {
// this update has been accounted for by the initial:true room snapshot
advancedPastEvent = true
}
s.loadPositions[roomEventUpdate.RoomID()] = roomEventUpdate.EventData.LatestPos
// we only append to the timeline if we haven't already got this event. This can happen when:
// - 2 live events for a room mid-connection
// - next request bumps a room from outside to inside the window
// - the initial:true room from BuildSubscriptions contains the latest live events in the timeline as it's pulled from the DB
// - we then process the live events in turn which adds them again.
if !advancedPastEvent {
r.Timeline = append(r.Timeline, s.userCache.AnnotateWithTransactionIDs([]json.RawMessage{
roomEventUpdate.EventData.Event,
})...)
roomID := roomEventUpdate.RoomID()
sender := roomEventUpdate.EventData.Sender
if s.lazyCache.IsLazyLoading(roomID) && !s.lazyCache.IsSet(roomID, sender) {
// load the state event
memberEvent := s.globalCache.LoadStateEvent(context.Background(), roomID, s.loadPosition, "m.room.member", sender)
if memberEvent != nil {
r.RequiredState = append(r.RequiredState, memberEvent)
s.lazyCache.AddUser(roomID, sender)
}
}
}
}
response.Rooms[roomUpdate.RoomID()] = r
}
// add in initial rooms
rooms := s.buildRooms(ctx, builder.BuildSubscriptions())
for roomID, room := range rooms {
response.Rooms[roomID] = room
}
if roomUpdate != nil {
// try to find this room in the response. If it's there, then we may need to update some fields.
// there's no guarantees that the room will be in the response if say the event caused it to move
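The skip logic above reduces to a position comparison; a tiny sketch with illustrative names:

// appendLiveEvent reports whether a live event still needs appending, given the
// load position at which this room's initial:true snapshot was taken. It also
// remembers the event's position for the next update, mirroring loadPositions.
func appendLiveEvent(loadPositions map[string]int64, roomID string, eventPos int64) bool {
	alreadySnapshotted := eventPos <= loadPositions[roomID]
	loadPositions[roomID] = eventPos
	return !alreadySnapshotted
}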
View File
@ -18,7 +18,7 @@ import (
type NopExtensionHandler struct{}
func (h *NopExtensionHandler) Handle(req extensions.Request, listRoomIDs map[string]struct{}, isInitial bool) (res extensions.Response) {
func (h *NopExtensionHandler) Handle(req extensions.Request, listRoomIDs map[string][]string, isInitial bool) (res extensions.Response) {
return
}
@ -33,8 +33,8 @@ func (t *NopJoinTracker) IsUserJoined(userID, roomID string) bool {
type NopTransactionFetcher struct{}
func (t *NopTransactionFetcher) TransactionIDForEvent(userID, eventID string) (txnID string) {
return ""
func (t *NopTransactionFetcher) TransactionIDForEvents(userID string, eventID []string) (eventIDToTxnID map[string]string) {
return
}
func newRoomMetadata(roomID string, lastMsgTimestamp gomatrixserverlib.Timestamp) internal.RoomMetadata {
@ -104,7 +104,7 @@ func TestConnStateInitial(t *testing.T) {
}
return result
}
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{})
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{}, nil)
if userID != cs.UserID() {
t.Fatalf("UserID returned wrong value, got %v want %v", cs.UserID(), userID)
}
@ -199,7 +199,7 @@ func TestConnStateInitial(t *testing.T) {
newEvent = testutils.NewEvent(t, "unimportant", "me", struct{}{}, testutils.WithTimestamp(timestampNow.Add(2*time.Second)))
dispatcher.OnNewEvents(roomA.RoomID, []json.RawMessage{
newEvent,
}, 1)
}, 2)
res, err = cs.OnIncomingRequest(context.Background(), ConnID, &sync3.Request{
Lists: []sync3.RequestList{{
Sort: []string{sync3.SortByRecency},
@ -268,7 +268,7 @@ func TestConnStateMultipleRanges(t *testing.T) {
userCache.LazyRoomDataOverride = mockLazyRoomOverride
dispatcher.Register(userCache.UserID, userCache)
dispatcher.Register(sync3.DispatcherAllUsers, globalCache)
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{})
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{}, nil)
// request first page
res, err := cs.OnIncomingRequest(context.Background(), ConnID, &sync3.Request{
@ -445,7 +445,7 @@ func TestBumpToOutsideRange(t *testing.T) {
userCache.LazyRoomDataOverride = mockLazyRoomOverride
dispatcher.Register(userCache.UserID, userCache)
dispatcher.Register(sync3.DispatcherAllUsers, globalCache)
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{})
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{}, nil)
// Ask for A,B
res, err := cs.OnIncomingRequest(context.Background(), ConnID, &sync3.Request{
Lists: []sync3.RequestList{{
@ -553,7 +553,7 @@ func TestConnStateRoomSubscriptions(t *testing.T) {
}
dispatcher.Register(userCache.UserID, userCache)
dispatcher.Register(sync3.DispatcherAllUsers, globalCache)
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{})
cs := NewConnState(userID, deviceID, userCache, globalCache, &NopExtensionHandler{}, &NopJoinTracker{}, nil)
// subscribe to room D
res, err := cs.OnIncomingRequest(context.Background(), ConnID, &sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
View File
@ -0,0 +1,97 @@
package handler
import (
"sync"
"github.com/matrix-org/sync-v3/pubsub"
)
type pendingInfo struct {
done bool
ch chan struct{}
}
type EnsurePoller struct {
chanName string
mu *sync.Mutex
pendingPolls map[string]pendingInfo
notifier pubsub.Notifier
}
func NewEnsurePoller(notifier pubsub.Notifier) *EnsurePoller {
return &EnsurePoller{
chanName: pubsub.ChanV3,
mu: &sync.Mutex{},
pendingPolls: make(map[string]pendingInfo),
notifier: notifier,
}
}
// EnsurePolling blocks until the V2InitialSyncComplete response is received for this device. It is
// the caller's responsibility to call OnInitialSyncComplete when the V2InitialSyncComplete payload arrives.
func (p *EnsurePoller) EnsurePolling(userID, deviceID string) {
key := userID + "|" + deviceID
p.mu.Lock()
// do we need to wait?
if p.pendingPolls[key].done {
p.mu.Unlock()
return
}
// have we called EnsurePolling for this user/device before?
ch := p.pendingPolls[key].ch
if ch != nil {
p.mu.Unlock()
// we already called EnsurePolling on this device, so just listen for the close
// TODO: several times there have been problems getting the response back from the poller
// we should time out here after 100s and return an error or something to kick conns into
// trying again
<-ch
return
}
// Make a channel to wait until we have done an initial sync
ch = make(chan struct{})
p.pendingPolls[key] = pendingInfo{
done: false,
ch: ch,
}
p.mu.Unlock()
// ask the pollers to poll for this device
p.notifier.Notify(p.chanName, &pubsub.V3EnsurePolling{
UserID: userID,
DeviceID: deviceID,
})
// if by some miracle the notify AND sync completes before we receive on ch then this is
// still fine as recv on a closed channel will return immediately.
<-ch
}
func (p *EnsurePoller) OnInitialSyncComplete(payload *pubsub.V2InitialSyncComplete) {
key := payload.UserID + "|" + payload.DeviceID
p.mu.Lock()
defer p.mu.Unlock()
pending, ok := p.pendingPolls[key]
// were we waiting for this initial sync to complete?
if !ok {
// This can happen when the v2 poller spontaneously starts polling even without us asking it to
// e.g from the database
p.pendingPolls[key] = pendingInfo{
done: true,
}
return
}
if pending.done {
// nothing to do, we just got OnInitialSyncComplete called twice
return
}
// we get here if we asked the poller to start via EnsurePolling, so let's make that goroutine
// wake up now
ch := pending.ch
pending.done = true
pending.ch = nil
p.pendingPolls[key] = pending
close(ch)
}
func (p *EnsurePoller) Teardown() {
p.notifier.Close()
}
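A minimal usage sketch of the two halves, assuming hypothetical user/device IDs and that `notifier` is whichever pubsub.Notifier the caller already holds:

p := NewEnsurePoller(notifier)

// Request goroutine: blocks until the poller reports its initial v2 sync.
go func() {
	p.EnsurePolling("@alice:example.org", "DEVICE1")
	// safe to serve the v3 request for this device now
}()

// Pubsub consumer side: unblocks any waiters for this user/device.
p.OnInitialSyncComplete(&pubsub.V2InitialSyncComplete{
	UserID:   "@alice:example.org",
	DeviceID: "DEVICE1",
})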
View File
@ -6,15 +6,19 @@ import (
"net/http"
"net/url"
"os"
"reflect"
"strconv"
"sync"
"time"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/pubsub"
"github.com/matrix-org/sync-v3/state"
"github.com/matrix-org/sync-v3/sync2"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/sync3/caches"
"github.com/matrix-org/sync-v3/sync3/extensions"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
"github.com/rs/zerolog/hlog"
"github.com/rs/zerolog/log"
@ -33,7 +37,8 @@ type SyncLiveHandler struct {
V2 sync2.Client
Storage *state.Storage
V2Store *sync2.Storage
PollerMap *sync2.PollerMap
V2Sub *pubsub.V2Sub
V3Pub *EnsurePoller
ConnMap *sync3.ConnMap
Extensions *extensions.Handler
@ -44,92 +49,107 @@ type SyncLiveHandler struct {
Dispatcher *sync3.Dispatcher
GlobalCache *caches.GlobalCache
numConns prometheus.Gauge
histVec *prometheus.HistogramVec
}
func NewSync3Handler(v2Client sync2.Client, postgresDBURI, secret string, debug bool) (*SyncLiveHandler, error) {
func NewSync3Handler(
store *state.Storage, storev2 *sync2.Storage, v2Client sync2.Client, postgresDBURI, secret string,
debug bool, pub pubsub.Notifier, sub pubsub.Listener, enablePrometheus bool,
) (*SyncLiveHandler, error) {
logger.Info().Msg("creating handler")
if debug {
zerolog.SetGlobalLevel(zerolog.TraceLevel)
} else {
zerolog.SetGlobalLevel(zerolog.InfoLevel)
}
store := state.NewStorage(postgresDBURI)
sh := &SyncLiveHandler{
V2: v2Client,
Storage: store,
V2Store: sync2.NewStore(postgresDBURI, secret),
V2Store: storev2,
ConnMap: sync3.NewConnMap(),
userCaches: &sync.Map{},
Dispatcher: sync3.NewDispatcher(),
GlobalCache: caches.NewGlobalCache(store),
}
sh.PollerMap = sync2.NewPollerMap(v2Client, sh)
sh.Extensions = &extensions.Handler{
Store: store,
E2EEFetcher: sh.PollerMap,
}
roomToJoinedUsers, err := store.AllJoinedMembers()
if err != nil {
return nil, err
E2EEFetcher: sh,
GlobalCache: sh.GlobalCache,
}
if err := sh.Dispatcher.Startup(roomToJoinedUsers); err != nil {
return nil, fmt.Errorf("failed to load sync3.Dispatcher: %s", err)
}
sh.Dispatcher.Register(sync3.DispatcherAllUsers, sh.GlobalCache)
// every room will be present here
roomIDToMetadata, err := store.MetadataForAllRooms()
if err != nil {
return nil, fmt.Errorf("could not get metadata for all rooms: %s", err)
if enablePrometheus {
sh.addPrometheusMetrics()
pub = pubsub.NewPromNotifier(pub, "api")
}
if err := sh.GlobalCache.Startup(roomIDToMetadata); err != nil {
return nil, fmt.Errorf("failed to populate global cache: %s", err)
}
// set up pubsub mechanism to start from this point
sh.V3Pub = NewEnsurePoller(pub)
sh.V2Sub = pubsub.NewV2Sub(sub, sh)
return sh, nil
}
// used in tests to close postgres connections
func (h *SyncLiveHandler) Teardown() {
h.Storage.Teardown()
func (h *SyncLiveHandler) Startup(storeSnapshot *state.StartupSnapshot) error {
if err := h.Dispatcher.Startup(storeSnapshot.AllJoinedMembers); err != nil {
return fmt.Errorf("failed to load sync3.Dispatcher: %s", err)
}
h.Dispatcher.Register(sync3.DispatcherAllUsers, h.GlobalCache)
if err := h.GlobalCache.Startup(storeSnapshot.GlobalMetadata); err != nil {
return fmt.Errorf("failed to populate global cache: %s", err)
}
return nil
}
func (h *SyncLiveHandler) StartV2Pollers() {
devices, err := h.V2Store.AllDevices()
if err != nil {
logger.Err(err).Msg("StartV2Pollers: failed to query devices")
// Listen starts all consumers
func (h *SyncLiveHandler) Listen() {
go func() {
err := h.V2Sub.Listen()
if err != nil {
logger.Err(err).Msg("Failed to listen for v2 messages")
}
}()
}
// used in tests to close postgres connections
func (h *SyncLiveHandler) Teardown() {
// tear down DB conns
h.Storage.Teardown()
h.V2Sub.Teardown()
h.V3Pub.Teardown()
h.ConnMap.Teardown()
if h.numConns != nil {
prometheus.Unregister(h.numConns)
}
if h.histVec != nil {
prometheus.Unregister(h.histVec)
}
}
func (h *SyncLiveHandler) updateMetrics() {
if h.numConns == nil {
return
}
logger.Info().Int("num_devices", len(devices)).Msg("StartV2Pollers")
// how many concurrent pollers to make at startup.
// Too high and this will flood the upstream server with sync requests at startup.
// Too low and this will take ages for the v2 pollers to startup.
numWorkers := 16
ch := make(chan sync2.Device, len(devices))
for _, d := range devices {
// if we fail to decrypt the access token, skip it.
if d.AccessToken == "" {
continue
}
ch <- d
}
close(ch)
var wg sync.WaitGroup
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
defer wg.Done()
for d := range ch {
h.PollerMap.EnsurePolling(
d.AccessToken, d.UserID, d.DeviceID, d.Since,
logger.With().Str("user_id", d.UserID).Logger(),
)
}
}()
}
wg.Wait()
logger.Info().Msg("StartV2Pollers finished")
h.numConns.Set(float64(h.ConnMap.Len()))
}
func (h *SyncLiveHandler) addPrometheusMetrics() {
h.numConns = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "sliding_sync",
Subsystem: "api",
Name: "num_active_conns",
Help: "Number of active sliding sync connections.",
})
h.histVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "sliding_sync",
Subsystem: "api",
Name: "process_duration_secs",
Help: "Time taken in seconds for the sliding sync response to calculated, excludes long polling",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}, []string{"initial"})
prometheus.MustRegister(h.numConns)
prometheus.MustRegister(h.histVec)
}
func (h *SyncLiveHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@ -146,6 +166,9 @@ func (h *SyncLiveHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
Err: err,
}
}
// artificially wait a bit before sending back the error
// this guards against tight-looping when the client hammers the server with invalid requests
time.Sleep(time.Second)
w.WriteHeader(herr.StatusCode)
w.Write(herr.JSON())
}
@ -164,6 +187,14 @@ func (h *SyncLiveHandler) serve(w http.ResponseWriter, req *http.Request) error
}
}
}
for i, l := range requestBody.Lists {
if l.Ranges != nil && !l.Ranges.Valid() {
return &internal.HandlerError{
StatusCode: 400,
Err: fmt.Errorf("list[%d] invalid ranges %v", i, l.Ranges),
}
}
}
conn, err := h.setupConnection(req, &requestBody, req.URL.Query().Get("pos") != "")
if err != nil {
@ -207,7 +238,15 @@ func (h *SyncLiveHandler) serve(w http.ResponseWriter, req *http.Request) error
if resp.Extensions.AccountData != nil {
numGlobalAccountData = len(resp.Extensions.AccountData.Global)
}
internal.SetRequestContextResponseInfo(req.Context(), cpos, resp.PosInt(), len(resp.Rooms), requestBody.TxnID, numToDeviceEvents, numGlobalAccountData)
var numChangedDevices, numLeftDevices int
if resp.Extensions.E2EE != nil && resp.Extensions.E2EE.DeviceLists != nil {
numChangedDevices = len(resp.Extensions.E2EE.DeviceLists.Changed)
numLeftDevices = len(resp.Extensions.E2EE.DeviceLists.Left)
}
internal.SetRequestContextResponseInfo(
req.Context(), cpos, resp.PosInt(), len(resp.Rooms), requestBody.TxnID, numToDeviceEvents, numGlobalAccountData,
numChangedDevices, numLeftDevices,
)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
@ -255,10 +294,7 @@ func (h *SyncLiveHandler) setupConnection(req *http.Request, syncReq *sync3.Requ
return conn, nil
}
// conn doesn't exist, we probably nuked it.
return nil, &internal.HandlerError{
StatusCode: 400,
Err: fmt.Errorf("session expired"),
}
return nil, internal.ExpiredSessionError()
}
// We're going to make a new connection
@ -287,10 +323,7 @@ func (h *SyncLiveHandler) setupConnection(req *http.Request, syncReq *sync3.Requ
}
log.Trace().Str("user", v2device.UserID).Msg("checking poller exists and is running")
h.PollerMap.EnsurePolling(
accessToken, v2device.UserID, v2device.DeviceID, v2device.Since,
hlog.FromRequest(req).With().Str("user_id", v2device.UserID).Logger(),
)
h.V3Pub.EnsurePolling(v2device.UserID, v2device.DeviceID)
log.Trace().Str("user", v2device.UserID).Msg("poller exists and is running")
// this may take a while so if the client has given up (e.g timed out) by this point, just stop.
// We'll be quicker next time as the poller will already exist.
@ -313,6 +346,9 @@ func (h *SyncLiveHandler) setupConnection(req *http.Request, syncReq *sync3.Requ
}
}
// once we have the conn, make sure our metrics are correct
defer h.updateMetrics()
// Now the v2 side of things are running, we can make a v3 live sync conn
// NB: this isn't inherently racey (we did the check for an existing conn before EnsurePolling)
// because we *either* do the existing check *or* make a new conn. It's important for CreateConn
@ -321,7 +357,7 @@ func (h *SyncLiveHandler) setupConnection(req *http.Request, syncReq *sync3.Requ
conn, created := h.ConnMap.CreateConn(sync3.ConnID{
DeviceID: deviceID,
}, func() sync3.ConnHandler {
return NewConnState(v2device.UserID, v2device.DeviceID, userCache, h.GlobalCache, h.Extensions, h.Dispatcher)
return NewConnState(v2device.UserID, v2device.DeviceID, userCache, h.GlobalCache, h.Extensions, h.Dispatcher, h.histVec)
})
if created {
log.Info().Str("user", v2device.UserID).Str("conn_id", conn.ConnID.String()).Msg("created new connection")
@ -331,13 +367,21 @@ func (h *SyncLiveHandler) setupConnection(req *http.Request, syncReq *sync3.Requ
return conn, nil
}
func (h *SyncLiveHandler) CacheForUser(userID string) *caches.UserCache {
c, ok := h.userCaches.Load(userID)
if ok {
return c.(*caches.UserCache)
}
return nil
}
func (h *SyncLiveHandler) userCache(userID string) (*caches.UserCache, error) {
// bail if we already have a cache
c, ok := h.userCaches.Load(userID)
if ok {
return c.(*caches.UserCache), nil
}
uc := caches.NewUserCache(userID, h.GlobalCache, h.Storage, h.PollerMap)
uc := caches.NewUserCache(userID, h.GlobalCache, h.Storage, h)
// select all non-zero highlight or notif counts and set them, as this is less costly than looping every room/user pair
err := h.Storage.UnreadTable.SelectAllNonZeroCountsForUser(userID, func(roomID string, highlightCount, notificationCount int) {
uc.OnUnreadCounts(roomID, &highlightCount, &notificationCount)
@ -346,12 +390,12 @@ func (h *SyncLiveHandler) userCache(userID string) (*caches.UserCache, error) {
return nil, fmt.Errorf("failed to load unread counts: %s", err)
}
// select the DM account data event and set DM room status
directEvent, err := h.Storage.AccountData(userID, sync2.AccountDataGlobalRoom, "m.direct")
directEvent, err := h.Storage.AccountData(userID, sync2.AccountDataGlobalRoom, []string{"m.direct"})
if err != nil {
return nil, fmt.Errorf("failed to load direct message status for rooms: %s", err)
}
if directEvent != nil {
uc.OnAccountData([]state.AccountData{*directEvent})
if len(directEvent) == 1 {
uc.OnAccountData([]state.AccountData{directEvent[0]})
}
// select all room tag account data and set it
@ -387,111 +431,180 @@ func (h *SyncLiveHandler) userCache(userID string) (*caches.UserCache, error) {
return uc, nil
}
// Called from the v2 poller, implements V2DataReceiver
func (h *SyncLiveHandler) UpdateDeviceSince(deviceID, since string) {
err := h.V2Store.UpdateDeviceSince(deviceID, since)
// Implements E2EEFetcher
// DeviceData returns the latest device data for this user. isInitial should be set if this is for
// an initial /sync request.
func (h *SyncLiveHandler) DeviceData(userID, deviceID string, isInitial bool) *internal.DeviceData {
// We have 2 sources of DeviceData:
// - pubsub updates stored in deviceDataMap
// - the database itself
// Ideally we would always pull from deviceDataMap and ignore the database entirely,
// but in practice we need to do a database hit to atomically swap device lists over. Why?
//
// changed|left are much more important and special because:
//
// - sync v2 only sends deltas for changed|left, unlike otk counts and fallback key types which arrive in full
// - we MUST guarantee that we send this to the client, as missing a user in `changed` can result in us having the wrong
// device lists for that user resulting in encryption breaking when the client encrypts for known devices.
// - we MUST NOT continually send the same device list changes on each subsequent request i.e we need to delete them
//
// We accumulate device list deltas on the v2 poller side, upserting into the database and sending pubsub notifs for.
// The accumulated deltas are stored in DeviceData.DeviceLists.New
// To guarantee we send this to the client, we need to consider a few failure modes:
// - The response is lost and the request is retried to this proxy -> ConnMap caches will get it.
// - The response is lost and the client doesn't retry until the connection expires. They then retry ->
// ConnMap cache miss, sends HTTP 400 due to invalid ?pos=
// - The response is received and the client sends the next request -> do not send deltas.
// To handle the case where responses are lost, we just need to see if this is an initial request
// and if so, return a "Read-Only" snapshot of the last sent device list changes. This means we may send
// duplicate device list changes if the response did in fact get to the client and the next request hit a
// new proxy, but that's better than losing updates. In this scenario, we do not delete any data.
// To ensure we delete device list updates over time, we now want to swap what was New to Sent and then
// send Sent. That means we forget what was originally in Sent and New is empty. We need to read and swap
// atomically, else the v2 poller may insert a new update after the read but before the swap (DELETE on New).
// To ensure atomicity, we need to do this in a txn.
// Atomically move New to Sent so New is now empty and what was originally in Sent is forgotten.
shouldSwap := !isInitial
dd, err := h.Storage.DeviceDataTable.Select(userID, deviceID, shouldSwap)
if err != nil {
logger.Err(err).Str("device", deviceID).Str("since", since).Msg("V2: failed to persist since token")
logger.Err(err).Str("user", userID).Msg("failed to SelectAndSwap device data")
return nil
}
return dd
}
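A toy model of the swap semantics described above, purely illustrative; the real swap happens inside DeviceDataTable.Select within a single database transaction:

type deviceListDeltas struct {
	New  map[string]string // accumulated by the v2 poller, not yet sent
	Sent map[string]string // what the previous response carried
}

// selectAndMaybeSwap mirrors the behaviour: initial requests get a read-only view of
// Sent (so lost responses can be replayed), while non-initial requests move New -> Sent
// so each delta is sent exactly once on the happy path.
func selectAndMaybeSwap(d *deviceListDeltas, isInitial bool) map[string]string {
	if isInitial {
		return d.Sent
	}
	d.Sent = d.New
	d.New = make(map[string]string)
	return d.Sent
}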
// Implements TransactionIDFetcher
func (h *SyncLiveHandler) TransactionIDForEvents(userID string, eventIDs []string) (eventIDToTxnID map[string]string) {
eventIDToTxnID, err := h.Storage.TransactionsTable.Select(userID, eventIDs)
if err != nil {
logger.Warn().Str("err", err.Error()).Str("user", userID).Msg("failed to select txn IDs for events")
}
return
}
func (h *SyncLiveHandler) OnInitialSyncComplete(p *pubsub.V2InitialSyncComplete) {
h.V3Pub.OnInitialSyncComplete(p)
}
// Called from the v2 poller, implements V2DataReceiver
func (h *SyncLiveHandler) Accumulate(roomID, prevBatch string, timeline []json.RawMessage) {
numNew, latestPos, err := h.Storage.Accumulate(roomID, prevBatch, timeline)
func (h *SyncLiveHandler) Accumulate(p *pubsub.V2Accumulate) {
events, err := h.Storage.EventNIDs(p.EventNIDs)
if err != nil {
logger.Err(err).Int("timeline", len(timeline)).Str("room", roomID).Msg("V2: failed to accumulate room")
logger.Err(err).Str("room", p.RoomID).Msg("Accumulate: failed to EventNIDs")
return
}
if numNew == 0 {
// no new events
if len(events) == 0 {
return
}
newEvents := timeline[len(timeline)-numNew:]
// we have new events, notify active connections
h.Dispatcher.OnNewEvents(roomID, newEvents, latestPos)
h.Dispatcher.OnNewEvents(p.RoomID, events, p.EventNIDs[len(p.EventNIDs)-1])
}
// Called from the v2 poller, implements V2DataReceiver
func (h *SyncLiveHandler) Initialise(roomID string, state []json.RawMessage) {
added, err := h.Storage.Initialise(roomID, state)
func (h *SyncLiveHandler) Initialise(p *pubsub.V2Initialise) {
state, err := h.Storage.StateSnapshot(p.SnapshotNID)
if err != nil {
logger.Err(err).Int("state", len(state)).Str("room", roomID).Msg("V2: failed to initialise room")
return
}
if !added {
// no new events
logger.Err(err).Int64("snap", p.SnapshotNID).Str("room", p.RoomID).Msg("Initialise: failed to get StateSnapshot")
return
}
// we have new state, notify caches
h.Dispatcher.OnNewEvents(roomID, state, 0)
h.Dispatcher.OnNewInitialRoomState(p.RoomID, state)
}
// Called from the v2 poller, implements V2DataReceiver
func (h *SyncLiveHandler) SetTyping(roomID string, userIDs []string) {
_, err := h.Storage.TypingTable.SetTyping(roomID, userIDs)
if err != nil {
logger.Err(err).Strs("users", userIDs).Str("room", roomID).Msg("V2: failed to store typing")
}
}
// Called from the v2 poller, implements V2DataReceiver
// Add messages for this device. If an error is returned, the poll loop is terminated as continuing
// would implicitly acknowledge these messages.
func (h *SyncLiveHandler) AddToDeviceMessages(userID, deviceID string, msgs []json.RawMessage) {
_, err := h.Storage.ToDeviceTable.InsertMessages(deviceID, msgs)
if err != nil {
logger.Err(err).Str("user", userID).Str("device", deviceID).Int("msgs", len(msgs)).Msg("V2: failed to store to-device messages")
}
}
func (h *SyncLiveHandler) UpdateUnreadCounts(roomID, userID string, highlightCount, notifCount *int) {
err := h.Storage.UnreadTable.UpdateUnreadCounters(userID, roomID, highlightCount, notifCount)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to update unread counters")
}
userCache, ok := h.userCaches.Load(userID)
func (h *SyncLiveHandler) OnUnreadCounts(p *pubsub.V2UnreadCounts) {
userCache, ok := h.userCaches.Load(p.UserID)
if !ok {
return
}
userCache.(*caches.UserCache).OnUnreadCounts(roomID, highlightCount, notifCount)
userCache.(*caches.UserCache).OnUnreadCounts(p.RoomID, p.HighlightCount, p.NotificationCount)
}
func (h *SyncLiveHandler) OnInvite(userID, roomID string, inviteState []json.RawMessage) {
err := h.Storage.InvitesTable.InsertInvite(userID, roomID, inviteState)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to insert invite")
}
userCache, ok := h.userCaches.Load(userID)
// TODO: We don't eagerly push device data updates on waiting conns (otk counts, device list changes)
// Do we need to?
func (h *SyncLiveHandler) OnDeviceData(p *pubsub.V2DeviceData) {
// Do nothing for now
}
func (h *SyncLiveHandler) OnInvite(p *pubsub.V2InviteRoom) {
userCache, ok := h.userCaches.Load(p.UserID)
if !ok {
return
}
userCache.(*caches.UserCache).OnInvite(roomID, inviteState)
inviteState, err := h.Storage.InvitesTable.SelectInviteState(p.UserID, p.RoomID)
if err != nil {
logger.Err(err).Str("user", p.UserID).Str("room", p.RoomID).Msg("failed to get invite state")
return
}
userCache.(*caches.UserCache).OnInvite(p.RoomID, inviteState)
}
func (h *SyncLiveHandler) OnLeftRoom(userID, roomID string) {
// remove any invites for this user if they are rejecting an invite
err := h.Storage.InvitesTable.RemoveInvite(userID, roomID)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to retire invite")
}
userCache, ok := h.userCaches.Load(userID)
func (h *SyncLiveHandler) OnLeftRoom(p *pubsub.V2LeaveRoom) {
userCache, ok := h.userCaches.Load(p.UserID)
if !ok {
return
}
userCache.(*caches.UserCache).OnLeftRoom(roomID)
userCache.(*caches.UserCache).OnLeftRoom(p.RoomID)
}
func (h *SyncLiveHandler) OnAccountData(userID, roomID string, events []json.RawMessage) {
data, err := h.Storage.InsertAccountData(userID, roomID, events)
if err != nil {
logger.Err(err).Str("user", userID).Str("room", roomID).Msg("failed to update account data")
func (h *SyncLiveHandler) OnReceipt(p *pubsub.V2Receipt) {
// split receipts into public / private
userToPrivateReceipts := make(map[string][]internal.Receipt)
publicReceipts := make([]internal.Receipt, 0, len(p.Receipts))
for _, r := range p.Receipts {
if r.IsPrivate {
userToPrivateReceipts[r.UserID] = append(userToPrivateReceipts[r.UserID], r)
} else {
publicReceipts = append(publicReceipts, r)
}
}
// always send private receipts, directly to the connected user cache if one exists
for userID, privateReceipts := range userToPrivateReceipts {
userCache, ok := h.userCaches.Load(userID)
if !ok {
continue
}
ephEvent, err := state.PackReceiptsIntoEDU(privateReceipts)
if err != nil {
logger.Err(err).Str("room", p.RoomID).Str("user", userID).Msg("unable to pack private receipts into EDU")
continue
}
userCache.(*caches.UserCache).OnEphemeralEvent(p.RoomID, ephEvent)
}
if len(publicReceipts) == 0 {
return
}
userCache, ok := h.userCaches.Load(userID)
// inform the dispatcher of global receipts
ephEvent, err := state.PackReceiptsIntoEDU(publicReceipts)
if err != nil {
logger.Err(err).Str("room", p.RoomID).Msg("unable to pack receipts into EDU")
return
}
h.Dispatcher.OnEphemeralEvent(p.RoomID, ephEvent)
}
func (h *SyncLiveHandler) OnTyping(p *pubsub.V2Typing) {
rooms := h.GlobalCache.LoadRooms(p.RoomID)
if rooms[p.RoomID] != nil {
if reflect.DeepEqual(p.EphemeralEvent, rooms[p.RoomID].TypingEvent) {
return // it's a duplicate, which happens when 2+ users are in the same room
}
}
h.Dispatcher.OnEphemeralEvent(p.RoomID, p.EphemeralEvent)
}
func (h *SyncLiveHandler) OnAccountData(p *pubsub.V2AccountData) {
userCache, ok := h.userCaches.Load(p.UserID)
if !ok {
return
}
data, err := h.Storage.AccountData(p.UserID, p.RoomID, p.Types)
if err != nil {
logger.Err(err).Str("user", p.UserID).Str("room", p.RoomID).Msg("OnAccountData: failed to lookup")
return
}
userCache.(*caches.UserCache).OnAccountData(data)
}
View File
@ -0,0 +1,44 @@
package handler
type LazyCache struct {
cache map[string]struct{}
rooms map[string]struct{}
}
func NewLazyCache() *LazyCache {
return &LazyCache{
cache: make(map[string]struct{}),
rooms: make(map[string]struct{}),
}
}
func (lc *LazyCache) IsSet(roomID, userID string) bool {
key := roomID + " | " + userID
_, exists := lc.cache[key]
return exists
}
// IsLazyLoading returns true if this room is being lazy loaded.
func (lc *LazyCache) IsLazyLoading(roomID string) bool {
_, exists := lc.rooms[roomID]
return exists
}
func (lc *LazyCache) Add(roomID string, userIDs ...string) {
for _, u := range userIDs {
lc.AddUser(roomID, u)
}
}
// AddUser to this room. Returns true if this is the first time this user has done so, and
// hence you should include the member event for this user.
func (lc *LazyCache) AddUser(roomID, userID string) bool {
lc.rooms[roomID] = struct{}{}
key := roomID + " | " + userID
_, exists := lc.cache[key]
if exists {
return false
}
lc.cache[key] = struct{}{}
return true
}
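A usage sketch for the cache above, with hypothetical room and user IDs:

lc := NewLazyCache()
lc.Add("!room:example.org", "@alice:example.org", "@bob:example.org")

_ = lc.IsLazyLoading("!room:example.org")               // true: this room is lazy loading members
_ = lc.IsSet("!room:example.org", "@alice:example.org") // true: alice's member event was already sent

// First time carol speaks in this room: include her m.room.member event exactly once.
if lc.AddUser("!room:example.org", "@carol:example.org") {
	// append carol's member event to required_state
}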
View File
@ -69,9 +69,12 @@ func (s *InternalRequestLists) SetRoom(r RoomConnMetadata) (delta RoomDelta) {
strings.Trim(internal.CalculateRoomName(&r.RoomMetadata, 5), "#!():_@"),
)
}
// filter.Include may look up this room ID via the RoomFinder, so make sure it can be found.
s.allRooms[r.RoomID] = r
for i := range s.lists {
_, alreadyExists := s.lists[i].roomIDToIndex[r.RoomID]
shouldExist := s.lists[i].filter.Include(&r)
shouldExist := s.lists[i].filter.Include(&r, s)
if shouldExist && r.HasLeft {
shouldExist = false
}
@ -97,7 +100,6 @@ func (s *InternalRequestLists) SetRoom(r RoomConnMetadata) (delta RoomDelta) {
} // else it doesn't exist and it shouldn't exist, so do nothing e.g room isn't relevant to this list
}
}
s.allRooms[r.RoomID] = r
return delta
}
View File
@ -8,7 +8,7 @@ import (
type SliceRanges [][2]int64
func (r SliceRanges) Valid() bool {
for _, sr := range r {
for i, sr := range r {
// always goes from start to end
if sr[1] < sr[0] {
return false
@ -16,6 +16,21 @@ func (r SliceRanges) Valid() bool {
if sr[0] < 0 {
return false
}
// cannot have overlapping ranges
for j := i + 1; j < len(r); j++ {
testRange := r[j]
// check both ranges with each other
for _, val := range sr {
if testRange[0] <= val && val <= testRange[1] {
return false
}
}
for _, val := range testRange {
if sr[0] <= val && val <= sr[1] {
return false
}
}
}
}
return true
}
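For example, under the new overlap rule (values mirror the test cases further down):

_ = SliceRanges{{0, 20}, {40, 60}}.Valid() // true: disjoint ranges
_ = SliceRanges{{0, 20}, {20, 40}}.Valid() // false: 20 appears in both ranges
_ = SliceRanges{{10, 5}}.Valid()           // false: end before start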
@ -33,14 +48,15 @@ func (r SliceRanges) Inside(i int64) ([2]int64, bool) {
// ClosestInDirection returns the index position of a range bound that is closest to `i`, heading either
// towards 0 or towards infinity. If there is no range boundary in that direction, -1 is returned.
// For example:
// [0,20] i=25,towardsZero=true => 20
// [0,20] i=15,towardsZero=true => 0
// [0,20] i=15,towardsZero=false => 20
// [0,20] i=25,towardsZero=false => -1
// [0,20],[40,60] i=25,towardsZero=true => 20
// [0,20],[40,60] i=25,towardsZero=false => 40
// [0,20],[40,60] i=40,towardsZero=true => 40
// [20,40] i=40,towardsZero=true => 20
//
// [0,20] i=25,towardsZero=true => 20
// [0,20] i=15,towardsZero=true => 0
// [0,20] i=15,towardsZero=false => 20
// [0,20] i=25,towardsZero=false => -1
// [0,20],[40,60] i=25,towardsZero=true => 20
// [0,20],[40,60] i=25,towardsZero=false => 40
// [0,20],[40,60] i=40,towardsZero=true => 40
// [20,40] i=40,towardsZero=true => 20
func (r SliceRanges) ClosestInDirection(i int64, towardsZero bool) (closestIndex int64) {
// sort all range boundaries in ascending order
indexes := make([]int64, 0, len(r)*2)
@ -98,7 +114,9 @@ type pointInfo struct {
isOpen bool
}
// TODO: A,B,C track A,B then B,C incorrectly keeps B?
func (p pointInfo) same(o *pointInfo) bool {
return p.x == o.x && p.isOldRange == o.isOldRange && p.isOpen == o.isOpen
}
// Delta returns the ranges which are unchanged, added and removed.
// Intelligently handles overlaps.
@ -120,20 +138,19 @@ func (r SliceRanges) Delta(next SliceRanges) (added SliceRanges, removed SliceRa
return
}
}
// sort all points from min to max then do a sweep line algorithm over it with open/closed lists
// to track overlaps, runtime O(nlogn) due to sorting points
// Old ranges
// .-----. .------.
// -------------------------------------------> number line
// `-----` `-----`
// New ranges
//
// .--. .------.
// -----==------------------------------------> number line
// `--` `-----`
// .-----. .------. Old ranges
// -------------------------------------------> number line
// `-----` `-----` New ranges
//
//
// .--. .------. Old ranges
// -----==------------------------------------> number line (== same ranges)
// `--` `-----` New ranges
// Overlap has old/same/new
var points []pointInfo // a range = 2 points on the x-axis
var points []pointInfo // a range = 2x pointInfo on the x-axis
for _, oldRange := range r {
points = append(points, pointInfo{
x: oldRange[0],
@ -156,16 +173,20 @@ func (r SliceRanges) Delta(next SliceRanges) (added SliceRanges, removed SliceRa
isOldRange: false,
})
}
sort.Slice(points, func(i, j int) bool {
return points[i].x < points[j].x
})
sortPoints(points)
// sweep from low to high and keep tabs of which point is open
var openOldPoint *pointInfo
var openNewPoint *pointInfo
var lastPoint *pointInfo
var lastMergedRange *[2]int64
for i := range points {
point := points[i]
// e.g someone does [[0, 20] [0, 20]] in a single range which results in 0,0,20,20
if lastPoint != nil && point.same(lastPoint) {
continue
}
// We are effectively tracking a finite state machine that looks like:
//
// .------> O <--*--.
@ -184,59 +205,24 @@ func (r SliceRanges) Delta(next SliceRanges) (added SliceRanges, removed SliceRa
same = append(same, [2]int64{
lastPoint.x, point.x,
})
lastMergedRange = &same[len(same)-1]
} else if openNewPoint != nil { // N->S or N->[]
if point.isOpen { // N->S
// only add the range [N, S-1] if this point is NOT the same as N otherwise
// we will add an incorrect range. In the case where the O and N range are identical
// we will already add the range when the outermost range closes (transitioning ->[])
// This code is duplicated for the old point further down.
lastPointSame := lastPoint.x == point.x
if point.x > openNewPoint.x && !lastPointSame {
added = append(added, [2]int64{
openNewPoint.x, point.x - 1,
})
}
} else { // N->[]
// do not create 2 ranges for O=[1,5] N=[1,5]. Skip the innermost close, which is defined
// as the last point closing with the same x-value as this close point.
lastPointSameClose := !lastPoint.isOpen && lastPoint.x == point.x
if !lastPointSameClose {
pos := lastPoint.x
if !lastPoint.isOpen {
pos += 1 // the last point was a close for an overlap so we need to shift index by one
}
added = append(added, [2]int64{
pos, point.x,
})
}
mergedRange := createRange(&point, lastPoint, lastMergedRange)
if mergedRange != nil {
added = append(added, *mergedRange)
lastMergedRange = &added[len(added)-1]
}
} else if openOldPoint != nil { // O->S or O->[]
if point.isOpen { // O->S
// See above comments.
lastPointSame := lastPoint.x == point.x
if point.x > openOldPoint.x && !lastPointSame {
removed = append(removed, [2]int64{
openOldPoint.x, point.x - 1,
})
}
} else { // O->[]
// See above comments.
lastPointSameClose := !lastPoint.isOpen && lastPoint.x == point.x
if !lastPointSameClose {
pos := lastPoint.x
if !lastPoint.isOpen {
pos += 1 // the last point was a close for an overlap so we need to shift index by one
}
removed = append(removed, [2]int64{
pos, point.x,
})
}
mergedRange := createRange(&point, lastPoint, lastMergedRange)
if mergedRange != nil {
removed = append(removed, *mergedRange)
lastMergedRange = &removed[len(removed)-1]
}
}
// Remember this point
if point.isOpen {
// ensure we cannot open more than 1 range on old/new at a time
// ensure we cannot open more than 1 distinct range on old/new at a time
if (point.isOldRange && openOldPoint != nil) || (!point.isOldRange && openNewPoint != nil) {
panic(fmt.Sprintf("point already open! old=%v new=%v", r, next))
}
@ -286,6 +272,78 @@ func (r SliceRanges) SliceInto(slice Subslicer) []Subslicer {
return result
}
// createRange returns a range by closing off an existing open point. Note the "point" may be
// another open e.g in the case of [0,20] -> [15,25] we want to make the range [0,14] even though 15
// is an open not a close. We don't care if these points are old or new, as that just determines whether
// they were added or removed, it doesn't change the range logic.
func createRange(point, lastPoint *pointInfo, lastMergedRange *[2]int64) *[2]int64 {
if point.x <= lastPoint.x {
// don't make 0-length ranges which would be possible in say `[0,20] -> [0,20]`
return nil
}
// we assume we include the last point (i.e. it's closing a range after all) but there are edge cases
// where we don't
start := lastPoint.x
if lastMergedRange != nil && lastPoint.x <= lastMergedRange[1] {
// this can happen for cases like:
// [0,20] -> [0,20],[20,40] whereby [0,20] is a same range but [20,40] is new, but there is
// a 1 element overlap. In this scenario, when processing 40, lastPoint.x = 20 and lastMergedRange=[0,20]
// and lastPoint.isOpen so we won't add 1 to the start index of this range, and won't know that we have
// to without information on the last merged range.
start += 1
} else if !lastPoint.isOpen {
start += 1
}
if point.isOpen {
// do -1 if 'point' is an open as it will make its own range and we don't want dupes
return &[2]int64{
start, point.x - 1,
}
} else {
// 'point' is a close so needs to include itself
return &[2]int64{
start, point.x,
}
}
}
func sortPoints(points []pointInfo) {
sort.Slice(points, func(i, j int) bool {
if points[i].x != points[j].x {
return points[i].x < points[j].x
}
// the x points are the same.
// consider the delta for:
// old = [0,20]
// new = [20, 30]
// wants:
// sames: [20,20]
// news: [21,30]
// dels: [0,19]
// Note there are 2x 20s there, so which order should they come in?
// If we process the closing 20 first, we will not know to subtract 1 from the end of the range,
// so we need to make sure all opening values are processed _first_. We also need this
// to be deterministic so we need to tiebreak on old/new.
// hence the rules:
// - opens come first, tiebreak old
// - closes come after, tiebreak old
if points[i].isOpen && !points[j].isOpen {
return true
} else if !points[i].isOpen && points[j].isOpen {
return false
}
// both are open or both are closed, tiebreak old first
if points[i].isOldRange && !points[j].isOldRange {
return true
} else if !points[i].isOldRange && points[j].isOldRange {
return false
}
return true // identical
})
}
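To make the tiebreak concrete, a sketch of the order produced for the [0,20] -> [20,30] delta described in the comment above:

points := []pointInfo{
	{x: 20, isOldRange: true, isOpen: false},  // old close
	{x: 30, isOldRange: false, isOpen: false}, // new close
	{x: 0, isOldRange: true, isOpen: true},    // old open
	{x: 20, isOldRange: false, isOpen: true},  // new open
}
sortPoints(points)
// sorted order: {0 old open}, {20 new open}, {20 old close}, {30 new close}
// i.e. the open at 20 is seen before the close at 20, which lets Delta emit
// sames=[20,20], removed=[0,19], added=[21,30].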
type Subslicer interface {
Len() int64
Subslice(i, j int64) Subslicer
View File
@ -43,6 +43,30 @@ func TestRangeValid(t *testing.T) {
}),
valid: false,
},
{
input: SliceRanges([][2]int64{
{0, 20}, {20, 40}, // 20 overlaps
}),
valid: false,
},
{
input: SliceRanges([][2]int64{
{0, 20}, {40, 60}, {30, 35},
}),
valid: true,
},
{
input: SliceRanges([][2]int64{
{0, 20}, {40, 60}, {10, 15},
}),
valid: false,
},
{
input: SliceRanges([][2]int64{
{10, 15}, {40, 60}, {0, 20},
}),
valid: false,
},
}
for _, tc := range testCases {
gotValid := tc.input.Valid()
@ -472,17 +496,161 @@ func TestRangeDelta(t *testing.T) {
wantAdded: [][2]int64{},
wantRemoved: [][2]int64{},
},
// regression test, 1 element overlap
{
oldRange: [][2]int64{{10, 20}},
newRange: [][2]int64{{20, 30}},
wantSames: [][2]int64{{20, 20}},
wantAdded: [][2]int64{{21, 30}},
wantRemoved: [][2]int64{{10, 19}},
},
}
for _, tc := range testCases {
gotAdd, gotRm, gotSame := tc.oldRange.Delta(tc.newRange)
if tc.wantAdded != nil && !reflect.DeepEqual(gotAdd, tc.wantAdded) {
t.Errorf("%+v got added %+v", tc, gotAdd)
t.Errorf("%v -> %v got added %+v want %+v", tc.oldRange, tc.newRange, gotAdd, tc.wantAdded)
}
if tc.wantRemoved != nil && !reflect.DeepEqual(gotRm, tc.wantRemoved) {
t.Errorf("%+v got removed %+v", tc, gotRm)
t.Errorf("%v -> %v got remove %+v want %+v", tc.oldRange, tc.newRange, gotRm, tc.wantRemoved)
}
if tc.wantSames != nil && !reflect.DeepEqual(gotSame, tc.wantSames) {
t.Errorf("%+v got sames %+v", tc, gotSame)
t.Errorf("%v -> %v got same %+v want %+v", tc.oldRange, tc.newRange, gotSame, tc.wantSames)
}
}
}
func TestSortPoints(t *testing.T) {
testCases := []struct {
name string
input []pointInfo
want []pointInfo
}{
{
name: "two element",
input: []pointInfo{
{
x: 5,
isOldRange: true,
isOpen: true,
},
{
x: 2,
isOldRange: true,
isOpen: true,
},
},
want: []pointInfo{
{
x: 2,
isOldRange: true,
isOpen: true,
},
{
x: 5,
isOldRange: true,
isOpen: true,
},
},
},
{
name: "no dupes, sort by x",
input: []pointInfo{
{
x: 4,
isOldRange: true,
isOpen: true,
},
{
x: 1,
isOldRange: true,
isOpen: false,
},
{
x: 3,
isOldRange: false,
isOpen: true,
},
{
x: 2,
isOldRange: false,
isOpen: false,
},
},
want: []pointInfo{
{
x: 1,
isOldRange: true,
isOpen: false,
},
{
x: 2,
isOldRange: false,
isOpen: false,
},
{
x: 3,
isOldRange: false,
isOpen: true,
},
{
x: 4,
isOldRange: true,
isOpen: true,
},
},
},
{
name: "all dupes, sort by open(tie=old), close(tie=old)",
input: []pointInfo{
{
x: 4,
isOldRange: true,
isOpen: true,
},
{
x: 4,
isOldRange: false,
isOpen: true,
},
{
x: 4,
isOldRange: false,
isOpen: false,
},
{
x: 4,
isOldRange: true,
isOpen: false,
},
},
want: []pointInfo{
{
x: 4,
isOldRange: true,
isOpen: true,
},
{
x: 4,
isOldRange: false,
isOpen: true,
},
{
x: 4,
isOldRange: true,
isOpen: false,
},
{
x: 4,
isOldRange: false,
isOpen: false,
},
},
},
}
for _, tc := range testCases {
sortPoints(tc.input)
if !reflect.DeepEqual(tc.input, tc.want) {
t.Errorf("%s: got %+v\nwant %+v", tc.name, tc.input, tc.want)
}
}
}
View File
@ -12,9 +12,14 @@ import (
var (
SortByName = "by_name"
SortByRecency = "by_recency"
SortByNotificationCount = "by_notification_count"
SortByHighlightCount = "by_highlight_count"
SortBy = []string{SortByHighlightCount, SortByName, SortByNotificationCount, SortByRecency}
SortByNotificationLevel = "by_notification_level"
SortByNotificationCount = "by_notification_count" // deprecated
SortByHighlightCount = "by_highlight_count" // deprecated
SortBy = []string{SortByHighlightCount, SortByName, SortByNotificationCount, SortByRecency, SortByNotificationLevel}
Wildcard = "*"
StateKeyLazy = "$LAZY"
StateKeyMe = "$ME"
DefaultTimelineLimit = int64(20)
DefaultTimeoutMSecs = 10 * 1000 // 10s
@ -209,11 +214,13 @@ func (rl *RequestList) jumpedOverRanges(fromIndex, toIndex int) (jumpedOverRange
// Move a room from an absolute index position to another absolute position. These positions do not
// need to be inside a valid range. Returns 0-2 operations. For example:
// 1,2,3,4,5 tracking range [0,4]
// 3 bumps to top -> 3,1,2,4,5 -> DELETE index=2, INSERT val=3 index=0
// 7 bumps to top -> 7,1,2,3,4 -> DELETE index=4, INSERT val=7 index=0
// 7 bumps to op again -> 7,1,2,3,4 -> no-op as from == to index
// new room 8 in i=5 -> 7,1,2,3,4,8 -> no-op as 8 is outside the range.
//
// 1,2,3,4,5 tracking range [0,4]
// 3 bumps to top -> 3,1,2,4,5 -> DELETE index=2, INSERT val=3 index=0
// 7 bumps to top -> 7,1,2,3,4 -> DELETE index=4, INSERT val=7 index=0
// 7 bumps to top again -> 7,1,2,3,4 -> no-op as from == to index
// new room 8 in i=5 -> 7,1,2,3,4,8 -> no-op as 8 is outside the range.
//
// Returns the list of ops as well as the new toIndex if it wasn't inside a range.
func (rl *RequestList) WriteSwapOp(
roomID string, fromIndex, toIndex int,
@ -321,6 +328,10 @@ func (r *Request) ApplyDelta(nextReq *Request) (result *Request, delta *RequestD
if slowGetAllRooms == nil {
slowGetAllRooms = existingList.SlowGetAllRooms
}
includeOldRooms := nextList.IncludeOldRooms
if includeOldRooms == nil {
includeOldRooms = existingList.IncludeOldRooms
}
timelineLimit := nextList.TimelineLimit
if timelineLimit == 0 {
@ -330,10 +341,12 @@ func (r *Request) ApplyDelta(nextReq *Request) (result *Request, delta *RequestD
if filters == nil {
filters = existingList.Filters
}
lists[i] = RequestList{
RoomSubscription: RoomSubscription{
RequiredState: reqState,
TimelineLimit: timelineLimit,
RequiredState: reqState,
TimelineLimit: timelineLimit,
IncludeOldRooms: includeOldRooms,
},
Ranges: rooms,
Sort: sort,
@ -416,16 +429,26 @@ type RequestFilters struct {
IsDM *bool `json:"is_dm"`
IsEncrypted *bool `json:"is_encrypted"`
IsInvite *bool `json:"is_invite"`
IsTombstoned *bool `json:"is_tombstoned"`
IsTombstoned *bool `json:"is_tombstoned"` // deprecated
RoomTypes []*string `json:"room_types"`
NotRoomTypes []*string `json:"not_room_types"`
RoomNameFilter string `json:"room_name_like"`
Tags []string `json:"tags"`
NotTags []string `json:"not_tags"`
// TODO options to control which events should be live-streamed e.g not_types, types from sync v2
}
func (rf *RequestFilters) Include(r *RoomConnMetadata) bool {
func (rf *RequestFilters) Include(r *RoomConnMetadata, finder RoomFinder) bool {
// we always exclude old rooms from lists, but may include them in the `rooms` section if they opt-in
if r.UpgradedRoomID != nil {
// should we exclude this room? If we have _joined_ the successor room then yes because
// this room must therefore be old, else no.
nextRoom := finder.Room(*r.UpgradedRoomID)
if nextRoom != nil && !nextRoom.HasLeft && !nextRoom.IsInvite {
return false
}
}
if rf.IsEncrypted != nil && *rf.IsEncrypted != r.Encrypted {
return false
}
@ -481,12 +504,27 @@ func (rf *RequestFilters) Include(r *RoomConnMetadata) bool {
}
type RoomSubscription struct {
RequiredState [][2]string `json:"required_state"`
TimelineLimit int64 `json:"timeline_limit"`
RequiredState [][2]string `json:"required_state"`
TimelineLimit int64 `json:"timeline_limit"`
IncludeOldRooms *RoomSubscription `json:"include_old_rooms"`
}
func (rs RoomSubscription) LazyLoadMembers() bool {
for _, tuple := range rs.RequiredState {
if tuple[0] == "m.room.member" && tuple[1] == StateKeyLazy {
return true
}
}
return false
}
// Combine this subscription with another, returning a union of both as a copy.
func (rs RoomSubscription) Combine(other RoomSubscription) RoomSubscription {
return rs.combineRecursive(other, true)
}
// combineRecursive unions this subscription with another, returning a copy; checkOldRooms controls whether include_old_rooms is also combined.
func (rs RoomSubscription) combineRecursive(other RoomSubscription, checkOldRooms bool) RoomSubscription {
var result RoomSubscription
// choose max value
if rs.TimelineLimit > other.TimelineLimit {
@ -496,45 +534,66 @@ func (rs RoomSubscription) Combine(other RoomSubscription) RoomSubscription {
}
// combine together required_state fields, we'll union them later
result.RequiredState = append(rs.RequiredState, other.RequiredState...)
if checkOldRooms {
// set include_old_rooms if it is unset
if rs.IncludeOldRooms == nil {
result.IncludeOldRooms = other.IncludeOldRooms
} else if other.IncludeOldRooms != nil {
// 2 subs have include_old_rooms set, union them. Don't check them for old rooms though as that's silly
ior := rs.IncludeOldRooms.combineRecursive(*other.IncludeOldRooms, false)
result.IncludeOldRooms = &ior
}
}
return result
}
// Calculate the required state map for this room subscription. Given event types A,B,C and state keys
// 1,2,3, the following Venn diagrams are possible:
// .---------[*,*]----------.
// | .---------. |
// | | A,2 | A,3 |
// | .----+--[B,*]--+-----. |
// | | | .-----. | | |
// | |B,1 | | B,2 | | B,3 | |
// | | | `[B,2]` | | |
// | `----+---------+-----` |
// | | C,2 | C,3 |
// | `--[*,2]--` |
// `------------------------`
//
// .---------[*,*]----------.
// | .---------. |
// | | A,2 | A,3 |
// | .----+--[B,*]--+-----. |
// | | | .-----. | | |
// | |B,1 | | B,2 | | B,3 | |
// | | | `[B,2]` | | |
// | `----+---------+-----` |
// | | C,2 | C,3 |
// | `--[*,2]--` |
// `------------------------`
//
// The largest set will be used when returning the required state map.
// For example, [B,2] + [B,*] = [B,*] because [B,*] encompasses [B,2]. This means [*,*] encompasses
// everything.
func (rs RoomSubscription) RequiredStateMap() *internal.RequiredStateMap {
// 'userID' is the ID of the user performing this request, so $ME can be replaced.
func (rs RoomSubscription) RequiredStateMap(userID string) *internal.RequiredStateMap {
result := make(map[string][]string)
eventTypesWithWildcardStateKeys := make(map[string]struct{})
var stateKeysForWildcardEventType []string
var allState bool
for _, tuple := range rs.RequiredState {
if tuple[0] == "*" {
if tuple[1] == "*" { // all state
return internal.NewRequiredStateMap(nil, nil, nil, true)
if tuple[1] == StateKeyMe {
tuple[1] = userID
}
if tuple[0] == Wildcard {
if tuple[1] == Wildcard { // all state
// we still need to parse required_state as now these filter the result set
allState = true
continue
}
stateKeysForWildcardEventType = append(stateKeysForWildcardEventType, tuple[1])
continue
}
if tuple[1] == "*" { // wildcard state key
if tuple[1] == Wildcard { // wildcard state key
eventTypesWithWildcardStateKeys[tuple[0]] = struct{}{}
} else {
result[tuple[0]] = append(result[tuple[0]], tuple[1])
}
}
return internal.NewRequiredStateMap(eventTypesWithWildcardStateKeys, stateKeysForWildcardEventType, result, false)
return internal.NewRequiredStateMap(
eventTypesWithWildcardStateKeys, stateKeysForWildcardEventType, result, allState, rs.LazyLoadMembers(),
)
}
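A short sketch of how a typical client subscription resolves through this function, assuming a hypothetical user ID and that IsLazyLoading reflects the lazy flag passed to NewRequiredStateMap:

sub := RoomSubscription{RequiredState: [][2]string{
	{Wildcard, Wildcard},            // all state, with the other tuples acting as filters
	{"m.room.member", StateKeyMe},   // $ME resolves to the requesting user
	{"m.room.member", StateKeyLazy}, // switch on lazy-loaded membership
}}
rsm := sub.RequiredStateMap("@alice:example.org")
_ = rsm.IsLazyLoading() // true, so timeline senders get their member events included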
// helper to find `null` or literal string matches
View File
@ -9,10 +9,14 @@ import (
)
func TestRoomSubscriptionUnion(t *testing.T) {
alice := "@alice:localhost"
bob := "@bob:localhost"
testCases := []struct {
name string
a RoomSubscription
b *RoomSubscription
me string
userInTimeline func(userID string) bool
wantQueryStateMap map[string][]string
matches [][2]string
noMatches [][2]string
@ -62,26 +66,38 @@ func TestRoomSubscriptionUnion(t *testing.T) {
},
{
name: "all events *,*",
a: RoomSubscription{RequiredState: [][2]string{{"*", "*"}}},
a: RoomSubscription{RequiredState: [][2]string{{Wildcard, Wildcard}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.name", ""}, {"m.room.name", "foo"}},
},
{
name: "all events *,* with other event",
a: RoomSubscription{RequiredState: [][2]string{{"*", "*"}, {"m.room.name", ""}}},
name: "all events *,* with other event -> filters",
a: RoomSubscription{RequiredState: [][2]string{{Wildcard, Wildcard}, {"m.specific.name", ""}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.name", ""}, {"m.room.name", "foo"}, {"a", "b"}},
matches: [][2]string{{"m.specific.name", ""}, {"other", "foo"}, {"a", ""}},
noMatches: [][2]string{
{"m.specific.name", "foo"},
},
},
{
name: "all events *,* with other event UNION",
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", ""}}},
b: &RoomSubscription{RequiredState: [][2]string{{"*", "*"}}},
b: &RoomSubscription{RequiredState: [][2]string{{Wildcard, Wildcard}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.name", ""}, {"m.room.name", "foo"}, {"a", "b"}},
matches: [][2]string{{"m.room.name", ""}, {"a", "b"}},
noMatches: [][2]string{{"m.room.name", "foo"}},
},
{
name: "all events *,* with other events UNION",
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", ""}, {"m.room.topic", ""}}},
b: &RoomSubscription{RequiredState: [][2]string{{Wildcard, Wildcard}, {"m.room.alias", ""}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.name", ""}, {"a", "b"}, {"m.room.topic", ""}, {"m.room.alias", ""}},
noMatches: [][2]string{{"m.room.name", "foo"}, {"m.room.topic", "bar"}, {"m.room.alias", "baz"}},
},
{
name: "wildcard state keys with explicit state keys",
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", "*"}, {"m.room.name", ""}}},
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", Wildcard}, {"m.room.name", ""}}},
wantQueryStateMap: map[string][]string{
"m.room.name": nil,
},
@ -90,7 +106,7 @@ func TestRoomSubscriptionUnion(t *testing.T) {
},
{
name: "wildcard state keys with wildcard event types",
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", "*"}, {"*", "foo"}}},
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", Wildcard}, {Wildcard, "foo"}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{
{"m.room.name", ""}, {"m.room.name", "foo"}, {"name", "foo"},
@ -101,8 +117,8 @@ func TestRoomSubscriptionUnion(t *testing.T) {
},
{
name: "wildcard state keys with wildcard event types UNION",
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", "*"}}},
b: &RoomSubscription{RequiredState: [][2]string{{"*", "foo"}}},
a: RoomSubscription{RequiredState: [][2]string{{"m.room.name", Wildcard}}},
b: &RoomSubscription{RequiredState: [][2]string{{Wildcard, "foo"}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{
{"m.room.name", ""}, {"m.room.name", "foo"}, {"name", "foo"},
@ -113,18 +129,50 @@ func TestRoomSubscriptionUnion(t *testing.T) {
},
{
name: "wildcard event types with explicit state keys",
a: RoomSubscription{RequiredState: [][2]string{{"*", "foo"}, {"*", "bar"}, {"m.room.name", ""}}},
a: RoomSubscription{RequiredState: [][2]string{{Wildcard, "foo"}, {Wildcard, "bar"}, {"m.room.name", ""}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.name", ""}, {"m.room.name", "foo"}, {"name", "foo"}, {"name", "bar"}},
noMatches: [][2]string{{"name", "baz"}, {"name", ""}},
},
{
name: "event types with $ME state keys",
me: alice,
a: RoomSubscription{RequiredState: [][2]string{{"m.room.member", StateKeyMe}}},
wantQueryStateMap: map[string][]string{
"m.room.member": {alice},
},
matches: [][2]string{{"m.room.member", alice}},
noMatches: [][2]string{{"name", "baz"}, {"name", ""}, {"name", StateKeyMe}, {"m.room.name", alice}},
},
{
name: "wildcard event types with $ME state keys",
me: alice,
a: RoomSubscription{RequiredState: [][2]string{{Wildcard, StateKeyMe}}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.member", alice}, {"m.room.name", alice}},
noMatches: [][2]string{{"name", "baz"}, {"name", ""}, {"name", StateKeyMe}},
},
{
// this is what we expect clients to use, so check it works
name: "wildcard with $ME",
me: alice,
a: RoomSubscription{RequiredState: [][2]string{
{"m.room.member", StateKeyMe},
{Wildcard, Wildcard},
// Include does not implement lazy loading, so we expect this to do nothing
{"m.room.member", StateKeyLazy},
}},
wantQueryStateMap: make(map[string][]string),
matches: [][2]string{{"m.room.member", alice}, {"a", "b"}},
noMatches: [][2]string{{"m.room.member", "@someone-else"}, {"m.room.member", ""}, {"m.room.member", bob}},
},
}
for _, tc := range testCases {
sub := tc.a
if tc.b != nil {
sub = tc.a.Combine(*tc.b)
}
rsm := sub.RequiredStateMap()
rsm := sub.RequiredStateMap(tc.me)
got := rsm.QueryStateMap()
if !reflect.DeepEqual(got, tc.wantQueryStateMap) {
t.Errorf("%s: got query state map %+v want %+v", tc.name, got, tc.wantQueryStateMap)


@ -19,6 +19,7 @@ type Room struct {
JoinedCount int `json:"joined_count,omitempty"`
InvitedCount int `json:"invited_count,omitempty"`
PrevBatch string `json:"prev_batch,omitempty"`
NumLive int `json:"num_live,omitempty"`
}
type RoomConnMetadata struct {


@ -95,6 +95,8 @@ func (s *SortableRooms) Sort(sortBy []string) error {
comparators = append(comparators, s.comparatorSortByName)
case SortByRecency:
comparators = append(comparators, s.comparatorSortByRecency)
case SortByNotificationLevel:
comparators = append(comparators, s.comparatorSortByNotificationLevel)
default:
return fmt.Errorf("unknown sort order: %s", sort)
}
@ -160,6 +162,39 @@ func (s *SortableRooms) comparatorSortByHighlightCount(i, j int) int {
return -1
}
func (s *SortableRooms) comparatorSortByNotificationLevel(i, j int) int {
ri, rj := s.resolveRooms(i, j)
// highlight rooms come first
if ri.HighlightCount > 0 && rj.HighlightCount > 0 {
return 0
}
if ri.HighlightCount > 0 {
return 1
} else if rj.HighlightCount > 0 {
return -1
}
// then notification count
if ri.NotificationCount > 0 && rj.NotificationCount > 0 {
// when we are comparing rooms with notif counts, sort encrypted rooms above unencrypted rooms
// as the client needs to calculate highlight counts (so it's possible that notif counts are
// actually highlight counts!) - this is the "Lite" description in MSC3575
if ri.Encrypted && !rj.Encrypted {
return 1
} else if rj.Encrypted && !ri.Encrypted {
return -1
}
return 0
}
if ri.NotificationCount > 0 {
return 1
} else if rj.NotificationCount > 0 {
return -1
}
// rooms with no highlights or notifs are grouped together
return 0
}
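A standalone sketch (invented room struct and level helper, not the proxy's code) of the grouping this comparator produces when combined with recency, mirroring the order asserted in the dedicated test below:

package main

import (
	"fmt"
	"sort"
)

// room is a trimmed-down, hypothetical stand-in for RoomConnMetadata.
type room struct {
	name           string
	highlightCount int
	notifCount     int
	encrypted      bool
	lastMessageTS  int64
}

// level mirrors the comparator's grouping: highlights first, then encrypted
// rooms with notification counts, then unencrypted ones, then everything else.
func level(r room) int {
	switch {
	case r.highlightCount > 0:
		return 3
	case r.notifCount > 0 && r.encrypted:
		return 2
	case r.notifCount > 0:
		return 1
	default:
		return 0
	}
}

func main() {
	rooms := []room{
		{name: "quiet", lastMessageTS: 8},
		{name: "unencrypted-notif", notifCount: 1, lastMessageTS: 3},
		{name: "encrypted-notif", notifCount: 1, encrypted: true, lastMessageTS: 7},
		{name: "highlight", highlightCount: 1, lastMessageTS: 1},
	}
	// by_notification_level first, then recency within each group.
	sort.SliceStable(rooms, func(i, j int) bool {
		li, lj := level(rooms[i]), level(rooms[j])
		if li != lj {
			return li > lj
		}
		return rooms[i].lastMessageTS > rooms[j].lastMessageTS
	})
	for _, r := range rooms {
		fmt.Println(r.name) // highlight, encrypted-notif, unencrypted-notif, quiet
	}
}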
func (s *SortableRooms) comparatorSortByNotificationCount(i, j int) int {
ri, rj := s.resolveRooms(i, j)
if ri.NotificationCount == rj.NotificationCount {
@ -185,7 +220,7 @@ func NewFilteredSortableRooms(finder RoomFinder, roomIDs []string, filter *Reque
}
for _, roomID := range roomIDs {
r := finder.Room(roomID)
if filter.Include(r) {
if filter.Include(r, finder) {
filteredRooms = append(filteredRooms, roomID)
}
}
@ -197,7 +232,7 @@ func NewFilteredSortableRooms(finder RoomFinder, roomIDs []string, filter *Reque
func (f *FilteredSortableRooms) Add(roomID string) bool {
r := f.finder.Room(roomID)
if !f.filter.Include(r) {
if !f.filter.Include(r, f.finder) {
return false
}
return f.SortableRooms.Add(roomID)


@ -1,6 +1,8 @@
package sync3
import (
"reflect"
"strings"
"testing"
"github.com/matrix-org/sync-v3/internal"
@ -84,16 +86,18 @@ func TestSortBySingleOperation(t *testing.T) {
// recency: 3,4,2,1
// highlight: 1,3,4,2
// notif: 1,3,2,4
// level+recency: 3,4,1,2 as 3,4,1 have highlights then sorted by recency
wantMap := map[string][]string{
SortByName: {room4, room1, room2, room3},
SortByRecency: {room3, room4, room2, room1},
SortByHighlightCount: {room1, room3, room4, room2},
SortByNotificationCount: {room1, room3, room2, room4},
SortByNotificationLevel + " " + SortByRecency: {room3, room4, room1, room2},
}
f := newFinder(rooms)
sr := NewSortableRooms(f, f.roomIDs)
for sortBy, wantOrder := range wantMap {
sr.Sort([]string{sortBy})
sr.Sort(strings.Split(sortBy, " "))
var gotRoomIDs []string
for i := range sr.roomIDs {
gotRoomIDs = append(gotRoomIDs, sr.roomIDs[i])
@ -169,6 +173,14 @@ func TestSortByMultipleOperations(t *testing.T) {
SortBy: []string{SortByHighlightCount, SortByName},
WantRooms: []string{room1, room2, room4, room3},
},
{
SortBy: []string{SortByNotificationLevel, SortByName},
WantRooms: []string{room1, room2, room4, room3},
},
{
SortBy: []string{SortByNotificationLevel, SortByRecency},
WantRooms: []string{room2, room1, room4, room3},
},
}
f := newFinder(rooms)
sr := NewSortableRooms(f, f.roomIDs)
@ -238,3 +250,136 @@ func TestSortableRoomsRemove(t *testing.T) {
t.Errorf("IndexOf room 2 returned %v %v", i, ok)
}
}
// dedicated test as it relies on multiple fields
func TestSortByNotificationLevel(t *testing.T) {
// create the full set of possible sort variables, most recent message last
roomUnencHC := "!unencrypted-highlight-count:localhost"
roomUnencHCNC := "!unencrypted-highlight-and-notif-count:localhost"
roomUnencNC := "!unencrypted-notif-count:localhost"
roomUnenc := "!unencrypted:localhost"
roomEncHC := "!encrypted-highlight-count:localhost"
roomEncHCNC := "!encrypted-highlight-and-notif-count:localhost"
roomEncNC := "!encrypted-notif-count:localhost"
roomEnc := "!encrypted:localhost"
roomsMap := map[string]*RoomConnMetadata{
roomUnencHC: {
RoomMetadata: internal.RoomMetadata{
LastMessageTimestamp: 1,
Encrypted: false,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 1,
NotificationCount: 0,
},
},
roomUnencHCNC: {
RoomMetadata: internal.RoomMetadata{
LastMessageTimestamp: 2,
Encrypted: false,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 1,
NotificationCount: 1,
},
},
roomUnencNC: {
RoomMetadata: internal.RoomMetadata{
LastMessageTimestamp: 3,
Encrypted: false,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 0,
NotificationCount: 1,
},
},
roomUnenc: {
RoomMetadata: internal.RoomMetadata{
LastMessageTimestamp: 4,
Encrypted: false,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 0,
NotificationCount: 0,
},
},
roomEncHC: {
RoomMetadata: internal.RoomMetadata{
LastMessageTimestamp: 5,
Encrypted: true,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 1,
NotificationCount: 0,
},
},
roomEncHCNC: {
RoomMetadata: internal.RoomMetadata{
LastMessageTimestamp: 6,
Encrypted: true,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 1,
NotificationCount: 1,
},
},
roomEncNC: {
RoomMetadata: internal.RoomMetadata{
RoomID: roomEncNC,
LastMessageTimestamp: 7,
Encrypted: true,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 0,
NotificationCount: 1,
},
},
roomEnc: {
RoomMetadata: internal.RoomMetadata{
RoomID: roomEnc,
LastMessageTimestamp: 8,
Encrypted: true,
},
UserRoomData: caches.UserRoomData{
HighlightCount: 0,
NotificationCount: 0,
},
},
}
roomIDs := make([]string, len(roomsMap))
rooms := make([]*RoomConnMetadata, len(roomsMap))
i := 0
for roomID, room := range roomsMap {
room.RoomMetadata.RoomID = roomID
roomIDs[i] = roomID
rooms[i] = room
i++
}
t.Logf("%v", roomIDs)
f := newFinder(rooms)
sr := NewSortableRooms(f, roomIDs)
if err := sr.Sort([]string{SortByNotificationLevel, SortByRecency}); err != nil {
t.Fatalf("Sort: %s", err)
}
var gotRoomIDs []string
for i := range sr.roomIDs {
gotRoomIDs = append(gotRoomIDs, sr.roomIDs[i])
}
// we expect the rooms to be grouped in this order:
// HIGHLIGHT COUNT > 0
// ENCRYPTED, NOTIF COUNT > 0
// UNENCRYPTED, NOTIF COUNT > 0
// REST
// Within each group, we expect recency sorting due to SortByRecency
wantRoomIDs := []string{
roomEncHCNC, roomEncHC, roomUnencHCNC, roomUnencHC, // in practice we don't expect to see this as encrypted rooms won't have highlight counts > 0
roomEncNC,
roomUnencNC,
roomEnc, roomUnenc,
}
if !reflect.DeepEqual(gotRoomIDs, wantRoomIDs) {
t.Errorf("got: %v", gotRoomIDs)
t.Errorf("want: %v", wantRoomIDs)
}
}


@ -30,6 +30,29 @@ func NewJoinedRoomsTracker() *JoinedRoomsTracker {
}
}
// Startup efficiently sets up the joined rooms tracker, but isn't safe to call with live traffic,
// as it replaces all known in-memory state. Panics if called on a non-empty tracker.
func (t *JoinedRoomsTracker) Startup(roomToJoinedUsers map[string][]string) {
t.mu.Lock()
defer t.mu.Unlock()
if len(t.roomIDToJoinedUsers) > 0 || len(t.userIDToJoinedRooms) > 0 {
panic("programming error: cannot call JoinedRoomsTracker.Startup with existing data already set!")
}
for roomID, userIDs := range roomToJoinedUsers {
userSet := make(set)
for _, u := range userIDs {
userSet[u] = struct{}{}
rooms := t.userIDToJoinedRooms[u]
if rooms == nil {
rooms = make(set)
}
rooms[roomID] = struct{}{}
t.userIDToJoinedRooms[u] = rooms
}
t.roomIDToJoinedUsers[roomID] = userSet
}
}
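A hedged sketch of how a caller might use Startup: build the room-to-joined-users map once (a literal here; in the proxy it would come from the database) and seed the tracker in a single call instead of replaying UserJoinedRoom per membership. The helper name is invented and assumes it sits in the same package as the tracker.

func seedTrackerAtStartup(roomToJoinedUsers map[string][]string) *JoinedRoomsTracker {
	jrt := NewJoinedRoomsTracker()
	// Must run before any live traffic: Startup panics if the tracker already holds data.
	jrt.Startup(roomToJoinedUsers)
	return jrt
}

// e.g. seedTrackerAtStartup(map[string][]string{
//     "!a:localhost": {"@alice:localhost", "@bob:localhost"},
//     "!b:localhost": {"@bob:localhost"},
// })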
func (t *JoinedRoomsTracker) IsUserJoined(userID, roomID string) bool {
t.mu.RLock()
defer t.mu.RUnlock()
@ -44,28 +67,45 @@ func (t *JoinedRoomsTracker) IsUserJoined(userID, roomID string) bool {
// returns true if the state changed
func (t *JoinedRoomsTracker) UserJoinedRoom(userID, roomID string) bool {
u := make([]string, 1, 1)
u[0] = userID
return t.UsersJoinedRoom(u, roomID)
}
// returns true if the state changed for any user in userIDs
func (t *JoinedRoomsTracker) UsersJoinedRoom(userIDs []string, roomID string) bool {
t.mu.Lock()
defer t.mu.Unlock()
wasJoined := false
wasJoined := true
users := t.roomIDToJoinedUsers[roomID]
for u := range users {
if u == userID {
wasJoined = true
for _, newlyJoinedUser := range userIDs {
_, exists := users[newlyJoinedUser]
if !exists {
wasJoined = false
break
}
}
joinedRooms := t.userIDToJoinedRooms[userID]
if joinedRooms == nil {
joinedRooms = make(set)
}
// pull out room specific structs
joinedUsers := t.roomIDToJoinedUsers[roomID]
if joinedUsers == nil {
joinedUsers = make(set)
}
invitedUsers := t.roomIDToInvitedUsers[roomID]
delete(invitedUsers, userID)
joinedRooms[roomID] = struct{}{}
joinedUsers[userID] = struct{}{}
t.userIDToJoinedRooms[userID] = joinedRooms
// loop user specific structs
for _, newlyJoinedUser := range userIDs {
joinedRooms := t.userIDToJoinedRooms[newlyJoinedUser]
if joinedRooms == nil {
joinedRooms = make(set)
}
delete(invitedUsers, newlyJoinedUser)
joinedRooms[roomID] = struct{}{}
joinedUsers[newlyJoinedUser] = struct{}{}
t.userIDToJoinedRooms[newlyJoinedUser] = joinedRooms
}
t.roomIDToJoinedUsers[roomID] = joinedUsers
t.roomIDToInvitedUsers[roomID] = invitedUsers
return !wasJoined
@ -100,32 +140,38 @@ func (t *JoinedRoomsTracker) JoinedRoomsForUser(userID string) []string {
}
return result
}
func (t *JoinedRoomsTracker) JoinedUsersForRoom(roomID string) []string {
// JoinedUsersForRoom returns the joined users in the given room, filtered by the filter function if provided. If one is not
// provided, all joined users are returned. Returns the join count at the time this function was called.
func (t *JoinedRoomsTracker) JoinedUsersForRoom(roomID string, filter func(userID string) bool) (matchedUserIDs []string, joinCount int) {
t.mu.RLock()
defer t.mu.RUnlock()
users := t.roomIDToJoinedUsers[roomID]
if users == nil || len(users) == 0 {
return nil
return nil, 0
}
n := len(users)
i := 0
result := make([]string, n)
for userID := range users {
result[i] = userID
i++
if filter == nil {
filter = func(userID string) bool { return true }
}
return result
for userID := range users {
if filter(userID) {
matchedUserIDs = append(matchedUserIDs, userID)
}
}
return matchedUserIDs, n
}
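A same-package sketch (the helper and the alreadySent map are invented for illustration) of why the new signature helps: the filter runs under the tracker's read lock, so callers can avoid materialising the entire membership list of a very large room only to discard most of it, while the second return value still gives the total join count for room summaries.

func newlyNeededMembers(t *JoinedRoomsTracker, roomID string, alreadySent map[string]bool) (toSend []string, joinCount int) {
	// Only collect member IDs we haven't sent to this connection yet.
	return t.JoinedUsersForRoom(roomID, func(userID string) bool {
		return !alreadySent[userID]
	})
}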
// Marks the given users as invited to the given room.
func (t *JoinedRoomsTracker) UserInvitedToRoom(userID, roomID string) {
func (t *JoinedRoomsTracker) UsersInvitedToRoom(userIDs []string, roomID string) {
t.mu.Lock()
defer t.mu.Unlock()
users := t.roomIDToInvitedUsers[roomID]
if users == nil {
users = make(set)
}
users[userID] = struct{}{}
for _, userID := range userIDs {
users[userID] = struct{}{}
}
t.roomIDToInvitedUsers[roomID] = users
}


@ -5,6 +5,11 @@ import (
"testing"
)
func joinedUsersForRoom(jrt *JoinedRoomsTracker, roomID string) []string {
users, _ := jrt.JoinedUsersForRoom(roomID, nil)
return users
}
func TestTracker(t *testing.T) {
// basic usage
jrt := NewJoinedRoomsTracker()
@ -13,34 +18,78 @@ func TestTracker(t *testing.T) {
jrt.UserJoinedRoom("bob", "room2")
jrt.UserJoinedRoom("bob", "room3")
assertEqualSlices(t, "", jrt.JoinedRoomsForUser("alice"), []string{"room1", "room2"})
assertEqualSlices(t, "", jrt.JoinedUsersForRoom("room1"), []string{"alice"})
assertEqualSlices(t, "", joinedUsersForRoom(jrt, "room1"), []string{"alice"})
jrt.UserLeftRoom("alice", "room1")
assertEqualSlices(t, "", jrt.JoinedRoomsForUser("alice"), []string{"room2"})
assertEqualSlices(t, "", jrt.JoinedUsersForRoom("room2"), []string{"alice", "bob"})
assertEqualSlices(t, "", joinedUsersForRoom(jrt, "room2"), []string{"alice", "bob"})
// test filters
fUsers, joinCount := jrt.JoinedUsersForRoom("room2", func(userID string) bool {
return userID == "alice"
})
assertInt(t, joinCount, 2)
assertEqualSlices(t, "filter_users wrong", fUsers, []string{"alice"})
fUsers, joinCount = jrt.JoinedUsersForRoom("room2", func(userID string) bool {
return userID == "unmatched"
})
assertInt(t, joinCount, 2)
assertEqualSlices(t, "wanted no filtered users", fUsers, nil)
// bogus values
assertEqualSlices(t, "", jrt.JoinedRoomsForUser("unknown"), nil)
assertEqualSlices(t, "", jrt.JoinedUsersForRoom("unknown"), nil)
assertEqualSlices(t, "", joinedUsersForRoom(jrt, "unknown"), nil)
// leaves before joins
jrt.UserLeftRoom("alice", "unknown")
jrt.UserLeftRoom("unknown", "unknown2")
assertEqualSlices(t, "", jrt.JoinedRoomsForUser("alice"), []string{"room2"})
jrt.UserInvitedToRoom("alice", "room4")
jrt.UsersInvitedToRoom([]string{"alice"}, "room4")
assertNumEquals(t, jrt.NumInvitedUsersForRoom("room4"), 1)
jrt.UserJoinedRoom("alice", "room4")
assertNumEquals(t, jrt.NumInvitedUsersForRoom("room4"), 0)
jrt.UserJoinedRoom("alice", "room4") // dupe joins don't bother it
assertNumEquals(t, jrt.NumInvitedUsersForRoom("room4"), 0)
jrt.UserInvitedToRoom("bob", "room4")
jrt.UsersInvitedToRoom([]string{"bob"}, "room4")
assertNumEquals(t, jrt.NumInvitedUsersForRoom("room4"), 1)
jrt.UserInvitedToRoom("bob", "room4") // dupe invites don't bother it
jrt.UsersInvitedToRoom([]string{"bob"}, "room4") // dupe invites don't bother it
assertNumEquals(t, jrt.NumInvitedUsersForRoom("room4"), 1)
jrt.UserLeftRoom("bob", "room4")
assertNumEquals(t, jrt.NumInvitedUsersForRoom("room4"), 0)
}
func TestTrackerStartup(t *testing.T) {
roomA := "!a"
roomB := "!b"
roomC := "!c"
alice := "@alice"
bob := "@bob"
jrt := NewJoinedRoomsTracker()
jrt.Startup(map[string][]string{
roomA: {alice, bob},
roomB: {bob},
roomC: {alice},
})
assertEqualSlices(t, "", jrt.JoinedRoomsForUser(alice), []string{roomA, roomC})
assertEqualSlices(t, "", jrt.JoinedRoomsForUser(bob), []string{roomA, roomB})
assertEqualSlices(t, "", jrt.JoinedRoomsForUser("@someone"), []string{})
assertBool(t, "alice should be joined", jrt.IsUserJoined(alice, roomA), true)
assertBool(t, "alice should not be joined", jrt.IsUserJoined(alice, roomB), false)
assertBool(t, "alice should be joined", jrt.IsUserJoined(alice, roomC), true)
assertBool(t, "bob should be joined", jrt.IsUserJoined(bob, roomA), true)
assertBool(t, "bob should be joined", jrt.IsUserJoined(bob, roomB), true)
assertBool(t, "bob should not be joined", jrt.IsUserJoined(bob, roomC), false)
assertInt(t, jrt.NumInvitedUsersForRoom(roomA), 0)
assertInt(t, jrt.NumInvitedUsersForRoom(roomB), 0)
assertInt(t, jrt.NumInvitedUsersForRoom(roomC), 0)
}
func assertBool(t *testing.T, msg string, got, want bool) {
t.Helper()
if got != want {
t.Errorf(msg)
}
}
func assertNumEquals(t *testing.T, got, want int) {
t.Helper()
if got != want {


@ -199,6 +199,14 @@ func (c *CSAPI) JoinRoom(t *testing.T, roomIDOrAlias string, serverNames []strin
return GetJSONFieldStr(t, body, "room_id")
}
func (c *CSAPI) SendTyping(t *testing.T, roomID string, isTyping bool, durMs int) {
t.Helper()
c.MustDoFunc(t, "PUT", []string{"_matrix", "client", "v3", "rooms", roomID, "typing", c.UserID}, WithJSONBody(t, map[string]interface{}{
"timeout": durMs,
"typing": isTyping,
}))
}
// LeaveRoom leaves the room ID, else fails the test.
func (c *CSAPI) LeaveRoom(t *testing.T, roomID string) {
t.Helper()
@ -224,6 +232,10 @@ func (c *CSAPI) SetGlobalAccountData(t *testing.T, eventType string, content map
return c.MustDoFunc(t, "PUT", []string{"_matrix", "client", "v3", "user", c.UserID, "account_data", eventType}, WithJSONBody(t, content))
}
func (c *CSAPI) SetRoomAccountData(t *testing.T, roomID, eventType string, content map[string]interface{}) *http.Response {
return c.MustDoFunc(t, "PUT", []string{"_matrix", "client", "v3", "user", c.UserID, "rooms", roomID, "account_data", eventType}, WithJSONBody(t, content))
}
// SendEventSynced sends `e` into the room and waits for its event ID to come down /sync.
// Returns the event ID of the sent event.
func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e Event) string {
@ -243,6 +255,12 @@ func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e Event) string {
return eventID
}
func (c *CSAPI) SendReceipt(t *testing.T, roomID, eventID, receiptType string) *http.Response {
return c.MustDoFunc(t, "POST", []string{"_matrix", "client", "v3", "rooms", roomID, "read_markers"}, WithJSONBody(t, map[string]interface{}{
receiptType: eventID,
}))
}
// Perform a single /sync request with the given request options. To sync until something happens,
// see `MustSyncUntil`.
//
@ -280,24 +298,27 @@ func (c *CSAPI) MustSync(t *testing.T, syncReq SyncReq) (gjson.Result, string) {
// check functions return no error. Returns the final/latest since token.
//
// Initial /sync example: (no since token)
// bob.InviteRoom(t, roomID, alice.UserID)
// alice.JoinRoom(t, roomID, nil)
// alice.MustSyncUntil(t, client.SyncReq{}, client.SyncJoinedTo(alice.UserID, roomID))
//
// bob.InviteRoom(t, roomID, alice.UserID)
// alice.JoinRoom(t, roomID, nil)
// alice.MustSyncUntil(t, client.SyncReq{}, client.SyncJoinedTo(alice.UserID, roomID))
//
// Incremental /sync example: (test controls since token)
// since := alice.MustSyncUntil(t, client.SyncReq{TimeoutMillis: "0"}) // get a since token
// bob.InviteRoom(t, roomID, alice.UserID)
// since = alice.MustSyncUntil(t, client.SyncReq{Since: since}, client.SyncInvitedTo(alice.UserID, roomID))
// alice.JoinRoom(t, roomID, nil)
// alice.MustSyncUntil(t, client.SyncReq{Since: since}, client.SyncJoinedTo(alice.UserID, roomID))
//
// since := alice.MustSyncUntil(t, client.SyncReq{TimeoutMillis: "0"}) // get a since token
// bob.InviteRoom(t, roomID, alice.UserID)
// since = alice.MustSyncUntil(t, client.SyncReq{Since: since}, client.SyncInvitedTo(alice.UserID, roomID))
// alice.JoinRoom(t, roomID, nil)
// alice.MustSyncUntil(t, client.SyncReq{Since: since}, client.SyncJoinedTo(alice.UserID, roomID))
//
// Checking multiple parts of /sync:
// alice.MustSyncUntil(
// t, client.SyncReq{},
// client.SyncJoinedTo(alice.UserID, roomID),
// client.SyncJoinedTo(alice.UserID, roomID2),
// client.SyncJoinedTo(alice.UserID, roomID3),
// )
//
// alice.MustSyncUntil(
// t, client.SyncReq{},
// client.SyncJoinedTo(alice.UserID, roomID),
// client.SyncJoinedTo(alice.UserID, roomID2),
// client.SyncJoinedTo(alice.UserID, roomID3),
// )
//
// Check functions are unordered and independent. Once a check function returns true it is removed
// from the list of checks and won't be called again.
@ -360,7 +381,7 @@ func (c *CSAPI) MustSyncUntil(t *testing.T, syncReq SyncReq, checks ...SyncCheck
}
}
//RegisterUser will register the user with given parameters and
// RegisterUser will register the user with given parameters and
// return user ID & access token, and fail the test on network error
func (c *CSAPI) RegisterUser(t *testing.T, localpart, password string) (userID, accessToken, deviceID string) {
t.Helper()
@ -525,6 +546,17 @@ func (c *CSAPI) MustDoFunc(t *testing.T, method string, paths []string, opts ...
return res
}
func (c *CSAPI) SendToDevice(t *testing.T, eventType, userID, deviceID string, content map[string]interface{}) {
c.txnID++
c.MustDoFunc(t, "PUT", []string{"_matrix", "client", "v3", "sendToDevice", eventType, strconv.Itoa(c.txnID)}, WithJSONBody(t, map[string]interface{}{
"messages": map[string]interface{}{
userID: map[string]interface{}{
deviceID: content,
},
},
}))
}
// SlidingSync performs a single sliding sync request
func (c *CSAPI) SlidingSync(t *testing.T, data sync3.Request, opts ...RequestOpt) (resBody *sync3.Response) {
t.Helper()
@ -542,18 +574,91 @@ func (c *CSAPI) SlidingSync(t *testing.T, data sync3.Request, opts ...RequestOpt
return
}
func (c *CSAPI) SlidingSyncUntilEventID(t *testing.T, pos string, roomID string, eventID string) (res *sync3.Response) {
t.Helper()
return c.SlidingSyncUntilEvent(t, pos, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 10,
},
},
}, roomID, Event{ID: eventID})
}
func (c *CSAPI) SlidingSyncUntilMembership(t *testing.T, pos string, roomID string, target *CSAPI, membership string) (res *sync3.Response) {
t.Helper()
return c.SlidingSyncUntilEvent(t, pos, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 10,
},
},
}, roomID, Event{
Type: "m.room.member",
StateKey: &target.UserID,
Content: map[string]interface{}{
"displayname": target.Localpart,
"membership": membership,
},
})
}
func (c *CSAPI) SlidingSyncUntil(t *testing.T, pos string, data sync3.Request, check func(*sync3.Response) error) (res *sync3.Response) {
t.Helper()
start := time.Now()
for time.Since(start) < 10*time.Second {
qps := url.Values{
"timeout": []string{"500"},
}
if pos != "" {
qps["pos"] = []string{pos}
}
opts := []RequestOpt{
WithQueries(qps),
}
res := c.SlidingSync(t, data, opts...)
pos = res.Pos
err := check(res)
if err == nil {
return res
} else {
t.Logf("SlidingSyncUntil: tested response but it failed with: %v", err)
}
}
t.Fatalf("SlidingSyncUntil: timed out")
return nil
}
// SlidingSyncUntilEvent repeatedly calls sliding sync until the given Event is seen. The seen event can be matched
// on the event ID, type and state_key, etc. A zero Event always passes.
func (c *CSAPI) SlidingSyncUntilEvent(t *testing.T, pos string, data sync3.Request, roomID string, want Event) (res *sync3.Response) {
return c.SlidingSyncUntil(t, pos, data, func(r *sync3.Response) error {
room, ok := r.Rooms[roomID]
if !ok {
return fmt.Errorf("missing room %s", roomID)
}
for _, got := range room.Timeline {
if err := eventsEqual([]Event{want}, []json.RawMessage{got}); err == nil {
return nil
}
}
return fmt.Errorf("found room %s but missing event", roomID)
})
}
// DoFunc performs an arbitrary HTTP request to the server. This function supports RequestOpts to set
// extra information on the request such as an HTTP request body, query parameters and content-type.
// See all functions in this package starting with `With...`.
//
// Fails the test if an HTTP request could not be made or if there was a network error talking to the
// server. To do assertions on the HTTP response, see the `must` package. For example:
// must.MatchResponse(t, res, match.HTTPResponse{
// StatusCode: 400,
// JSON: []match.JSON{
// match.JSONKeyEqual("errcode", "M_INVALID_USERNAME"),
// },
// })
//
// must.MatchResponse(t, res, match.HTTPResponse{
// StatusCode: 400,
// JSON: []match.JSON{
// match.JSONKeyEqual("errcode", "M_INVALID_USERNAME"),
// },
// })
func (c *CSAPI) DoFunc(t *testing.T, method string, paths []string, opts ...RequestOpt) *http.Response {
t.Helper()
isSlidingSync := false


@ -0,0 +1,200 @@
package syncv3_test
import (
"testing"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/testutils/m"
)
func TestLazyLoading(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
charlie := registerNewUser(t)
sentinel := registerNewUser(t) // dummy user to ensure that the proxy has processed sent events
// all 3 join the room and say something
roomID := alice.CreateRoom(t, map[string]interface{}{"preset": "public_chat"})
bob.JoinRoom(t, roomID, nil)
charlie.JoinRoom(t, roomID, nil)
sentinel.JoinRoom(t, roomID, nil)
alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"body": "Hello world",
"msgtype": "m.text",
},
})
bob.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"body": "Hello world",
"msgtype": "m.text",
},
})
lastEventID := charlie.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"body": "Hello world",
"msgtype": "m.text",
},
})
// alice requests the room lazy loaded with timeline limit 1 => sees only charlie
res := alice.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
RequiredState: [][2]string{
{"m.room.member", "$LAZY"},
},
},
},
})
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &charlie.UserID,
},
}),
MatchRoomTimeline([]Event{
{ID: lastEventID},
}),
},
}))
// bob requests the room lazy loaded with timeline limit 1 AND $ME => sees bob and charlie
bobRes := bob.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
RequiredState: [][2]string{
{"m.room.member", "$LAZY"},
{"m.room.member", "$ME"},
},
},
},
})
m.MatchResponse(t, bobRes, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &charlie.UserID,
},
{
Type: "m.room.member",
StateKey: &bob.UserID,
},
}),
MatchRoomTimeline([]Event{
{ID: lastEventID},
}),
},
}))
// charlie requests the room lazy loaded with $ME AND *,* with timeline limit 1 => sees himself but a bunch of other room state too
charlieRes := charlie.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
RequiredState: [][2]string{
{"m.room.member", "$LAZY"},
{"m.room.member", "$ME"},
{"*", "*"},
},
},
},
})
m.MatchResponse(t, charlieRes, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &charlie.UserID,
},
{
Type: "m.room.create",
StateKey: ptr(""),
},
{
Type: "m.room.power_levels",
StateKey: ptr(""),
},
{
Type: "m.room.history_visibility",
StateKey: ptr(""),
},
{
Type: "m.room.join_rules",
StateKey: ptr(""),
},
}),
MatchRoomTimeline([]Event{
{ID: lastEventID},
}),
},
}))
// alice now sends a message
aliceEventID := alice.SendEventSynced(t, roomID, Event{Type: "m.room.message", Content: map[string]interface{}{"body": "hello", "msgtype": "m.text"}})
sentinel.SlidingSyncUntilEventID(t, "", roomID, aliceEventID)
// bob, who didn't previously get alice's m.room.member event, should now see this
bobRes = bob.SlidingSync(t, sync3.Request{}, WithPos(bobRes.Pos))
m.MatchResponse(t, bobRes, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimeline([]Event{{
Type: "m.room.message",
ID: aliceEventID,
}}),
MatchRoomRequiredState([]Event{{
Type: "m.room.member",
StateKey: &alice.UserID,
}}),
},
}))
// alice sends another message
aliceEventID2 := alice.SendEventSynced(t, roomID, Event{Type: "m.room.message", Content: map[string]interface{}{"body": "hello2", "msgtype": "m.text"}})
sentinel.SlidingSyncUntilEventID(t, "", roomID, aliceEventID2)
// bob, who had just got alice's m.room.member event, shouldn't see it again.
bobRes = bob.SlidingSync(t, sync3.Request{}, WithPos(bobRes.Pos))
m.MatchResponse(t, bobRes, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimeline([]Event{{
Type: "m.room.message",
ID: aliceEventID2,
}}),
MatchRoomRequiredState(nil),
},
}))
// charlie does a sync, also gets alice's member event, exactly once
charlieRes = charlie.SlidingSync(t, sync3.Request{}, WithPos(charlieRes.Pos))
m.MatchResponse(t, charlieRes, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimeline([]Event{
{
Type: "m.room.message",
ID: aliceEventID,
},
{
Type: "m.room.message",
ID: aliceEventID2,
},
}),
MatchRoomRequiredState([]Event{{
Type: "m.room.member",
StateKey: &alice.UserID,
}}),
},
}))
}


@ -345,6 +345,8 @@ func TestMultipleOverlappingLists(t *testing.T) {
Content: map[string]interface{}{"body": "ping", "msgtype": "m.text"},
})
}
lastEventID := roomToEventID[allRoomIDs[0]]
alice.SlidingSyncUntilEventID(t, "", allRoomIDs[0], lastEventID)
// request 2 lists: one DMs, one encrypted. The room subscriptions are different so they should be UNION'd correctly.
// We request 5 rooms to ensure there is some overlap but not total overlap:
@ -600,7 +602,7 @@ func TestNewRoomNameCalculations(t *testing.T) {
seenRoomNames[roomID] = sub.Name
}
}
if time.Since(start) > 5*time.Second {
if time.Since(start) > 15*time.Second {
t.Errorf("timed out, did not see all rooms, seen %d/%d", len(seenRoomNames), numRooms)
break
}


@ -16,19 +16,16 @@ import (
)
var (
proxyBaseURL = "http://localhost"
proxyBaseURL = os.Getenv("SYNCV3_ADDR")
homeserverBaseURL = os.Getenv("SYNCV3_SERVER")
userCounter uint64
)
func TestMain(m *testing.M) {
listenAddr := os.Getenv("SYNCV3_BINDADDR")
if listenAddr == "" {
fmt.Println("SYNCV3_BINDADDR must be set")
if proxyBaseURL == "" {
fmt.Println("SYNCV3_ADDR must be set e.g 'http://localhost:8008'")
os.Exit(1)
}
segments := strings.Split(listenAddr, ":")
proxyBaseURL += ":" + segments[1]
fmt.Println("proxy located at", proxyBaseURL)
exitCode := m.Run()
os.Exit(exitCode)
@ -95,6 +92,18 @@ func MatchRoomTimeline(events []Event) m.RoomMatcher {
}
}
func MatchRoomTimelineContains(event Event) m.RoomMatcher {
return func(r sync3.Room) error {
var err error
for _, got := range r.Timeline {
if err = eventsEqual([]Event{event}, []json.RawMessage{got}); err == nil {
return nil
}
}
return err
}
}
func MatchRoomRequiredState(events []Event) m.RoomMatcher {
return func(r sync3.Room) error {
if len(r.RequiredState) != len(events) {


@ -73,6 +73,9 @@ func TestRoomStateTransitions(t *testing.T) {
// now bob accepts the invite
bob.JoinRoom(t, inviteRoomID, nil)
// wait until the proxy has got this data
alice.SlidingSyncUntilMembership(t, "", inviteRoomID, bob, "join")
// the room should be updated with the initial flag set to replace what was in the invite state
bobRes = bob.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
@ -102,7 +105,8 @@ func TestRoomStateTransitions(t *testing.T) {
},
}),
m.MatchRoomInitial(true),
m.MatchRoomHighlightCount(0)))
m.MatchRoomHighlightCount(0),
))
}
func TestInviteRejection(t *testing.T) {
@ -252,12 +256,9 @@ func TestInviteAcceptance(t *testing.T) {
},
}))
_, since := bob.MustSync(t, SyncReq{})
// now invite bob
alice.InviteRoom(t, secondInviteRoomID, bob.UserID)
since = bob.MustSyncUntil(t, SyncReq{Since: since}, SyncInvitedTo(bob.UserID, secondInviteRoomID))
// TODO: proxy needs to have processed this event enough for it to be waiting for us
time.Sleep(100 * time.Millisecond)
alice.SlidingSyncUntilMembership(t, "", secondInviteRoomID, bob, "invite")
res = bob.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
@ -289,10 +290,8 @@ func TestInviteAcceptance(t *testing.T) {
// now accept the invites
bob.JoinRoom(t, firstInviteRoomID, nil)
bob.JoinRoom(t, secondInviteRoomID, nil)
bob.MustSyncUntil(t, SyncReq{Since: since}, SyncJoinedTo(bob.UserID, firstInviteRoomID), SyncJoinedTo(bob.UserID, secondInviteRoomID))
// wait for the proxy to process the join response, no better way at present as we cannot introspect _when_ it's doing the next poll :/
// could do if this were an integration test though.
time.Sleep(100 * time.Millisecond)
alice.SlidingSyncUntilMembership(t, "", firstInviteRoomID, bob, "join")
alice.SlidingSyncUntilMembership(t, "", secondInviteRoomID, bob, "join")
// the list should be purged
res = bob.SlidingSync(t, sync3.Request{
@ -378,8 +377,8 @@ func TestMemberCounts(t *testing.T) {
// join both rooms, the counts should now reflect reality
bob.JoinRoom(t, firstRoomID, nil)
bob.JoinRoom(t, secondRoomID, nil)
bob.MustSyncUntil(t, SyncReq{}, SyncJoinedTo(bob.UserID, firstRoomID), SyncJoinedTo(bob.UserID, secondRoomID))
time.Sleep(100 * time.Millisecond) // let the proxy process the joins
alice.SlidingSyncUntilMembership(t, "", firstRoomID, bob, "join")
alice.SlidingSyncUntilMembership(t, "", secondRoomID, bob, "join")
res = bob.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{

tests-e2e/num_live_test.go Normal file

@ -0,0 +1,129 @@
package syncv3_test
import (
"testing"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/testutils/m"
)
func TestNumLive(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
roomID := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID, nil)
eventID := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello World!!!!",
},
})
// initial syncs -> no live events
res := bob.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
})
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(
map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimelineContains(Event{ID: eventID}),
m.MatchNumLive(0),
},
},
))
// live event -> 1 num live
eventID2 := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello World!!!! 2",
},
})
res = bob.SlidingSyncUntilEventID(t, res.Pos, roomID, eventID2)
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(
map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimelineContains(Event{ID: eventID2}),
m.MatchNumLive(1),
},
},
))
// from fresh => 0 num live
res = bob.SlidingSyncUntilEventID(t, "", roomID, eventID2)
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(
map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimelineContains(Event{ID: eventID2}),
m.MatchNumLive(0),
},
},
))
// now the big one -> 3 rooms, ask for top 2, bump 3rd room to top twice -> num_live=2
roomID2 := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID2, nil)
roomID3 := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID3, nil)
// let the proxy become aware of these joins
alice.SlidingSyncUntilMembership(t, "", roomID3, bob, "join")
// at this point, roomID is at the bottom, check.
res = bob.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
{
Ranges: sync3.SliceRanges{{0, 1}}, // top 2 rooms
Sort: []string{sync3.SortByRecency},
RoomSubscription: sync3.RoomSubscription{
TimelineLimit: 2,
},
},
},
})
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Ops(
m.MatchV3SyncOp(0, 1, []string{roomID3, roomID2}),
)))
// now send 2 live events into roomID to bump it to the top
eventID3 := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "bump 1",
},
})
eventID4 := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "bump 2",
},
})
// wait for the proxy to get it
alice.SlidingSyncUntilEventID(t, "", roomID, eventID4)
// now syncing with bob should see 2 live events
res = bob.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
{
Ranges: sync3.SliceRanges{{0, 1}}, // top 2 rooms
},
},
}, WithPos(res.Pos))
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
m.MatchNumLive(2),
// TODO: should we be including event ID 2 given timeline limit is 2?
MatchRoomTimeline([]Event{{ID: eventID2}, {ID: eventID3}, {ID: eventID4}}),
},
}))
}

tests-e2e/receipts_test.go Normal file

@ -0,0 +1,220 @@
package syncv3_test
import (
"testing"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/sync3/extensions"
"github.com/matrix-org/sync-v3/testutils/m"
)
// Test that receipts are sent initially and during live streams
func TestReceipts(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
roomID := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID, nil)
eventID := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello world",
},
})
alice.SendReceipt(t, roomID, eventID, "m.read")
// bob syncs -> should see receipt
res := bob.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
Extensions: extensions.Request{
Receipts: &extensions.ReceiptsRequest{
Enabled: true,
},
},
})
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomID: {
MatchRoomTimeline([]Event{
{
ID: eventID,
},
}),
},
}), m.MatchReceipts(roomID, []m.Receipt{
{
EventID: eventID,
UserID: alice.UserID,
Type: "m.read",
},
}))
// bob sends receipt -> should see it on live stream
bob.SendReceipt(t, roomID, eventID, "m.read")
bob.SlidingSyncUntil(t, res.Pos, sync3.Request{}, func(r *sync3.Response) error {
return m.MatchReceipts(roomID, []m.Receipt{
{
EventID: eventID,
UserID: bob.UserID,
Type: "m.read",
},
})(r)
})
}
func TestReceiptsLazy(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
charlie := registerNewUser(t)
roomID := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID, nil)
charlie.JoinRoom(t, roomID, nil)
alice.SlidingSync(t, sync3.Request{}) // proxy begins tracking
eventID := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello world",
},
})
// everyone sends a receipt for this event
alice.SendReceipt(t, roomID, eventID, "m.read")
bob.SendReceipt(t, roomID, eventID, "m.read")
charlie.SendReceipt(t, roomID, eventID, "m.read")
// alice sends 5 new events, bob and alice ACK the last event
var fifthEventID string
for i := 0; i < 5; i++ {
fifthEventID = alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello world",
},
})
}
alice.SendReceipt(t, roomID, fifthEventID, "m.read")
bob.SendReceipt(t, roomID, fifthEventID, "m.read")
// alice sends another 5 events and ACKs nothing
var lastEventID string
for i := 0; i < 5; i++ {
lastEventID = alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello world",
},
})
}
// ensure the proxy has the last event processed
alice.SlidingSyncUntilEventID(t, "", roomID, lastEventID)
// Test that:
// - Bob syncs with timeline_limit: 1 => only own receipt, as you always get your own receipts.
// - Bob sync with timeline limit: 6 => receipts for fifthEventID only (self + alice)
res := bob.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
Extensions: extensions.Request{
Receipts: &extensions.ReceiptsRequest{
Enabled: true,
},
},
})
m.MatchResponse(t, res, m.MatchReceipts(roomID, []m.Receipt{
{
EventID: fifthEventID,
UserID: bob.UserID,
Type: "m.read",
},
}))
res = bob.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 6,
},
},
Extensions: extensions.Request{
Receipts: &extensions.ReceiptsRequest{
Enabled: true,
},
},
})
m.MatchResponse(t, res, m.MatchReceipts(roomID, []m.Receipt{
{
EventID: fifthEventID,
UserID: alice.UserID,
Type: "m.read",
},
{
EventID: fifthEventID,
UserID: bob.UserID,
Type: "m.read",
},
}))
}
func TestReceiptsPrivate(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
roomID := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID, nil)
eventID := alice.SendEventSynced(t, roomID, Event{
Type: "m.room.message",
Content: map[string]interface{}{
"msgtype": "m.text",
"body": "Hello world",
},
})
// bob secretly reads this
bob.SendReceipt(t, roomID, eventID, "m.read.private")
// alice does sliding sync -> does not see private RR
res := alice.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
Extensions: extensions.Request{
Receipts: &extensions.ReceiptsRequest{
Enabled: true,
},
},
})
m.MatchResponse(t, res, m.MatchReceipts(roomID, nil))
// bob does sliding sync -> sees private RR
res = bob.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
Extensions: extensions.Request{
Receipts: &extensions.ReceiptsRequest{
Enabled: true,
},
},
})
m.MatchResponse(t, res, m.MatchReceipts(roomID, []m.Receipt{
{
UserID: bob.UserID,
EventID: eventID,
Type: "m.read.private",
},
}))
}


@ -1,5 +1,6 @@
#!/bin/bash -eu
export SYNCV3_BINDADDR=0.0.0.0:8844
export SYNCV3_ADDR='http://localhost:8844'
export SYNCV3_DEBUG=1
# Run the binary and stop it afterwards.


@ -3,6 +3,7 @@ package syncv3_test
import (
"encoding/json"
"testing"
"time"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/testutils/m"
@ -15,11 +16,11 @@ import (
// the v3 server may still be receiving events in that room from other joined members. We need to
// make sure these events don't find their way to the client.
// Attack vector:
// - Alice is using the sync server and is in room !A.
// - Eve joins the room !A.
// - Alice kicks Eve.
// - Alice sends event $X in !A.
// - Ensure Eve does not see event $X.
// - Alice is using the sync server and is in room !A.
// - Eve joins the room !A.
// - Alice kicks Eve.
// - Alice sends event $X in !A.
// - Ensure Eve does not see event $X.
func TestSecurityLiveStreamEventLeftLeak(t *testing.T) {
alice := registerNewUser(t)
eve := registerNewUser(t)
@ -65,6 +66,7 @@ func TestSecurityLiveStreamEventLeftLeak(t *testing.T) {
"name": "I hate Eve",
},
})
time.Sleep(200 * time.Millisecond) // wait for the proxy to process it
// Ensure Alice sees both events
aliceRes = alice.SlidingSync(t, sync3.Request{
@ -132,10 +134,10 @@ func TestSecurityLiveStreamEventLeftLeak(t *testing.T) {
// Rationale: Unlike sync v2, in v3 clients can subscribe to any room ID they want as a room_subscription.
// We need to make sure that the user is allowed to see events in that room before delivering those events.
// Attack vector:
// - Alice is using the sync server and is in room !A.
// - Eve works out the room ID !A (this isn't sensitive information).
// - Eve starts using the sync server and makes a room_subscription for !A.
// - Ensure that Eve does not see any events in !A.
// - Alice is using the sync server and is in room !A.
// - Eve works out the room ID !A (this isn't sensitive information).
// - Eve starts using the sync server and makes a room_subscription for !A.
// - Ensure that Eve does not see any events in !A.
func TestSecurityRoomSubscriptionLeak(t *testing.T) {
alice := registerNewUser(t)
eve := registerNewUser(t)
@ -200,10 +202,10 @@ func TestSecurityRoomSubscriptionLeak(t *testing.T) {
// Rationale: Unlike sync v2, in v3 clients can subscribe to any room ID they want as a space.
// We need to make sure that the user is allowed to see events in that room before delivering those events.
// Attack vector:
// - Alice is using the sync server and is in space !A with room !B.
// - Eve works out the room ID !A (this isn't sensitive information).
// - Eve starts using the sync server and makes a request for !A as the space filter.
// - Ensure that Eve does not see any events in !A or !B.
// - Alice is using the sync server and is in space !A with room !B.
// - Eve works out the room ID !A (this isn't sensitive information).
// - Eve starts using the sync server and makes a request for !A as the space filter.
// - Ensure that Eve does not see any events in !A or !B.
func TestSecuritySpaceDataLeak(t *testing.T) {
alice := registerNewUser(t)
eve := registerNewUser(t)
@ -246,11 +248,11 @@ func TestSecuritySpaceDataLeak(t *testing.T) {
// We need to make sure that if a user is in a room in multiple spaces (only 1 of them the user is joined to)
// then they cannot see the room if they apply a filter for a parent space they are not joined to.
// Attack vector:
// - Alice is using the sync server and is in space !A with room !B.
// - Eve is using the sync server and is in space !C with room !B as well.
// - Eve works out the room ID !A (this isn't sensitive information).
// - Eve starts using the sync server and makes a request for !A as the space filter.
// - Ensure that Eve does not see anything, even though they are joined to !B and the proxy knows it.
// - Alice is using the sync server and is in space !A with room !B.
// - Eve is using the sync server and is in space !C with room !B as well.
// - Eve works out the room ID !A (this isn't sensitive information).
// - Eve starts using the sync server and makes a request for !A as the space filter.
// - Ensure that Eve does not see anything, even though they are joined to !B and the proxy knows it.
func TestSecuritySpaceMetadataLeak(t *testing.T) {
alice := registerNewUser(t)
eve := registerNewUser(t)


@ -2,6 +2,7 @@ package syncv3_test
import (
"testing"
"time"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/testutils/m"
@ -9,14 +10,15 @@ import (
// Make this graph:
//
// A D <-- parents
// .--`--. |
// B C E F <-- children
// A D <-- parents
// .--`--. |
// B C E F <-- children
//
// and query:
// spaces[A] => B,C
// spaces[D] => E
// spaces[A,B] => B,C,E
//
// spaces[A] => B,C
// spaces[D] => E
// spaces[A,B] => B,C,E
func TestSpacesFilter(t *testing.T) {
alice := registerNewUser(t)
parentA := alice.CreateRoom(t, map[string]interface{}{
@ -65,6 +67,7 @@ func TestSpacesFilter(t *testing.T) {
"via": []string{"example.com"},
},
})
time.Sleep(100 * time.Millisecond) // let the proxy process this
doSpacesListRequest := func(spaces []string, pos *string, listMatchers ...m.ListMatcher) *sync3.Response {
t.Helper()
@ -120,6 +123,7 @@ func TestSpacesFilter(t *testing.T) {
"via": []string{"example.com"},
},
})
time.Sleep(100 * time.Millisecond) // let the proxy process this
doInitialSpacesListRequest([]string{parentD}, []string{roomF, roomE})
// now remove B and re-query A
@ -128,6 +132,7 @@ func TestSpacesFilter(t *testing.T) {
StateKey: &roomB,
Content: map[string]interface{}{},
})
time.Sleep(100 * time.Millisecond) // let the proxy process this
res := doInitialSpacesListRequest([]string{parentA}, []string{roomC})
// now live stream an update to ensure it gets added
@ -138,6 +143,7 @@ func TestSpacesFilter(t *testing.T) {
"via": []string{"example.com"},
},
})
time.Sleep(100 * time.Millisecond) // let the proxy process this
res = doSpacesListRequest([]string{parentA}, &res.Pos,
m.MatchV3Count(2), m.MatchV3Ops(
m.MatchV3DeleteOp(1),

tests-e2e/to_device_test.go Normal file

@ -0,0 +1,169 @@
package syncv3_test
import (
"encoding/json"
"testing"
"time"
"github.com/matrix-org/util"
"github.com/tidwall/gjson"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/sync3/extensions"
)
// Test that if you login to an account -> send a to-device message to this device -> initial proxy connection
// then you receive the to_device events.
func TestToDeviceDeliveryInitialLogin(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
bob.SendToDevice(t, "m.dummy", alice.UserID, alice.DeviceID, map[string]interface{}{})
// loop until we see the event
loopUntilToDeviceEvent(t, alice, nil, "", "m.dummy", bob.UserID)
}
// Test that if you are live streaming then you can get to_device events sent to you.
func TestToDeviceDeliveryStream(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
res := alice.SlidingSync(t, sync3.Request{
Extensions: extensions.Request{
ToDevice: &extensions.ToDeviceRequest{
Enabled: &boolTrue,
},
},
})
bob.SendToDevice(t, "m.dummy", alice.UserID, alice.DeviceID, map[string]interface{}{})
// loop until we see the event
loopUntilToDeviceEvent(t, alice, res, res.Extensions.ToDevice.NextBatch, "m.dummy", bob.UserID)
}
// Test that if we're live streaming, then disconnect, have a to_device message sent to us, then reconnect,
// we still see the to_device message.
func TestToDeviceDeliveryReconnect(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
// start live stream, but ignore the pos to "disconnect"
alice.SlidingSync(t, sync3.Request{
Extensions: extensions.Request{
ToDevice: &extensions.ToDeviceRequest{
Enabled: &boolTrue,
},
},
})
bob.SendToDevice(t, "m.dummy", alice.UserID, alice.DeviceID, map[string]interface{}{})
// loop until we see the event
loopUntilToDeviceEvent(t, alice, nil, "", "m.dummy", bob.UserID)
}
func TestToDeviceDropStaleKeyRequestsInitial(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
sendMessages := 3
// send a few dummy messages, cancelling each other
for i := 0; i < sendMessages; i++ {
reqID := util.RandomString(8)
bob.SendToDevice(t, "m.room_key_request", alice.UserID, alice.DeviceID, map[string]interface{}{
"request_id": reqID,
"action": "request",
"requesting_device_id": "mydevice",
})
bob.SendToDevice(t, "m.room_key_request", alice.UserID, alice.DeviceID, map[string]interface{}{
"request_id": reqID,
"action": "request_cancellation",
"requesting_device_id": "mydevice",
})
}
bob.SendToDevice(t, "sentinel", alice.UserID, alice.DeviceID, map[string]interface{}{})
// Loop until we have the sentinel event, the rest should cancel out.
gotMessages, _ := loopUntilToDeviceEvent(t, alice, nil, "", "sentinel", bob.UserID)
wantCount := 1
if count := len(gotMessages); count > wantCount {
t.Fatalf("expected %d to-device events, got %d : %v", wantCount, count, jsonify(gotMessages))
}
}
func TestToDeviceDropStaleKeyRequestsStreamNoDelete(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
bob.SendToDevice(t, "m.room_key_request", alice.UserID, alice.DeviceID, map[string]interface{}{
"request_id": "A",
"action": "request",
"requesting_device_id": "mydevice",
})
msgs1, res := loopUntilToDeviceEvent(t, alice, nil, "", "m.room_key_request", bob.UserID)
if len(msgs1) != 1 {
t.Fatalf("got %v want 1 message", jsonify(msgs1))
}
// now send a cancellation: we should not delete the cancellation
bob.SendToDevice(t, "m.room_key_request", alice.UserID, alice.DeviceID, map[string]interface{}{
"request_id": "A",
"action": "request_cancellation",
"requesting_device_id": "mydevice",
})
time.Sleep(100 * time.Millisecond)
msgs2, _ := loopUntilToDeviceEvent(t, alice, res, res.Extensions.ToDevice.NextBatch, "m.room_key_request", bob.UserID)
if len(msgs2) != 1 {
t.Fatalf("got %v want 1 message", jsonify(msgs2))
}
if gjson.ParseBytes(msgs1[0]).Get("content.action").Str != "request" {
t.Errorf("first message was not action: request: %v", string(msgs1[0]))
}
if gjson.ParseBytes(msgs2[0]).Get("content.action").Str != "request_cancellation" {
t.Errorf("second message was not action: request_cancellation: %v", string(msgs2[0]))
}
}
func loopUntilToDeviceEvent(t *testing.T, client *CSAPI, res *sync3.Response, since string, wantEvType string, wantSender string) ([]json.RawMessage, *sync3.Response) {
t.Helper()
gotEvent := false
var messages []json.RawMessage
checkIfHasEvent := func() {
t.Helper()
for _, ev := range res.Extensions.ToDevice.Events {
t.Logf("got to-device: %s", string(ev))
messages = append(messages, ev)
evJSON := gjson.ParseBytes(ev)
if evJSON.Get("type").Str == wantEvType && evJSON.Get("sender").Str == wantSender {
gotEvent = true
}
}
}
if res == nil {
res = client.SlidingSync(t, sync3.Request{
Extensions: extensions.Request{
ToDevice: &extensions.ToDeviceRequest{
Enabled: &boolTrue,
},
},
})
checkIfHasEvent()
since = res.Extensions.ToDevice.NextBatch
}
waitTime := 10 * time.Second
start := time.Now()
for time.Since(start) < waitTime && !gotEvent {
res = client.SlidingSync(t, sync3.Request{
Extensions: extensions.Request{
ToDevice: &extensions.ToDeviceRequest{
Since: since,
},
},
}, WithPos(res.Pos))
since = res.Extensions.ToDevice.NextBatch
checkIfHasEvent()
}
if !gotEvent {
t.Fatalf("did not see to-device message after %v", waitTime)
}
return messages, res
}
func jsonify(i interface{}) string {
b, _ := json.Marshal(i)
return string(b)
}


@ -11,19 +11,15 @@ import (
"github.com/tidwall/gjson"
)
func TestTombstonesFlag(t *testing.T) {
// tests that if we upgrade a room it is removed from the list. If we request old rooms it should be included.
func TestIncludeOldRooms(t *testing.T) {
client := registerNewUser(t)
// create room
roomID := client.CreateRoom(t, map[string]interface{}{})
res := client.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
{
Ranges: [][2]int64{{0, 1}},
Filters: &sync3.RequestFilters{
IsTombstoned: &boolFalse,
},
RoomSubscription: sync3.RoomSubscription{
RequiredState: [][2]string{{"m.room.create", ""}},
},
@ -33,14 +29,7 @@ func TestTombstonesFlag(t *testing.T) {
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Count(1), m.MatchV3Ops(
m.MatchV3SyncOp(0, 1, []string{roomID}),
)))
upgradeRes := client.MustDoFunc(t, "POST", []string{"_matrix", "client", "v3", "rooms", roomID, "upgrade"}, WithJSONBody(t, map[string]interface{}{
"new_version": "9",
}))
var body map[string]interface{}
if err := json.NewDecoder(upgradeRes.Body).Decode(&body); err != nil {
t.Fatalf("failed to decode response: %s", err)
}
newRoomID := body["replacement_room"].(string)
newRoomID := upgradeRoom(t, client, roomID)
t.Logf("old %s new %s", roomID, newRoomID)
time.Sleep(100 * time.Millisecond) // let the proxy process it
@ -94,6 +83,253 @@ func TestTombstonesFlag(t *testing.T) {
},
},
}))
// now fresh sync with old rooms enabled
res = client.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
{
Ranges: [][2]int64{{0, 2}},
RoomSubscription: sync3.RoomSubscription{
RequiredState: [][2]string{{"m.room.member", client.UserID}},
IncludeOldRooms: &sync3.RoomSubscription{
RequiredState: [][2]string{{"m.room.create", ""}, {"m.room.tombstone", ""}},
},
},
},
},
})
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Count(1), m.MatchV3Ops(
m.MatchV3SyncOp(0, 2, []string{newRoomID}),
)), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
newRoomID: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &client.UserID,
},
}),
},
roomID: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.create",
StateKey: ptr(""),
},
{
Type: "m.room.tombstone",
StateKey: ptr(""),
},
}),
},
}))
// finally, a fresh sync without include_old_rooms -> newest room only
res = client.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
{
Ranges: [][2]int64{{0, 2}},
RoomSubscription: sync3.RoomSubscription{
RequiredState: [][2]string{{"m.room.member", client.UserID}},
},
},
},
})
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Count(1), m.MatchV3Ops(
m.MatchV3SyncOp(0, 2, []string{newRoomID}),
)), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
newRoomID: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &client.UserID,
},
}),
},
}))
}
func TestTombstoneWalking(t *testing.T) {}
// make a long upgrade chain of A -> B -> C -> D and then make sure that we can:
// - explicitly subscribe to old rooms e.g. B
// - in that subscription, include old rooms to return A and nothing else.
// - check that if you leave a room e.g. B, it breaks the chain when requesting old rooms (no A)
func TestIncludeOldRoomsLongChain(t *testing.T) {
client := registerNewUser(t)
// seed the server with this client; we need to do this so the proxy has timeline history to
// return, letting us assert that events appear in the right rooms
res := client.SlidingSync(t, sync3.Request{})
roomA := client.CreateRoom(t, map[string]interface{}{})
client.SendEventSynced(t, roomA, Event{
Type: "m.room.message",
Content: map[string]interface{}{"body": "A", "msgtype": "m.text"},
})
roomB := upgradeRoom(t, client, roomA)
eventB := client.SendEventSynced(t, roomB, Event{
Type: "m.room.message",
Content: map[string]interface{}{"body": "B", "msgtype": "m.text"},
})
roomC := upgradeRoom(t, client, roomB)
client.SendEventSynced(t, roomC, Event{
Type: "m.room.message",
Content: map[string]interface{}{"body": "C", "msgtype": "m.text"},
})
roomD := upgradeRoom(t, client, roomC)
eventD := client.SendEventSynced(t, roomD, Event{
Type: "m.room.message",
Content: map[string]interface{}{"body": "D", "msgtype": "m.text"},
})
t.Logf("A:%s B:%s C:%s D:%s", roomA, roomB, roomC, roomD)
// wait until we have seen the final event and final upgrade
client.SlidingSyncUntilEventID(t, "", roomD, eventD)
client.SlidingSyncUntilEvent(t, "", sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomC: {
TimelineLimit: 5,
},
},
}, roomC, Event{Type: "m.room.tombstone", StateKey: ptr("")})
// can we subscribe to old rooms? Room B
res = client.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomB: {
RequiredState: [][2]string{{"m.room.member", client.UserID}},
TimelineLimit: 4, // tombstone event and msg
IncludeOldRooms: &sync3.RoomSubscription{
RequiredState: [][2]string{{"m.room.create", ""}},
},
},
},
})
m.MatchResponse(t, res, m.MatchNoV3Ops(), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomA: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.create",
StateKey: ptr(""),
},
}),
},
roomB: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &client.UserID,
},
}),
MatchRoomTimelineContains(Event{
ID: eventB,
}),
},
}))
// now leave room B and try the chain from D: we shouldn't see B or A
client.LeaveRoom(t, roomB)
client.SlidingSyncUntilEvent(t, res.Pos, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomB: {
TimelineLimit: 5,
},
},
}, roomB, Event{Type: "m.room.member", StateKey: &client.UserID, Content: map[string]interface{}{"membership": "leave"}})
res = client.SlidingSync(t, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomD: {
RequiredState: [][2]string{{"m.room.member", client.UserID}},
IncludeOldRooms: &sync3.RoomSubscription{
RequiredState: [][2]string{{"m.room.create", ""}},
},
},
},
})
m.MatchResponse(t, res, m.MatchNoV3Ops(), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomC: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.create",
StateKey: ptr(""),
},
}),
},
roomD: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.member",
StateKey: &client.UserID,
},
}),
},
}))
}
// test that if you have a list version and direct sub version of include_old_rooms, they get unioned correctly.
func TestIncludeOldRoomsSubscriptionUnion(t *testing.T) {
client := registerNewUser(t)
roomA := client.CreateRoom(t, map[string]interface{}{})
roomB := upgradeRoom(t, client, roomA)
// should union to timeline_limit=1, req_state=create+member+tombstone
res := client.SlidingSync(t, sync3.Request{
Lists: []sync3.RequestList{
{
Ranges: [][2]int64{{0, 1}},
RoomSubscription: sync3.RoomSubscription{
TimelineLimit: 0,
IncludeOldRooms: &sync3.RoomSubscription{
TimelineLimit: 0,
RequiredState: [][2]string{{"m.room.member", client.UserID}, {"m.room.create", ""}},
},
},
},
},
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomB: {
TimelineLimit: 0,
IncludeOldRooms: &sync3.RoomSubscription{
TimelineLimit: 1,
RequiredState: [][2]string{{"m.room.tombstone", ""}, {"m.room.create", ""}},
},
},
},
})
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Count(1), m.MatchV3Ops(
m.MatchV3SyncOp(0, 1, []string{roomB}),
)), m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomA: {
MatchRoomRequiredState([]Event{
{
Type: "m.room.create", StateKey: ptr(""),
},
{
Type: "m.room.member", StateKey: &client.UserID,
},
{
Type: "m.room.tombstone", StateKey: ptr(""),
},
}),
func(r sync3.Room) error {
if len(r.Timeline) != 1 {
return fmt.Errorf("timeline length %d want 1", len(r.Timeline))
}
return nil
},
},
roomB: {
MatchRoomRequiredState(nil),
MatchRoomTimeline(nil),
},
}))
}
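// A sketch of the union the assertions above imply (not an API definition): for an old room
// reachable from both the list subscription and the direct subscription, the two
// include_old_rooms subscriptions are merged field-wise, taking the max timeline_limit and the
// union of required_state:
//
//    {timeline_limit: 0, required_state: [member, create]}
//      + {timeline_limit: 1, required_state: [tombstone, create]}
//      = {timeline_limit: 1, required_state: [create, member, tombstone]}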
func upgradeRoom(t *testing.T, client *CSAPI, roomID string) (newRoomID string) {
upgradeRes := client.MustDoFunc(t, "POST", []string{"_matrix", "client", "v3", "rooms", roomID, "upgrade"}, WithJSONBody(t, map[string]interface{}{
"new_version": "9",
}))
var body map[string]interface{}
if err := json.NewDecoder(upgradeRes.Body).Decode(&body); err != nil {
t.Fatalf("failed to decode response: %s", err)
}
newRoomID = body["replacement_room"].(string)
return newRoomID
}
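// For reference, the /upgrade endpoint responds with a body of the form below (room ID
// illustrative only); upgradeRoom simply extracts "replacement_room" from it:
//
//    {"replacement_room": "!newroomid:localhost"}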

176
tests-e2e/typing_test.go Normal file
View File

@ -0,0 +1,176 @@
package syncv3_test
import (
"encoding/json"
"fmt"
"reflect"
"sort"
"testing"
"time"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/sync3/extensions"
"github.com/matrix-org/sync-v3/testutils/m"
"github.com/tidwall/gjson"
)
func TestTyping(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
roomID := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID, nil)
// typing requests are ignored on the initial sync as we only store typing notifs for _connected_ (polling)
// users, and alice is not connected yet. Only live updates will show up. This is mainly to simplify
// the proxy - server impls will be able to do this immediately.
alice.SlidingSync(t, sync3.Request{}) // start polling
bob.SlidingSync(t, sync3.Request{})
bob.SendTyping(t, roomID, true, 5000)
waitUntilTypingData(t, bob, roomID, []string{bob.UserID}) // ensure the proxy gets the data
// make sure initial requests show typing
res := alice.SlidingSync(t, sync3.Request{
Extensions: extensions.Request{
Typing: &extensions.TypingRequest{
Enabled: true,
},
},
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
})
m.MatchResponse(t, res, m.MatchTyping(roomID, []string{bob.UserID}))
// make sure typing updates -> no typing go through
bob.SendTyping(t, roomID, false, 5000)
waitUntilTypingData(t, bob, roomID, []string{}) // ensure the proxy gets the data
res = alice.SlidingSync(t, sync3.Request{}, WithPos(res.Pos))
m.MatchResponse(t, res, m.MatchTyping(roomID, []string{}))
// make sure typing updates -> start typing go through
bob.SendTyping(t, roomID, true, 5000)
waitUntilTypingData(t, bob, roomID, []string{bob.UserID}) // ensure the proxy gets the data
res = alice.SlidingSync(t, sync3.Request{}, WithPos(res.Pos))
m.MatchResponse(t, res, m.MatchTyping(roomID, []string{bob.UserID}))
// make sure typing updates are consolidated when multiple people type
alice.SendTyping(t, roomID, true, 5000)
waitUntilTypingData(t, bob, roomID, []string{bob.UserID, alice.UserID}) // ensure the proxy gets the data
res = alice.SlidingSync(t, sync3.Request{}, WithPos(res.Pos))
m.MatchResponse(t, res, m.MatchTyping(roomID, []string{bob.UserID, alice.UserID}))
// make sure if you type in a room not returned in the window it does not go through
roomID2 := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID2, nil)
res = alice.SlidingSyncUntilMembership(t, res.Pos, roomID2, bob, "join")
bob.SendTyping(t, roomID2, true, 5000)
waitUntilTypingData(t, bob, roomID2, []string{bob.UserID}) // ensure the proxy gets the data
// alice should get this typing notif even if we aren't subscribing to it, because we do not track
// the entire set of rooms the client is tracking, so it's entirely possible this room was returned
// hours ago and the user wants to know information about it. We can't even rely on it being present
// in the sliding window or direct subscriptions because clients sometimes spider the entire list of
// rooms and then track "live" data. Typing is inherently live, so always return it.
// TODO: parameterise this in the typing extension?
res = alice.SlidingSync(t, sync3.Request{}, WithPos(res.Pos))
m.MatchResponse(t, res, m.MatchTyping(roomID2, []string{bob.UserID}))
// ensure that we only see 1x typing event and don't get dupes for the # connected users in the room
alice.SendTyping(t, roomID, false, 5000)
now := time.Now()
numTypingEvents := 0
for time.Since(now) < time.Second {
res = alice.SlidingSync(t, sync3.Request{}, WithPos(res.Pos))
if res.Extensions.Typing != nil && res.Extensions.Typing.Rooms != nil && res.Extensions.Typing.Rooms[roomID] != nil {
typingEv := res.Extensions.Typing.Rooms[roomID]
gotUserIDs := typingUsers(t, typingEv)
// both alice and bob were typing in roomID and we just sent a stop-typing for alice, so only count
// updates where bob alone remains typing.
if reflect.DeepEqual(gotUserIDs, []string{bob.UserID}) {
numTypingEvents++
t.Logf("typing ev: %v", string(res.Extensions.Typing.Rooms[roomID]))
}
}
}
if numTypingEvents > 1 {
t.Errorf("got %d typing events, wanted 1", numTypingEvents)
}
}
// Test that when someone is typing but the syncing client has not enabled the typing extension, we don't return a no-op response.
func TestTypingNoUpdate(t *testing.T) {
alice := registerNewUser(t)
bob := registerNewUser(t)
roomID := alice.CreateRoom(t, map[string]interface{}{
"preset": "public_chat",
})
bob.JoinRoom(t, roomID, nil)
// typing requests are ignored on the initial sync as we only store typing notifs for _connected_ (polling)
// users, and alice is not connected yet. Only live updates will show up. This is mainly to simplify
// the proxy - server impls will be able to do this immediately.
alice.SlidingSync(t, sync3.Request{}) // start polling
res := bob.SlidingSync(t, sync3.Request{
// no typing extension
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 0,
},
},
})
alice.SendTyping(t, roomID, true, 5000)
waitUntilTypingData(t, alice, roomID, []string{alice.UserID}) // wait until alice is typing
// bob's request should not return early with an empty response just because alice is typing
res = bob.SlidingSync(t, sync3.Request{}, WithPos(res.Pos))
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(nil))
}
func waitUntilTypingData(t *testing.T, client *CSAPI, roomID string, wantUserIDs []string) *sync3.Response {
t.Helper()
sort.Strings(wantUserIDs)
return client.SlidingSyncUntil(t, "", sync3.Request{
Extensions: extensions.Request{
Typing: &extensions.TypingRequest{
Enabled: true,
},
},
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
}, func(r *sync3.Response) error {
if r.Extensions.Typing == nil {
return fmt.Errorf("missing typing extension")
}
if len(r.Extensions.Typing.Rooms) == 0 {
return fmt.Errorf("no rooms typing")
}
typingEvent := r.Extensions.Typing.Rooms[roomID]
if typingEvent == nil {
return fmt.Errorf("no typing for room %s", roomID)
}
gotUserIDs := typingUsers(t, typingEvent)
if !reflect.DeepEqual(gotUserIDs, wantUserIDs) {
return fmt.Errorf("wrong typing users: got %v want %v", gotUserIDs, wantUserIDs)
}
return nil
})
}
func typingUsers(t *testing.T, ev json.RawMessage) []string {
userIDs := gjson.ParseBytes(ev).Get("content.user_ids").Array()
gotUserIDs := make([]string, len(userIDs))
for i := range userIDs {
gotUserIDs[i] = userIDs[i].Str
}
sort.Strings(gotUserIDs)
return gotUserIDs
}
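// For reference, the typing extension payload parsed above is an m.typing EDU per room, shaped
// roughly like this (user IDs illustrative only); typingUsers only reads content.user_ids:
//
//    {"type": "m.typing", "content": {"user_ids": ["@alice:localhost", "@bob:localhost"]}}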

View File

@ -11,6 +11,7 @@ import (
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/testutils"
"github.com/matrix-org/sync-v3/testutils/m"
"github.com/tidwall/gjson"
)
// Test that if you hit /sync and give up, we only start 1 connection.
@ -440,3 +441,122 @@ func TestTxnIDResponseBuffering(t *testing.T) {
m.MatchV3SyncOp(0, 10, []string{roomB}),
)))
}
// Regression test to make sure that if Alice does an initial sync followed by Bob, Bob's request is
// actually made and can be serviced before Alice's, even though she was first. Early proxy impls had
// this behaviour but it regressed when we converted to a pubsub model, as a single goroutine would handle
// EnsurePolling requests rather than the HTTP goroutine.
func TestEnsurePollingDoesntQueue(t *testing.T) {
pqString := testutils.PrepareDBConnectionString()
// setup code
v2 := runTestV2Server(t)
v2.timeToWaitForV2Response = 5 * time.Second
v3 := runTestServer(t, v2, pqString)
defer v2.close()
defer v3.close()
roomA := "!a:localhost"
roomB := "!b:localhost"
v2.addAccount(alice, aliceToken)
v2.addAccount(bob, bobToken)
v2.queueResponse(bob, sync2.SyncResponse{
Rooms: sync2.SyncRoomsResponse{
Join: v2JoinTimeline(roomEvents{
roomID: roomB,
state: createRoomState(t, bob, time.Now()),
events: []json.RawMessage{
testutils.NewStateEvent(t, "m.room.name", "", bob, map[string]interface{}{"name": "B"}),
},
}),
},
})
var mu sync.Mutex
aliceReturned := false
// wait until alice makes the v2 /sync request, then start bob's v3 request
go func() {
t.Logf("waiting for alice's v2 poller to start")
v2.waitUntilEmpty(t, alice) // alice's poller is making the v2 request
t.Logf("alice's v2 poller is waiting, doing bob's v3 request")
startTime := time.Now()
res := v3.mustDoV3Request(t, bobToken, sync3.Request{ // start bob's v3 request
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomB: {
TimelineLimit: 1,
},
},
})
t.Logf("bob's v3 response returned")
if time.Since(startTime) > 4*time.Second {
t.Errorf("took too long to process bob's v3 request, it probably stacked behind alice")
}
mu.Lock()
if aliceReturned {
t.Errorf("Alice's /sync request returned before Bob's, expected Bob's to return first")
}
mu.Unlock()
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomB: {
m.MatchJoinCount(1),
m.MatchRoomName("B"),
},
}))
// now send alice's response to unblock her
t.Logf("sending alice's v2 response")
v2.queueResponse(alice, sync2.SyncResponse{
Rooms: sync2.SyncRoomsResponse{
Join: v2JoinTimeline(roomEvents{
roomID: roomA,
state: createRoomState(t, alice, time.Now()),
events: []json.RawMessage{
testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{"name": "A"}),
},
}),
},
})
}()
t.Logf("starting alice's v3 request")
res := v3.mustDoV3Request(t, aliceToken, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomA: {
TimelineLimit: 1,
},
},
})
t.Logf("alice's v3 response returned")
mu.Lock()
aliceReturned = true
mu.Unlock()
m.MatchResponse(t, res, m.MatchRoomSubscriptionsStrict(map[string][]m.RoomMatcher{
roomA: {
m.MatchJoinCount(1),
m.MatchRoomName("A"),
},
}))
}
// Test to ensure that we send back a spec-compliant error message when the session is expired.
func TestSessionExpiry(t *testing.T) {
pqString := testutils.PrepareDBConnectionString()
v2 := runTestV2Server(t)
v2.addAccount(alice, aliceToken)
v3 := runTestServer(t, v2, pqString)
roomID := "!doesnt:matter"
res1 := v3.mustDoV3Request(t, aliceToken, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
roomID: {
TimelineLimit: 1,
},
},
})
req := sync3.Request{}
req.SetTimeoutMSecs(1)
res2 := v3.mustDoV3RequestWithPos(t, aliceToken, res1.Pos, req)
_ = v3.mustDoV3RequestWithPos(t, aliceToken, res2.Pos, req)
// now use an earlier ?pos= to expire the session
_, body, code := v3.doV3Request(t, context.Background(), aliceToken, res1.Pos, req)
if code != 400 {
t.Errorf("got HTTP %d want 400", code)
}
if gjson.ParseBytes(body).Get("errcode").Str != "M_UNKNOWN_POS" {
t.Errorf("got %v want errcode=M_UNKNOWN_POS", string(body))
}
}
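// For reference, the expired-session response asserted above is expected to carry a standard
// Matrix error body (the human-readable error string is illustrative):
//
//    HTTP 400 {"errcode": "M_UNKNOWN_POS", "error": "session expired"}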

View File

@ -0,0 +1,162 @@
package syncv3
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/matrix-org/sync-v3/sync2"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/testutils"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func runMetricsServer(t *testing.T) *httptest.Server {
return httptest.NewServer(promhttp.Handler())
}
func getMetrics(t *testing.T, srv *httptest.Server) []string {
t.Helper()
req, err := http.NewRequest("GET", srv.URL+"/metrics", nil)
if err != nil {
t.Fatalf("failed to make metrics request: %s", err)
}
res, err := srv.Client().Do(req)
if err != nil {
t.Fatalf("failed to perform metrics request: %s", err)
}
if res.StatusCode != 200 {
t.Fatalf("/metrics returned HTTP %d", res.StatusCode)
}
defer res.Body.Close()
blob, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("/metrics failed to read response body: %s", err)
}
return strings.Split(string(blob), "\n")
}
func assertMetric(t *testing.T, lines []string, key string, val string) {
t.Helper()
for _, line := range lines {
if !strings.HasPrefix(line, key+" ") {
continue
}
segments := strings.Split(line, " ")
if val != segments[1] {
t.Errorf("want '%v %v' got '%v'", key, val, line)
}
return
}
t.Errorf("did not find key '%v' in %d lines", key, len(lines))
}
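// assertMetric scans the Prometheus text exposition format, i.e. lines of "name value" pairs,
// for example (value illustrative):
//
//    sliding_sync_poller_num_pollers 2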
func TestMetricsNumPollers(t *testing.T) {
metricKey := "sliding_sync_poller_num_pollers"
pqString := testutils.PrepareDBConnectionString()
// setup code
v2 := runTestV2Server(t)
v3 := runTestServer(t, v2, pqString, true)
defer v2.close()
defer v3.close()
metricsServer := runMetricsServer(t)
defer metricsServer.Close()
metrics := getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "0")
// start a poller
v2.addAccount(alice, aliceToken)
v2.queueResponse(alice, sync2.SyncResponse{
Rooms: sync2.SyncRoomsResponse{
Join: v2JoinTimeline(roomEvents{
roomID: "!unimportant",
events: createRoomState(t, alice, time.Now()),
}),
},
})
v3.mustDoV3Request(t, aliceToken, sync3.Request{})
// verify increase
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "1")
// start another poller
v2.addAccount(bob, bobToken)
v2.queueResponse(bob, sync2.SyncResponse{
Rooms: sync2.SyncRoomsResponse{
Join: v2JoinTimeline(roomEvents{
roomID: "!unimportant",
events: createRoomState(t, bob, time.Now()),
}),
},
})
v3.mustDoV3Request(t, bobToken, sync3.Request{})
// verify increase
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "2")
// now invalidate a poller
v2.invalidateToken(aliceToken)
// verify decrease
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "1")
}
func TestMetricsNumConns(t *testing.T) {
metricKey := "sliding_sync_api_num_active_conns"
pqString := testutils.PrepareDBConnectionString()
// setup code
v2 := runTestV2Server(t)
v3 := runTestServer(t, v2, pqString, true)
defer v2.close()
defer v3.close()
metricsServer := runMetricsServer(t)
defer metricsServer.Close()
metrics := getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "0")
// start a poller
v2.addAccount(alice, aliceToken)
v2.queueResponse(alice, sync2.SyncResponse{
Rooms: sync2.SyncRoomsResponse{
Join: v2JoinTimeline(roomEvents{
roomID: "!unimportant",
events: createRoomState(t, alice, time.Now()),
}),
},
})
v3.mustDoV3Request(t, aliceToken, sync3.Request{})
// verify increase
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "1")
// start another poller
v2.addAccount(bob, bobToken)
v2.queueResponse(bob, sync2.SyncResponse{
Rooms: sync2.SyncRoomsResponse{
Join: v2JoinTimeline(roomEvents{
roomID: "!unimportant",
events: createRoomState(t, bob, time.Now()),
}),
},
})
res := v3.mustDoV3Request(t, bobToken, sync3.Request{})
// verify increase
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "2")
// now live poll -> no increase
res = v3.mustDoV3RequestWithPos(t, bobToken, res.Pos, sync3.Request{
RoomSubscriptions: map[string]sync3.RoomSubscription{
"!foo": { // doesn't matter, just so long as we return quickly
TimelineLimit: 1,
},
},
})
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "2")
// now replace conn -> no increase
v3.mustDoV3Request(t, aliceToken, sync3.Request{})
metrics = getMetrics(t, metricsServer)
assertMetric(t, metrics, metricKey, "2")
// TODO: now expire conn -> decrease
}

View File

@ -61,26 +61,13 @@ func TestNotificationsOnTop(t *testing.T) {
RoomSubscription: sync3.RoomSubscription{
TimelineLimit: int64(100),
},
// prefer highlight count first, THEN eventually recency
Sort: []string{sync3.SortByHighlightCount, sync3.SortByNotificationCount, sync3.SortByRecency},
// prefer highlights/notifs/rest, and group them by recency not counts
Sort: []string{sync3.SortByNotificationLevel, sync3.SortByRecency},
}},
}
res := v3.mustDoV3Request(t, aliceToken, syncRequestBody)
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Count(len(allRooms)), m.MatchV3Ops(
m.MatchV3SyncOpFn(func(op *sync3.ResponseOpRange) error {
if len(op.RoomIDs) != len(allRooms) {
return fmt.Errorf("want %d rooms, got %d", len(allRooms), len(op.RoomIDs))
}
for i := range allRooms {
err := allRooms[i].MatchRoom(op.RoomIDs[i],
res.Rooms[op.RoomIDs[i]],
)
if err != nil {
return err
}
}
return nil
}),
m.MatchV3SyncOp(0, int64(len(allRooms)-1), []string{noBingRoomID, bingRoomID}),
)))
// send a bing message into the bing room, make sure it comes through and is on top
@ -139,7 +126,7 @@ func TestNotificationsOnTop(t *testing.T) {
TimelineLimit: int64(100),
},
// prefer notification level first, THEN eventually recency
Sort: []string{sync3.SortByHighlightCount, sync3.SortByNotificationCount, sync3.SortByRecency},
Sort: []string{sync3.SortByNotificationLevel, sync3.SortByRecency},
}},
})
m.MatchResponse(t, res, m.MatchList(0, m.MatchV3Count(len(allRooms)), m.MatchV3Ops(

View File

@ -108,7 +108,7 @@ func TestSlowGetAllRoomsInitial(t *testing.T) {
ts := latestTimestamp
roomName := "My Room 111111"
newRoom := roomEvents{
roomID: fmt.Sprintf("!TestSlowGetAllRoomsInitial_%d:localhost", len(allRooms)),
roomID: fmt.Sprintf("!TestSlowGetAllRoomsInitial_%dNEW:localhost", len(allRooms)),
name: roomName,
events: append(createRoomState(t, alice, ts), []json.RawMessage{
testutils.NewStateEvent(t, "m.room.name", "", alice, map[string]interface{}{"name": roomName}, testutils.WithTimestamp(ts.Add(3*time.Second))),

View File

@ -25,7 +25,6 @@ func TestTimelines(t *testing.T) {
defer v2.close()
defer v3.close()
alice := "@TestTimelines_alice:localhost"
// make 20 rooms, last room is most recent, and send A,B,C into each room
allRooms := make([]roomEvents, 20)
for i := 0; i < len(allRooms); i++ {
@ -78,6 +77,7 @@ func TestTimelines(t *testing.T) {
},
},
}
v2.waitUntilEmpty(t, alice)
// add these live events to the global view of the timeline
allRooms[0].events = append(allRooms[0].events, liveEvents[0].events...)
allRooms[1].events = append(allRooms[1].events, liveEvents[1].events...)
@ -386,8 +386,10 @@ func TestInitialFlag(t *testing.T) {
}
// Regression test for in-the-wild bug:
// ERR missing events in database!
// ERR V2: failed to accumulate room error="failed to extract nids from inserted events, asked for 9 got 8"
//
// ERR missing events in database!
// ERR V2: failed to accumulate room error="failed to extract nids from inserted events, asked for 9 got 8"
//
// We should be able to gracefully handle duplicate events in the timeline.
func TestDuplicateEventsInTimeline(t *testing.T) {
pqString := testutils.PrepareDBConnectionString()

View File

@ -17,7 +17,9 @@ import (
"github.com/gorilla/mux"
"github.com/matrix-org/gomatrixserverlib"
syncv3 "github.com/matrix-org/sync-v3"
"github.com/matrix-org/sync-v3/sync2"
"github.com/matrix-org/sync-v3/sync2/handler2"
"github.com/matrix-org/sync-v3/sync3"
"github.com/matrix-org/sync-v3/sync3/handler"
"github.com/matrix-org/sync-v3/testutils"
@ -35,11 +37,13 @@ const (
)
type testV2Server struct {
mu *sync.Mutex
tokenToUser map[string]string
queues map[string]chan sync2.SyncResponse
waiting map[string]*sync.Cond // broadcasts when the server is about to read a blocking input
srv *httptest.Server
mu *sync.Mutex
tokenToUser map[string]string
queues map[string]chan sync2.SyncResponse
waiting map[string]*sync.Cond // broadcasts when the server is about to read a blocking input
srv *httptest.Server
invalidations map[string]func() // token -> callback
timeToWaitForV2Response time.Duration
}
func (s *testV2Server) addAccount(userID, token string) {
@ -52,6 +56,31 @@ func (s *testV2Server) addAccount(userID, token string) {
}
}
// remove the token and wait until the proxy sends a request with this token, then 401 it and return.
func (s *testV2Server) invalidateToken(token string) {
var wg sync.WaitGroup
wg.Add(1)
// add callback and delete the token
s.mu.Lock()
s.invalidations[token] = func() {
wg.Done()
}
delete(s.tokenToUser, token)
s.mu.Unlock()
// kick over the connection so the next request 401s and wait till we get said request
s.srv.CloseClientConnections()
wg.Wait()
// cleanup the callback
s.mu.Lock()
delete(s.invalidations, token)
s.mu.Unlock()
// need to wait for the HTTP 401 response to be processed :(
time.Sleep(100 * time.Millisecond)
}
func (s *testV2Server) userID(token string) string {
s.mu.Lock()
defer s.mu.Unlock()
@ -64,7 +93,7 @@ func (s *testV2Server) queueResponse(userID string, resp sync2.SyncResponse) {
s.mu.Unlock()
ch <- resp
if !testutils.Quiet {
log.Printf("testV2Server: enqueued v2 response for %s", userID)
log.Printf("testV2Server: enqueued v2 response for %s (%d join rooms)", userID, len(resp.Rooms.Join))
}
}
@ -102,16 +131,14 @@ func (s *testV2Server) nextResponse(userID string) *sync2.SyncResponse {
)
}
return &data
case <-time.After(1 * time.Second):
case <-time.After(s.timeToWaitForV2Response):
if !testutils.Quiet {
log.Printf("testV2Server: nextResponse %s waited >1s for data, returning null", userID)
log.Printf("testV2Server: nextResponse %s waited >%v for data, returning null", userID, s.timeToWaitForV2Response)
}
return nil
}
}
// TODO: queueDeviceResponse(token string)
func (s *testV2Server) url() string {
return s.srv.URL
}
@ -123,25 +150,41 @@ func (s *testV2Server) close() {
func runTestV2Server(t testutils.TestBenchInterface) *testV2Server {
t.Helper()
server := &testV2Server{
tokenToUser: make(map[string]string),
queues: make(map[string]chan sync2.SyncResponse),
waiting: make(map[string]*sync.Cond),
mu: &sync.Mutex{},
tokenToUser: make(map[string]string),
queues: make(map[string]chan sync2.SyncResponse),
waiting: make(map[string]*sync.Cond),
invalidations: make(map[string]func()),
mu: &sync.Mutex{},
timeToWaitForV2Response: time.Second,
}
r := mux.NewRouter()
r.HandleFunc("/_matrix/client/r0/account/whoami", func(w http.ResponseWriter, req *http.Request) {
userID := server.userID(strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer "))
token := strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer ")
userID := server.userID(token)
if userID == "" {
w.WriteHeader(403)
w.WriteHeader(401)
server.mu.Lock()
fn := server.invalidations[token]
if fn != nil {
fn()
}
server.mu.Unlock()
return
}
w.WriteHeader(200)
w.Write([]byte(fmt.Sprintf(`{"user_id":"%s"}`, userID)))
})
r.HandleFunc("/_matrix/client/r0/sync", func(w http.ResponseWriter, req *http.Request) {
userID := server.userID(strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer "))
token := strings.TrimPrefix(req.Header.Get("Authorization"), "Bearer ")
userID := server.userID(token)
if userID == "" {
w.WriteHeader(403)
w.WriteHeader(401)
server.mu.Lock()
fn := server.invalidations[token]
if fn != nil {
fn()
}
server.mu.Unlock()
return
}
resp := server.nextResponse(userID)
@ -162,11 +205,13 @@ func runTestV2Server(t testutils.TestBenchInterface) *testV2Server {
type testV3Server struct {
srv *httptest.Server
handler *handler.SyncLiveHandler
h2 *handler2.Handler
}
func (s *testV3Server) close() {
s.srv.Close()
s.handler.Teardown()
s.h2.Teardown()
}
func (s *testV3Server) restart(t *testing.T, v2 *testV2Server, pq string) {
@ -174,8 +219,12 @@ func (s *testV3Server) restart(t *testing.T, v2 *testV2Server, pq string) {
log.Printf("restarting server")
s.close()
ss := runTestServer(t, v2, pq)
// replace all the fields which will be close()d to ensure we don't leak
s.srv = ss.srv
v2.srv.CloseClientConnections() // kick-over v2 conns
s.h2 = ss.h2
s.handler = ss.handler
// kick over v2 conns
v2.srv.CloseClientConnections()
}
func (s *testV3Server) mustDoV3Request(t testutils.TestBenchInterface, token string, reqBody sync3.Request) (respBody *sync3.Response) {
@ -232,27 +281,32 @@ func (s *testV3Server) doV3Request(t testutils.TestBenchInterface, ctx context.C
return &r, respBytes, resp.StatusCode
}
func runTestServer(t testutils.TestBenchInterface, v2Server *testV2Server, postgresConnectionString string) *testV3Server {
func runTestServer(t testutils.TestBenchInterface, v2Server *testV2Server, postgresConnectionString string, enableProm ...bool) *testV3Server {
t.Helper()
if postgresConnectionString == "" {
postgresConnectionString = testutils.PrepareDBConnectionString()
}
h, err := handler.NewSync3Handler(&sync2.HTTPClient{
Client: &http.Client{
Timeout: 5 * time.Minute,
},
DestinationServer: v2Server.url(),
}, postgresConnectionString, os.Getenv("SYNCV3_SECRET"), true)
if err != nil {
t.Fatalf("cannot make v3 handler: %s", err)
metricsEnabled := false
if len(enableProm) > 0 && enableProm[0] {
metricsEnabled = true
}
h2, h3 := syncv3.Setup(v2Server.url(), postgresConnectionString, os.Getenv("SYNCV3_SECRET"), syncv3.Opts{
Debug: true,
TestingSynchronousPubsub: true, // critical to avoid flakey tests
AddPrometheusMetrics: metricsEnabled,
})
// for ease of use we don't start v2 pollers at startup in tests
r := mux.NewRouter()
r.Handle("/_matrix/client/v3/sync", h)
r.Handle("/_matrix/client/unstable/org.matrix.msc3575/sync", h)
r.Handle("/_matrix/client/v3/sync", h3)
r.Handle("/_matrix/client/unstable/org.matrix.msc3575/sync", h3)
srv := httptest.NewServer(r)
if !testutils.Quiet {
t.Logf("v2 @ %s", v2Server.url())
}
return &testV3Server{
srv: srv,
handler: h,
handler: h3,
h2: h2,
}
}

View File

@ -14,6 +14,7 @@ import (
type TestBenchInterface interface {
Fatalf(s string, args ...interface{})
Errorf(s string, args ...interface{})
Logf(s string, args ...interface{})
Helper()
Name() string
}

View File

@ -9,6 +9,7 @@ import (
"testing"
"github.com/matrix-org/sync-v3/sync3"
"github.com/tidwall/gjson"
)
type RespMatcher func(res *sync3.Response) error
@ -46,6 +47,15 @@ func MatchInviteCount(count int) RoomMatcher {
}
}
func MatchNumLive(numLive int) RoomMatcher {
return func(r sync3.Room) error {
if r.NumLive != numLive {
return fmt.Errorf("MatchNumLive: got %v want %v", r.NumLive, numLive)
}
return nil
}
}
func MatchRoomRequiredState(events []json.RawMessage) RoomMatcher {
return func(r sync3.Room) error {
if len(r.RequiredState) != len(events) {
@ -95,12 +105,12 @@ func MatchRoomTimelineMostRecent(n int, events []json.RawMessage) RoomMatcher {
subset := events[len(events)-n:]
return func(r sync3.Room) error {
if len(r.Timeline) < len(subset) {
return fmt.Errorf("timeline length mismatch: got %d want at least %d", len(r.Timeline), len(subset))
return fmt.Errorf("MatchRoomTimelineMostRecent: timeline length mismatch: got %d want at least %d", len(r.Timeline), len(subset))
}
gotSubset := r.Timeline[len(r.Timeline)-n:]
for i := range gotSubset {
if !bytes.Equal(gotSubset[i], subset[i]) {
return fmt.Errorf("timeline[%d]\ngot %v \nwant %v", i, string(r.Timeline[i]), string(events[i]))
return fmt.Errorf("timeline[%d]\ngot %v \nwant %v", i, string(gotSubset[i]), string(subset[i]))
}
}
return nil
@ -177,7 +187,7 @@ func MatchRoomSubscriptionsStrict(wantSubs map[string][]RoomMatcher) RespMatcher
}
for _, m := range matchers {
if err := m(room); err != nil {
return fmt.Errorf("MatchRoomSubscriptionsStrict: %s", err)
return fmt.Errorf("MatchRoomSubscriptionsStrict[%s]: %s", roomID, err)
}
}
}
@ -381,6 +391,85 @@ func MatchV3Ops(matchOps ...OpMatcher) ListMatcher {
}
}
func MatchTyping(roomID string, wantUserIDs []string) RespMatcher {
return func(res *sync3.Response) error {
if res.Extensions.Typing == nil {
return fmt.Errorf("MatchTyping: no typing extension")
}
if len(res.Extensions.Typing.Rooms) == 0 || res.Extensions.Typing.Rooms[roomID] == nil {
return fmt.Errorf("MatchTyping: missing room %s: got %+v", roomID, res.Extensions.Typing)
}
sort.Strings(wantUserIDs)
ev := res.Extensions.Typing.Rooms[roomID]
userIDs := gjson.ParseBytes(ev).Get("content.user_ids").Array()
gotUserIDs := make([]string, len(userIDs))
for i := range userIDs {
gotUserIDs[i] = userIDs[i].Str
}
sort.Strings(gotUserIDs)
if !reflect.DeepEqual(gotUserIDs, wantUserIDs) {
return fmt.Errorf("MatchTyping: mismatched typing users, got %v want %v", gotUserIDs, wantUserIDs)
}
return nil
}
}
type Receipt struct {
EventID string
UserID string
Type string
ThreadID string
}
func sortReceipts(receipts []Receipt) {
sort.Slice(receipts, func(i, j int) bool {
keyi := receipts[i].EventID + receipts[i].UserID + receipts[i].Type + receipts[i].ThreadID
keyj := receipts[j].EventID + receipts[j].UserID + receipts[j].Type + receipts[j].ThreadID
return keyi < keyj
})
}
func MatchReceipts(roomID string, wantReceipts []Receipt) RespMatcher {
return func(res *sync3.Response) error {
if res.Extensions.Receipts == nil {
return fmt.Errorf("MatchReceipts: no receipts extension")
}
if len(res.Extensions.Receipts.Rooms) == 0 || res.Extensions.Receipts.Rooms[roomID] == nil {
if len(wantReceipts) == 0 {
return nil // want nothing
}
return fmt.Errorf("MatchReceipts: missing room %s: got %+v", roomID, res.Extensions.Receipts)
}
var gotReceipts []Receipt
ev := res.Extensions.Receipts.Rooms[roomID]
gjson.ParseBytes(ev).Get("content").ForEach(func(key, value gjson.Result) bool {
eventID := key.Str
value.ForEach(func(key, value gjson.Result) bool {
receiptType := key.Str
value.ForEach(func(key, value gjson.Result) bool {
userID := key.Str
threadID := value.Get("thread_id").Str
gotReceipts = append(gotReceipts, Receipt{
EventID: eventID,
UserID: userID,
Type: receiptType,
ThreadID: threadID,
})
return true
})
return true
})
return true
})
sortReceipts(gotReceipts)
sortReceipts(wantReceipts)
if !reflect.DeepEqual(gotReceipts, wantReceipts) {
return fmt.Errorf("MatchReceipts: wrong receipts, got %v want %v", gotReceipts, wantReceipts)
}
return nil
}
}
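// For reference, MatchReceipts expects the receipts extension to return an m.receipt-style EDU
// per room, i.e. content maps event ID -> receipt type -> user ID -> data (values illustrative;
// thread_id is only present for threaded receipts):
//
//    {"content": {"$event:id": {"m.read": {"@alice:localhost": {"ts": 1670000000000, "thread_id": "main"}}}}}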
func MatchAccountData(globals []json.RawMessage, rooms map[string][]json.RawMessage) RespMatcher {
return func(res *sync3.Response) error {
if res.Extensions.AccountData == nil {

54
v3.go
View File

@ -10,6 +10,11 @@ import (
"github.com/gorilla/mux"
"github.com/matrix-org/sync-v3/internal"
"github.com/matrix-org/sync-v3/pubsub"
"github.com/matrix-org/sync-v3/state"
"github.com/matrix-org/sync-v3/sync2"
"github.com/matrix-org/sync-v3/sync2/handler2"
"github.com/matrix-org/sync-v3/sync3/handler"
"github.com/rs/zerolog"
"github.com/rs/zerolog/hlog"
)
@ -20,6 +25,14 @@ var logger = zerolog.New(os.Stdout).With().Timestamp().Logger().Output(zerolog.C
})
var Version string
type Opts struct {
Debug bool
AddPrometheusMetrics bool
// if true, publishing a message will block until the consumer has consumed it.
// Assumes a single producer and a single consumer.
TestingSynchronousPubsub bool
}
type server struct {
chain []func(next http.Handler) http.Handler
final http.Handler
@ -46,6 +59,47 @@ func allowCORS(next http.Handler) http.HandlerFunc {
}
}
// Setup creates the v2 (poller) and v3 (sliding sync) handlers, wires them together via pubsub, and starts them listening.
func Setup(destHomeserver, postgresURI, secret string, opts Opts) (*handler2.Handler, *handler.SyncLiveHandler) {
// Setup shared DB and HTTP client
v2Client := &sync2.HTTPClient{
Client: &http.Client{
Timeout: 5 * time.Minute,
},
DestinationServer: destHomeserver,
}
store := state.NewStorage(postgresURI)
storev2 := sync2.NewStore(postgresURI, secret)
bufferSize := 50
if opts.TestingSynchronousPubsub {
bufferSize = 0
}
pubSub := pubsub.NewPubSub(bufferSize)
// create v2 handler
h2, err := handler2.NewHandler(postgresURI, sync2.NewPollerMap(v2Client, opts.AddPrometheusMetrics), storev2, store, v2Client, pubSub, pubSub, opts.AddPrometheusMetrics)
if err != nil {
panic(err)
}
// create v3 handler
h3, err := handler.NewSync3Handler(store, storev2, v2Client, postgresURI, secret, opts.Debug, pubSub, pubSub, opts.AddPrometheusMetrics)
if err != nil {
panic(err)
}
storeSnapshot, err := store.GlobalSnapshot()
if err != nil {
panic(err)
}
logger.Info().Msg("retrieved global snapshot from database")
h3.Startup(&storeSnapshot)
// begin consuming from these positions
h2.Listen()
h3.Listen()
return h2, h3
}
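// A minimal sketch of wiring Setup into a real process, assuming environment variables for
// configuration (the env var names other than SYNCV3_SECRET, the bind address, and the omitted
// poller start-up are illustrative assumptions, not the proxy's actual main()):
//
//    h2, h3 := syncv3.Setup(os.Getenv("SYNCV3_SERVER"), os.Getenv("SYNCV3_DB"), os.Getenv("SYNCV3_SECRET"), syncv3.Opts{
//        AddPrometheusMetrics: os.Getenv("SYNCV3_PROM") != "",
//    })
//    _ = h2 // v2 poller handler: pollers would be started here (the tests above deliberately skip this step)
//    syncv3.RunSyncV3Server(h3, ":8008", os.Getenv("SYNCV3_SERVER"))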
// RunSyncV3Server is the main entry point to the server
func RunSyncV3Server(h http.Handler, bindAddr, destV2Server string) {
// HTTP path routing