@@ -787,7 +787,20 @@ async def current_sync_for_user(
         # subscription and have updates we need to send (i.e. either because
         # we haven't sent the room down, or we have but there are missing
         # updates).
-        for room_id in relevant_room_map:
+        for room_id, room_config in relevant_room_map.items():
+            prev_room_sync_config = previous_connection_state.room_configs.get(
+                room_id
+            )
+            if prev_room_sync_config is not None:
+                # Always include rooms whose timeline limit has increased.
+                # (see the "XXX: Odd behavior" described below)
+                if (
+                    prev_room_sync_config.timeline_limit
+                    < room_config.timeline_limit
+                ):
+                    rooms_should_send.add(room_id)
+                    continue
+
             status = previous_connection_state.rooms.have_sent_room(room_id)
             if (
                 # The room was never sent down before so the client needs to know
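The new pre-filter is easy to reason about in isolation. Below is a minimal, runnable sketch of the same check, where `_RoomConfig` is an illustrative stand-in for Synapse's `RoomSyncConfig` (the real class carries more fields, e.g. `required_state_map`):

```python
from dataclasses import dataclass
from typing import Dict, Set


@dataclass(frozen=True)
class _RoomConfig:
    """Illustrative stand-in for RoomSyncConfig; only the field used here."""

    timeline_limit: int


def rooms_with_expanded_timeline(
    requested: Dict[str, _RoomConfig],
    previous: Dict[str, _RoomConfig],
) -> Set[str]:
    """Return rooms whose timeline_limit grew since the previous request."""
    expanded = set()
    for room_id, config in requested.items():
        prev = previous.get(room_id)
        if prev is not None and prev.timeline_limit < config.timeline_limit:
            expanded.add(room_id)
    return expanded


# A room whose limit was bumped from 1 to 20 is always re-sent, even with no
# new events; a room with an unchanged limit is not forced down.
assert rooms_with_expanded_timeline(
    {"!a:x": _RoomConfig(20), "!b:x": _RoomConfig(5)},
    {"!a:x": _RoomConfig(1), "!b:x": _RoomConfig(5)},
) == {"!a:x"}
```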
@@ -819,12 +832,15 @@ async def current_sync_for_user(
             if room_id in rooms_should_send
         }

+        new_connection_state = previous_connection_state.get_mutable()
+
         @trace
         @tag_args
         async def handle_room(room_id: str) -> None:
             room_sync_result = await self.get_room_sync_data(
                 sync_config=sync_config,
                 previous_connection_state=previous_connection_state,
+                new_connection_state=new_connection_state,
                 room_id=room_id,
                 room_sync_config=relevant_rooms_to_send_map[room_id],
                 room_membership_for_user_at_to_token=room_membership_for_user_map[
@@ -842,8 +858,6 @@ async def handle_room(room_id: str) -> None:
         with start_active_span("sliding_sync.generate_room_entries"):
             await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10)

-        new_connection_state = previous_connection_state.get_mutable()
-
         extensions = await self.get_extensions_response(
             sync_config=sync_config,
             actual_lists=lists,
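Moving `get_mutable()` above `handle_room` matters because the per-room tasks now write into `new_connection_state` while running concurrently, so it must exist before they start. A rough sketch of the pattern, with a semaphore-based stand-in for Synapse's `concurrently_execute` (the helper here is illustrative, not the real implementation):

```python
import asyncio
from typing import Awaitable, Callable, Dict, Iterable


async def concurrently_execute_sketch(
    func: Callable[[str], Awaitable[None]], args: Iterable[str], limit: int
) -> None:
    """Rough stand-in for Synapse's concurrently_execute: run `func` over
    `args` with at most `limit` calls in flight."""
    sem = asyncio.Semaphore(limit)

    async def _wrapped(arg: str) -> None:
        async with sem:
            await func(arg)

    await asyncio.gather(*(_wrapped(a) for a in args))


async def main() -> None:
    # The shared mutable state must exist *before* the workers start, since
    # each worker records its room's config into it (hence the move above).
    new_state: Dict[str, str] = {}

    async def handle_room(room_id: str) -> None:
        new_state[room_id] = "config"  # placeholder for a RoomSyncConfig

    await concurrently_execute_sketch(handle_room, ["!a:x", "!b:x"], limit=10)
    assert set(new_state) == {"!a:x", "!b:x"}


asyncio.run(main())
```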
@@ -1955,6 +1969,7 @@ async def get_room_sync_data(
        self,
        sync_config: SlidingSyncConfig,
        previous_connection_state: "PerConnectionState",
+        new_connection_state: "MutablePerConnectionState",
        room_id: str,
        room_sync_config: RoomSyncConfig,
        room_membership_for_user_at_to_token: _RoomMembershipForUser,
@@ -1998,9 +2013,27 @@ async def get_room_sync_data(
         # - For an incremental sync where we haven't sent it down this
         #   connection before
         #
-        # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917
+        # Relevant spec issue:
+        # https://github.com/matrix-org/matrix-spec/issues/1917
+        #
+        # XXX: Odd behavior - We also check if the `timeline_limit` has increased; if
+        # so, we ignore the from bound for the timeline to send down a larger chunk of
+        # history and set `unstable_expanded_timeline` to true. This is only being added
+        # to match the behavior of the Sliding Sync proxy, as we expect the ElementX
+        # client to feel a certain way and be able to trickle in a full page of timeline
+        # messages to fill up the screen. This is a bit different to the behavior of the
+        # Sliding Sync proxy (which sets initial=true, but then doesn't send down the
+        # full state again), but existing apps, e.g. ElementX, just need `limited` set.
+        # We don't explicitly set `limited` but this will be the case for any room that
+        # has more history than we're trying to pull out. Using
+        # `unstable_expanded_timeline` allows us to avoid contaminating what `initial`
+        # or `limited` mean for clients that interpret them correctly. In future this
+        # behavior is almost certainly going to change.
+        #
+        # TODO: Also handle changes to `required_state`
         from_bound = None
         initial = True
+        ignore_timeline_bound = False
         if from_token and not room_membership_for_user_at_to_token.newly_joined:
             room_status = previous_connection_state.rooms.have_sent_room(room_id)
             if room_status.status == HaveSentRoomFlag.LIVE:
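The branch the diff cuts off here is where `from_bound` and `initial` get resolved from the room's send status. A paraphrased sketch of that three-state decision (the flag names come from the code above; the helper, its arguments, and its exact return values are illustrative, and `newly_joined` rooms also take the initial path):

```python
import enum
from typing import Optional, Tuple


class HaveSentRoomFlag(enum.Enum):
    NEVER = enum.auto()       # never sent down this connection
    PREVIOUSLY = enum.auto()  # sent down before, then stopped being sent
    LIVE = enum.auto()        # sent down and fully up to date


def choose_bounds(
    flag: HaveSentRoomFlag,
    from_token: Optional[str],
    last_sent_token: Optional[str],
) -> Tuple[Optional[str], bool]:
    """Illustrative (from_bound, initial) resolution; not the exact code."""
    if from_token is None or flag is HaveSentRoomFlag.NEVER:
        # Initial sync for this room: no lower bound, send the full payload.
        return None, True
    if flag is HaveSentRoomFlag.PREVIOUSLY:
        # Resume from wherever we left off for this room.
        return last_sent_token, False
    # LIVE: a normal incremental sync from the client's token.
    return from_token, False
```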
@@ -2018,7 +2051,26 @@ async def get_room_sync_data(

         log_kv({"sliding_sync.room_status": room_status})

-        log_kv({"sliding_sync.from_bound": from_bound, "sliding_sync.initial": initial})
+        prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
+        if prev_room_sync_config is not None:
+            # Check if the timeline limit has increased; if so, ignore the
+            # timeline bound and record the change (see "XXX: Odd behavior"
+            # above).
+            if (
+                prev_room_sync_config.timeline_limit
+                < room_sync_config.timeline_limit
+            ):
+                ignore_timeline_bound = True
+
+            # TODO: Check for changes in `required_state`
+
+        log_kv(
+            {
+                "sliding_sync.from_bound": from_bound,
+                "sliding_sync.initial": initial,
+                "sliding_sync.ignore_timeline_bound": ignore_timeline_bound,
+            }
+        )

         # Assemble the list of timeline events
         #
@@ -2055,6 +2107,10 @@ async def get_room_sync_data(
             room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
         )

+        timeline_from_bound = from_bound
+        if ignore_timeline_bound:
+            timeline_from_bound = None
+
         # For initial `/sync` (and other historical scenarios mentioned above), we
         # want to view a historical section of the timeline; to fetch events by
         # `topological_ordering` (best representation of the room DAG as others were
@@ -2080,7 +2136,7 @@ async def get_room_sync_data(
         pagination_method: PaginateFunction = (
             # Use `topological_ordering` for historical events
             paginate_room_events_by_topological_ordering
-            if from_bound is None
+            if timeline_from_bound is None
             # Use `stream_ordering` for updates
             else paginate_room_events_by_stream_ordering
         )
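Both pagination functions share the `PaginateFunction` signature, which is what lets the code pick one with a conditional expression and call the result uniformly. A sketch with stub implementations (the real functions live in Synapse's storage layer and take richer arguments):

```python
from typing import Callable, List, Optional

# Shared signature: (from_key, to_key, limit) -> events. Stubs only.
PaginateFunction = Callable[[Optional[str], Optional[str], int], List[str]]


def by_topological_ordering(
    from_key: Optional[str], to_key: Optional[str], limit: int
) -> List[str]:
    return []  # stub: DAG order, best for historical backfill


def by_stream_ordering(
    from_key: Optional[str], to_key: Optional[str], limit: int
) -> List[str]:
    return []  # stub: arrival order, best for incremental updates


def pick_pagination(timeline_from_bound: Optional[str]) -> PaginateFunction:
    # No lower bound -> fetching history -> topological ordering.
    # Lower bound present -> catching up on live updates -> stream ordering.
    if timeline_from_bound is None:
        return by_topological_ordering
    return by_stream_ordering
```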
@@ -2090,7 +2146,7 @@ async def get_room_sync_data(
             # (from newer to older events) starting at to_bound.
             # This ensures we fill the `limit` with the newest events first,
             from_key=to_bound,
-            to_key=from_bound,
+            to_key=timeline_from_bound,
             direction=Direction.BACKWARDS,
             # We add one so we can determine if there are enough events to saturate
             # the limit or not (see `limited`)
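The `limit + 1` comment describes a common trick worth spelling out: fetch one extra event so `limited` can be computed without a second query. A self-contained sketch, where the list stands in for the storage call (newest first):

```python
from typing import List, Tuple


def fetch_timeline_page(events: List[str], limit: int) -> Tuple[List[str], bool]:
    """Ask for `limit + 1` events so `limited` needs no second query."""
    fetched = events[: limit + 1]  # stands in for the storage call
    limited = len(fetched) > limit  # an extra event means there is more history
    return fetched[:limit], limited


page, limited = fetch_timeline_page(["e5", "e4", "e3", "e2", "e1"], limit=3)
assert page == ["e5", "e4", "e3"] and limited is True

page, limited = fetch_timeline_page(["e2", "e1"], limit=3)
assert page == ["e2", "e1"] and limited is False
```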
@@ -2448,6 +2504,55 @@ async def get_room_sync_data(
            if new_bump_event_pos.stream > 0:
                bump_stamp = new_bump_event_pos.stream

+        unstable_expanded_timeline = False
+        prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
+        # Record the `room_sync_config` if we're ignoring the timeline bound (which
+        # means that the `timeline_limit` has increased).
+        if ignore_timeline_bound:
+            # FIXME: We signal the fact that we're sending down more events to
+            # the client by setting `unstable_expanded_timeline` to true (see
+            # "XXX: Odd behavior" above).
+            unstable_expanded_timeline = True
+
+            new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                timeline_limit=room_sync_config.timeline_limit,
+                required_state_map=room_sync_config.required_state_map,
+            )
+        elif prev_room_sync_config is not None:
+            # If the result is `limited` then we need to record that the
+            # `timeline_limit` has been reduced, as when/if the client later requests
+            # more timeline then we have more data to send.
+            #
+            # Otherwise (when not `limited`) we don't need to record that the
+            # `timeline_limit` has been reduced, as the *effective* `timeline_limit`
+            # (i.e. the amount of timeline we have previously sent to the client) is at
+            # least the previous `timeline_limit`.
+            #
+            # This is to handle the case where the `timeline_limit` e.g. goes from 10 to
+            # 5 to 10 again (without any timeline gaps), where there's no point sending
+            # down the initial historical chunk of events when the `timeline_limit` is
+            # increased as the client already has the 10 previous events. However, if
+            # the client has a gap in the timeline (i.e. `limited` is True), then we *do*
+            # need to record the reduced timeline.
+            #
+            # TODO: Handle timeline gaps (`get_timeline_gaps()`) - this is separate from
+            # the gaps we might see on the client because a response was `limited`, which
+            # is what we're talking about above.
+            if (
+                limited
+                and prev_room_sync_config.timeline_limit
+                > room_sync_config.timeline_limit
+            ):
+                new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                    timeline_limit=room_sync_config.timeline_limit,
+                    required_state_map=room_sync_config.required_state_map,
+                )
+
+            # TODO: Record changes in required_state.
+
+        else:
+            new_connection_state.room_configs[room_id] = room_sync_config
+
        set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)

        return SlidingSyncResult.RoomResult(
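The recording rules above reduce to a small decision table. A runnable sketch that walks the 10 to 5 to 10 scenario from the comment (`_Config` is an illustrative stand-in for `RoomSyncConfig`, and `required_state` handling is omitted):

```python
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass(frozen=True)
class _Config:
    timeline_limit: int  # illustrative stand-in for RoomSyncConfig


def record_config(
    store: Dict[str, _Config],
    room_id: str,
    prev: Optional[_Config],
    requested: _Config,
    limited: bool,
) -> None:
    """Sketch of the recording rules above."""
    if prev is None:
        store[room_id] = requested  # first send: record as-is
    elif prev.timeline_limit < requested.timeline_limit:
        store[room_id] = requested  # limit grew: expanded timeline was sent
    elif limited and prev.timeline_limit > requested.timeline_limit:
        store[room_id] = requested  # limit shrank *and* we gapped: record it
    # otherwise keep the old config; the effective limit is still the larger one


# timeline_limit goes 10 -> 5 -> 10 with no gap: the drop to 5 is never
# recorded, so the bump back to 10 is not an "increase" and triggers no
# expanded re-send; the client already has those 10 events.
store: Dict[str, _Config] = {}
record_config(store, "!a:x", None, _Config(10), limited=False)
record_config(store, "!a:x", store["!a:x"], _Config(5), limited=False)
assert store["!a:x"].timeline_limit == 10
```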
@@ -2462,6 +2567,7 @@ async def get_room_sync_data(
            stripped_state=stripped_state,
            prev_batch=prev_batch_token,
            limited=limited,
+            unstable_expanded_timeline=unstable_expanded_timeline,
            num_live=num_live,
            bump_stamp=bump_stamp,
            joined_count=room_membership_summary.get(
@@ -3264,16 +3370,30 @@ class PerConnectionState:
    Attributes:
        rooms: The status of each room for the events stream.
        receipts: The status of each room for the receipts stream.
+        room_configs: Map from room_id to the `RoomSyncConfig` of all
+            rooms that we have previously sent down.
    """

    rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap)
    receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap)

+    room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict)
+
    def get_mutable(self) -> "MutablePerConnectionState":
        """Get a mutable copy of this state."""
+        room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs)
+
        return MutablePerConnectionState(
            rooms=self.rooms.get_mutable(),
            receipts=self.receipts.get_mutable(),
+            room_configs=ChainMap({}, room_configs),
+        )
+
+    def copy(self) -> "PerConnectionState":
+        return PerConnectionState(
+            rooms=self.rooms.copy(),
+            receipts=self.receipts.copy(),
+            room_configs=dict(self.room_configs),
        )

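The `ChainMap({}, room_configs)` construction is the heart of `get_mutable()`: reads fall through to the immutable snapshot while writes land only in the fresh front map. A quick stdlib demonstration of that behavior:

```python
from collections import ChainMap

frozen = {"!a:x": "old-config"}
mutable: "ChainMap[str, str]" = ChainMap({}, frozen)

assert mutable["!a:x"] == "old-config"   # reads fall through to the snapshot
mutable["!b:x"] = "new-config"           # writes land only in the front map
assert frozen == {"!a:x": "old-config"}  # the snapshot is never mutated
assert mutable.maps[0] == {"!b:x": "new-config"}  # exactly this sync's delta
```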
@@ -3284,8 +3404,18 @@ class MutablePerConnectionState(PerConnectionState):
    rooms: MutableRoomStatusMap[RoomStreamToken]
    receipts: MutableRoomStatusMap[MultiWriterStreamToken]

+    room_configs: typing.ChainMap[str, RoomSyncConfig]
+
    def has_updates(self) -> bool:
-        return bool(self.rooms.get_updates()) or bool(self.receipts.get_updates())
+        return (
+            bool(self.rooms.get_updates())
+            or bool(self.receipts.get_updates())
+            or bool(self.get_room_config_updates())
+        )
+
+    def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]:
+        """Get updates to the room sync config."""
+        return self.room_configs.maps[0]


@attr.s(auto_attribs=True)
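One subtlety of treating `maps[0]` as the update set: re-assigning an unchanged value still registers as an update, since `ChainMap` does not compare writes against the lower layer. The handler code above sidesteps this by only assigning `room_configs[room_id]` when the room is new or its config actually changed (limit grew, or shrank with a gap):

```python
from collections import ChainMap

state: "ChainMap[str, int]" = ChainMap({}, {"!a:x": 5})
state["!a:x"] = 5                    # same value as the snapshot...
assert state.maps[0] == {"!a:x": 5}  # ...but it still counts as an "update",
                                     # so has_updates() would report True
```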
@@ -3369,7 +3499,6 @@ async def record_new_state(
    ) -> int:
        """Record updated per-connection state, returning the connection
        position associated with the new state.
-
        If there are no changes to the state this may return the same token as
        the existing per-connection state.
        """
@@ -3390,10 +3519,7 @@

        # We copy the `MutablePerConnectionState` so that the inner `ChainMap`s
        # don't grow forever.
-        sync_statuses[new_store_token] = PerConnectionState(
-            rooms=new_connection_state.rooms.copy(),
-            receipts=new_connection_state.receipts.copy(),
-        )
+        sync_statuses[new_store_token] = new_connection_state.copy()

        return new_store_token
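`new_connection_state.copy()` is what keeps the persisted state flat: for `room_configs`, `dict(chain_map)` folds the overlay and the snapshot into a single plain dict, so layers do not accumulate across syncs (the "don't grow forever" comment above):

```python
from collections import ChainMap

overlay: "ChainMap[str, int]" = ChainMap({}, {"!a:x": 1})
overlay["!b:x"] = 2

flat = dict(overlay)  # what PerConnectionState.copy() does for room_configs
assert flat == {"!a:x": 1, "!b:x": 2}
assert not isinstance(flat, ChainMap)  # plain dict: no layers left to stack
```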