Skip to content

Commit 6eb98a4

Browse files
Sliding Sync: Handle timeline limit changes (take 2) (#17579)
This supersedes #17503, given the per-connection state is being heavily rewritten it felt easier to recreate the PR on top of that work. This correctly handles the case of timeline limits going up and down. This does not handle changes in `required_state`, but that can be done as a separate PR. Based on #17575. --------- Co-authored-by: Eric Eastwood <[email protected]>
1 parent 950ba84 commit 6eb98a4

File tree

5 files changed

+285
-13
lines changed

5 files changed

+285
-13
lines changed

changelog.d/17579.misc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Handle changes in `timeline_limit` in experimental sliding sync.

synapse/handlers/sliding_sync.py

Lines changed: 139 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -787,7 +787,20 @@ async def current_sync_for_user(
787787
# subscription and have updates we need to send (i.e. either because
788788
# we haven't sent the room down, or we have but there are missing
789789
# updates).
790-
for room_id in relevant_room_map:
790+
for room_id, room_config in relevant_room_map.items():
791+
prev_room_sync_config = previous_connection_state.room_configs.get(
792+
room_id
793+
)
794+
if prev_room_sync_config is not None:
795+
# Always include rooms whose timeline limit has increased.
796+
# (see the "XXX: Odd behavior" described below)
797+
if (
798+
prev_room_sync_config.timeline_limit
799+
< room_config.timeline_limit
800+
):
801+
rooms_should_send.add(room_id)
802+
continue
803+
791804
status = previous_connection_state.rooms.have_sent_room(room_id)
792805
if (
793806
# The room was never sent down before so the client needs to know
@@ -819,12 +832,15 @@ async def current_sync_for_user(
819832
if room_id in rooms_should_send
820833
}
821834

835+
new_connection_state = previous_connection_state.get_mutable()
836+
822837
@trace
823838
@tag_args
824839
async def handle_room(room_id: str) -> None:
825840
room_sync_result = await self.get_room_sync_data(
826841
sync_config=sync_config,
827842
previous_connection_state=previous_connection_state,
843+
new_connection_state=new_connection_state,
828844
room_id=room_id,
829845
room_sync_config=relevant_rooms_to_send_map[room_id],
830846
room_membership_for_user_at_to_token=room_membership_for_user_map[
@@ -842,8 +858,6 @@ async def handle_room(room_id: str) -> None:
842858
with start_active_span("sliding_sync.generate_room_entries"):
843859
await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10)
844860

845-
new_connection_state = previous_connection_state.get_mutable()
846-
847861
extensions = await self.get_extensions_response(
848862
sync_config=sync_config,
849863
actual_lists=lists,
@@ -1955,6 +1969,7 @@ async def get_room_sync_data(
19551969
self,
19561970
sync_config: SlidingSyncConfig,
19571971
previous_connection_state: "PerConnectionState",
1972+
new_connection_state: "MutablePerConnectionState",
19581973
room_id: str,
19591974
room_sync_config: RoomSyncConfig,
19601975
room_membership_for_user_at_to_token: _RoomMembershipForUser,
@@ -1998,9 +2013,27 @@ async def get_room_sync_data(
19982013
# - For an incremental sync where we haven't sent it down this
19992014
# connection before
20002015
#
2001-
# Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917
2016+
# Relevant spec issue:
2017+
# https://github.com/matrix-org/matrix-spec/issues/1917
2018+
#
2019+
# XXX: Odd behavior - We also check if the `timeline_limit` has increased, if so
2020+
# we ignore the from bound for the timeline to send down a larger chunk of
2021+
# history and set `unstable_expanded_timeline` to true. This is only being added
2022+
# to match the behavior of the Sliding Sync proxy as we expect the ElementX
2023+
# client to feel a certain way and be able to trickle in a full page of timeline
2024+
# messages to fill up the screen. This is a bit different to the behavior of the
2025+
# Sliding Sync proxy (which sets initial=true, but then doesn't send down the
2026+
# full state again), but existing apps, e.g. ElementX, just need `limited` set.
2027+
# We don't explicitly set `limited` but this will be the case for any room that
2028+
# has more history than we're trying to pull out. Using
2029+
# `unstable_expanded_timeline` allows us to avoid contaminating what `initial`
2030+
# or `limited` mean for clients that interpret them correctly. In future this
2031+
# behavior is almost certainly going to change.
2032+
#
2033+
# TODO: Also handle changes to `required_state`
20022034
from_bound = None
20032035
initial = True
2036+
ignore_timeline_bound = False
20042037
if from_token and not room_membership_for_user_at_to_token.newly_joined:
20052038
room_status = previous_connection_state.rooms.have_sent_room(room_id)
20062039
if room_status.status == HaveSentRoomFlag.LIVE:
@@ -2018,7 +2051,26 @@ async def get_room_sync_data(
20182051

20192052
log_kv({"sliding_sync.room_status": room_status})
20202053

2021-
log_kv({"sliding_sync.from_bound": from_bound, "sliding_sync.initial": initial})
2054+
prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
2055+
if prev_room_sync_config is not None:
2056+
# Check if the timeline limit has increased, if so ignore the
2057+
# timeline bound and record the change (see "XXX: Odd behavior"
2058+
# above).
2059+
if (
2060+
prev_room_sync_config.timeline_limit
2061+
< room_sync_config.timeline_limit
2062+
):
2063+
ignore_timeline_bound = True
2064+
2065+
# TODO: Check for changes in `required_state`
2066+
2067+
log_kv(
2068+
{
2069+
"sliding_sync.from_bound": from_bound,
2070+
"sliding_sync.initial": initial,
2071+
"sliding_sync.ignore_timeline_bound": ignore_timeline_bound,
2072+
}
2073+
)
20222074

20232075
# Assemble the list of timeline events
20242076
#
@@ -2055,6 +2107,10 @@ async def get_room_sync_data(
20552107
room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
20562108
)
20572109

2110+
timeline_from_bound = from_bound
2111+
if ignore_timeline_bound:
2112+
timeline_from_bound = None
2113+
20582114
# For initial `/sync` (and other historical scenarios mentioned above), we
20592115
# want to view a historical section of the timeline; to fetch events by
20602116
# `topological_ordering` (best representation of the room DAG as others were
@@ -2080,7 +2136,7 @@ async def get_room_sync_data(
20802136
pagination_method: PaginateFunction = (
20812137
# Use `topographical_ordering` for historical events
20822138
paginate_room_events_by_topological_ordering
2083-
if from_bound is None
2139+
if timeline_from_bound is None
20842140
# Use `stream_ordering` for updates
20852141
else paginate_room_events_by_stream_ordering
20862142
)
@@ -2090,7 +2146,7 @@ async def get_room_sync_data(
20902146
# (from newer to older events) starting at to_bound.
20912147
# This ensures we fill the `limit` with the newest events first,
20922148
from_key=to_bound,
2093-
to_key=from_bound,
2149+
to_key=timeline_from_bound,
20942150
direction=Direction.BACKWARDS,
20952151
# We add one so we can determine if there are enough events to saturate
20962152
# the limit or not (see `limited`)
@@ -2448,6 +2504,55 @@ async def get_room_sync_data(
24482504
if new_bump_event_pos.stream > 0:
24492505
bump_stamp = new_bump_event_pos.stream
24502506

2507+
unstable_expanded_timeline = False
2508+
prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
2509+
# Record the `room_sync_config` if we're `ignore_timeline_bound` (which means
2510+
# that the `timeline_limit` has increased)
2511+
if ignore_timeline_bound:
2512+
# FIXME: We signal the fact that we're sending down more events to
2513+
# the client by setting `unstable_expanded_timeline` to true (see
2514+
# "XXX: Odd behavior" above).
2515+
unstable_expanded_timeline = True
2516+
2517+
new_connection_state.room_configs[room_id] = RoomSyncConfig(
2518+
timeline_limit=room_sync_config.timeline_limit,
2519+
required_state_map=room_sync_config.required_state_map,
2520+
)
2521+
elif prev_room_sync_config is not None:
2522+
# If the result is `limited` then we need to record that the
2523+
# `timeline_limit` has been reduced, as when/if the client later requests
2524+
# more timeline then we have more data to send.
2525+
#
2526+
# Otherwise (when not `limited`) we don't need to record that the
2527+
# `timeline_limit` has been reduced, as the *effective* `timeline_limit`
2528+
# (i.e. the amount of timeline we have previously sent to the client) is at
2529+
# least the previous `timeline_limit`.
2530+
#
2531+
# This is to handle the case where the `timeline_limit` e.g. goes from 10 to
2532+
# 5 to 10 again (without any timeline gaps), where there's no point sending
2533+
# down the initial historical chunk events when the `timeline_limit` is
2534+
# increased as the client already has the 10 previous events. However, if
2535+
# client has a gap in the timeline (i.e. `limited` is True), then we *do*
2536+
# need to record the reduced timeline.
2537+
#
2538+
# TODO: Handle timeline gaps (`get_timeline_gaps()`) - This is separate from
2539+
# the gaps we might see on the client because a response was `limited` we're
2540+
# talking about above.
2541+
if (
2542+
limited
2543+
and prev_room_sync_config.timeline_limit
2544+
> room_sync_config.timeline_limit
2545+
):
2546+
new_connection_state.room_configs[room_id] = RoomSyncConfig(
2547+
timeline_limit=room_sync_config.timeline_limit,
2548+
required_state_map=room_sync_config.required_state_map,
2549+
)
2550+
2551+
# TODO: Record changes in required_state.
2552+
2553+
else:
2554+
new_connection_state.room_configs[room_id] = room_sync_config
2555+
24512556
set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)
24522557

24532558
return SlidingSyncResult.RoomResult(
@@ -2462,6 +2567,7 @@ async def get_room_sync_data(
24622567
stripped_state=stripped_state,
24632568
prev_batch=prev_batch_token,
24642569
limited=limited,
2570+
unstable_expanded_timeline=unstable_expanded_timeline,
24652571
num_live=num_live,
24662572
bump_stamp=bump_stamp,
24672573
joined_count=room_membership_summary.get(
@@ -3264,16 +3370,30 @@ class PerConnectionState:
32643370
Attributes:
32653371
rooms: The status of each room for the events stream.
32663372
receipts: The status of each room for the receipts stream.
3373+
room_configs: Map from room_id to the `RoomSyncConfig` of all
3374+
rooms that we have previously sent down.
32673375
"""
32683376

32693377
rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap)
32703378
receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap)
32713379

3380+
room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict)
3381+
32723382
def get_mutable(self) -> "MutablePerConnectionState":
32733383
"""Get a mutable copy of this state."""
3384+
room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs)
3385+
32743386
return MutablePerConnectionState(
32753387
rooms=self.rooms.get_mutable(),
32763388
receipts=self.receipts.get_mutable(),
3389+
room_configs=ChainMap({}, room_configs),
3390+
)
3391+
3392+
def copy(self) -> "PerConnectionState":
3393+
return PerConnectionState(
3394+
rooms=self.rooms.copy(),
3395+
receipts=self.receipts.copy(),
3396+
room_configs=dict(self.room_configs),
32773397
)
32783398

32793399

@@ -3284,8 +3404,18 @@ class MutablePerConnectionState(PerConnectionState):
32843404
rooms: MutableRoomStatusMap[RoomStreamToken]
32853405
receipts: MutableRoomStatusMap[MultiWriterStreamToken]
32863406

3407+
room_configs: typing.ChainMap[str, RoomSyncConfig]
3408+
32873409
def has_updates(self) -> bool:
3288-
return bool(self.rooms.get_updates()) or bool(self.receipts.get_updates())
3410+
return (
3411+
bool(self.rooms.get_updates())
3412+
or bool(self.receipts.get_updates())
3413+
or bool(self.get_room_config_updates())
3414+
)
3415+
3416+
def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]:
3417+
"""Get updates to the room sync config"""
3418+
return self.room_configs.maps[0]
32893419

32903420

32913421
@attr.s(auto_attribs=True)
@@ -3369,7 +3499,6 @@ async def record_new_state(
33693499
) -> int:
33703500
"""Record updated per-connection state, returning the connection
33713501
position associated with the new state.
3372-
33733502
If there are no changes to the state this may return the same token as
33743503
the existing per-connection state.
33753504
"""
@@ -3390,10 +3519,7 @@ async def record_new_state(
33903519

33913520
# We copy the `MutablePerConnectionState` so that the inner `ChainMap`s
33923521
# don't grow forever.
3393-
sync_statuses[new_store_token] = PerConnectionState(
3394-
rooms=new_connection_state.rooms.copy(),
3395-
receipts=new_connection_state.receipts.copy(),
3396-
)
3522+
sync_statuses[new_store_token] = new_connection_state.copy()
33973523

33983524
return new_store_token
33993525

synapse/rest/client/sync.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1044,6 +1044,11 @@ async def encode_rooms(
10441044
if room_result.initial:
10451045
serialized_rooms[room_id]["initial"] = room_result.initial
10461046

1047+
if room_result.unstable_expanded_timeline:
1048+
serialized_rooms[room_id][
1049+
"unstable_expanded_timeline"
1050+
] = room_result.unstable_expanded_timeline
1051+
10471052
# This will be omitted for invite/knock rooms with `stripped_state`
10481053
if (
10491054
room_result.required_state is not None

synapse/types/handlers/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,9 @@ class RoomResult:
171171
their local state. When there is an update, servers MUST omit this flag
172172
entirely and NOT send "initial":false as this is wasteful on bandwidth. The
173173
absence of this flag means 'false'.
174+
unstable_expanded_timeline: Flag which is set if we're returning more historic
175+
events due to the timeline limit having increased. See "XXX: Odd behavior"
176+
comment in `synapse.handlers.sliding_sync`.
174177
required_state: The current state of the room
175178
timeline: Latest events in the room. The last event is the most recent.
176179
bundled_aggregations: A mapping of event ID to the bundled aggregations for
@@ -219,6 +222,7 @@ class StrippedHero:
219222
heroes: Optional[List[StrippedHero]]
220223
is_dm: bool
221224
initial: bool
225+
unstable_expanded_timeline: bool
222226
# Should be empty for invite/knock rooms with `stripped_state`
223227
required_state: List[EventBase]
224228
# Should be empty for invite/knock rooms with `stripped_state`

0 commit comments

Comments
 (0)