fix: Timeline history requests causing "scrolling" and sometimes ordering things wrong

Sorunome 2020-10-29 18:07:26 +01:00
parent 793d398d72
commit 33b1e36efd
No known key found for this signature in database
GPG Key ID: B19471D07FC9BE9C
2 changed files with 91 additions and 59 deletions


@@ -902,6 +902,7 @@ class Room {
         '$id': (JoinedRoomUpdate()
           ..state = resp.state
           ..timeline = (TimelineUpdate()
+            ..limited = false
             ..events = resp.chunk
             ..prevBatch = resp.end)),
       }),
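
Why the single added line above matters: the Timeline clears its cached events whenever it sees a limited timeline update (compare the constructor comment in the second file, "if the timeline is limited we want to clear our events cache"), so the history response that Room builds here is now explicitly marked as not limited. A minimal, self-contained sketch of that consumer-side check, using a stand-in type rather than the SDK's real TimelineUpdate:

// Stand-in type for illustration only; the real TimelineUpdate lives in the SDK.
class FakeTimelineUpdate {
  final bool? limited; // may be null upstream, hence the `== true` check below
  FakeTimelineUpdate({this.limited});
}

void main() {
  final events = <String>['older event', 'newer event'];

  void handle(FakeTimelineUpdate update) {
    // Mirrors the idea from the Timeline constructor comment: a limited
    // timeline means a gap, so the cached events are no longer contiguous.
    if (update.limited == true) {
      events.clear();
    }
  }

  handle(FakeTimelineUpdate(limited: false)); // history response: cache kept
  print(events.length); // 2

  handle(FakeTimelineUpdate(limited: true)); // gappy /sync: cache dropped
  print(events.length); // 0
}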


@@ -33,10 +33,10 @@ typedef onTimelineInsertCallback = void Function(int insertID);
 /// event list will be retreived when created by the [room.getTimeline] method.
 class Timeline {
   final Room room;
-  List<Event> events = [];
+  final List<Event> events;

   /// Map of event ID to map of type to set of aggregated events
-  Map<String, Map<String, Set<Event>>> aggregatedEvents = {};
+  final Map<String, Map<String, Set<Event>>> aggregatedEvents = {};

   final onTimelineUpdateCallback onUpdate;
   final onTimelineInsertCallback onInsert;
@@ -62,6 +62,13 @@ class Timeline {
     return _eventCache[id];
   }

+  // When fetching history, we will collect them into the `_historyUpdates` set
+  // first, and then only process all events at once, once we have the full history.
+  // This ensures that the entire history fetching only triggers `onUpdate` only *once*,
+  // even if /sync's complete while history is being proccessed.
+  bool _collectHistoryUpdates = false;
+  final Set<EventUpdate> _historyUpdates = <EventUpdate>{};
+
   Future<void> requestHistory(
       {int historyCount = Room.DefaultHistoryCount}) async {
     if (!_requestingHistoryLock) {
@@ -69,18 +76,32 @@ class Timeline {
       await room.requestHistory(
         historyCount: historyCount,
         onHistoryReceived: () {
-          if (room.prev_batch.isEmpty || room.prev_batch == null) {
-            events.clear();
-            aggregatedEvents.clear();
-          }
+          _collectHistoryUpdates = true;
         },
       );
-      await Future.delayed(const Duration(seconds: 2));
-      _requestingHistoryLock = false;
+      try {
+        await Future.delayed(const Duration(seconds: 2));
+        _proccessHistoryUpdates();
+      } finally {
+        _collectHistoryUpdates = false;
+        _requestingHistoryLock = false;
+      }
     }
   }

-  Timeline({this.room, this.events, this.onUpdate, this.onInsert}) {
+  void _proccessHistoryUpdates() async {
+    _collectHistoryUpdates = false;
+    for (final update in _historyUpdates) {
+      _handleEventUpdate(await update.decrypt(room, store: true),
+          sortAndUpdate: false);
+    }
+    _historyUpdates.clear();
+    _sort();
+    if (onUpdate != null) onUpdate();
+  }
+
+  Timeline({this.room, List<Event> events, this.onUpdate, this.onInsert})
+      : events = events ?? [] {
     sub ??= room.client.onEvent.stream.listen(_handleEventUpdate);
     // if the timeline is limited we want to clear our events cache
     // as r.limitedTimeline can be "null" sometimes, we need to check for == true
@@ -222,65 +243,75 @@ class Timeline {
     }
   }

-  void _handleEventUpdate(EventUpdate eventUpdate) async {
+  void _handleEventUpdate(EventUpdate eventUpdate,
+      {bool sortAndUpdate = true}) {
     try {
       if (eventUpdate.roomID != room.id) return;
-      if (eventUpdate.type == EventUpdateType.timeline ||
-          eventUpdate.type == EventUpdateType.history) {
-        var status = eventUpdate.content['status'] ??
-            (eventUpdate.content['unsigned'] is Map<String, dynamic>
-                ? eventUpdate.content['unsigned'][MessageSendingStatusKey]
-                : null) ??
-            2;
-        // Redaction events are handled as modification for existing events.
-        if (eventUpdate.eventType == EventTypes.Redaction) {
-          final eventId = _findEvent(event_id: eventUpdate.content['redacts']);
-          if (eventId < events.length) {
-            removeAggregatedEvent(events[eventId]);
-            events[eventId].setRedactionEvent(Event.fromJson(
-                eventUpdate.content, room, eventUpdate.sortOrder));
-          }
-        } else if (status == -2) {
-          var i = _findEvent(event_id: eventUpdate.content['event_id']);
-          if (i < events.length) {
-            removeAggregatedEvent(events[i]);
-            events.removeAt(i);
-          }
-        } else {
-          var i = _findEvent(
-              event_id: eventUpdate.content['event_id'],
-              unsigned_txid: eventUpdate.content['unsigned'] is Map
-                  ? eventUpdate.content['unsigned']['transaction_id']
-                  : null);
-          if (i < events.length) {
-            // if the old status is larger than the new one, we also want to preserve the old status
-            final oldStatus = events[i].status;
-            events[i] = Event.fromJson(
-                eventUpdate.content, room, eventUpdate.sortOrder);
-            // do we preserve the status? we should allow 0 -> -1 updates and status increases
-            if (status < oldStatus && !(status == -1 && oldStatus == 0)) {
-              events[i].status = oldStatus;
-            }
-            addAggregatedEvent(events[i]);
-          } else {
-            var newEvent = Event.fromJson(
-                eventUpdate.content, room, eventUpdate.sortOrder);
-            if (eventUpdate.type == EventUpdateType.history &&
-                events.indexWhere(
-                        (e) => e.eventId == eventUpdate.content['event_id']) !=
-                    -1) return;
-            events.insert(0, newEvent);
-            addAggregatedEvent(newEvent);
-            if (onInsert != null) onInsert(0);
-          }
-        }
-      }
-      _sort();
-      if (onUpdate != null) onUpdate();
+      if (eventUpdate.type == EventUpdateType.history &&
+          _collectHistoryUpdates) {
+        _historyUpdates.add(eventUpdate);
+        return;
+      }
+      if (eventUpdate.type != EventUpdateType.timeline &&
+          eventUpdate.type != EventUpdateType.history) {
+        return;
+      }
+      var status = eventUpdate.content['status'] ??
+          (eventUpdate.content['unsigned'] is Map<String, dynamic>
+              ? eventUpdate.content['unsigned'][MessageSendingStatusKey]
+              : null) ??
+          2;
+      // Redaction events are handled as modification for existing events.
+      if (eventUpdate.eventType == EventTypes.Redaction) {
+        final eventId = _findEvent(event_id: eventUpdate.content['redacts']);
+        if (eventId < events.length) {
+          removeAggregatedEvent(events[eventId]);
+          events[eventId].setRedactionEvent(
+              Event.fromJson(eventUpdate.content, room, eventUpdate.sortOrder));
+        }
+      } else if (status == -2) {
+        var i = _findEvent(event_id: eventUpdate.content['event_id']);
+        if (i < events.length) {
+          removeAggregatedEvent(events[i]);
+          events.removeAt(i);
+        }
+      } else {
+        var i = _findEvent(
+            event_id: eventUpdate.content['event_id'],
+            unsigned_txid: eventUpdate.content['unsigned'] is Map
+                ? eventUpdate.content['unsigned']['transaction_id']
+                : null);
+        if (i < events.length) {
+          // if the old status is larger than the new one, we also want to preserve the old status
+          final oldStatus = events[i].status;
+          events[i] =
+              Event.fromJson(eventUpdate.content, room, eventUpdate.sortOrder);
+          // do we preserve the status? we should allow 0 -> -1 updates and status increases
+          if (status < oldStatus && !(status == -1 && oldStatus == 0)) {
+            events[i].status = oldStatus;
+          }
+          addAggregatedEvent(events[i]);
+        } else {
+          var newEvent =
+              Event.fromJson(eventUpdate.content, room, eventUpdate.sortOrder);
+          if (eventUpdate.type == EventUpdateType.history &&
+              events.indexWhere(
+                      (e) => e.eventId == eventUpdate.content['event_id']) !=
+                  -1) return;
+          events.insert(0, newEvent);
+          addAggregatedEvent(newEvent);
+          if (onInsert != null) onInsert(0);
+        }
+      }
+      if (sortAndUpdate) {
+        _sort();
+        if (onUpdate != null) onUpdate();
+      }
     } catch (e, s) {
       Logs.warning('Handle event update failed: ${e.toString()}', s);
     }
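
Taken together, the Timeline changes mean that while a history request is in flight, incoming history EventUpdates are parked in `_historyUpdates` and only decrypted, applied and sorted in one pass by `_proccessHistoryUpdates()`, so `onUpdate` fires once for the whole batch instead of once per event. A rough usage sketch, assuming an already-initialised `room` and that `room.getTimeline` accepts the callbacks declared in the class above:

import 'package:famedlysdk/famedlysdk.dart'; // assumed package import path

Future<void> demoHistoryBatching(Room room) async {
  var updates = 0;
  final timeline = await room.getTimeline(
    onUpdate: () => updates++, // now fires once per processed history batch
    onInsert: (index) => print('inserted at $index'),
  );

  await timeline.requestHistory(historyCount: 50);
  // While the request runs, history EventUpdates are collected instead of
  // being applied (and re-sorted) one by one; after _proccessHistoryUpdates()
  // the timeline is sorted once and onUpdate has been called once.
  print('onUpdate fired $updates time(s) for the history fetch');
}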