From 25f4353ab0bc7802e6494c8c873d0c8190706319 Mon Sep 17 00:00:00 2001
From: Nicolas Werner
Date: Fri, 24 Nov 2023 17:37:39 +0100
Subject: [PATCH] fix: key uploads only running once

There were several issues here. Key uploads in general failed, because
the await caused a Future to be returned, which failed type checking.
We also never unset the future used for the online key backup uploads,
which meant we would very quickly stop uploading any keys at all.
---
 lib/encryption/encryption.dart            |   9 +-
 lib/encryption/key_manager.dart           | 144 ++++++++++++----
 lib/src/utils/native_implementations.dart |   6 +-
 3 files changed, 89 insertions(+), 70 deletions(-)

diff --git a/lib/encryption/encryption.dart b/lib/encryption/encryption.dart
index 8a5d1635..7d5de5e5 100644
--- a/lib/encryption/encryption.dart
+++ b/lib/encryption/encryption.dart
@@ -232,9 +232,12 @@ class Encryption {
         // the entry should always exist. In the case it doesn't, the following
         // line *could* throw an error. As that is a future, though, and we call
         // it un-awaited here, nothing happens, which is exactly the result we want
-        // ignore: discarded_futures
-        client.database?.updateInboundGroupSessionIndexes(
-            json.encode(inboundGroupSession.indexes), roomId, sessionId);
+        client.database
+            // ignore: discarded_futures
+            ?.updateInboundGroupSessionIndexes(
+                json.encode(inboundGroupSession.indexes), roomId, sessionId)
+            // ignore: discarded_futures
+            .onError((e, _) => Logs().e('Ignoring error for updating indexes'));
       }
       decryptedPayload = json.decode(decryptResult.plaintext);
     } catch (exception) {
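The encryption.dart hunk keeps the index update deliberately un-awaited and
attaches an onError handler, so a failed background write is logged instead
of surfacing as an unhandled asynchronous error. A minimal sketch of that
fire-and-forget pattern (persistIndexes and rememberIndexes are illustrative
stand-ins, not SDK code):

    Future<void> persistIndexes(String json) async {
      // stand-in for client.database?.updateInboundGroupSessionIndexes(...)
    }

    void rememberIndexes(String json) {
      // Deliberately not awaited, so decryption stays synchronous; the
      // onError handler logs and swallows a failed write.
      // ignore: discarded_futures
      persistIndexes(json)
          // ignore: discarded_futures
          .onError((e, _) => print('Ignoring error for updating indexes'));
    }
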
diff --git a/lib/encryption/key_manager.dart b/lib/encryption/key_manager.dart
index 6c70f870..c74539cb 100644
--- a/lib/encryption/key_manager.dart
+++ b/lib/encryption/key_manager.dart
@@ -795,74 +795,86 @@ class KeyManager {
     // Make sure to not run in parallel
     if (_uploadingFuture != null) {
       if (skipIfInProgress) return;
-      await _uploadingFuture;
-    }
-    final completer = Completer();
-    _uploadingFuture = completer.future;
-
-    await client.userDeviceKeysLoading;
-    try {
-      if (!(await isCached())) {
-        return; // we can't backup anyways
-      }
-      final dbSessions = await database.getInboundGroupSessionsToUpload();
-      if (dbSessions.isEmpty) {
-        return; // nothing to do
-      }
-      final privateKey =
-          base64decodeUnpadded((await encryption.ssss.getCached(megolmKey))!);
-      // decryption is needed to calculate the public key and thus see if the claimed information is in fact valid
-      final decryption = olm.PkDecryption();
-      final info = await getRoomKeysBackupInfo(false);
-      String backupPubKey;
       try {
-        backupPubKey = decryption.init_with_private_key(privateKey);
-
-        if (info.algorithm !=
-                BackupAlgorithm.mMegolmBackupV1Curve25519AesSha2 ||
-            info.authData['public_key'] != backupPubKey) {
-          return;
-        }
-        final args = GenerateUploadKeysArgs(
-          pubkey: backupPubKey,
-          dbSessions: [],
-          userId: userID,
-        );
-        // we need to calculate verified beforehand, as else we pass a closure to an isolate
-        // with 500 keys they do, however, noticably block the UI, which is why we give brief async suspentions in here
-        // so that the event loop can progress
-        var i = 0;
-        for (final dbSession in dbSessions) {
-          final device =
-              client.getUserDeviceKeysByCurve25519Key(dbSession.senderKey);
-          args.dbSessions.add(DbInboundGroupSessionBundle(
-            dbSession: dbSession,
-            verified: device?.verified ?? false,
-          ));
-          i++;
-          if (i > 10) {
-            await Future.delayed(Duration(milliseconds: 1));
-            i = 0;
-          }
-        }
-        final roomKeys =
-            await client.nativeImplementations.generateUploadKeys(args);
-        Logs().i('[Key Manager] Uploading ${dbSessions.length} room keys...');
-        // upload the payload...
-        await client.putRoomKeys(info.version, roomKeys);
-        // and now finally mark all the keys as uploaded
-        // no need to optimze this, as we only run it so seldomly and almost never with many keys at once
-        for (final dbSession in dbSessions) {
-          await database.markInboundGroupSessionAsUploaded(
-              dbSession.roomId, dbSession.sessionId);
-        }
+        await _uploadingFuture;
       } finally {
-        decryption.free();
+        // shouldn't be necessary, since it will be unset already by the other process that started it, but just to be safe, also unset the future here
+        _uploadingFuture = null;
       }
-    } catch (e, s) {
-      Logs().e('[Key Manager] Error uploading room keys', e, s);
+    }
+
+    Future uploadInternal() async {
+      try {
+        await client.userDeviceKeysLoading;
+
+        if (!(await isCached())) {
+          return; // we can't backup anyways
+        }
+        final dbSessions = await database.getInboundGroupSessionsToUpload();
+        if (dbSessions.isEmpty) {
+          return; // nothing to do
+        }
+        final privateKey =
+            base64decodeUnpadded((await encryption.ssss.getCached(megolmKey))!);
+        // decryption is needed to calculate the public key and thus see if the claimed information is in fact valid
+        final decryption = olm.PkDecryption();
+        final info = await getRoomKeysBackupInfo(false);
+        String backupPubKey;
+        try {
+          backupPubKey = decryption.init_with_private_key(privateKey);
+
+          if (info.algorithm !=
+                  BackupAlgorithm.mMegolmBackupV1Curve25519AesSha2 ||
+              info.authData['public_key'] != backupPubKey) {
+            decryption.free();
+            return;
+          }
+          final args = GenerateUploadKeysArgs(
+            pubkey: backupPubKey,
+            dbSessions: [],
+            userId: userID,
+          );
+          // we need to calculate verified beforehand, as else we pass a closure to an isolate
+          // with 500 keys they do, however, noticably block the UI, which is why we give brief async suspentions in here
+          // so that the event loop can progress
+          var i = 0;
+          for (final dbSession in dbSessions) {
+            final device =
+                client.getUserDeviceKeysByCurve25519Key(dbSession.senderKey);
+            args.dbSessions.add(DbInboundGroupSessionBundle(
+              dbSession: dbSession,
+              verified: device?.verified ?? false,
+            ));
+            i++;
+            if (i > 10) {
+              await Future.delayed(Duration(milliseconds: 1));
+              i = 0;
+            }
+          }
+          final roomKeys =
+              await client.nativeImplementations.generateUploadKeys(args);
+          Logs().i('[Key Manager] Uploading ${dbSessions.length} room keys...');
+          // upload the payload...
+          await client.putRoomKeys(info.version, roomKeys);
+          // and now finally mark all the keys as uploaded
+          // no need to optimze this, as we only run it so seldomly and almost never with many keys at once
+          for (final dbSession in dbSessions) {
+            await database.markInboundGroupSessionAsUploaded(
+                dbSession.roomId, dbSession.sessionId);
+          }
+        } finally {
+          decryption.free();
+        }
+      } catch (e, s) {
+        Logs().e('[Key Manager] Error uploading room keys', e, s);
+      }
+    }
+
+    _uploadingFuture = uploadInternal();
+    try {
+      await _uploadingFuture;
     } finally {
-      completer.complete();
+      _uploadingFuture = null;
     }
   }

@@ -1187,12 +1199,12 @@ RoomKeys generateUploadKeysImplementation(GenerateUploadKeysArgs args) {
         },
       );
     }
+    enc.free();
     return roomKeys;
   } catch (e, s) {
     Logs().e('[Key Manager] Error generating payload', e, s);
-    rethrow;
-  } finally {
     enc.free();
+    rethrow;
   }
 }

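The key_manager.dart change is easier to see outside the diff: the shared
_uploadingFuture now points at the actual upload job and is always cleared in
a finally block, whereas before it was set to a completer's future that was
completed but never unset, so every later call with skipIfInProgress saw an
upload "in progress" and returned immediately. A condensed sketch of the new
guard (UploadGuard and doUpload are illustrative names, not SDK code):

    class UploadGuard {
      Future<void>? _uploadingFuture;

      Future<void> doUpload({bool skipIfInProgress = false}) async {
        // Make sure to not run in parallel.
        if (_uploadingFuture != null) {
          if (skipIfInProgress) return;
          try {
            await _uploadingFuture;
          } finally {
            // Normally cleared by the run that set it; unset again to be safe.
            _uploadingFuture = null;
          }
        }

        Future<void> uploadInternal() async {
          // ... gather sessions, encrypt and upload them ...
        }

        _uploadingFuture = uploadInternal();
        try {
          await _uploadingFuture;
        } finally {
          // Without this, _uploadingFuture stays set forever and later calls
          // with skipIfInProgress == true never upload anything again.
          _uploadingFuture = null;
        }
      }
    }
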
diff --git a/lib/src/utils/native_implementations.dart b/lib/src/utils/native_implementations.dart
index 48b84069..ee243ebb 100644
--- a/lib/src/utils/native_implementations.dart
+++ b/lib/src/utils/native_implementations.dart
@@ -52,7 +52,7 @@ abstract class NativeImplementations {

   /// this implementation will catch any non-implemented method
   @override
-  dynamic noSuchMethod(Invocation invocation) async {
+  dynamic noSuchMethod(Invocation invocation) {
     final dynamic argument = invocation.positionalArguments.single;
     final memberName = invocation.memberName.toString().split('"')[1];

@@ -62,11 +62,15 @@ abstract class NativeImplementations {
       'Fallback from NativeImplementations.dummy used.',
     );
     switch (memberName) {
+      // we need to pass the futures right through or we will run into type errors later!
       case 'generateUploadKeys':
+        // ignore: discarded_futures
         return dummy.generateUploadKeys(argument);
       case 'keyFromPassphrase':
+        // ignore: discarded_futures
         return dummy.keyFromPassphrase(argument);
       case 'decryptFile':
+        // ignore: discarded_futures
         return dummy.decryptFile(argument);
       case 'shrinkImage':
         return dummy.shrinkImage(argument);
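
The native_implementations.dart change drops the async from noSuchMethod
because an async body wraps whatever the fallback returns in a fresh
Future<dynamic>, and the implicit cast back to the member's declared return
type (a specific Future type such as Future<RoomKeys>) then fails at runtime.
Returning the dummy's future unchanged keeps its concrete type. A
self-contained sketch of the failure mode, using illustrative names (Native,
Dummy, Forwarding) rather than the real SDK classes:

    abstract class Native {
      Future<int> compute(int x);
    }

    class Dummy implements Native {
      @override
      Future<int> compute(int x) async => x * 2;
    }

    class Forwarding implements Native {
      static final Dummy dummy = Dummy();

      // Declaring this method `async` would make it return a Future<dynamic>,
      // and the generated forwarder's implicit cast to Future<int> would throw
      // at runtime. Passing the dummy's Future<int> through unchanged works.
      @override
      dynamic noSuchMethod(Invocation invocation) {
        if (invocation.memberName == #compute) {
          final argument = invocation.positionalArguments.single as int;
          return dummy.compute(argument);
        }
        return super.noSuchMethod(invocation);
      }
    }

    Future<void> main() async {
      final Native impl = Forwarding();
      print(await impl.compute(21)); // 42
    }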