fix: key uploads only running once

There were several issues here. Key uploads in general failed, because
the await caused a Future<dynamic> to be returned, which failed type
checking.

We also never unset the future used for the online key backup uploads,
which meant we would very quickly stop uploading any keys at all.
This commit is contained in:
Nicolas Werner 2023-11-24 17:37:39 +01:00
parent 9019dfd566
commit 25f4353ab0
No known key found for this signature in database
GPG Key ID: B38119FF80087618
3 changed files with 89 additions and 70 deletions

View File

@ -232,9 +232,12 @@ class Encryption {
// the entry should always exist. In the case it doesn't, the following // the entry should always exist. In the case it doesn't, the following
// line *could* throw an error. As that is a future, though, and we call // line *could* throw an error. As that is a future, though, and we call
// it un-awaited here, nothing happens, which is exactly the result we want // it un-awaited here, nothing happens, which is exactly the result we want
// ignore: discarded_futures client.database
client.database?.updateInboundGroupSessionIndexes( // ignore: discarded_futures
json.encode(inboundGroupSession.indexes), roomId, sessionId); ?.updateInboundGroupSessionIndexes(
json.encode(inboundGroupSession.indexes), roomId, sessionId)
// ignore: discarded_futures
.onError((e, _) => Logs().e('Ignoring error for updating indexes'));
} }
decryptedPayload = json.decode(decryptResult.plaintext); decryptedPayload = json.decode(decryptResult.plaintext);
} catch (exception) { } catch (exception) {

View File

@ -795,74 +795,86 @@ class KeyManager {
// Make sure to not run in parallel // Make sure to not run in parallel
if (_uploadingFuture != null) { if (_uploadingFuture != null) {
if (skipIfInProgress) return; if (skipIfInProgress) return;
await _uploadingFuture;
}
final completer = Completer<void>();
_uploadingFuture = completer.future;
await client.userDeviceKeysLoading;
try {
if (!(await isCached())) {
return; // we can't backup anyways
}
final dbSessions = await database.getInboundGroupSessionsToUpload();
if (dbSessions.isEmpty) {
return; // nothing to do
}
final privateKey =
base64decodeUnpadded((await encryption.ssss.getCached(megolmKey))!);
// decryption is needed to calculate the public key and thus see if the claimed information is in fact valid
final decryption = olm.PkDecryption();
final info = await getRoomKeysBackupInfo(false);
String backupPubKey;
try { try {
backupPubKey = decryption.init_with_private_key(privateKey); await _uploadingFuture;
if (info.algorithm !=
BackupAlgorithm.mMegolmBackupV1Curve25519AesSha2 ||
info.authData['public_key'] != backupPubKey) {
return;
}
final args = GenerateUploadKeysArgs(
pubkey: backupPubKey,
dbSessions: <DbInboundGroupSessionBundle>[],
userId: userID,
);
// we need to calculate verified beforehand, as else we pass a closure to an isolate
// with 500 keys they do, however, noticably block the UI, which is why we give brief async suspentions in here
// so that the event loop can progress
var i = 0;
for (final dbSession in dbSessions) {
final device =
client.getUserDeviceKeysByCurve25519Key(dbSession.senderKey);
args.dbSessions.add(DbInboundGroupSessionBundle(
dbSession: dbSession,
verified: device?.verified ?? false,
));
i++;
if (i > 10) {
await Future.delayed(Duration(milliseconds: 1));
i = 0;
}
}
final roomKeys =
await client.nativeImplementations.generateUploadKeys(args);
Logs().i('[Key Manager] Uploading ${dbSessions.length} room keys...');
// upload the payload...
await client.putRoomKeys(info.version, roomKeys);
// and now finally mark all the keys as uploaded
// no need to optimze this, as we only run it so seldomly and almost never with many keys at once
for (final dbSession in dbSessions) {
await database.markInboundGroupSessionAsUploaded(
dbSession.roomId, dbSession.sessionId);
}
} finally { } finally {
decryption.free(); // shouldn't be necessary, since it will be unset already by the other process that started it, but just to be safe, also unset the future here
_uploadingFuture = null;
} }
} catch (e, s) { }
Logs().e('[Key Manager] Error uploading room keys', e, s);
Future<void> uploadInternal() async {
try {
await client.userDeviceKeysLoading;
if (!(await isCached())) {
return; // we can't backup anyways
}
final dbSessions = await database.getInboundGroupSessionsToUpload();
if (dbSessions.isEmpty) {
return; // nothing to do
}
final privateKey =
base64decodeUnpadded((await encryption.ssss.getCached(megolmKey))!);
// decryption is needed to calculate the public key and thus see if the claimed information is in fact valid
final decryption = olm.PkDecryption();
final info = await getRoomKeysBackupInfo(false);
String backupPubKey;
try {
backupPubKey = decryption.init_with_private_key(privateKey);
if (info.algorithm !=
BackupAlgorithm.mMegolmBackupV1Curve25519AesSha2 ||
info.authData['public_key'] != backupPubKey) {
decryption.free();
return;
}
final args = GenerateUploadKeysArgs(
pubkey: backupPubKey,
dbSessions: <DbInboundGroupSessionBundle>[],
userId: userID,
);
// we need to calculate verified beforehand, as else we pass a closure to an isolate
// with 500 keys they do, however, noticably block the UI, which is why we give brief async suspentions in here
// so that the event loop can progress
var i = 0;
for (final dbSession in dbSessions) {
final device =
client.getUserDeviceKeysByCurve25519Key(dbSession.senderKey);
args.dbSessions.add(DbInboundGroupSessionBundle(
dbSession: dbSession,
verified: device?.verified ?? false,
));
i++;
if (i > 10) {
await Future.delayed(Duration(milliseconds: 1));
i = 0;
}
}
final roomKeys =
await client.nativeImplementations.generateUploadKeys(args);
Logs().i('[Key Manager] Uploading ${dbSessions.length} room keys...');
// upload the payload...
await client.putRoomKeys(info.version, roomKeys);
// and now finally mark all the keys as uploaded
// no need to optimze this, as we only run it so seldomly and almost never with many keys at once
for (final dbSession in dbSessions) {
await database.markInboundGroupSessionAsUploaded(
dbSession.roomId, dbSession.sessionId);
}
} finally {
decryption.free();
}
} catch (e, s) {
Logs().e('[Key Manager] Error uploading room keys', e, s);
}
}
_uploadingFuture = uploadInternal();
try {
await _uploadingFuture;
} finally { } finally {
completer.complete(); _uploadingFuture = null;
} }
} }
@ -1187,12 +1199,12 @@ RoomKeys generateUploadKeysImplementation(GenerateUploadKeysArgs args) {
}, },
); );
} }
enc.free();
return roomKeys; return roomKeys;
} catch (e, s) { } catch (e, s) {
Logs().e('[Key Manager] Error generating payload', e, s); Logs().e('[Key Manager] Error generating payload', e, s);
rethrow;
} finally {
enc.free(); enc.free();
rethrow;
} }
} }

View File

@ -52,7 +52,7 @@ abstract class NativeImplementations {
/// this implementation will catch any non-implemented method /// this implementation will catch any non-implemented method
@override @override
dynamic noSuchMethod(Invocation invocation) async { dynamic noSuchMethod(Invocation invocation) {
final dynamic argument = invocation.positionalArguments.single; final dynamic argument = invocation.positionalArguments.single;
final memberName = invocation.memberName.toString().split('"')[1]; final memberName = invocation.memberName.toString().split('"')[1];
@ -62,11 +62,15 @@ abstract class NativeImplementations {
'Fallback from NativeImplementations.dummy used.', 'Fallback from NativeImplementations.dummy used.',
); );
switch (memberName) { switch (memberName) {
// we need to pass the futures right through or we will run into type errors later!
case 'generateUploadKeys': case 'generateUploadKeys':
// ignore: discarded_futures
return dummy.generateUploadKeys(argument); return dummy.generateUploadKeys(argument);
case 'keyFromPassphrase': case 'keyFromPassphrase':
// ignore: discarded_futures
return dummy.keyFromPassphrase(argument); return dummy.keyFromPassphrase(argument);
case 'decryptFile': case 'decryptFile':
// ignore: discarded_futures
return dummy.decryptFile(argument); return dummy.decryptFile(argument);
case 'shrinkImage': case 'shrinkImage':
return dummy.shrinkImage(argument); return dummy.shrinkImage(argument);