Merge pull request #1623 from famedly/nico/fix-key-backup
fix: key uploads only running once

Commit f1e0268944
```diff
@@ -232,9 +232,12 @@ class Encryption {
         // the entry should always exist. In the case it doesn't, the following
         // line *could* throw an error. As that is a future, though, and we call
         // it un-awaited here, nothing happens, which is exactly the result we want
-        // ignore: discarded_futures
-        client.database?.updateInboundGroupSessionIndexes(
-            json.encode(inboundGroupSession.indexes), roomId, sessionId);
+        client.database
+            // ignore: discarded_futures
+            ?.updateInboundGroupSessionIndexes(
+                json.encode(inboundGroupSession.indexes), roomId, sessionId)
+            // ignore: discarded_futures
+            .onError((e, _) => Logs().e('Ignoring error for updating indexes'));
       }
       decryptedPayload = json.decode(decryptResult.plaintext);
     } catch (exception) {
```
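This hunk chains `.onError` onto the discarded future, so a failed database write gets logged instead of surfacing as an unhandled async error. A minimal sketch of that fire-and-forget pattern, with a hypothetical `updateIndexes` standing in for the database call:

```dart
// Hypothetical stand-in for client.database?.updateInboundGroupSessionIndexes
Future<void> updateIndexes() async => throw Exception('db write failed');

void main() {
  // Discarding a future without a handler turns any exception it throws
  // into an unhandled async error; .onError logs and swallows it instead.
  // ignore: discarded_futures
  updateIndexes()
      .onError((e, _) => print('Ignoring error for updating indexes: $e'));
}
```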
```diff
@@ -795,74 +795,86 @@ class KeyManager {
     // Make sure to not run in parallel
     if (_uploadingFuture != null) {
       if (skipIfInProgress) return;
       await _uploadingFuture;
     }
     final completer = Completer<void>();
     _uploadingFuture = completer.future;
 
-    await client.userDeviceKeysLoading;
-    try {
-      if (!(await isCached())) {
-        return; // we can't backup anyways
-      }
-      final dbSessions = await database.getInboundGroupSessionsToUpload();
-      if (dbSessions.isEmpty) {
-        return; // nothing to do
-      }
-      final privateKey =
-          base64decodeUnpadded((await encryption.ssss.getCached(megolmKey))!);
-      // decryption is needed to calculate the public key and thus see if the claimed information is in fact valid
-      final decryption = olm.PkDecryption();
-      final info = await getRoomKeysBackupInfo(false);
-      String backupPubKey;
-      try {
-        backupPubKey = decryption.init_with_private_key(privateKey);
-
-        if (info.algorithm !=
-                BackupAlgorithm.mMegolmBackupV1Curve25519AesSha2 ||
-            info.authData['public_key'] != backupPubKey) {
-          return;
-        }
-        final args = GenerateUploadKeysArgs(
-          pubkey: backupPubKey,
-          dbSessions: <DbInboundGroupSessionBundle>[],
-          userId: userID,
-        );
-        // we need to calculate verified beforehand, as else we pass a closure to an isolate
-        // with 500 keys they do, however, noticably block the UI, which is why we give brief async suspentions in here
-        // so that the event loop can progress
-        var i = 0;
-        for (final dbSession in dbSessions) {
-          final device =
-              client.getUserDeviceKeysByCurve25519Key(dbSession.senderKey);
-          args.dbSessions.add(DbInboundGroupSessionBundle(
-            dbSession: dbSession,
-            verified: device?.verified ?? false,
-          ));
-          i++;
-          if (i > 10) {
-            await Future.delayed(Duration(milliseconds: 1));
-            i = 0;
-          }
-        }
-        final roomKeys =
-            await client.nativeImplementations.generateUploadKeys(args);
-        Logs().i('[Key Manager] Uploading ${dbSessions.length} room keys...');
-        // upload the payload...
-        await client.putRoomKeys(info.version, roomKeys);
-        // and now finally mark all the keys as uploaded
-        // no need to optimze this, as we only run it so seldomly and almost never with many keys at once
-        for (final dbSession in dbSessions) {
-          await database.markInboundGroupSessionAsUploaded(
-              dbSession.roomId, dbSession.sessionId);
-        }
-        await _uploadingFuture;
-      } finally {
-        decryption.free();
-        // shouldn't be necessary, since it will be unset already by the other process that started it, but just to be safe, also unset the future here
-        _uploadingFuture = null;
-      }
-    } catch (e, s) {
-      Logs().e('[Key Manager] Error uploading room keys', e, s);
-    }
+    Future<void> uploadInternal() async {
+      try {
+        await client.userDeviceKeysLoading;
+
+        if (!(await isCached())) {
+          return; // we can't backup anyways
+        }
+        final dbSessions = await database.getInboundGroupSessionsToUpload();
+        if (dbSessions.isEmpty) {
+          return; // nothing to do
+        }
+        final privateKey =
+            base64decodeUnpadded((await encryption.ssss.getCached(megolmKey))!);
+        // decryption is needed to calculate the public key and thus see if the claimed information is in fact valid
+        final decryption = olm.PkDecryption();
+        final info = await getRoomKeysBackupInfo(false);
+        String backupPubKey;
+        try {
+          backupPubKey = decryption.init_with_private_key(privateKey);
+
+          if (info.algorithm !=
+                  BackupAlgorithm.mMegolmBackupV1Curve25519AesSha2 ||
+              info.authData['public_key'] != backupPubKey) {
+            decryption.free();
+            return;
+          }
+          final args = GenerateUploadKeysArgs(
+            pubkey: backupPubKey,
+            dbSessions: <DbInboundGroupSessionBundle>[],
+            userId: userID,
+          );
+          // we need to calculate verified beforehand, as else we pass a closure to an isolate
+          // with 500 keys they do, however, noticably block the UI, which is why we give brief async suspentions in here
+          // so that the event loop can progress
+          var i = 0;
+          for (final dbSession in dbSessions) {
+            final device =
+                client.getUserDeviceKeysByCurve25519Key(dbSession.senderKey);
+            args.dbSessions.add(DbInboundGroupSessionBundle(
+              dbSession: dbSession,
+              verified: device?.verified ?? false,
+            ));
+            i++;
+            if (i > 10) {
+              await Future.delayed(Duration(milliseconds: 1));
+              i = 0;
+            }
+          }
+          final roomKeys =
+              await client.nativeImplementations.generateUploadKeys(args);
+          Logs().i('[Key Manager] Uploading ${dbSessions.length} room keys...');
+          // upload the payload...
+          await client.putRoomKeys(info.version, roomKeys);
+          // and now finally mark all the keys as uploaded
+          // no need to optimze this, as we only run it so seldomly and almost never with many keys at once
+          for (final dbSession in dbSessions) {
+            await database.markInboundGroupSessionAsUploaded(
+                dbSession.roomId, dbSession.sessionId);
+          }
+        } finally {
+          decryption.free();
+        }
+      } catch (e, s) {
+        Logs().e('[Key Manager] Error uploading room keys', e, s);
+      }
+    }
+
+    _uploadingFuture = uploadInternal();
+    try {
+      await _uploadingFuture;
+    } finally {
+      completer.complete();
+      _uploadingFuture = null;
+    }
   }
```
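This is the core of the fix: the upload body moves into a local `uploadInternal()` function, `_uploadingFuture` is set to its future, and the future is always cleared in a `finally`. The removed code ended its `try` block with `await _uploadingFuture;`, awaiting a completer that nothing ever completed, so the first upload never returned, `_uploadingFuture` stayed set, and every later call bailed out at the in-progress check. A minimal sketch of the corrected guard pattern, with illustrative names rather than the SDK's:

```dart
// Illustrative sketch: serialize runs of an async task by parking its
// future in a field that is cleared whenever the run finishes.
Future<void>? _inFlight;

Future<void> runExclusive(
  Future<void> Function() task, {
  bool skipIfInProgress = false,
}) async {
  // make sure to not run in parallel
  final running = _inFlight;
  if (running != null) {
    if (skipIfInProgress) return;
    await running;
  }
  final future = task();
  _inFlight = future;
  try {
    await future;
  } finally {
    // clearing in `finally` keeps later runs possible even if the task
    // throws; a future that is never completed or cleared wedges the
    // guard forever, which is the bug this PR fixes
    _inFlight = null;
  }
}
```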
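Separately from the race, the loop in this hunk yields to the event loop every ~10 sessions so that preparing a large backup does not freeze the UI. The same pattern in isolation, with hypothetical names:

```dart
// Hypothetical example: chunk a hot loop with 1ms suspensions so queued
// events (UI frames, network callbacks) can run between batches.
Future<void> prepareAll(List<String> sessions) async {
  var sinceLastYield = 0;
  for (final session in sessions) {
    verify(session); // stand-in for the per-session verified lookup
    sinceLastYield++;
    if (sinceLastYield > 10) {
      // Future.delayed schedules the loop's continuation as a new
      // event, letting everything already queued run first
      await Future.delayed(const Duration(milliseconds: 1));
      sinceLastYield = 0;
    }
  }
}

void verify(String session) {
  // stand-in for synchronous per-item work
}
```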
```diff
@@ -1187,12 +1199,12 @@ RoomKeys generateUploadKeysImplementation(GenerateUploadKeysArgs args) {
         },
       );
     }
-    enc.free();
     return roomKeys;
   } catch (e, s) {
     Logs().e('[Key Manager] Error generating payload', e, s);
-    rethrow;
-  }
+    rethrow;
+  } finally {
+    enc.free();
+  }
 }
```
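Moving `enc.free()` into a `finally` releases the olm object on the success path and the error path alike; before, the free only ran ahead of the `return`, so an exception leaked the native allocation. The shape of the pattern, with a toy resource class standing in for `olm.PkEncryption`:

```dart
// Toy stand-in for a native olm object, which Dart's GC does not free.
class NativeResource {
  void free() => print('freed');
}

String buildPayload(List<String> keys) {
  final enc = NativeResource();
  try {
    if (keys.isEmpty) throw StateError('nothing to encrypt');
    return keys.join(',');
  } catch (e, s) {
    print('error generating payload: $e');
    rethrow;
  } finally {
    // runs after `return` and after `rethrow` alike, so the native
    // handle is released exactly once on every path
    enc.free();
  }
}
```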
```diff
@@ -52,7 +52,7 @@ abstract class NativeImplementations {
 
   /// this implementation will catch any non-implemented method
   @override
-  dynamic noSuchMethod(Invocation invocation) async {
+  dynamic noSuchMethod(Invocation invocation) {
     final dynamic argument = invocation.positionalArguments.single;
     final memberName = invocation.memberName.toString().split('"')[1];
```
```diff
@@ -62,11 +62,15 @@ abstract class NativeImplementations {
       'Fallback from NativeImplementations.dummy used.',
     );
     switch (memberName) {
+      // we need to pass the futures right through or we will run into type errors later!
       case 'generateUploadKeys':
+        // ignore: discarded_futures
         return dummy.generateUploadKeys(argument);
       case 'keyFromPassphrase':
+        // ignore: discarded_futures
         return dummy.keyFromPassphrase(argument);
       case 'decryptFile':
+        // ignore: discarded_futures
         return dummy.decryptFile(argument);
       case 'shrinkImage':
         return dummy.shrinkImage(argument);
```
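Dropping `async` from `noSuchMethod` is what makes returning the dummy's futures directly type-safe: an `async` body awaits nothing here but still re-wraps the returned value as a fresh `Future<dynamic>`, whereas a plain return passes the original future through with its reified type (e.g. `Future<RoomKeys>`) intact. A toy demonstration of the difference, using made-up classes rather than the SDK's:

```dart
// greet() is not declared on Forwarder, so a dynamic call lands in
// noSuchMethod, which forwards to the real implementation.
class Greeter {
  Future<String> greet(String name) async => 'hello $name';
}

class Forwarder {
  final Greeter _inner = Greeter();

  @override
  // no `async` here: the returned object stays a Future<String>; an
  // async noSuchMethod would hand back a re-wrapped Future<dynamic>,
  // and downcasts at the call site would throw at runtime
  dynamic noSuchMethod(Invocation invocation) =>
      _inner.greet(invocation.positionalArguments.single as String);
}

Future<void> main() async {
  final dynamic forwarder = Forwarder();
  // this cast only succeeds because the future was passed through
  final greeting = await (forwarder.greet('world') as Future<String>);
  print(greeting);
}
```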