refactor: Remove deprecated moor database

Some data models that were previously only defined for moor were still used in the tests, so I rewrote them in the original code as well. Also, the "fake database" on native now works the same as on web, using Hive.
This commit is contained in:
Christian Pauly 2021-08-17 10:11:59 +02:00
parent fb16b96ea6
commit 86041513f8
10 changed files with 56 additions and 8837 deletions

View File

@ -39,6 +39,5 @@ export 'src/event.dart';
export 'src/room.dart';
export 'src/timeline.dart';
export 'src/user.dart';
export 'src/database/database.dart' show Database;
export 'src/database/database_api.dart';
export 'src/database/hive_database.dart';

View File

@ -1,893 +0,0 @@
/*
* Famedly Matrix SDK
* Copyright (C) 2020, 2021 Famedly GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import 'dart:async';
import 'dart:convert';
import 'package:matrix/encryption/utils/olm_session.dart';
import 'package:matrix/encryption/utils/outbound_group_session.dart';
import 'package:matrix/encryption/utils/ssss_cache.dart';
import 'package:matrix/encryption/utils/stored_inbound_group_session.dart';
import 'package:matrix/src/utils/QueuedToDeviceEvent.dart';
import 'package:moor/moor.dart';
import '../../matrix.dart' as sdk;
import 'package:matrix_api_lite/matrix_api_lite.dart' as api;
import '../client.dart';
import '../room.dart';
import 'database_api.dart';
part 'database.g.dart';
/// Idempotent convenience helpers on [Migrator] that tolerate schema
/// objects which already exist in the database.
extension MigratorExtension on Migrator {
  /// Creates [index], swallowing only the "already exists" error.
  Future<void> createIndexIfNotExists(Index index) async {
    try {
      await createIndex(index);
    } catch (exception) {
      final message = exception.toString().toLowerCase();
      if (!message.contains('already exists')) {
        rethrow;
      }
    }
  }

  /// Creates [table], swallowing only the "already exists" error.
  Future<void> createTableIfNotExists(TableInfo<Table, DataClass> table) async {
    try {
      await createTable(table);
    } catch (exception) {
      final message = exception.toString().toLowerCase();
      if (!message.contains('already exists')) {
        rethrow;
      }
    }
  }

  /// Adds [column] to [table], swallowing only the "duplicate column name"
  /// error raised when the column is already present.
  Future<void> addColumnIfNotExists(
      TableInfo<Table, DataClass> table, GeneratedColumn column) async {
    try {
      await addColumn(table, column);
    } catch (exception) {
      final message = exception.toString().toLowerCase();
      if (!message.contains('duplicate column name')) {
        rethrow;
      }
    }
  }
}
/// Moor/SQL implementation of [DatabaseApi].
///
/// Tables and named queries are defined in `database.moor` and compiled
/// into the generated part file via the `@UseMoor` annotation.
@UseMoor(
  include: {'database.moor'},
)
class Database extends _$Database implements DatabaseApi {
  Database(QueryExecutor e) : super(e);

  /// Attaches to an already established [DatabaseConnection].
  Database.connect(DatabaseConnection connection) : super.connect(connection);

  // This backend has a `files` table and can cache files.
  @override
  bool get supportsFileStoring => true;

  @override
  int get schemaVersion => 12;

  // Maximum size of a cached file: 1 MiB.
  @override
  int get maxFileSize => 1 * 1024 * 1024;

  /// Errors during database creation, migration or opening are pushed to
  /// this broadcast stream in addition to being rethrown.
  final StreamController<SdkError> onError = StreamController.broadcast();
  /// Schema creation and upgrade strategy.
  ///
  /// Upgrades are sequential: each `if (from == n)` block migrates exactly
  /// one schema version and then increments `from`, so a database that is
  /// several versions behind is upgraded step by step in a single call.
  /// Failures are forwarded to [onError] before being rethrown.
  @override
  MigrationStrategy get migration => MigrationStrategy(
        onCreate: (Migrator m) async {
          try {
            await m.createAll();
          } catch (e, s) {
            api.Logs().e('Create all failed in database migrator', e, s);
            onError.add(SdkError(exception: e, stackTrace: s));
            rethrow;
          }
        },
        onUpgrade: (Migrator m, int from, int to) async {
          try {
            // this appears to be only called once, so multiple consecutive
            // upgrades have to be handled appropriately in here
            if (from == 1) {
              // v1 -> v2: ensure all indexes exist.
              await m.createIndexIfNotExists(userDeviceKeysIndex);
              await m.createIndexIfNotExists(userDeviceKeysKeyIndex);
              await m.createIndexIfNotExists(olmSessionsIndex);
              await m.createIndexIfNotExists(outboundGroupSessionsIndex);
              await m.createIndexIfNotExists(inboundGroupSessionsIndex);
              await m.createIndexIfNotExists(roomsIndex);
              await m.createIndexIfNotExists(eventsIndex);
              await m.createIndexIfNotExists(roomStatesIndex);
              await m.createIndexIfNotExists(accountDataIndex);
              await m.createIndexIfNotExists(roomAccountDataIndex);
              await m.createIndexIfNotExists(presencesIndex);
              from++;
            }
            if (from == 2) {
              // v2 -> v3: recreate the outbound group sessions table.
              await m.deleteTable('outbound_group_sessions');
              await m.createTable(outboundGroupSessions);
              from++;
            }
            if (from == 3) {
              // v3 -> v4: introduce cross signing and SSSS caching.
              await m.createTableIfNotExists(userCrossSigningKeys);
              await m.createTableIfNotExists(ssssCache);
              // mark all keys as outdated so that the cross signing keys
              // will be fetched
              await customStatement(
                  'UPDATE user_device_keys SET outdated = true');
              from++;
            }
            if (from == 4) {
              // v4 -> v5: track when an olm session was last used.
              await m.addColumnIfNotExists(
                  olmSessions, olmSessions.lastReceived);
              from++;
            }
            if (from == 5) {
              // v5 -> v6: extra metadata for inbound group sessions.
              await m.addColumnIfNotExists(
                  inboundGroupSessions, inboundGroupSessions.uploaded);
              await m.addColumnIfNotExists(
                  inboundGroupSessions, inboundGroupSessions.senderKey);
              await m.addColumnIfNotExists(
                  inboundGroupSessions, inboundGroupSessions.senderClaimedKeys);
              from++;
            }
            if (from == 6) {
              // DATETIME was internally an int, so we should be able to
              // re-use the olm_sessions table.
              await m.deleteTable('outbound_group_sessions');
              await m.createTable(outboundGroupSessions);
              await m.deleteTable('events');
              await m.createTable(events);
              await m.deleteTable('room_states');
              await m.createTable(roomStates);
              await m.deleteTable('files');
              await m.createTable(files);
              // and now clear cache
              await delete(presences).go();
              await delete(roomAccountData).go();
              await delete(accountData).go();
              await delete(roomStates).go();
              await delete(events).go();
              await delete(rooms).go();
              await delete(outboundGroupSessions).go();
              await customStatement('UPDATE clients SET prev_batch = null');
              from++;
            }
            if (from == 7) {
              await m.addColumnIfNotExists(
                  inboundGroupSessions, inboundGroupSessions.allowedAtIndex);
              from++;
            }
            if (from == 8) {
              await m.addColumnIfNotExists(
                  userDeviceKeysKey, userDeviceKeysKey.lastActive);
              from++;
            }
            if (from == 9) {
              await m.addColumnIfNotExists(
                  userDeviceKeysKey, userDeviceKeysKey.lastSentMessage);
              await m.createIndexIfNotExists(olmSessionsIdentityIndex);
              from++;
            }
            if (from == 10) {
              // v10 -> v11: queue for to-device events.
              await m.createTableIfNotExists(toDeviceQueue);
              await m.createIndexIfNotExists(toDeviceQueueIndex);
              from++;
            }
            if (from == 11) {
              await m.addColumnIfNotExists(clients, clients.syncFilterId);
              from++;
            }
          } catch (e, s) {
            api.Logs().e('Database migration failed', e, s);
            onError.add(SdkError(exception: e, stackTrace: s));
            rethrow;
          }
        },
        beforeOpen: (_) async {
          try {
            if (executor.dialect == SqlDialect.sqlite) {
              // Switch to write-ahead logging; the result row is logged.
              final ret = await customSelect('PRAGMA journal_mode=WAL').get();
              if (ret.isNotEmpty) {
                api.Logs().v('[Moor] Switched database to mode ' +
                    ret.first.data['journal_mode'].toString());
              }
            }
          } catch (e, s) {
            api.Logs().e('Database before open failed', e, s);
            onError.add(SdkError(exception: e, stackTrace: s));
            rethrow;
          }
        },
      );
@override
Future<Map<String, dynamic>> getClient(String name) async {
final res = await dbGetClient(name).get();
if (res.isEmpty) return null;
await markPendingEventsAsError(res.single.clientId);
return res.single.toJson();
}
@override
Future<Map<String, sdk.DeviceKeysList>> getUserDeviceKeys(
sdk.Client client) async {
final deviceKeys = await getAllUserDeviceKeys(client.id).get();
if (deviceKeys.isEmpty) {
return {};
}
final deviceKeysKeys = await getAllUserDeviceKeysKeys(client.id).get();
final crossSigningKeys = await getAllUserCrossSigningKeys(client.id).get();
final res = <String, sdk.DeviceKeysList>{};
for (final entry in deviceKeys) {
res[entry.userId] = sdk.DeviceKeysList.fromDbJson(
entry.toJson(),
deviceKeysKeys
.where((k) => k.userId == entry.userId)
.map((d) => d.toJson())
.toList(),
crossSigningKeys
.where((k) => k.userId == entry.userId)
.map((d) => d.toJson())
.toList(),
client);
}
return res;
}
@override
Future<OutboundGroupSession> getOutboundGroupSession(
int clientId, String roomId, String userId) async {
final res = await dbGetOutboundGroupSession(clientId, roomId).get();
if (res.isEmpty) {
return null;
}
return OutboundGroupSession.fromJson(res.single.toJson(), userId);
}
@override
Future<List<StoredInboundGroupSession>> getAllInboundGroupSessions(
int clientId,
) async {
final res = await dbGetAllInboundGroupSessions(clientId).get();
if (res.isEmpty) {
return [];
}
return res
.map((res) => StoredInboundGroupSession.fromJson(res.toJson()))
.toList();
}
@override
Future<StoredInboundGroupSession> getInboundGroupSession(
int clientId,
String roomId,
String sessionId,
) async {
final res =
await dbGetInboundGroupSessionKey(clientId, roomId, sessionId).get();
if (res.isEmpty) {
return null;
}
return StoredInboundGroupSession.fromJson(res.single.toJson());
}
@override
Future<SSSSCache> getSSSSCache(int clientId, String type) async {
final res = await dbGetSSSSCache(clientId, type).get();
if (res.isEmpty) {
return null;
}
final dbCache = res.single;
return SSSSCache(
ciphertext: dbCache.ciphertext,
clientId: dbCache.clientId,
type: dbCache.type,
keyId: dbCache.keyId,
content: dbCache.content,
);
}
  /// Loads all rooms of [client], applying their important state events and
  /// room account data, then post-loads a minimal set of member events per
  /// room (own member event, last-message sender, heroes) in chunked
  /// batch queries.
  @override
  Future<List<sdk.Room>> getRoomList(sdk.Client client) async {
    final res = await select(rooms).get();
    final resStates = await getImportantRoomStates(
            client.id, client.importantStateEvents.toList())
        .get();
    final resAccountData = await getAllRoomAccountData(client.id).get();
    final roomList = <sdk.Room>[];
    final allMembersToPostload = <String, Set<String>>{};
    for (final r in res) {
      final room = await getRoomFromTableRow(
        r,
        client,
        states: resStates.where((rs) => rs.roomId == r.roomId),
        roomAccountData: resAccountData.where((rs) => rs.roomId == r.roomId),
      );
      roomList.add(room);
      // let's see if we need any m.room.member events
      // We always need the member event for ourself
      final membersToPostload = <String>{client.userID};
      // the lastEvent message preview might have an author we need to
      // fetch, if it is a group chat
      if (room.getState(api.EventTypes.Message) != null && !room.isDirectChat) {
        membersToPostload.add(room.getState(api.EventTypes.Message).senderId);
      }
      // if the room has no name and no canonical alias, its name is
      // calculated based on the heroes of the room
      if (room.getState(api.EventTypes.RoomName) == null &&
          room.getState(api.EventTypes.RoomCanonicalAlias) == null &&
          room.summary.mHeroes != null) {
        // we don't have a name and no canonical alias, so we'll need to
        // post-load the heroes
        membersToPostload
            .addAll(room.summary.mHeroes.where((h) => h.isNotEmpty));
      }
      // save it for loading later
      allMembersToPostload[room.id] = membersToPostload;
    }
    // now we postload all members, if there are any
    if (allMembersToPostload.isNotEmpty) {
      // we will generate a query to fetch as many events as possible at
      // once, as that significantly improves performance. However, to
      // prevent too large queries from being constructed, we limit to only
      // fetching 500 rooms at once.
      // This value might be fine-tune-able to be larger (and thus increase
      // performance more for very large accounts), however this very
      // conservative value should be on the safe side.
      const maxRoomsPerQuery = 500;
      // as we iterate over our entries in separate chunks one-by-one we use
      // an iterator which persists across the chunks, and thus we just
      // resume iteration at the place we previously left off.
      final entriesIterator = allMembersToPostload.entries.iterator;
      // now we iterate over all our 500-room-chunks...
      for (var i = 0;
          i < allMembersToPostload.keys.length;
          i += maxRoomsPerQuery) {
        // query the current chunk and build the query
        final membersRes = await (select(roomStates)
              ..where((s) {
                // all chunks have to have the right client id and must be of
                // type `m.room.member`
                final basequery = s.clientId.equals(client.id) &
                    s.type.equals('m.room.member');
                // this is where the magic happens. Here we build a query
                // with the form
                // room_id = '!r1' AND state_key IN (...) OR room_id = '!r2' AND state_key IN (...)
                // subqueries holds our query fragment
                Expression<bool> subqueries;
                // here we iterate over our chunk. We mustn't forget to
                // progress our iterator! We must check for if our chunk is
                // done *before* progressing the iterator, else we might
                // progress it twice around chunk edges, missing on rooms
                for (var j = 0;
                    j < maxRoomsPerQuery && entriesIterator.moveNext();
                    j++) {
                  final entry = entriesIterator.current;
                  // builds room_id = '!roomId1' AND state_key IN ('@member')
                  final q =
                      s.roomId.equals(entry.key) & s.stateKey.isIn(entry.value);
                  // adds it either as the start of subqueries or as a new OR
                  // condition to it
                  if (subqueries == null) {
                    subqueries = q;
                  } else {
                    subqueries = subqueries | q;
                  }
                }
                // combine the basequery with the subquery, giving our final
                // query
                return basequery & subqueries;
              }))
            .get();
        // now that we got all the entries from the database, set them as
        // room states
        for (final dbMember in membersRes) {
          final room = roomList.firstWhere((r) => r.id == dbMember.roomId);
          final event = getEventFromDb(dbMember, room);
          room.setState(event);
        }
      }
    }
    return roomList;
  }
@override
Future<Map<String, api.BasicEvent>> getAccountData(int clientId) async {
final newAccountData = <String, api.BasicEvent>{};
final rawAccountData = await getAllAccountData(clientId).get();
for (final d in rawAccountData) {
var content = sdk.Event.getMapFromPayload(d.content);
// there was a bug where it stored the entire event, not just the content
// in the databse. This is the temporary fix for those affected by the bug
if (content['content'] is Map && content['type'] is String) {
content = content['content'];
// and save
await storeAccountData(clientId, d.type, jsonEncode(content));
}
newAccountData[d.type] = api.BasicEvent(
content: content,
type: d.type,
);
}
return newAccountData;
}
/// Stores a RoomUpdate object in the database. Must be called inside of
/// [transaction].
final Set<String> _ensuredRooms = {};
@override
Future<void> storeRoomUpdate(int clientId, sdk.RoomUpdate roomUpdate,
[sdk.Room oldRoom]) async {
final setKey = '$clientId;${roomUpdate.id}';
if (roomUpdate.membership != api.Membership.leave) {
if (!_ensuredRooms.contains(setKey)) {
await ensureRoomExists(clientId, roomUpdate.id,
roomUpdate.membership.toString().split('.').last);
_ensuredRooms.add(setKey);
}
} else {
_ensuredRooms.remove(setKey);
await removeRoom(clientId, roomUpdate.id);
return;
}
var doUpdate = oldRoom == null;
if (!doUpdate) {
doUpdate = roomUpdate.highlight_count != oldRoom.highlightCount ||
roomUpdate.notification_count != oldRoom.notificationCount ||
roomUpdate.membership.toString().split('.').last !=
oldRoom.membership.toString().split('.').last ||
(roomUpdate.summary?.mJoinedMemberCount != null &&
roomUpdate.summary.mJoinedMemberCount !=
oldRoom.summary.mInvitedMemberCount) ||
(roomUpdate.summary?.mInvitedMemberCount != null &&
roomUpdate.summary.mJoinedMemberCount !=
oldRoom.summary.mJoinedMemberCount) ||
(roomUpdate.summary?.mHeroes != null &&
roomUpdate.summary.mHeroes.join(',') !=
oldRoom.summary.mHeroes.join(','));
}
if (doUpdate) {
await (update(rooms)
..where((r) =>
r.roomId.equals(roomUpdate.id) & r.clientId.equals(clientId)))
.write(RoomsCompanion(
highlightCount: Value(roomUpdate.highlight_count),
notificationCount: Value(roomUpdate.notification_count),
membership: Value(roomUpdate.membership.toString().split('.').last),
joinedMemberCount: roomUpdate.summary?.mJoinedMemberCount != null
? Value(roomUpdate.summary.mJoinedMemberCount)
: Value.absent(),
invitedMemberCount: roomUpdate.summary?.mInvitedMemberCount != null
? Value(roomUpdate.summary.mInvitedMemberCount)
: Value.absent(),
heroes: roomUpdate.summary?.mHeroes != null
? Value(roomUpdate.summary.mHeroes.join(','))
: Value.absent(),
));
}
// Is the timeline limited? Then all previous messages should be
// removed from the database!
if (roomUpdate.limitedTimeline) {
await removeSuccessfulRoomEvents(clientId, roomUpdate.id);
await updateRoomSortOrder(0.0, 0.0, clientId, roomUpdate.id);
}
if (roomUpdate.prev_batch != null) {
await setRoomPrevBatch(roomUpdate.prev_batch, clientId, roomUpdate.id);
}
}
  /// Stores an EventUpdate object in the database. Must be called inside of
  /// [transaction].
  ///
  /// Handles redactions, the status transition of pending events
  /// (transaction id -> event id), timeline/history events, room states and
  /// room account data.
  @override
  Future<void> storeEventUpdate(
      int clientId, sdk.EventUpdate eventUpdate) async {
    // Ephemeral updates (typing etc.) are never persisted.
    if (eventUpdate.type == sdk.EventUpdateType.ephemeral) return;
    final eventContent = eventUpdate.content;
    final type = eventUpdate.type;
    final chatId = eventUpdate.roomID;
    // Get the state_key for state events
    String stateKey;
    if (eventContent['state_key'] is String) {
      stateKey = eventContent['state_key'];
    }
    if (eventUpdate.content['type'] == api.EventTypes.Redaction) {
      await redactMessage(clientId, eventUpdate);
    }
    if (type == sdk.EventUpdateType.timeline ||
        type == sdk.EventUpdateType.history) {
      // calculate the status
      var status = 2;
      if (eventContent['unsigned'] is Map<String, dynamic> &&
          eventContent['unsigned'][messageSendingStatusKey] is num) {
        status = eventContent['unsigned'][messageSendingStatusKey];
      }
      if (eventContent['status'] is num) status = eventContent['status'];
      // A sending/errored event (status 1 or -1) carrying a transaction id
      // updates an event we already stored under that transaction id.
      var storeNewEvent = !((status == 1 || status == -1) &&
          eventContent['unsigned'] is Map<String, dynamic> &&
          eventContent['unsigned']['transaction_id'] is String);
      if (!storeNewEvent) {
        final allOldEvents =
            await getEvent(clientId, eventContent['event_id'], chatId).get();
        if (allOldEvents.isNotEmpty) {
          // we were likely unable to change transaction_id -> event_id,
          // because the event ID already exists! So, we try to fetch the
          // old event; the transaction id event will automatically be
          // deleted further down
          final oldEvent = allOldEvents.first;
          // do we update the status? We should allow 0 -> -1 updates and
          // status increases
          if (status > oldEvent.status ||
              (oldEvent.status == 0 && status == -1)) {
            // update the status
            await updateEventStatusOnly(
                status, clientId, eventContent['event_id'], chatId);
          }
        } else {
          // status changed and we have an old transaction id --> update
          // event id and stuffs
          try {
            final updated = await updateEventStatus(
                status,
                eventContent['event_id'],
                clientId,
                eventContent['unsigned']['transaction_id'],
                chatId);
            if (updated == 0) {
              storeNewEvent = true;
            }
          } catch (err) {
            // we could not update the transaction id to the event id, so it
            // already exists. As we just tried to fetch the event previously
            // this is a race condition if the event comes down sync in the
            // mean time. That means that the status we already have in the
            // database is likely more accurate than our status. So, we just
            // ignore this error
          }
        }
      }
      if (storeNewEvent) {
        DbEvent oldEvent;
        if (type == sdk.EventUpdateType.history) {
          final allOldEvents =
              await getEvent(clientId, eventContent['event_id'], chatId).get();
          if (allOldEvents.isNotEmpty) {
            oldEvent = allOldEvents.first;
          }
        }
        await storeEvent(
          clientId,
          eventContent['event_id'],
          chatId,
          // Keep the sort order of an already stored history event.
          oldEvent?.sortOrder ?? eventUpdate.sortOrder,
          eventContent['origin_server_ts'] ??
              DateTime.now().millisecondsSinceEpoch,
          eventContent['sender'],
          eventContent['type'],
          json.encode(eventContent['unsigned'] ?? ''),
          json.encode(eventContent['content']),
          json.encode(eventContent['prevContent']),
          eventContent['state_key'],
          status,
        );
      }
      // is there a transaction id? Then delete the event with this id.
      if (status != -1 &&
          status != 0 &&
          eventUpdate.content['unsigned'] is Map &&
          eventUpdate.content['unsigned']['transaction_id'] is String) {
        await removeEvent(clientId,
            eventUpdate.content['unsigned']['transaction_id'], chatId);
      }
    }
    // History events never update the current room state.
    if (type == sdk.EventUpdateType.history) return;
    if (type != sdk.EventUpdateType.accountData &&
        ((stateKey is String) ||
            [
              api.EventTypes.Message,
              api.EventTypes.Sticker,
              api.EventTypes.Encrypted
            ].contains(eventUpdate.content['type']))) {
      final now = DateTime.now();
      await storeRoomState(
        clientId,
        eventContent['event_id'] ?? now.millisecondsSinceEpoch.toString(),
        chatId,
        eventUpdate.sortOrder ?? 0.0,
        eventContent['origin_server_ts'] ?? now.millisecondsSinceEpoch,
        eventContent['sender'],
        eventContent['type'],
        json.encode(eventContent['unsigned'] ?? ''),
        json.encode(eventContent['content']),
        json.encode(eventContent['prev_content'] ?? ''),
        stateKey ?? '',
      );
    } else if (type == sdk.EventUpdateType.accountData) {
      await storeRoomAccountData(
        clientId,
        eventContent['type'],
        chatId,
        json.encode(eventContent['content']),
      );
    }
  }
@override
Future<sdk.Event> getEventById(
int clientId, String eventId, sdk.Room room) async {
final event = await getEvent(clientId, eventId, room.id).get();
if (event.isEmpty) {
return null;
}
return getEventFromDb(event.single, room);
}
Future<bool> redactMessage(int clientId, sdk.EventUpdate eventUpdate) async {
final events = await getEvent(
clientId, eventUpdate.content['redacts'], eventUpdate.roomID)
.get();
var success = false;
for (final dbEvent in events) {
final event = getEventFromDb(dbEvent, null);
event.setRedactionEvent(sdk.Event.fromJson(eventUpdate.content, null));
final changes1 = await updateEvent(
json.encode(event.unsigned ?? ''),
json.encode(event.content ?? ''),
json.encode(event.prevContent ?? ''),
clientId,
event.eventId,
eventUpdate.roomID,
);
final changes2 = await updateEvent(
json.encode(event.unsigned ?? ''),
json.encode(event.content ?? ''),
json.encode(event.prevContent ?? ''),
clientId,
event.eventId,
eventUpdate.roomID,
);
if (changes1 == 1 && changes2 == 1) success = true;
}
return success;
}
  /// Removes a room and all of its cached events, states and room account
  /// data from the database.
  @override
  Future<void> forgetRoom(int clientId, String roomId) async {
    final setKey = '$clientId;$roomId';
    // Drop the "room exists" marker so a later re-join recreates the row.
    _ensuredRooms.remove(setKey);
    await (delete(rooms)
          ..where((r) => r.roomId.equals(roomId) & r.clientId.equals(clientId)))
        .go();
    await (delete(events)
          ..where((r) => r.roomId.equals(roomId) & r.clientId.equals(clientId)))
        .go();
    await (delete(roomStates)
          ..where((r) => r.roomId.equals(roomId) & r.clientId.equals(clientId)))
        .go();
    await (delete(roomAccountData)
          ..where((r) => r.roomId.equals(roomId) & r.clientId.equals(clientId)))
        .go();
  }
  /// Deletes all cached sync data of this client (presences, room account
  /// data, account data, states, events, rooms, outbound group sessions)
  /// and resets the prev_batch token — encryption key tables are untouched.
  @override
  Future<void> clearCache(int clientId) async {
    await (delete(presences)..where((r) => r.clientId.equals(clientId))).go();
    await (delete(roomAccountData)..where((r) => r.clientId.equals(clientId)))
        .go();
    await (delete(accountData)..where((r) => r.clientId.equals(clientId))).go();
    await (delete(roomStates)..where((r) => r.clientId.equals(clientId))).go();
    await (delete(events)..where((r) => r.clientId.equals(clientId))).go();
    await (delete(rooms)..where((r) => r.clientId.equals(clientId))).go();
    await (delete(outboundGroupSessions)
          ..where((r) => r.clientId.equals(clientId)))
        .go();
    // The ensured-rooms cache refers to rows that were just deleted.
    _ensuredRooms.clear();
    await storePrevBatch(null, clientId);
  }
@override
Future<void> clear(int clientId) async {
await clearCache(clientId);
await (delete(inboundGroupSessions)
..where((r) => r.clientId.equals(clientId)))
.go();
await (delete(ssssCache)..where((r) => r.clientId.equals(clientId))).go();
await (delete(olmSessions)..where((r) => r.clientId.equals(clientId))).go();
await (delete(userCrossSigningKeys)
..where((r) => r.clientId.equals(clientId)))
.go();
await (delete(userDeviceKeysKey)..where((r) => r.clientId.equals(clientId)))
.go();
await (delete(userDeviceKeys)..where((r) => r.clientId.equals(clientId)))
.go();
await (delete(ssssCache)..where((r) => r.clientId.equals(clientId))).go();
await (delete(clients)..where((r) => r.clientId.equals(clientId))).go();
await (delete(toDeviceQueue)..where((r) => r.clientId.equals(clientId)))
.go();
}
@override
Future<sdk.User> getUser(int clientId, String userId, sdk.Room room) async {
final res = await dbGetUser(clientId, userId, room.id).get();
if (res.isEmpty) {
return null;
}
return getEventFromDb(res.single, room).asUser;
}
@override
Future<List<sdk.User>> getUsers(int clientId, sdk.Room room) async {
final res = await dbGetUsers(clientId, room.id).get();
return res.map((r) => getEventFromDb(r, room).asUser).toList();
}
@override
Future<List<sdk.Event>> getEventList(int clientId, sdk.Room room) async {
final res = await dbGetEventList(clientId, room.id).get();
return res.map((r) => getEventFromDb(r, room)).toList();
}
@override
Future<Uint8List> getFile(String mxcUri) async {
final res = await dbGetFile(mxcUri).get();
if (res.isEmpty) return null;
return res.single.bytes;
}
@override
Future<List<sdk.Event>> getUnimportantRoomEventStatesForRoom(
int client_id,
List<String> events,
Room room,
) async {
final entries = await getUnimportantRoomStatesForRoom(
client_id,
room.id,
events,
).get();
return entries.map((dbEvent) => getEventFromDb(dbEvent, room)).toList();
}
@override
Future<List<OlmSession>> getOlmSessions(
int client_id,
String identity_key,
String userId,
) async {
final rows = await dbGetOlmSessions(client_id, identity_key).get();
return rows
.map((row) => OlmSession.fromJson(row.toJson(), userId))
.toList();
}
@override
Future<List<OlmSession>> getOlmSessionsForDevices(
int client_id,
List<String> identity_keys,
String userId,
) async {
final rows =
await dbGetOlmSessionsForDevices(client_id, identity_keys).get();
return rows
.map((row) => OlmSession.fromJson(row.toJson(), userId))
.toList();
}
@override
Future<List<QueuedToDeviceEvent>> getToDeviceEventQueue(int client_id) async {
final rows = await getToDeviceQueue(client_id).get();
return rows
.map((row) => QueuedToDeviceEvent(
id: row.id,
type: row.type,
txnId: row.txnId,
content:
(json.decode(row.content) as Map<String, dynamic>).copy(),
))
.toList();
}
  /// Returns the stored last sent message for the given device of
  /// [user_id].
  @override
  Future<List<String>> getLastSentMessageUserDeviceKey(
    int client_id,
    String user_id,
    String device_id,
  ) =>
      dbGetLastSentMessageUserDeviceKey(client_id, user_id, device_id).get();
@override
Future<List<StoredInboundGroupSession>>
getInboundGroupSessionsToUpload() async {
final rows = await dbGetInboundGroupSessionsToUpload().get();
return rows
.map((row) => StoredInboundGroupSession.fromJson(row.toJson()))
.toList();
}
}
/// Get an event from either DbRoomState or DbEvent.
///
/// [dbEntry] must be a [DbRoomState] or [DbEvent] row; [room] may be null
/// (it is, for example, during redaction handling).
sdk.Event getEventFromDb(dynamic dbEntry, sdk.Room room) {
  if (!(dbEntry is DbRoomState || dbEntry is DbEvent)) {
    // Bugfix: previously threw a bare string; throw a proper Error type so
    // callers get a meaningful type and a stack trace.
    throw ArgumentError.value(dbEntry, 'dbEntry', 'Unknown db type');
  }
  final content = sdk.Event.getMapFromPayload(dbEntry.content);
  final unsigned = sdk.Event.getMapFromPayload(dbEntry.unsigned);
  final prevContent = sdk.Event.getMapFromPayload(dbEntry.prevContent);
  return sdk.Event(
    // Room state rows carry no status column; fall back to the default.
    status:
        (dbEntry is DbEvent ? dbEntry.status : null) ?? sdk.Event.defaultStatus,
    stateKey: dbEntry.stateKey,
    prevContent: prevContent,
    content: content,
    type: dbEntry.type,
    eventId: dbEntry.eventId,
    roomId: dbEntry.roomId,
    senderId: dbEntry.sender,
    originServerTs: dbEntry.originServerTs != null
        ? DateTime.fromMillisecondsSinceEpoch(dbEntry.originServerTs)
        : DateTime.now(),
    unsigned: unsigned,
    room: room,
    sortOrder: dbEntry.sortOrder ?? 0.0,
  );
}
/// Returns a Room built from a database [row]. If the state events and/or
/// room account data are also given, they are awaited and applied to the
/// returned room.
Future<Room> getRoomFromTableRow(
  // either Map<String, dynamic> or DbRoom
  DbRoom row,
  Client matrix, {
  dynamic states, // DbRoomState, as iterator and optionally as future
  dynamic
      roomAccountData, // DbRoomAccountData, as iterator and optionally as future
}) async {
  final newRoom = Room(
    id: row.roomId,
    // Parse the membership back from its stored enum name, e.g. 'join'.
    membership: sdk.Membership.values
        .firstWhere((e) => e.toString() == 'Membership.' + row.membership),
    notificationCount: row.notificationCount,
    highlightCount: row.highlightCount,
    // TODO: do proper things
    notificationSettings: 'mention',
    prev_batch: row.prevBatch,
    // Heroes are stored comma-joined in a single column.
    summary: sdk.RoomSummary.fromJson({
      'm.heroes': row.heroes?.split(',') ?? [],
      'm.joined_member_count': row.joinedMemberCount,
      'm.invited_member_count': row.invitedMemberCount,
    }),
    client: matrix,
    roomAccountData: {},
    newestSortOrder: row.newestSortOrder,
    oldestSortOrder: row.oldestSortOrder,
  );
  if (states != null) {
    for (final rawState in await states) {
      final newState = getEventFromDb(rawState, newRoom);
      newRoom.setState(newState);
    }
  }
  final newRoomAccountData = <String, sdk.BasicRoomEvent>{};
  if (roomAccountData != null) {
    for (final singleAccountData in await roomAccountData) {
      final content = sdk.Event.getMapFromPayload(singleAccountData.content);
      final newData = sdk.BasicRoomEvent(
        content: content,
        type: singleAccountData.type,
        roomId: singleAccountData.roomId,
      );
      newRoomAccountData[newData.type] = newData;
    }
  }
  newRoom.roomAccountData = newRoomAccountData;
  return newRoom;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,261 +0,0 @@
-- Table definitions
CREATE TABLE clients (
client_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
homeserver_url TEXT NOT NULL,
token TEXT NOT NULL,
user_id TEXT NOT NULL,
device_id TEXT,
device_name TEXT,
prev_batch TEXT,
sync_filter_id TEXT,
olm_account TEXT,
UNIQUE(name)
) AS DbClient;
CREATE TABLE user_device_keys (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
user_id TEXT NOT NULL,
outdated BOOLEAN DEFAULT true,
UNIQUE(client_id, user_id)
) as DbUserDeviceKey;
CREATE INDEX user_device_keys_index ON user_device_keys(client_id);
CREATE TABLE user_device_keys_key (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
user_id TEXT NOT NULL,
device_id TEXT NOT NULL,
content TEXT NOT NULL,
verified BOOLEAN DEFAULT false,
blocked BOOLEAN DEFAULT false,
last_active BIGINT,
last_sent_message TEXT,
UNIQUE(client_id, user_id, device_id)
) as DbUserDeviceKeysKey;
CREATE INDEX user_device_keys_key_index ON user_device_keys_key(client_id);
CREATE TABLE user_cross_signing_keys (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
user_id TEXT NOT NULL,
public_key TEXT NOT NULL,
content TEXT NOT NULL,
verified BOOLEAN DEFAULT false,
blocked BOOLEAN DEFAULT false,
UNIQUE(client_id, user_id, public_key)
) as DbUserCrossSigningKey;
CREATE INDEX user_cross_signing_keys_index ON user_cross_signing_keys(client_id);
CREATE TABLE olm_sessions (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
identity_key TEXT NOT NULL,
session_id TEXT NOT NULL,
pickle TEXT NOT NULL,
last_received BIGINT,
UNIQUE(client_id, identity_key, session_id)
) AS DbOlmSessions;
CREATE INDEX olm_sessions_index ON olm_sessions(client_id);
CREATE INDEX olm_sessions_identity_index ON olm_sessions(client_id, identity_key);
CREATE TABLE outbound_group_sessions (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
room_id TEXT NOT NULL,
pickle TEXT NOT NULL,
device_ids TEXT NOT NULL,
creation_time BIGINT NOT NULL,
sent_messages INTEGER NOT NULL DEFAULT '0',
UNIQUE(client_id, room_id)
) AS DbOutboundGroupSession;
CREATE INDEX outbound_group_sessions_index ON outbound_group_sessions(client_id);
CREATE TABLE inbound_group_sessions (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
room_id TEXT NOT NULL,
session_id TEXT NOT NULL,
pickle TEXT NOT NULL,
content TEXT,
indexes TEXT,
allowed_at_index TEXT,
uploaded BOOLEAN DEFAULT false,
sender_key TEXT,
sender_claimed_keys TEXT,
UNIQUE(client_id, room_id, session_id)
) AS DbInboundGroupSession;
CREATE INDEX inbound_group_sessions_index ON inbound_group_sessions(client_id);
CREATE TABLE ssss_cache (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
type TEXT NOT NULL,
key_id TEXT NOT NULL,
ciphertext TEXT NOT NULL,
content TEXT NOT NULL,
UNIQUE(client_id, type)
) AS DbSSSSCache;
CREATE TABLE rooms (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
room_id TEXT NOT NULL,
membership TEXT NOT NULL,
highlight_count INTEGER NOT NULL DEFAULT '0',
notification_count INTEGER NOT NULL DEFAULT '0',
prev_batch TEXT DEFAULT '',
joined_member_count INTEGER NOT NULL DEFAULT '0',
invited_member_count INTEGER NOT NULL DEFAULT '0',
newest_sort_order DOUBLE NOT NULL DEFAULT '0',
oldest_sort_order DOUBLE NOT NULL DEFAULT '0',
heroes TEXT DEFAULT '',
UNIQUE(client_id, room_id)
) AS DbRoom;
CREATE INDEX rooms_index ON rooms(client_id);
CREATE TABLE events (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
sort_order DOUBLE NOT NULL,
origin_server_ts BIGINT NOT NULL,
sender TEXT NOT NULL,
type TEXT NOT NULL,
unsigned TEXT,
content TEXT,
prev_content TEXT,
state_key TEXT,
status INTEGER,
UNIQUE(client_id, event_id, room_id)
) AS DbEvent;
CREATE INDEX events_index ON events(client_id, room_id);
-- Current room state events. Unlike `events`, `state_key` is NOT NULL and
-- the second UNIQUE constraint guarantees a single current event per
-- (room, state_key, type), so INSERT OR REPLACE overwrites old state.
CREATE TABLE room_states (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
event_id TEXT NOT NULL,
room_id TEXT NOT NULL,
sort_order DOUBLE NOT NULL,
origin_server_ts BIGINT NOT NULL,
sender TEXT NOT NULL,
type TEXT NOT NULL,
unsigned TEXT,
content TEXT,
prev_content TEXT,
state_key TEXT NOT NULL,
UNIQUE(client_id, event_id, room_id),
UNIQUE(client_id, room_id, state_key, type)
) AS DbRoomState;
CREATE INDEX room_states_index ON room_states(client_id);
-- Global (account-wide) account data, one row per event type.
CREATE TABLE account_data (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
type TEXT NOT NULL,
content TEXT,
UNIQUE(client_id, type)
) AS DbAccountData;
CREATE INDEX account_data_index ON account_data(client_id);
-- Per-room account data (e.g. read markers, tags), one row per (type, room).
CREATE TABLE room_account_data (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
type TEXT NOT NULL,
room_id TEXT NOT NULL,
content TEXT,
UNIQUE(client_id, type, room_id)
) AS DbRoomAccountData;
CREATE INDEX room_account_data_index ON room_account_data(client_id);
-- Presence events, latest one kept per (type, sender).
CREATE TABLE presences (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
type TEXT NOT NULL,
sender TEXT NOT NULL,
content TEXT,
UNIQUE(client_id, type, sender)
) AS DbPresence;
CREATE INDEX presences_index ON presences(client_id);
-- Downloaded file/attachment cache, keyed by mxc:// URI. Not scoped by
-- client_id: cached media is shared across clients. `saved_at` enables
-- age-based eviction via deleteOldFiles.
CREATE TABLE files (
mxc_uri TEXT NOT NULL PRIMARY KEY,
bytes BLOB,
saved_at BIGINT,
UNIQUE(mxc_uri)
) AS DbFile;
-- Queue of outgoing to-device events awaiting delivery; `id` orders and
-- identifies entries for deletion after a successful send.
CREATE TABLE to_device_queue (
client_id INTEGER NOT NULL REFERENCES clients(client_id),
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
type TEXT NOT NULL,
txn_id TEXT NOT NULL,
content TEXT NOT NULL
-- Fixed: lowercase `as` normalized to `AS` for consistency with every other
-- table declaration in this file (the generated class name is unchanged).
) AS DbToDeviceQueue;
CREATE INDEX to_device_queue_index ON to_device_queue(client_id);
-- named queries
-- Client row bookkeeping.
dbGetClient: SELECT * FROM clients WHERE name = :name;
updateClient: UPDATE clients SET homeserver_url = :homeserver_url, token = :token, user_id = :user_id, device_id = :device_id, device_name = :device_name, prev_batch = :prev_batch, olm_account = :olm_account WHERE client_id = :client_id;
updateClientKeys: UPDATE clients SET olm_account = :olm_account WHERE client_id = :client_id;
storePrevBatch: UPDATE clients SET prev_batch = :prev_batch WHERE client_id = :client_id;
storeSyncFilterId: UPDATE clients SET sync_filter_id = :sync_filter_id WHERE client_id = :client_id;
-- Full per-client dumps, typically read once at startup.
getAllUserDeviceKeys: SELECT * FROM user_device_keys WHERE client_id = :client_id;
getAllUserDeviceKeysKeys: SELECT * FROM user_device_keys_key WHERE client_id = :client_id;
getAllUserCrossSigningKeys: SELECT * FROM user_cross_signing_keys WHERE client_id = :client_id;
getAllOlmSessions: SELECT * FROM olm_sessions WHERE client_id = :client_id;
-- Olm (device-to-device) sessions.
dbGetOlmSessions: SELECT * FROM olm_sessions WHERE client_id = :client_id AND identity_key = :identity_key;
dbGetOlmSessionsForDevices: SELECT * FROM olm_sessions WHERE client_id = :client_id AND identity_key IN :identity_keys;
-- NOTE(review): `:identitiy_key` is misspelled (cf. `identity_key` above);
-- the generated Dart parameter name inherits the typo. Left unchanged here
-- because renaming the placeholder would break existing generated callers.
storeOlmSession: INSERT OR REPLACE INTO olm_sessions (client_id, identity_key, session_id, pickle, last_received) VALUES (:client_id, :identitiy_key, :session_id, :pickle, :last_received);
-- Outbound megolm group sessions (one per room).
getAllOutboundGroupSessions: SELECT * FROM outbound_group_sessions WHERE client_id = :client_id;
dbGetOutboundGroupSession: SELECT * FROM outbound_group_sessions WHERE client_id = :client_id AND room_id = :room_id;
storeOutboundGroupSession: INSERT OR REPLACE INTO outbound_group_sessions (client_id, room_id, pickle, device_ids, creation_time, sent_messages) VALUES (:client_id, :room_id, :pickle, :device_ids, :creation_time, :sent_messages);
removeOutboundGroupSession: DELETE FROM outbound_group_sessions WHERE client_id = :client_id AND room_id = :room_id;
-- Inbound megolm group sessions and their upload state.
dbGetInboundGroupSessionKey: SELECT * FROM inbound_group_sessions WHERE client_id = :client_id AND room_id = :room_id AND session_id = :session_id;
dbGetInboundGroupSessionKeys: SELECT * FROM inbound_group_sessions WHERE client_id = :client_id AND room_id = :room_id;
dbGetAllInboundGroupSessions: SELECT * FROM inbound_group_sessions WHERE client_id = :client_id;
storeInboundGroupSession: INSERT OR REPLACE INTO inbound_group_sessions (client_id, room_id, session_id, pickle, content, indexes, allowed_at_index, sender_key, sender_claimed_keys) VALUES (:client_id, :room_id, :session_id, :pickle, :content, :indexes, :allowed_at_index, :sender_key, :sender_claimed_keys);
updateInboundGroupSessionIndexes: UPDATE inbound_group_sessions SET indexes = :indexes WHERE client_id = :client_id AND room_id = :room_id AND session_id = :session_id;
updateInboundGroupSessionAllowedAtIndex: UPDATE inbound_group_sessions SET allowed_at_index = :allowed_at_index WHERE client_id = :client_id AND room_id = :room_id AND session_id = :session_id;
-- NOTE(review): unlike its siblings this query is not scoped by client_id —
-- presumably deliberate (collects pending uploads across all clients); verify.
dbGetInboundGroupSessionsToUpload: SELECT * FROM inbound_group_sessions WHERE uploaded = false LIMIT 500;
markInboundGroupSessionAsUploaded: UPDATE inbound_group_sessions SET uploaded = true WHERE client_id = :client_id AND room_id = :room_id AND session_id = :session_id;
markInboundGroupSessionsAsNeedingUpload: UPDATE inbound_group_sessions SET uploaded = false WHERE client_id = :client_id;
-- Device keys and cross-signing keys.
storeUserDeviceKeysInfo: INSERT OR REPLACE INTO user_device_keys (client_id, user_id, outdated) VALUES (:client_id, :user_id, :outdated);
setVerifiedUserDeviceKey: UPDATE user_device_keys_key SET verified = :verified WHERE client_id = :client_id AND user_id = :user_id AND device_id = :device_id;
setBlockedUserDeviceKey: UPDATE user_device_keys_key SET blocked = :blocked WHERE client_id = :client_id AND user_id = :user_id AND device_id = :device_id;
storeUserDeviceKey: INSERT OR REPLACE INTO user_device_keys_key (client_id, user_id, device_id, content, verified, blocked, last_active) VALUES (:client_id, :user_id, :device_id, :content, :verified, :blocked, :last_active);
removeUserDeviceKey: DELETE FROM user_device_keys_key WHERE client_id = :client_id AND user_id = :user_id AND device_id = :device_id;
setLastActiveUserDeviceKey: UPDATE user_device_keys_key SET last_active = :last_active WHERE client_id = :client_id AND user_id = :user_id AND device_id = :device_id;
setLastSentMessageUserDeviceKey: UPDATE user_device_keys_key SET last_sent_message = :last_sent_message WHERE client_id = :client_id AND user_id = :user_id AND device_id = :device_id;
dbGetLastSentMessageUserDeviceKey: SELECT last_sent_message FROM user_device_keys_key WHERE client_id = :client_id AND user_id = :user_id AND device_id = :device_id;
setVerifiedUserCrossSigningKey: UPDATE user_cross_signing_keys SET verified = :verified WHERE client_id = :client_id AND user_id = :user_id AND public_key = :public_key;
setBlockedUserCrossSigningKey: UPDATE user_cross_signing_keys SET blocked = :blocked WHERE client_id = :client_id AND user_id = :user_id AND public_key = :public_key;
storeUserCrossSigningKey: INSERT OR REPLACE INTO user_cross_signing_keys (client_id, user_id, public_key, content, verified, blocked) VALUES (:client_id, :user_id, :public_key, :content, :verified, :blocked);
removeUserCrossSigningKey: DELETE FROM user_cross_signing_keys WHERE client_id = :client_id AND user_id = :user_id AND public_key = :public_key;
-- SSSS cache and client creation.
storeSSSSCache: INSERT OR REPLACE INTO ssss_cache (client_id, type, key_id, ciphertext, content) VALUES (:client_id, :type, :key_id, :ciphertext, :content);
dbGetSSSSCache: SELECT * FROM ssss_cache WHERE client_id = :client_id AND type = :type;
clearSSSSCache: DELETE FROM ssss_cache WHERE client_id = :client_id;
insertClient: INSERT INTO clients (name, homeserver_url, token, user_id, device_id, device_name, prev_batch, olm_account) VALUES (:name, :homeserver_url, :token, :user_id, :device_id, :device_name, :prev_batch, :olm_account);
-- Rooms, events and state.
ensureRoomExists: INSERT OR IGNORE INTO rooms (client_id, room_id, membership) VALUES (:client_id, :room_id, :membership);
setRoomPrevBatch: UPDATE rooms SET prev_batch = :prev_batch WHERE client_id = :client_id AND room_id = :room_id;
updateRoomSortOrder: UPDATE rooms SET oldest_sort_order = :oldest_sort_order, newest_sort_order = :newest_sort_order WHERE client_id = :client_id AND room_id = :room_id;
getAllAccountData: SELECT * FROM account_data WHERE client_id = :client_id;
storeAccountData: INSERT OR REPLACE INTO account_data (client_id, type, content) VALUES (:client_id, :type, :content);
updateEvent: UPDATE events SET unsigned = :unsigned, content = :content, prev_content = :prev_content WHERE client_id = :client_id AND event_id = :event_id AND room_id = :room_id;
-- Swaps the temporary local event ID for the server-assigned one on send.
updateEventStatus: UPDATE events SET status = :status, event_id = :new_event_id WHERE client_id = :client_id AND event_id = :old_event_id AND room_id = :room_id;
updateEventStatusOnly: UPDATE events SET status = :status WHERE client_id = :client_id AND event_id = :event_id AND room_id = :room_id;
getImportantRoomStates: SELECT * FROM room_states WHERE client_id = :client_id AND type IN :events;
getAllRoomStates: SELECT * FROM room_states WHERE client_id = :client_id;
getUnimportantRoomStatesForRoom: SELECT * FROM room_states WHERE client_id = :client_id AND room_id = :room_id AND type NOT IN :events;
storeEvent: INSERT OR REPLACE INTO events (client_id, event_id, room_id, sort_order, origin_server_ts, sender, type, unsigned, content, prev_content, state_key, status) VALUES (:client_id, :event_id, :room_id, :sort_order, :origin_server_ts, :sender, :type, :unsigned, :content, :prev_content, :state_key, :status);
storeRoomState: INSERT OR REPLACE INTO room_states (client_id, event_id, room_id, sort_order, origin_server_ts, sender, type, unsigned, content, prev_content, state_key) VALUES (:client_id, :event_id, :room_id, :sort_order, :origin_server_ts, :sender, :type, :unsigned, :content, :prev_content, :state_key);
getAllRoomAccountData: SELECT * FROM room_account_data WHERE client_id = :client_id;
storeRoomAccountData: INSERT OR REPLACE INTO room_account_data (client_id, type, room_id, content) VALUES (:client_id, :type, :room_id, :content);
-- Users are m.room.member state events.
dbGetUser: SELECT * FROM room_states WHERE client_id = :client_id AND type = 'm.room.member' AND state_key = :state_key AND room_id = :room_id;
dbGetUsers: SELECT * FROM room_states WHERE client_id = :client_id AND type = 'm.room.member' AND room_id = :room_id;
dbGetEventList: SELECT * FROM events WHERE client_id = :client_id AND room_id = :room_id GROUP BY event_id ORDER BY sort_order DESC;
getStates: SELECT * FROM room_states WHERE client_id = :client_id AND room_id = :room_id;
resetNotificationCount: UPDATE rooms SET notification_count = 0, highlight_count = 0 WHERE client_id = :client_id AND room_id = :room_id;
getRoom: SELECT * FROM rooms WHERE client_id = :client_id AND room_id = :room_id;
getEvent: SELECT * FROM events WHERE client_id = :client_id AND event_id = :event_id AND room_id = :room_id;
removeEvent: DELETE FROM events WHERE client_id = :client_id AND event_id = :event_id AND room_id = :room_id;
removeRoom: DELETE FROM rooms WHERE client_id = :client_id AND room_id = :room_id;
-- status -1 = error, 0 = pending; anything else counts as successfully sent.
removeSuccessfulRoomEvents: DELETE FROM events WHERE client_id = :client_id AND room_id = :room_id AND status <> -1 AND status <> 0;
-- File cache and the outgoing to-device queue.
storeFile: INSERT OR REPLACE INTO files (mxc_uri, bytes, saved_at) VALUES (:mxc_uri, :bytes, :time);
dbGetFile: SELECT * FROM files WHERE mxc_uri = :mxc_uri;
-- Flags events still 'pending' (0) as failed, e.g. after a restart.
markPendingEventsAsError: UPDATE events SET status = -1 WHERE client_id = :client_id AND status = 0;
deleteOldFiles: DELETE FROM files WHERE saved_at < :saved_at;
insertIntoToDeviceQueue: INSERT INTO to_device_queue (client_id, type, txn_id, content) VALUES (:client_id, :type, :txn_id, :content);
getToDeviceQueue: SELECT * FROM to_device_queue WHERE client_id = :client_id;
deleteFromToDeviceQueue: DELETE FROM to_device_queue WHERE client_id = :client_id AND id = :id;

View File

@ -12,7 +12,6 @@ dependencies:
canonical_json: ^1.0.0
markdown: ^4.0.0
html_unescape: ^2.0.0
moor: ">=4.0.0 <=4.3.2"
random_string: ^2.1.0
crypto: ^3.0.0
base58check: ^2.0.0
@ -27,7 +26,5 @@ dev_dependencies:
pedantic: ^1.11.0
test: ^1.15.7
coverage: ">=0.15.0 <2.0.0"
moor_generator: ^4.0.0
build_runner: ^1.11.1
file: ^6.1.1
#flutter_test: {sdk: flutter}

View File

@ -16,19 +16,16 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import 'dart:convert';
import 'dart:typed_data';
import 'package:matrix/matrix.dart';
import 'package:moor/moor.dart';
import 'package:test/test.dart';
import 'package:olm/olm.dart' as olm;
import 'fake_database_native.dart';
import 'fake_database.dart';
void main() {
/// All Tests related to the ChatTime
group('Moor Database Test', () {
testDatabase(getMoorDatabase(null), 0);
});
group('Hive Database Test', () {
testDatabase(getHiveDatabase(null), 0);
});

View File

@ -16,5 +16,28 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
export 'fake_database_native.dart'
if (dart.library.js) 'fake_database_web.dart';
import 'dart:io';
import 'dart:math';
import 'package:matrix/matrix.dart';
import 'package:matrix/src/database/hive_database.dart';
import 'package:file/memory.dart';
import 'package:hive/hive.dart';
/// Returns the unit-test database; with moor removed this is always Hive.
Future<DatabaseApi> getDatabase(Client _) => getHiveDatabase(_);

/// Whether Hive has already been pointed at a temporary test directory.
bool hiveInitialized = false;

/// Opens a fresh [FamedlySdkHiveDatabase] named `unit_test.<c.hashCode>`.
///
/// The first call initializes Hive inside an in-memory file system so test
/// runs never touch a real on-disk store.
Future<FamedlySdkHiveDatabase> getHiveDatabase(Client c) async {
  if (!hiveInitialized) {
    final fs = MemoryFileSystem();
    // NOTE(review): interpolating `fs.path` stringifies the path *context*
    // object rather than a directory path — presumably fine for a throwaway
    // location, but worth confirming.
    final storePath =
        '${fs.path}/build/.test_store/${Random().nextDouble()}';
    Directory(storePath).createSync(recursive: true);
    Hive.init(storePath);
    hiveInitialized = true;
  }
  final database = FamedlySdkHiveDatabase('unit_test.${c.hashCode}');
  await database.open();
  return database;
}

View File

@ -1,50 +0,0 @@
/*
* Famedly Matrix SDK
* Copyright (C) 2019, 2020 Famedly GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import 'dart:io';
import 'dart:math';
import 'package:matrix/matrix.dart';
import 'package:matrix/src/database/hive_database.dart';
import 'package:file/memory.dart';
import 'package:hive/hive.dart';
import 'package:moor/moor.dart';
import 'package:moor/ffi.dart' as moor;
/// Returns the test database; on native this defaults to the Hive backend.
Future<DatabaseApi> getDatabase(Client _) => getHiveDatabase(_);

/// Creates a throwaway in-memory moor [Database] for unit tests.
Future<Database> getMoorDatabase(Client _) async {
  // Tests open several databases in one process; silence moor's warning.
  moorRuntimeOptions.dontWarnAboutMultipleDatabases = true;
  final database = Database(moor.VmDatabase.memory());
  return database;
}
/// Tracks whether Hive has been initialized with a temp test directory yet.
bool hiveInitialized = false;

/// Opens a unit-test [FamedlySdkHiveDatabase] named after [c]'s hash code.
///
/// On first use, Hive is rooted in a random directory inside an in-memory
/// file system, keeping test state off the real disk.
Future<FamedlySdkHiveDatabase> getHiveDatabase(Client c) async {
  if (!hiveInitialized) {
    final memoryFs = MemoryFileSystem();
    // NOTE(review): `memoryFs.path` is the path *context* object, not a
    // directory path — confirm this is the intended base for the temp store.
    final hiveDir =
        '${memoryFs.path}/build/.test_store/${Random().nextDouble()}';
    Directory(hiveDir).createSync(recursive: true);
    Hive.init(hiveDir);
    hiveInitialized = true;
  }
  final hiveDb = FamedlySdkHiveDatabase('unit_test.${c.hashCode}');
  await hiveDb.open();
  return hiveDb;
}

View File

@ -1,26 +0,0 @@
/*
* Famedly Matrix SDK
* Copyright (C) 2019, 2020 Famedly GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
import 'package:matrix/matrix.dart';
import 'package:moor/moor.dart';
import 'package:moor/moor_web.dart' as moor;
/// Opens an in-browser moor [Database] named 'test' for unit tests.
Future<Database> getDatabase(Client _) async {
  // Multiple test databases are opened per run; suppress moor's warning.
  moorRuntimeOptions.dontWarnAboutMultipleDatabases = true;
  final database = Database(moor.WebDatabase('test'));
  return database;
}

View File

@ -21,8 +21,6 @@ import 'dart:typed_data';
import 'package:matrix/matrix.dart';
import 'package:matrix/src/client.dart';
import 'package:matrix/src/database/database.dart'
show DbRoom, DbRoomAccountData, DbRoomState, getRoomFromTableRow;
import 'package:matrix/src/event.dart';
import 'package:matrix/src/room.dart';
import 'package:matrix/src/user.dart';
@ -55,51 +53,40 @@ void main() {
'@charley:example.org'
];
final dbRoom = DbRoom(
clientId: 1,
roomId: id,
membership: membership.toString().split('.').last,
room = Room(
client: matrix,
id: id,
membership: membership,
highlightCount: highlightCount,
notificationCount: notificationCount,
prevBatch: '',
joinedMemberCount: notificationCount,
invitedMemberCount: notificationCount,
prev_batch: '',
newestSortOrder: 0.0,
oldestSortOrder: 0.0,
heroes: heroes.join(','),
);
final states = [
DbRoomState(
clientId: 1,
eventId: '143273582443PhrSn:example.org',
roomId: id,
sortOrder: 0.0,
originServerTs: 1432735824653,
sender: '@example:example.org',
type: 'm.room.join_rules',
unsigned: '{"age": 1234}',
content: '{"join_rule": "public"}',
prevContent: '',
stateKey: '',
),
];
final roomAccountData = [
DbRoomAccountData(
clientId: 1,
type: 'com.test.foo',
roomId: id,
content: '{"foo": "bar"}',
),
];
room = await getRoomFromTableRow(
dbRoom,
matrix,
states: states,
roomAccountData: roomAccountData,
summary: RoomSummary.fromJson({
'm.joined_member_count': 2,
'm.invited_member_count': 2,
'm.heroes': heroes,
}),
roomAccountData: {
'com.test.foo': BasicRoomEvent(
type: 'com.test.foo',
roomId: id,
content: {'foo': 'bar'},
),
},
);
room.setState(Event(
room: room,
eventId: '143273582443PhrSn:example.org',
roomId: id,
sortOrder: 0.0,
originServerTs: DateTime.fromMillisecondsSinceEpoch(1432735824653),
senderId: '@example:example.org',
type: 'm.room.join_rules',
unsigned: {'age': 1234},
content: {'join_rule': 'public'},
stateKey: '',
));
expect(room.id, id);
expect(room.membership, membership);