Added a method to allow safer database inserts

Fixed an issue where the app settings were updating asynchronously, making them seem to not update correctly
Updated GRDB to version 6.1 and SQLCipher to 4.5.2
Added a method which allows for inserting into the database while omitting columns which exist in the object but not in the database (to allow old migrations to run with fewer issues)
Updated all the migrations to use the migration safe insert method
Removed some ObjC support extension functions
This commit is contained in:
Morgan Pretty 2022-10-21 17:32:51 +11:00
parent 6c58e08b4c
commit 89df1261e3
39 changed files with 1125 additions and 238 deletions

View File

@ -11,7 +11,7 @@ abstract_target 'GlobalDependencies' do
# FIXME: If https://github.com/jedisct1/swift-sodium/pull/249 gets resolved then revert this back to the standard pod
pod 'Sodium', :git => 'https://github.com/oxen-io/session-ios-swift-sodium.git', branch: 'session-build'
pod 'GRDB.swift/SQLCipher'
pod 'SQLCipher', '~> 4.0'
pod 'SQLCipher', '~> 4.5.0' # FIXME: Version 4.5.2 is crashing when access DB settings
# FIXME: We want to remove this once it's been long enough since the migration to GRDB
pod 'YapDatabase/SQLCipher', :git => 'https://github.com/oxen-io/session-ios-yap-database.git', branch: 'signal-release'

View File

@ -27,8 +27,8 @@ PODS:
- DifferenceKit/Core (1.2.0)
- DifferenceKit/UIKitExtension (1.2.0):
- DifferenceKit/Core
- GRDB.swift/SQLCipher (5.26.0):
- SQLCipher (>= 3.4.0)
- GRDB.swift/SQLCipher (6.1.0):
- SQLCipher (>= 3.4.2)
- libwebp (1.2.1):
- libwebp/demux (= 1.2.1)
- libwebp/mux (= 1.2.1)
@ -154,7 +154,7 @@ DEPENDENCIES:
- SignalCoreKit (from `https://github.com/oxen-io/session-ios-core-kit`, branch `session-version`)
- SocketRocket (~> 0.5.1)
- Sodium (from `https://github.com/oxen-io/session-ios-swift-sodium.git`, branch `session-build`)
- SQLCipher (~> 4.0)
- SQLCipher (~> 4.5.0)
- SwiftProtobuf (~> 1.5.0)
- WebRTC-lib
- YapDatabase/SQLCipher (from `https://github.com/oxen-io/session-ios-yap-database.git`, branch `signal-release`)
@ -222,7 +222,7 @@ SPEC CHECKSUMS:
CryptoSwift: a532e74ed010f8c95f611d00b8bbae42e9fe7c17
Curve25519Kit: e63f9859ede02438ae3defc5e1a87e09d1ec7ee6
DifferenceKit: 5659c430bb7fe45876fa32ce5cba5d6167f0c805
GRDB.swift: 1395cb3556df6b16ed69dfc74c3886abc75d2825
GRDB.swift: 611778a5e113385373baeb3e2ce474887d1aadb7
libwebp: 98a37e597e40bfdb4c911fc98f2c53d0b12d05fc
Nimble: 5316ef81a170ce87baf72dd961f22f89a602ff84
NVActivityIndicatorView: 1f6c5687f1171810aa27a3296814dc2d7dec3667
@ -242,6 +242,6 @@ SPEC CHECKSUMS:
YYImage: f1ddd15ac032a58b78bbed1e012b50302d318331
ZXingObjC: fdbb269f25dd2032da343e06f10224d62f537bdb
PODFILE CHECKSUM: 430e3b57d986dc8890415294fc6cf5e4eabfce3e
PODFILE CHECKSUM: 402850f74d70b3b57fc81eff82d0fc86d695b392
COCOAPODS: 1.11.3

View File

@ -570,6 +570,8 @@
FD17D7E527F6A09900122BE0 /* Identity.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD17D7E427F6A09900122BE0 /* Identity.swift */; };
FD17D7E727F6A16700122BE0 /* _003_YDBToGRDBMigration.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD17D7E627F6A16700122BE0 /* _003_YDBToGRDBMigration.swift */; };
FD17D7EA27F6A1C600122BE0 /* SUKLegacy.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD17D7E927F6A1C600122BE0 /* SUKLegacy.swift */; };
FD1A94FB2900D1C2000D73D3 /* PersistableRecord+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD1A94FA2900D1C2000D73D3 /* PersistableRecord+Utilities.swift */; };
FD1A94FE2900D2EA000D73D3 /* PersistableRecordUtilitiesSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD1A94FD2900D2EA000D73D3 /* PersistableRecordUtilitiesSpec.swift */; };
FD1C98E4282E3C5B00B76F9E /* UINavigationBar+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD1C98E3282E3C5B00B76F9E /* UINavigationBar+Utilities.swift */; };
FD23EA5C28ED00F80058676E /* Mock.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDC290A527D860CE005DAE71 /* Mock.swift */; };
FD23EA5D28ED00FA0058676E /* TestConstants.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD83B9BD27CF2243005E1583 /* TestConstants.swift */; };
@ -1683,6 +1685,8 @@
FD17D7E427F6A09900122BE0 /* Identity.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Identity.swift; sourceTree = "<group>"; };
FD17D7E627F6A16700122BE0 /* _003_YDBToGRDBMigration.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = _003_YDBToGRDBMigration.swift; sourceTree = "<group>"; };
FD17D7E927F6A1C600122BE0 /* SUKLegacy.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SUKLegacy.swift; sourceTree = "<group>"; };
FD1A94FA2900D1C2000D73D3 /* PersistableRecord+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "PersistableRecord+Utilities.swift"; sourceTree = "<group>"; };
FD1A94FD2900D2EA000D73D3 /* PersistableRecordUtilitiesSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PersistableRecordUtilitiesSpec.swift; sourceTree = "<group>"; };
FD1C98E3282E3C5B00B76F9E /* UINavigationBar+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "UINavigationBar+Utilities.swift"; sourceTree = "<group>"; };
FD23EA6028ED0B260058676E /* CombineExtensions.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CombineExtensions.swift; sourceTree = "<group>"; };
FD245C612850664300B966DD /* Configuration.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Configuration.swift; sourceTree = "<group>"; };
@ -3619,6 +3623,7 @@
FD17D7C627F5207C00122BE0 /* DatabaseMigrator+Utilities.swift */,
FDF22210281B5E0B000A4995 /* TableRecord+Utilities.swift */,
FDF2220E281B55E6000A4995 /* QueryInterfaceRequest+Utilities.swift */,
FD1A94FA2900D1C2000D73D3 /* PersistableRecord+Utilities.swift */,
);
path = Utilities;
sourceTree = "<group>";
@ -3652,6 +3657,14 @@
path = LegacyDatabase;
sourceTree = "<group>";
};
FD1A94FC2900D2DB000D73D3 /* Utilities */ = {
isa = PBXGroup;
children = (
FD1A94FD2900D2EA000D73D3 /* PersistableRecordUtilitiesSpec.swift */,
);
path = Utilities;
sourceTree = "<group>";
};
FD37E9C428A1C701003AE748 /* Themes */ = {
isa = PBXGroup;
children = (
@ -3698,6 +3711,7 @@
isa = PBXGroup;
children = (
FD37EA1328AB42C1003AE748 /* Models */,
FD1A94FC2900D2DB000D73D3 /* Utilities */,
);
path = Database;
sourceTree = "<group>";
@ -5336,6 +5350,7 @@
7BD477A827EC39F5004E2822 /* Atomic.swift in Sources */,
B8BC00C0257D90E30032E807 /* General.swift in Sources */,
FD17D7A127F40D2500122BE0 /* Storage.swift in Sources */,
FD1A94FB2900D1C2000D73D3 /* PersistableRecord+Utilities.swift in Sources */,
FD5D201E27B0D87C00FEA984 /* SessionId.swift in Sources */,
C32C5A24256DB7DB003C73A2 /* SNUserDefaults.swift in Sources */,
C3BBE0A72554D4DE0050F1E3 /* Promise+Retrying.swift in Sources */,
@ -5776,6 +5791,7 @@
FD23EA6328ED0B260058676E /* CombineExtensions.swift in Sources */,
FD2AAAEE28ED3E1100A49611 /* MockGeneralCache.swift in Sources */,
FD37EA1528AB42CB003AE748 /* IdentitySpec.swift in Sources */,
FD1A94FE2900D2EA000D73D3 /* PersistableRecordUtilitiesSpec.swift in Sources */,
FDC290AA27D9B6FD005DAE71 /* Mock.swift in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;

View File

@ -69,8 +69,8 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
minEstimatedTotalTime: minEstimatedTotalTime
)
},
migrationsCompletion: { [weak self] error, needsConfigSync in
guard error == nil else {
migrationsCompletion: { [weak self] result, needsConfigSync in
if case .failure(let error) = result {
self?.showFailedMigrationAlert(error: error)
return
}
@ -322,8 +322,8 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
minEstimatedTotalTime: minEstimatedTotalTime
)
},
migrationsCompletion: { [weak self] error, needsConfigSync in
guard error == nil else {
migrationsCompletion: { [weak self] result, needsConfigSync in
if case .failure(let error) = result {
self?.showFailedMigrationAlert(error: error)
return
}

View File

@ -597,7 +597,7 @@ class NotificationActionHandler {
interactionId: try thread.interactions
.select(.id)
.order(Interaction.Columns.timestampMs.desc)
.asRequest(of: Int64?.self)
.asRequest(of: Int64.self)
.fetchOne(db),
threadId: thread.id,
includingOlder: true,

View File

@ -61,7 +61,7 @@ class ConversationSettingsViewModel: SessionTableViewModel<NoNav, ConversationSe
.settingBool(key: .trimOpenGroupMessagesOlderThanSixMonths)
),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.trimOpenGroupMessagesOlderThanSixMonths] = !db[.trimOpenGroupMessagesOlderThanSixMonths]
}
}
@ -79,7 +79,7 @@ class ConversationSettingsViewModel: SessionTableViewModel<NoNav, ConversationSe
.settingBool(key: .shouldAutoPlayConsecutiveAudioMessages)
),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.shouldAutoPlayConsecutiveAudioMessages] = !db[.shouldAutoPlayConsecutiveAudioMessages]
}
}

View File

@ -108,7 +108,7 @@ class NotificationSettingsViewModel: SessionTableViewModel<NoNav, NotificationSe
title: "NOTIFICATIONS_STYLE_SOUND_WHEN_OPEN_TITLE".localized(),
rightAccessory: .toggle(.settingBool(key: .playNotificationSoundInForeground)),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.playNotificationSoundInForeground] = !db[.playNotificationSoundInForeground]
}
}

View File

@ -100,7 +100,7 @@ class PrivacySettingsViewModel: SessionTableViewModel<PrivacySettingsViewModel.N
subtitle: "PRIVACY_SCREEN_SECURITY_LOCK_SESSION_DESCRIPTION".localized(),
rightAccessory: .toggle(.settingBool(key: .isScreenLockEnabled)),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.isScreenLockEnabled] = !db[.isScreenLockEnabled]
}
}
@ -116,7 +116,7 @@ class PrivacySettingsViewModel: SessionTableViewModel<PrivacySettingsViewModel.N
subtitle: "PRIVACY_READ_RECEIPTS_DESCRIPTION".localized(),
rightAccessory: .toggle(.settingBool(key: .areReadReceiptsEnabled)),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.areReadReceiptsEnabled] = !db[.areReadReceiptsEnabled]
}
}
@ -158,7 +158,7 @@ class PrivacySettingsViewModel: SessionTableViewModel<PrivacySettingsViewModel.N
},
rightAccessory: .toggle(.settingBool(key: .typingIndicatorsEnabled)),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.typingIndicatorsEnabled] = !db[.typingIndicatorsEnabled]
}
}
@ -174,7 +174,7 @@ class PrivacySettingsViewModel: SessionTableViewModel<PrivacySettingsViewModel.N
subtitle: "PRIVACY_LINK_PREVIEWS_DESCRIPTION".localized(),
rightAccessory: .toggle(.settingBool(key: .areLinkPreviewsEnabled)),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.areLinkPreviewsEnabled] = !db[.areLinkPreviewsEnabled]
}
}
@ -198,7 +198,7 @@ class PrivacySettingsViewModel: SessionTableViewModel<PrivacySettingsViewModel.N
onConfirm: { _ in Permissions.requestMicrophonePermissionIfNeeded() }
),
onTap: {
Storage.shared.writeAsync { db in
Storage.shared.write { db in
db[.areCallsEnabled] = !db[.areCallsEnabled]
}
}

View File

@ -446,7 +446,7 @@ class SettingsViewModel: SessionTableViewModel<SettingsViewModel.NavButton, Sett
try MessageSender.syncConfiguration(db, forceSyncNow: true).retainUntilComplete()
// Wait for the database transaction to complete before updating the UI
db.afterNextTransactionCommit { _ in
db.afterNextTransaction { _ in
DispatchQueue.main.async {
modalActivityIndicator.dismiss(completion: {})
}

View File

@ -177,7 +177,7 @@ enum _001_InitialSetupMigration: Migration {
.notNull()
}
try db.create(table: _006_FixHiddenModAdminSupport.PreMigrationGroupMember.self) { t in
try db.create(table: GroupMember.self) { t in
// Note: Since we don't know whether this will be stored against a 'ClosedGroup' or
// an 'OpenGroup' we add the foreign key constraint against the thread itself (which
// shares the same 'id' as the 'groupId') so we can cascade delete automatically

View File

@ -22,34 +22,34 @@ enum _002_SetupStandardJobs: Migration {
variant: .disappearingMessages,
behaviour: .recurringOnLaunch,
shouldBlock: true
).inserted(db)
).migrationSafeInserted(db)
_ = try Job(
variant: .failedMessageSends,
behaviour: .recurringOnLaunch,
shouldBlock: true
).inserted(db)
).migrationSafeInserted(db)
_ = try Job(
variant: .failedAttachmentDownloads,
behaviour: .recurringOnLaunch,
shouldBlock: true
).inserted(db)
).migrationSafeInserted(db)
_ = try Job(
variant: .updateProfilePicture,
behaviour: .recurringOnActive
).inserted(db)
).migrationSafeInserted(db)
_ = try Job(
variant: .retrieveDefaultOpenGroupRooms,
behaviour: .recurringOnActive
).inserted(db)
).migrationSafeInserted(db)
_ = try Job(
variant: .garbageCollection,
behaviour: .recurringOnActive
).inserted(db)
).migrationSafeInserted(db)
}
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration

View File

@ -422,7 +422,7 @@ enum _003_YDBToGRDBMigration: Migration {
profilePictureUrl: legacyContact.profilePictureURL,
profilePictureFileName: legacyContact.profilePictureFileName,
profileEncryptionKey: legacyContact.profileEncryptionKey
).insert(db)
).migrationSafeInsert(db)
/// **Note:** The below "shouldForce" flags are here to allow us to avoid having to run legacy migrations; they
/// replicate the behaviour of a number of the migrations and perform the changes if the migrations had never run
@ -490,7 +490,7 @@ enum _003_YDBToGRDBMigration: Migration {
shouldForceDidApproveMe
),
hasBeenBlocked: (!isCurrentUser && (legacyContact.hasBeenBlocked || legacyContact.isBlocked))
).insert(db)
).migrationSafeInsert(db)
}
// Increment the progress for each contact
@ -587,7 +587,7 @@ enum _003_YDBToGRDBMigration: Migration {
),
mutedUntilTimestamp: legacyThread.mutedUntilDate?.timeIntervalSince1970,
onlyNotifyForMentions: onlyNotifyForMentions
).insert(db)
).migrationSafeInsert(db)
// Disappearing Messages Configuration
if let config: SMKLegacy._DisappearingMessagesConfiguration = disappearingMessagesConfiguration[threadId] {
@ -595,12 +595,12 @@ enum _003_YDBToGRDBMigration: Migration {
threadId: threadId,
isEnabled: config.isEnabled,
durationSeconds: TimeInterval(config.durationSeconds)
).insert(db)
).migrationSafeInsert(db)
}
else {
try DisappearingMessagesConfiguration
.defaultWith(threadId)
.insert(db)
.migrationSafeInsert(db)
}
// Closed Groups
@ -618,7 +618,7 @@ enum _003_YDBToGRDBMigration: Migration {
threadId: threadId,
name: name,
formationTimestamp: TimeInterval(formationTimestamp)
).insert(db)
).migrationSafeInsert(db)
// Note: If a user has left a closed group then they won't actually have any keys
// but they should still be able to browse the old messages so we do want to allow
@ -629,7 +629,7 @@ enum _003_YDBToGRDBMigration: Migration {
publicKey: legacyKeys.publicKey,
secretKey: legacyKeys.privateKey,
receivedTimestamp: timestamp
).insert(db)
).migrationSafeInsert(db)
}
// Create the 'GroupMember' models for the group (even if the current user is no longer
@ -643,15 +643,16 @@ enum _003_YDBToGRDBMigration: Migration {
try? Profile(
id: profileId,
name: profileId
).save(db)
).migrationSafeSave(db)
}
try groupModel.groupMemberIds.forEach { memberId in
try _006_FixHiddenModAdminSupport.PreMigrationGroupMember(
try GroupMember(
groupId: threadId,
profileId: memberId,
role: .standard
).insert(db)
role: .standard,
isHidden: false // Ignored: Didn't exist at time of migration
).migrationSafeInsert(db)
if !validProfileIds.contains(memberId) {
createDummyProfile(profileId: memberId)
@ -659,11 +660,12 @@ enum _003_YDBToGRDBMigration: Migration {
}
try groupModel.groupAdminIds.forEach { adminId in
try _006_FixHiddenModAdminSupport.PreMigrationGroupMember(
try GroupMember(
groupId: threadId,
profileId: adminId,
role: .admin
).insert(db)
role: .admin,
isHidden: false // Ignored: Didn't exist at time of migration
).migrationSafeInsert(db)
if !validProfileIds.contains(adminId) {
createDummyProfile(profileId: adminId)
@ -671,11 +673,12 @@ enum _003_YDBToGRDBMigration: Migration {
}
try (closedGroupZombieMemberIds[legacyThread.uniqueId] ?? []).forEach { zombieId in
try _006_FixHiddenModAdminSupport.PreMigrationGroupMember(
try GroupMember(
groupId: threadId,
profileId: zombieId,
role: .zombie
).insert(db)
role: .zombie,
isHidden: false // Ignored: Didn't exist at time of migration
).migrationSafeInsert(db)
if !validProfileIds.contains(zombieId) {
createDummyProfile(profileId: zombieId)
@ -707,7 +710,8 @@ enum _003_YDBToGRDBMigration: Migration {
sequenceNumber: 0,
inboxLatestMessageId: 0,
outboxLatestMessageId: 0
).insert(db)
)
.migrationSafeInsert(db)
}
}
@ -930,7 +934,7 @@ enum _003_YDBToGRDBMigration: Migration {
openGroupServerMessageId: openGroupServerMessageId,
openGroupWhisperMods: false,
openGroupWhisperTo: nil
).inserted(db)
).migrationSafeInserted(db)
}
catch {
switch error {
@ -950,7 +954,7 @@ enum _003_YDBToGRDBMigration: Migration {
threadId: threadId,
variant: variant,
timestampMs: Int64.zeroingOverflow(legacyInteraction.timestamp)
)?.insert(db)
)?.migrationSafeInsert(db)
// Remove timestamps we created records for (they will be protected by unique
// constraints so don't need legacy process records)
@ -1012,7 +1016,7 @@ enum _003_YDBToGRDBMigration: Migration {
mostRecentFailureText :
nil
)
).save(db)
).migrationSafeSave(db)
}
// Handle any quote
@ -1045,7 +1049,7 @@ enum _003_YDBToGRDBMigration: Migration {
try Profile(
id: quotedMessage.authorId,
name: quotedMessage.authorId
).save(db)
).migrationSafeSave(db)
}
// Note: It looks like there is a way for a quote to not have its
@ -1093,7 +1097,7 @@ enum _003_YDBToGRDBMigration: Migration {
timestampMs: Int64.zeroingOverflow(quotedMessage.timestamp),
body: quotedMessage.body,
attachmentId: attachmentId
).insert(db)
).migrationSafeInsert(db)
}
// Handle any LinkPreview
@ -1120,7 +1124,7 @@ enum _003_YDBToGRDBMigration: Migration {
variant: linkPreviewVariant,
title: linkPreview.title,
attachmentId: attachmentId
).save(db)
).migrationSafeSave(db)
}
// Handle any attachments
@ -1156,7 +1160,7 @@ enum _003_YDBToGRDBMigration: Migration {
albumIndex: index,
interactionId: interactionId,
attachmentId: attachmentId
).insert(db)
).migrationSafeInsert(db)
}
// Increment the progress for each contact
@ -1225,7 +1229,7 @@ enum _003_YDBToGRDBMigration: Migration {
timestampMs: legacyJob.message.timestamp
)
)
)?.inserted(db)
)?.migrationSafeInserted(db)
}
}
@ -1256,7 +1260,7 @@ enum _003_YDBToGRDBMigration: Migration {
messages: [processedMessage.messageInfo],
calledFromBackgroundPoller: legacyJob.isBackgroundPoll
)
)?.inserted(db)
)?.migrationSafeInserted(db)
}
}
@ -1346,7 +1350,7 @@ enum _003_YDBToGRDBMigration: Migration {
destination: legacyJob.destination,
message: legacyJob.message.toNonLegacy()
)
)?.inserted(db)
)?.migrationSafeInserted(db)
if let oldId: String = legacyJob.id {
messageSendJobLegacyMap[oldId] = job
@ -1373,7 +1377,7 @@ enum _003_YDBToGRDBMigration: Migration {
messageSendJobId: sendJobId,
attachmentId: legacyJob.attachmentID
)
)?.inserted(db)
)?.migrationSafeInserted(db)
// Add the dependency to the relevant MessageSendJob
guard let uploadJobId: Int64 = uploadJob?.id else {
@ -1384,7 +1388,7 @@ enum _003_YDBToGRDBMigration: Migration {
try JobDependencies(
jobId: sendJobId,
dependantId: uploadJobId
).insert(db)
).migrationSafeInsert(db)
}
}
@ -1413,7 +1417,7 @@ enum _003_YDBToGRDBMigration: Migration {
details: AttachmentDownloadJob.Details(
attachmentId: legacyJob.attachmentID
)
)?.inserted(db)
)?.migrationSafeInserted(db)
}
}
@ -1429,7 +1433,7 @@ enum _003_YDBToGRDBMigration: Migration {
destination: .contact(publicKey: threadId),
timestampMsValues: timestampsMs
)
)?.inserted(db)
)?.migrationSafeInserted(db)
}
}
Storage.update(progress: 0.99, for: self, in: target)
@ -1625,7 +1629,7 @@ enum _003_YDBToGRDBMigration: Migration {
}
}(),
caption: legacyAttachment.caption
).inserted(db)
).migrationSafeInserted(db)
processedAttachmentIds.insert(legacyAttachmentId)
@ -1664,7 +1668,7 @@ enum _003_YDBToGRDBMigration: Migration {
encryptionKey: nil,
digest: nil,
caption: nil
).inserted(db)
).migrationSafeInserted(db)
processedAttachmentIds.insert(legacyAttachmentId)

View File

@ -28,41 +28,3 @@ enum _006_FixHiddenModAdminSupport: Migration {
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration
}
}
// MARK: - Pre-Migration Types
extension _006_FixHiddenModAdminSupport {
internal struct PreMigrationGroupMember: Codable, PersistableRecord, TableRecord, ColumnExpressible {
public static var databaseTableName: String { "groupMember" }
public typealias Columns = CodingKeys
public enum CodingKeys: String, CodingKey, ColumnExpression {
case groupId
case profileId
case role
}
public enum Role: Int, Codable, DatabaseValueConvertible {
case standard
case zombie
case moderator
case admin
}
public let groupId: String
public let profileId: String
public let role: Role
// MARK: - Initialization
public init(
groupId: String,
profileId: String,
role: Role
) {
self.groupId = groupId
self.profileId = profileId
self.role = role
}
}
}

View File

@ -113,7 +113,9 @@ public struct ControlMessageProcessRecord: Codable, FetchableRecord, Persistable
self.serverExpirationTimestamp = serverExpirationTimestamp
}
public func insert(_ db: Database) throws {
// MARK: - Custom Database Interaction
public func willInsert(_ db: Database) throws {
// If this isn't a legacy entry then check if there is a single entry and, if so,
// try to create a "legacy entry" version of this record to see if a unique constraint
// conflict occurs
@ -132,8 +134,6 @@ public struct ControlMessageProcessRecord: Codable, FetchableRecord, Persistable
).insert(db)
}
}
try performInsert(db)
}
}

View File

@ -316,21 +316,22 @@ public struct Interaction: Codable, Identifiable, Equatable, FetchableRecord, Mu
// MARK: - Custom Database Interaction
public mutating func insert(_ db: Database) throws {
public mutating func willInsert(_ db: Database) throws {
// Automatically mark interactions which can't be unread as read so the unread count
// isn't impacted
self.wasRead = (self.wasRead || !self.variant.canBeUnread)
}
public func aroundInsert(_ db: Database, insert: () throws -> InsertionSuccess) throws {
let success: InsertionSuccess = try insert()
try performInsert(db)
// Since we need to do additional logic upon insert we can just set the 'id' value
// here directly instead of in the 'didInsert' method (if you look at the docs the
// 'db.lastInsertedRowID' value is the row id of the newly inserted row which the
// interaction uses as its id)
let interactionId: Int64 = db.lastInsertedRowID
self.id = interactionId
guard let thread: SessionThread = try? SessionThread.fetchOne(db, id: threadId) else {
guard
let threadVariant: SessionThread.Variant = try? SessionThread
.filter(id: threadId)
.select(.variant)
.asRequest(of: SessionThread.Variant.self)
.fetchOne(db)
else {
SNLog("Inserted an interaction but couldn't find it's associated thead")
return
}
@ -339,10 +340,10 @@ public struct Interaction: Codable, Identifiable, Equatable, FetchableRecord, Mu
case .standardOutgoing:
// New outgoing messages should immediately determine their recipient list
// from current thread state
switch thread.variant {
switch threadVariant {
case .contact:
try RecipientState(
interactionId: interactionId,
interactionId: success.rowID,
recipientId: threadId, // Will be the contact id
state: .sending
).insert(db)
@ -350,7 +351,7 @@ public struct Interaction: Codable, Identifiable, Equatable, FetchableRecord, Mu
case .closedGroup:
let closedGroupMemberIds: Set<String> = (try? GroupMember
.select(.profileId)
.filter(GroupMember.Columns.groupId == thread.id)
.filter(GroupMember.Columns.groupId == threadId)
.asRequest(of: String.self)
.fetchSet(db))
.defaulting(to: [])
@ -367,7 +368,7 @@ public struct Interaction: Codable, Identifiable, Equatable, FetchableRecord, Mu
.filter { memberId -> Bool in memberId != userPublicKey }
.forEach { memberId in
try RecipientState(
interactionId: interactionId,
interactionId: success.rowID,
recipientId: memberId,
state: .sending
).insert(db)
@ -378,7 +379,7 @@ public struct Interaction: Codable, Identifiable, Equatable, FetchableRecord, Mu
// we need to ensure we have a state for all threads; so for open groups
// we just use the open group id as the 'recipientId' value
try RecipientState(
interactionId: interactionId,
interactionId: success.rowID,
recipientId: threadId, // Will be the open group id
state: .sending
).insert(db)
@ -387,6 +388,10 @@ public struct Interaction: Codable, Identifiable, Equatable, FetchableRecord, Mu
default: break
}
}
public mutating func didInsert(_ inserted: InsertionSuccess) {
self.id = inserted.rowID
}
}
// MARK: - Mutation

View File

@ -125,9 +125,7 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
// MARK: - Custom Database Interaction
public func insert(_ db: Database) throws {
try performInsert(db)
public func willInsert(_ db: Database) throws {
db[.hasSavedThread] = true
}
}
@ -350,73 +348,3 @@ public extension SessionThread {
}
}
}
// MARK: - Objective-C Support
// FIXME: Remove when possible
@objc(SMKThread)
public class SMKThread: NSObject {
@objc(deleteAll)
public static func deleteAll() {
Storage.shared.writeAsync { db in
_ = try SessionThread.deleteAll(db)
}
}
@objc(isThreadMuted:)
public static func isThreadMuted(_ threadId: String) -> Bool {
return Storage.shared.read { db in
let mutedUntilTimestamp: TimeInterval? = try SessionThread
.select(SessionThread.Columns.mutedUntilTimestamp)
.filter(id: threadId)
.asRequest(of: TimeInterval?.self)
.fetchOne(db)
return (mutedUntilTimestamp != nil)
}
.defaulting(to: false)
}
@objc(isOnlyNotifyingForMentions:)
public static func isOnlyNotifyingForMentions(_ threadId: String) -> Bool {
return Storage.shared.read { db in
return try SessionThread
.select(SessionThread.Columns.onlyNotifyForMentions)
.filter(id: threadId)
.asRequest(of: Bool.self)
.fetchOne(db)
}
.defaulting(to: false)
}
@objc(setIsOnlyNotifyingForMentions:to:)
public static func isOnlyNotifyingForMentions(_ threadId: String, isEnabled: Bool) {
Storage.shared.write { db in
try SessionThread
.filter(id: threadId)
.updateAll(db, SessionThread.Columns.onlyNotifyForMentions.set(to: isEnabled))
}
}
@objc(mutedUntilDateFor:)
public static func mutedUntilDateFor(_ threadId: String) -> Date? {
return Storage.shared.read { db in
return try SessionThread
.select(SessionThread.Columns.mutedUntilTimestamp)
.filter(id: threadId)
.asRequest(of: TimeInterval.self)
.fetchOne(db)
}
.map { Date(timeIntervalSince1970: $0) }
}
@objc(updateWithMutedUntilDateTo:forThreadId:)
public static func updateWithMutedUntilDate(to date: Date?, threadId: String) {
Storage.shared.write { db in
try SessionThread
.filter(id: threadId)
.updateAll(db, SessionThread.Columns.mutedUntilTimestamp.set(to: date?.timeIntervalSince1970))
}
}
}

View File

@ -469,7 +469,7 @@ public final class OpenGroupManager: NSObject {
}
}
db.afterNextTransactionCommit { db in
db.afterNextTransaction { db in
// Start the poller if needed
if dependencies.cache.pollers[server.lowercased()] == nil {
dependencies.mutableCache.mutate {

View File

@ -376,7 +376,7 @@ public enum MessageReceiver {
// Download the profile picture if needed
if updatedProfile.profilePictureUrl != profile.profilePictureUrl || updatedProfile.profileEncryptionKey != profile.profileEncryptionKey {
db.afterNextTransactionCommit { _ in
db.afterNextTransaction { _ in
ProfileManager.downloadAvatar(for: updatedProfile)
}
}

View File

@ -38,6 +38,8 @@ public extension MentionInfo {
let request: SQLRequest<MentionInfo> = {
guard let pattern: FTS5Pattern = pattern else {
let finalLimitSQL: SQL = (limitSQL ?? "")
return """
SELECT
\(Profile.self).*,
@ -61,12 +63,13 @@ public extension MentionInfo {
)
GROUP BY \(profile[.id])
ORDER BY \(interaction[.timestampMs].desc)
\(limitSQL ?? "")
\(finalLimitSQL)
"""
}
// If we do have a search pattern then use FTS
let matchLiteral: SQL = SQL(stringLiteral: "\(Profile.Columns.nickname.name):\(pattern.rawPattern) OR \(Profile.Columns.name.name):\(pattern.rawPattern)")
let finalLimitSQL: SQL = (limitSQL ?? "")
return """
SELECT
@ -93,7 +96,7 @@ public extension MentionInfo {
WHERE \(profileFullTextSearch) MATCH '\(matchLiteral)'
GROUP BY \(profile[.id])
ORDER BY \(interaction[.timestampMs].desc)
\(limitSQL ?? "")
\(finalLimitSQL)
"""
}()

View File

@ -650,6 +650,7 @@ public extension MessageViewModel {
let groupMemberRoleColumnLiteral: SQL = SQL(stringLiteral: GroupMember.Columns.role.name)
let numColumnsBeforeLinkedRecords: Int = 20
let finalGroupSQL: SQL = (groupSQL ?? "")
let request: SQLRequest<ViewModel> = """
SELECT
\(thread[.id]) AS \(ViewModel.threadIdKey),
@ -736,7 +737,7 @@ public extension MessageViewModel {
\(SQL("\(groupMemberAdminTableLiteral).\(groupMemberRoleColumnLiteral) = \(GroupMember.Role.admin)"))
)
WHERE \(interaction.alias[Column.rowID]) IN \(rowIds)
\(groupSQL ?? "")
\(finalGroupSQL)
ORDER BY \(orderSQL)
"""

View File

@ -34,7 +34,7 @@ class OpenGroupAPISpec: QuickSpec {
beforeEach {
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNMessagingKit.migrations()

View File

@ -100,7 +100,7 @@ class OpenGroupManagerSpec: QuickSpec {
mockOGMCache = MockOGMCache()
mockGeneralCache = MockGeneralCache()
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNMessagingKit.migrations()

View File

@ -26,7 +26,7 @@ class MessageReceiverDecryptionSpec: QuickSpec {
describe("a MessageReceiver") {
beforeEach {
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNMessagingKit.migrations()

View File

@ -23,7 +23,7 @@ class MessageSenderEncryptionSpec: QuickSpec {
describe("a MessageSender") {
beforeEach {
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNMessagingKit.migrations()

View File

@ -18,7 +18,7 @@ enum _002_SetupStandardJobs: Migration {
variant: .getSnodePool,
behaviour: .recurringOnLaunch,
shouldBlock: true
).inserted(db)
).migrationSafeInserted(db)
// Note: We also want this job to run both onLaunch and onActive as we want it to block
// 'onLaunch' and 'onActive' doesn't support blocking jobs
@ -26,7 +26,7 @@ enum _002_SetupStandardJobs: Migration {
variant: .getSnodePool,
behaviour: .recurringOnActive,
shouldSkipLaunchBecomeActive: true
).inserted(db)
).migrationSafeInserted(db)
}
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration

View File

@ -159,7 +159,7 @@ enum _003_YDBToGRDBMigration: Migration {
port: legacySnode.port,
ed25519PublicKey: legacySnode.publicKeySet.ed25519Key,
x25519PublicKey: legacySnode.publicKeySet.x25519Key
).insert(db)
).migrationSafeInsert(db)
}
Storage.update(progress: 0.96, for: self, in: target)
@ -173,7 +173,7 @@ enum _003_YDBToGRDBMigration: Migration {
nodeIndex: nodeIndex,
address: legacySnode.address,
port: legacySnode.port
).insert(db)
).migrationSafeInsert(db)
}
}
Storage.update(progress: 0.98, for: self, in: target)
@ -188,7 +188,7 @@ enum _003_YDBToGRDBMigration: Migration {
key: key,
hash: hash,
expirationDateMs: SnodeReceivedMessage.defaultExpirationSeconds
).inserted(db)
).migrationSafeInserted(db)
}
}
Storage.update(progress: 0.99, for: self, in: target)
@ -205,7 +205,7 @@ enum _003_YDBToGRDBMigration: Migration {
expirationDateMs :
SnodeReceivedMessage.defaultExpirationSeconds
)
).inserted(db)
).migrationSafeInserted(db)
}
}

View File

@ -23,7 +23,7 @@ class ThreadDisappearingMessagesViewModelSpec: QuickSpec {
beforeEach {
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNSnodeKit.migrations(),

View File

@ -25,7 +25,7 @@ class ThreadSettingsViewModelSpec: QuickSpec {
beforeEach {
mockStorage = SynchronousStorage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNSnodeKit.migrations(),

View File

@ -21,7 +21,7 @@ class NotificationContentViewModelSpec: QuickSpec {
beforeEach {
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations(),
SNSnodeKit.migrations(),

View File

@ -18,7 +18,7 @@ enum _002_SetupStandardJobs: Migration {
_ = try Job(
variant: .syncPushTokens,
behaviour: .recurringOnLaunch
).inserted(db)
).migrationSafeInserted(db)
// Note: We actually need this job to run both onLaunch and onActive as the logic differs
// slightly and there are cases where a user might not be registered in 'onLaunch' but is
@ -27,7 +27,7 @@ enum _002_SetupStandardJobs: Migration {
variant: .syncPushTokens,
behaviour: .recurringOnActive,
shouldSkipLaunchBecomeActive: true
).inserted(db)
).migrationSafeInserted(db)
}
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration

View File

@ -91,27 +91,27 @@ enum _003_YDBToGRDBMigration: Migration {
try Identity(
variant: .seed,
data: Data(hex: seedHexString)
).insert(db)
).migrationSafeInsert(db)
try Identity(
variant: .ed25519SecretKey,
data: Data(hex: userEd25519SecretKeyHexString)
).insert(db)
).migrationSafeInsert(db)
try Identity(
variant: .ed25519PublicKey,
data: Data(hex: userEd25519PublicKeyHexString)
).insert(db)
).migrationSafeInsert(db)
try Identity(
variant: .x25519PrivateKey,
data: userX25519KeyPair.privateKey
).insert(db)
).migrationSafeInsert(db)
try Identity(
variant: .x25519PublicKey,
data: userX25519KeyPair.publicKey
).insert(db)
).migrationSafeInsert(db)
}
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration

View File

@ -103,7 +103,7 @@ open class Storage {
migrations: [TargetMigrations],
async: Bool = true,
onProgressUpdate: ((CGFloat, TimeInterval) -> ())?,
onComplete: @escaping (Error?, Bool) -> ()
onComplete: @escaping (Swift.Result<Database, Error>, Bool) -> ()
) {
guard isValid, let dbWriter: DatabaseWriter = dbWriter else { return }
@ -179,27 +179,31 @@ open class Storage {
}
// Store the logic to run when the migration completes
let migrationCompleted: (Database, Error?) -> () = { [weak self] db, error in
let migrationCompleted: (Swift.Result<Database, Error>) -> () = { [weak self] result in
self?.hasCompletedMigrations = true
self?.migrationProgressUpdater = nil
SUKLegacy.clearLegacyDatabaseInstance()
if let error = error {
if case .failure(let error) = result {
SNLog("[Migration Error] Migration failed with error: \(error)")
}
onComplete(error, needsConfigSync)
onComplete(result, needsConfigSync)
}
// Note: The non-async migration should only be used for unit tests
guard async else {
do { try self.migrator?.migrate(dbWriter) }
catch { try? dbWriter.read { db in migrationCompleted(db, error) } }
catch {
try? dbWriter.read { db in
migrationCompleted(Swift.Result<Database, Error>.failure(error))
}
}
return
}
self.migrator?.asyncMigrate(dbWriter) { db, error in
migrationCompleted(db, error)
self.migrator?.asyncMigrate(dbWriter) { result in
migrationCompleted(result)
}
}
@ -434,7 +438,7 @@ public extension ValueObservation {
func publisher(
in storage: Storage,
scheduling scheduler: ValueObservationScheduler = Storage.defaultPublisherScheduler
) -> AnyPublisher<Reducer.Value, Error> {
) -> AnyPublisher<Reducer.Value, Error> where Reducer: ValueReducer {
guard storage.isValid, let dbWriter: DatabaseWriter = storage.dbWriter else {
return Fail(error: StorageError.databaseInvalid).eraseToAnyPublisher()
}

View File

@ -1005,10 +1005,11 @@ public enum PagedData {
filterSQL: SQL
) -> Int {
let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
let request: SQLRequest<Int> = """
SELECT \(tableNameLiteral).rowId
FROM \(tableNameLiteral)
\(requiredJoinSQL ?? "")
\(finalJoinSQL)
WHERE \(filterSQL)
"""
@ -1027,12 +1028,14 @@ public enum PagedData {
offset: Int
) -> [Int64] {
let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
let finalGroupSQL: SQL = (groupSQL ?? "")
let request: SQLRequest<Int64> = """
SELECT \(tableNameLiteral).rowId
FROM \(tableNameLiteral)
\(requiredJoinSQL ?? "")
\(finalJoinSQL)
WHERE \(filterSQL)
\(groupSQL ?? "")
\(finalGroupSQL)
ORDER BY \(orderSQL)
LIMIT \(limit) OFFSET \(offset)
"""
@ -1052,6 +1055,7 @@ public enum PagedData {
) -> Int? {
let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
let idColumnLiteral: SQL = SQL(stringLiteral: idColumn)
let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
let request: SQLRequest<Int> = """
SELECT
(data.rowIndex - 1) AS rowIndex -- Converting from 1-Indexed to 0-indexed
@ -1060,7 +1064,7 @@ public enum PagedData {
\(tableNameLiteral).\(idColumnLiteral) AS \(idColumnLiteral),
ROW_NUMBER() OVER (ORDER BY \(orderSQL)) AS rowIndex
FROM \(tableNameLiteral)
\(requiredJoinSQL ?? "")
\(finalJoinSQL)
WHERE \(filterSQL)
) AS data
WHERE \(SQL("data.\(idColumnLiteral) = \(id)"))
@ -1083,6 +1087,7 @@ public enum PagedData {
guard !rowIds.isEmpty else { return [] }
let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
let request: SQLRequest<RowIndexInfo> = """
SELECT
data.rowId AS rowId,
@ -1092,7 +1097,7 @@ public enum PagedData {
\(tableNameLiteral).rowid AS rowid,
ROW_NUMBER() OVER (ORDER BY \(orderSQL)) AS rowIndex
FROM \(tableNameLiteral)
\(requiredJoinSQL ?? "")
\(finalJoinSQL)
WHERE \(filterSQL)
) AS data
WHERE \(SQL("data.rowid IN \(rowIds)"))

View File

@ -20,6 +20,7 @@ public struct TargetMigrations: Comparable {
case snodeKit
case messagingKit
case uiKit
case test
public static func < (lhs: Self, rhs: Self) -> Bool {
let lhsIndex: Int = (Identifier.allCases.firstIndex(of: lhs) ?? Identifier.allCases.count)

View File

@ -0,0 +1,277 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
// MARK: - Migration Safe Functions
public extension MutablePersistableRecord where Self: TableRecord & EncodableRecord & Codable {
    /// Performs an `insert` but omits any encoded properties which don't have a matching
    /// column in the current database schema (allows old migrations to keep inserting
    /// records after later migrations have changed the table structure).
    func migrationSafeInsert(
        _ db: Database,
        onConflict conflictResolution: Database.ConflictResolution? = nil
    ) throws {
        var wrapper: MigrationSafeMutableRecord<Self> = try MigrationSafeMutableRecord(db, originalRecord: self)
        try wrapper.insert(db, onConflict: conflictResolution)
    }
    
    /// Schema-filtered equivalent of `inserted`, returning the record after the persistence
    /// callbacks have run (eg. with any rowId assigned via `didInsert`).
    func migrationSafeInserted(
        _ db: Database,
        onConflict conflictResolution: Database.ConflictResolution? = nil
    ) throws -> Self {
        return try MigrationSafeMutableRecord(db, originalRecord: self)
            .inserted(db, onConflict: conflictResolution)
            .originalRecord
    }
    
    /// Schema-filtered equivalent of `save` (insert or update depending on primary key state).
    func migrationSafeSave(
        _ db: Database,
        onConflict conflictResolution: Database.ConflictResolution? = nil
    ) throws {
        var wrapper: MigrationSafeMutableRecord<Self> = try MigrationSafeMutableRecord(db, originalRecord: self)
        try wrapper.save(db, onConflict: conflictResolution)
    }
    
    /// Schema-filtered equivalent of `saved`, returning the record after the persistence
    /// callbacks have run.
    func migrationSafeSaved(
        _ db: Database,
        onConflict conflictResolution: Database.ConflictResolution? = nil
    ) throws -> Self {
        return try MigrationSafeMutableRecord(db, originalRecord: self)
            .saved(db, onConflict: conflictResolution)
            .originalRecord
    }
    
    /// Schema-filtered equivalent of `upsert`.
    func migrationSafeUpsert(_ db: Database) throws {
        var wrapper: MigrationSafeMutableRecord<Self> = try MigrationSafeMutableRecord(db, originalRecord: self)
        try wrapper.upsert(db)
    }
}
// MARK: - MigrationSafeMutableRecord
private class MigrationSafeRecord<T: PersistableRecord & Encodable>: MigrationSafeMutableRecord<T> {}
/// Wrapper which encodes `originalRecord` while omitting any Codable properties that don't
/// have a matching column in the current database schema, so that records written during old
/// migrations don't fail when the type has since gained extra properties
private class MigrationSafeMutableRecord<T: MutablePersistableRecord & Encodable>: MutablePersistableRecord & Encodable {
    // Persist into the same table as the wrapped record type
    public static var databaseTableName: String { T.databaseTableName }
    
    // Kept mutable so GRDB's persistence callbacks can update it (eg. `didInsert` assigning
    // the new rowId)
    fileprivate var originalRecord: T
    
    // Names of the columns which currently exist in the database for this table
    private let availableColumnNames: [String]
    
    init(_ db: Database, originalRecord: T) throws {
        // Check the current columns in the database and filter out any properties on the object
        // which don't exist in the database table
        self.originalRecord = originalRecord
        self.availableColumnNames = try db.columns(in: Self.databaseTableName).map(\.name)
    }
    
    func encode(to encoder: Encoder) throws {
        // Encode via a wrapper which silently drops any key without a matching column
        let filteredEncoder: FilteredEncoder = FilteredEncoder(
            originalEncoder: encoder,
            availableKeys: availableColumnNames
        )
        try originalRecord.encode(to: filteredEncoder)
    }
    
    // MARK: - Persistence Callbacks
    //
    // Forward every GRDB persistence callback to the wrapped record so any custom behaviour
    // it defines (eg. storing the inserted rowId) still runs
    
    func willInsert(_ db: Database) throws {
        try originalRecord.willInsert(db)
    }
    
    func aroundInsert(_ db: Database, insert: () throws -> InsertionSuccess) throws {
        try originalRecord.aroundInsert(db, insert: insert)
    }
    
    func didInsert(_ inserted: InsertionSuccess) {
        originalRecord.didInsert(inserted)
    }
    
    func willUpdate(_ db: Database, columns: Set<String>) throws {
        try originalRecord.willUpdate(db, columns: columns)
    }
    
    func aroundUpdate(_ db: Database, columns: Set<String>, update: () throws -> PersistenceSuccess) throws {
        try originalRecord.aroundUpdate(db, columns: columns, update: update)
    }
    
    func didUpdate(_ updated: PersistenceSuccess) {
        originalRecord.didUpdate(updated)
    }
    
    func willSave(_ db: Database) throws {
        try originalRecord.willSave(db)
    }
    
    func aroundSave(_ db: Database, save: () throws -> PersistenceSuccess) throws {
        try originalRecord.aroundSave(db, save: save)
    }
    
    func didSave(_ saved: PersistenceSuccess) {
        originalRecord.didSave(saved)
    }
    
    func willDelete(_ db: Database) throws {
        try originalRecord.willDelete(db)
    }
    
    func aroundDelete(_ db: Database, delete: () throws -> Bool) throws {
        try originalRecord.aroundDelete(db, delete: delete)
    }
    
    func didDelete(deleted: Bool) {
        originalRecord.didDelete(deleted: deleted)
    }
}
// MARK: - FilteredEncoder
/// Encoder which forwards everything to the wrapped encoder but swaps keyed containers for
/// `FilteredKeyedEncodingContainer` instances so unavailable keys get dropped
private class FilteredEncoder: Encoder {
    private let wrapped: Encoder
    private let permittedKeys: [String]
    
    var codingPath: [CodingKey] { wrapped.codingPath }
    var userInfo: [CodingUserInfoKey: Any] { wrapped.userInfo }
    
    init(originalEncoder: Encoder, availableKeys: [String]) {
        self.wrapped = originalEncoder
        self.permittedKeys = availableKeys
    }
    
    func container<Key>(keyedBy type: Key.Type) -> KeyedEncodingContainer<Key> where Key: CodingKey {
        // Only keyed containers are filtered; unkeyed/single-value containers pass straight through
        return KeyedEncodingContainer(
            FilteredKeyedEncodingContainer(
                availableKeys: permittedKeys,
                originalContainer: wrapped.container(keyedBy: type)
            )
        )
    }
    
    func unkeyedContainer() -> UnkeyedEncodingContainer { return wrapped.unkeyedContainer() }
    func singleValueContainer() -> SingleValueEncodingContainer { return wrapped.singleValueContainer() }
}
// MARK: - FilteredKeyedEncodingContainer
/// Keyed container which silently discards any value whose key isn't in `availableKeys`
/// (ie. any property without a matching database column) and forwards everything else to
/// the wrapped container
private class FilteredKeyedEncodingContainer<Key: CodingKey>: KeyedEncodingContainerProtocol {
    let codingPath: [CodingKey]
    let availableKeys: [String]
    var originalContainer: KeyedEncodingContainer<Key>
    
    init(availableKeys: [String], originalContainer: KeyedEncodingContainer<Key>) {
        self.availableKeys = availableKeys
        self.codingPath = originalContainer.codingPath
        self.originalContainer = originalContainer
    }
    
    /// Whether `key` maps to a column that exists in the current schema
    private func includes(_ key: Key) -> Bool {
        return availableKeys.contains(key.stringValue)
    }
    
    func encodeNil(forKey key: Key) throws {
        if includes(key) { try originalContainer.encodeNil(forKey: key) }
    }
    
    // Note: `KeyedEncodingContainerProtocol` requires each primitive overload separately so
    // the filtering has to be repeated for every one of them
    
    func encode(_ value: Bool, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: String, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Double, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Float, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Int, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Int8, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Int16, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Int32, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: Int64, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: UInt, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: UInt8, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: UInt16, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: UInt32, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode(_ value: UInt64, forKey key: Key) throws {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    func encode<T>(_ value: T, forKey key: Key) throws where T: Encodable {
        if includes(key) { try originalContainer.encode(value, forKey: key) }
    }
    
    // Nested containers and super encoders are forwarded unfiltered
    
    func nestedContainer<NestedKey>(keyedBy keyType: NestedKey.Type, forKey key: Key) -> KeyedEncodingContainer<NestedKey> where NestedKey: CodingKey {
        return originalContainer.nestedContainer(keyedBy: keyType, forKey: key)
    }
    
    func nestedUnkeyedContainer(forKey key: Key) -> UnkeyedEncodingContainer {
        return originalContainer.nestedUnkeyedContainer(forKey: key)
    }
    
    func superEncoder() -> Encoder {
        return originalContainer.superEncoder()
    }
    
    func superEncoder(forKey key: Key) -> Encoder {
        return originalContainer.superEncoder(forKey: key)
    }
}

View File

@ -138,7 +138,7 @@ public final class JobRunner {
guard canStartJob else { return }
// Start the job runner if needed
db.afterNextTransactionCommit { _ in
db.afterNextTransaction { _ in
queues.wrappedValue[updatedJob.variant]?.start()
}
}
@ -154,7 +154,7 @@ public final class JobRunner {
queues.wrappedValue[job.variant]?.upsert(job, canStartJob: canStartJob)
// Start the job runner if needed
db.afterNextTransactionCommit { _ in
db.afterNextTransaction { _ in
queues.wrappedValue[job.variant]?.start()
}
}
@ -177,7 +177,7 @@ public final class JobRunner {
queues.wrappedValue[updatedJob.variant]?.insert(updatedJob, before: otherJob)
// Start the job runner if needed
db.afterNextTransactionCommit { _ in
db.afterNextTransaction { _ in
queues.wrappedValue[updatedJob.variant]?.start()
}

View File

@ -17,7 +17,7 @@ class IdentitySpec: QuickSpec {
describe("an Identity") {
beforeEach {
mockStorage = Storage(
customWriter: DatabaseQueue(),
customWriter: try! DatabaseQueue(),
customMigrations: [
SNUtilitiesKit.migrations()
]

View File

@ -0,0 +1,681 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import Quick
import Nimble
@testable import SessionUtilitiesKit
/// Tests which ensure the `migrationSafe*` persistence helpers can write records whose
/// Codable properties include columns that don't exist in the database yet, and which
/// document the behaviour of the standard GRDB persistence methods in the same situations
class PersistableRecordUtilitiesSpec: QuickSpec {
    // Kept statically so the writer outlives the Quick closures while migrations run
    static var customWriter: DatabaseQueue!
    
    // MARK: - Test Types
    
    // Immutable record type (uses `PersistableRecord`)
    struct TestType: Codable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
        public static var databaseTableName: String { "TestType" }
        
        public typealias Columns = CodingKeys
        public enum CodingKeys: String, CodingKey, ColumnExpression {
            case columnA
            case columnB
        }
        
        public let columnA: String
        public let columnB: String?
    }
    
    // Mutable record type (uses `MutablePersistableRecord` with a `didInsert` callback)
    struct MutableTestType: Codable, FetchableRecord, MutablePersistableRecord, TableRecord, ColumnExpressible {
        public static var databaseTableName: String { "MutableTestType" }
        
        public typealias Columns = CodingKeys
        public enum CodingKeys: String, CodingKey, ColumnExpression {
            case id
            case columnA
            case columnB
        }
        
        public var id: Int64?
        public let columnA: String
        public let columnB: String?
        
        init(id: Int64? = nil, columnA: String, columnB: String?) {
            self.id = id
            self.columnA = columnA
            self.columnB = columnB
        }
        
        // Store the rowId assigned by the database on insert
        mutating func didInsert(_ inserted: InsertionSuccess) {
            self.id = inserted.rowID
        }
    }
    
    // MARK: - Test Migrations
    
    // Creates both tables, deliberately omitting 'columnB' so the record types initially
    // have more Codable properties than the tables have columns
    enum TestInsertTestTypeMigration: Migration {
        static let target: TargetMigrations.Identifier = .test
        static let identifier: String = "TestInsertTestType"
        static let needsConfigSync: Bool = false
        static let minExpectedRunDuration: TimeInterval = 0
        
        static func migrate(_ db: Database) throws {
            try db.create(table: TestType.self) { t in
                t.column(.columnA, .text).primaryKey()
            }
            
            try db.create(table: MutableTestType.self) { t in
                t.column(.id, .integer).primaryKey(autoincrement: true)
                t.column(.columnA, .text).unique()
            }
        }
    }
    
    // Adds the missing 'columnB' so the schema matches the record types again
    enum TestAddColumnMigration: Migration {
        static let target: TargetMigrations.Identifier = .test
        static let identifier: String = "TestAddColumn"
        static let needsConfigSync: Bool = false
        static let minExpectedRunDuration: TimeInterval = 0
        
        static func migrate(_ db: Database) throws {
            try db.alter(table: TestType.self) { t in
                t.add(.columnB, .text)
            }
            
            try db.alter(table: MutableTestType.self) { t in
                t.add(.columnB, .text)
            }
        }
    }
    
    // MARK: - Spec
    
    override func spec() {
        var customWriter: DatabaseQueue!
        var mockStorage: Storage!
        
        describe("a PersistableRecord") {
            beforeEach {
                // Fresh in-memory database per test so inserted values never leak between tests
                customWriter = try! DatabaseQueue()
                PersistableRecordUtilitiesSpec.customWriter = customWriter
                mockStorage = Storage(
                    customWriter: customWriter,
                    customMigrations: [
                        TargetMigrations(
                            identifier: .test,
                            migrations: (0..<100)
                                .map { _ in [] }
                                .appending([TestInsertTestTypeMigration.self])
                        )
                    ]
                )
            }
            
            afterEach {
                customWriter = nil
                mockStorage = nil
            }
            
            // Schema is missing 'columnB' here so encoding the full record should fail for
            // the standard methods but succeed for the migration safe ones
            context("before running the add column migration") {
                it("fails when using the standard insert") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test1", columnB: "Test1B").insert(db)
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard inserted") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test2", columnB: "Test2B").inserted(db)
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard save and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test3", columnB: "Test3B").save(db)
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard saved and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test4", columnB: "Test4B").saved(db)
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test5", columnB: "Test5B").upsert(db)
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard mutable upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            var result = MutableTestType(columnA: "Test6", columnB: "Test6B")
                            try result.upsert(db)
                            return result
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard upsert and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO TestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test19"])
                            )
                            
                            try TestType(columnA: "Test19", columnB: "Test19B").upsert(db)
                        }
                        .to(throwError())
                    }
                }
                
                it("fails when using the standard mutable upsert and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test20"])
                            )
                            
                            var result = MutableTestType(id: 1, columnA: "Test20", columnB: "Test20B")
                            try result.upsert(db)
                            return result
                        }
                        .to(throwError())
                    }
                }
                
                it("succeeds when using the migration safe insert") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test7", columnB: "Test7B").migrationSafeInsert(db)
                        }
                        .toNot(throwError())
                    }
                    
                    mockStorage.read { db in
                        expect(try TestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the migration safe inserted") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test8", columnB: "Test8B").migrationSafeInserted(db)
                        }
                        .toNot(throwError())
                        
                        expect {
                            try MutableTestType(columnA: "Test9", columnB: "Test9B")
                                .migrationSafeInserted(db)
                                .id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        expect(try MutableTestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the migration safe save and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test10", columnB: "Test10B").migrationSafeSave(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the migration safe saved and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test11", columnB: "Test11B").migrationSafeSaved(db)
                        }
                        .toNot(throwError())
                        
                        expect {
                            try MutableTestType(columnA: "Test12", columnB: "Test12B")
                                .migrationSafeSaved(db)
                                .id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        expect(try MutableTestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the migration safe upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test13", columnB: "Test13B").migrationSafeUpsert(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the migration safe mutable upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            var result = MutableTestType(columnA: "Test14", columnB: "Test14B")
                            try result.migrationSafeUpsert(db)
                            return result
                        }
                        .toNot(throwError())
                    }
                    
                    mockStorage.read { db in
                        expect(try MutableTestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                // Note: The built-in 'update' method only updates existing columns so this shouldn't fail
                it("succeeds when using the standard save and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO TestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test16"])
                            )
                            
                            try TestType(columnA: "Test16", columnB: "Test16B").save(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                // Note: The built-in 'update' method only updates existing columns so this won't fail
                // due to the structure discrepancy but won't update the id as that only happens on
                // insert
                it("succeeds when using the standard saved and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test17"])
                            )
                            
                            _ = try MutableTestType(id: 1, columnA: "Test17", columnB: "Test17B").saved(db)
                        }
                        .toNot(throwError())
                        
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test18"])
                            )
                            
                            return try MutableTestType(id: 2, columnA: "Test18", columnB: "Test18B")
                                .saved(db)
                                .id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        let types: [MutableTestType]? = try MutableTestType.fetchAll(db)
                        
                        expect(types).toNot(beNil())
                        expect(types?.compactMap { $0.id }.count).to(equal(types?.count))
                    }
                }
            }
            
            // Schema now contains 'columnB' so both the standard and migration safe methods
            // should behave the same way
            context("after running the add column migration") {
                beforeEach {
                    var migrator: DatabaseMigrator = DatabaseMigrator()
                    migrator.registerMigration(
                        TestAddColumnMigration.target,
                        migration: TestAddColumnMigration.self
                    )
                    
                    expect { try migrator.migrate(customWriter) }
                        .toNot(throwError())
                }
                
                it("succeeds when using the standard insert") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test1", columnB: "Test1B").insert(db)
                        }
                        .toNot(throwError())
                    }
                    
                    mockStorage.read { db in
                        expect(try TestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the standard inserted") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test2", columnB: "Test2B").inserted(db)
                        }
                        .toNot(throwError())
                    }
                    
                    mockStorage.read { db in
                        expect(try MutableTestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the standard save and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test3", columnB: "Test3B").save(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the standard saved and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test3", columnB: "Test3B").saved(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the standard save and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO TestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test4"])
                            )
                            
                            try TestType(columnA: "Test4", columnB: "Test4B").save(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                // Note: The built-in 'update' method won't update the id as that only happens on
                // insert
                it("succeeds when using the standard saved and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test5"])
                            )
                            
                            _ = try MutableTestType(id: 1, columnA: "Test5", columnB: "Test5B").saved(db)
                        }
                        .toNot(throwError())
                        
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test6"])
                            )
                            
                            return try MutableTestType(id: 2, columnA: "Test6", columnB: "Test6B")
                                .saved(db)
                                .id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        let types: [MutableTestType]? = try MutableTestType.fetchAll(db)
                        
                        expect(types).toNot(beNil())
                        expect(types?.compactMap { $0.id }.count).to(equal(types?.count))
                    }
                }
                
                it("succeeds when using the standard upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test7", columnB: "Test7B").upsert(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the standard mutable upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            var result = MutableTestType(columnA: "Test8", columnB: "Test8B")
                            try result.upsert(db)
                            return result
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the standard upsert and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO TestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test9"])
                            )
                            
                            try TestType(columnA: "Test9", columnB: "Test9B").upsert(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                // Note: The built-in 'update' method won't update the id as that only happens on
                // insert
                it("succeeds when using the standard mutable upsert and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test10"])
                            )
                            
                            var result = MutableTestType(id: 1, columnA: "Test10", columnB: "Test10B")
                            try result.upsert(db)
                            return result
                        }
                        .toNot(throwError())
                        
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test11"])
                            )
                            
                            var result = MutableTestType(id: 2, columnA: "Test11", columnB: "Test11B")
                            try result.upsert(db)
                            return result.id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        let types: [MutableTestType]? = try MutableTestType.fetchAll(db)
                        
                        expect(types).toNot(beNil())
                        expect(types?.compactMap { $0.id }.count).to(equal(types?.count))
                    }
                }
                
                it("succeeds when using the migration safe insert") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test12", columnB: "Test12B").migrationSafeInsert(db)
                        }
                        .toNot(throwError())
                    }
                    
                    mockStorage.read { db in
                        expect(try TestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the migration safe inserted") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test13", columnB: "Test13B").migrationSafeInserted(db)
                        }
                        .toNot(throwError())
                        
                        expect {
                            try MutableTestType(columnA: "Test14", columnB: "Test14B")
                                .migrationSafeInserted(db)
                                .id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        expect(try MutableTestType.fetchAll(db))
                            .toNot(beNil())
                    }
                }
                
                it("succeeds when using the migration safe save and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test15", columnB: "Test15B").migrationSafeSave(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the migration safe saved and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try MutableTestType(columnA: "Test16", columnB: "Test16B").migrationSafeSaved(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the migration safe save and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO TestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test17"])
                            )
                            
                            try TestType(columnA: "Test17", columnB: "Test17B").migrationSafeSave(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                // Note: The built-in 'update' method won't update the id as that only happens on
                // insert
                it("succeeds when using the migration safe saved and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test18"])
                            )
                            
                            _ = try MutableTestType(id: 1, columnA: "Test18", columnB: "Test18B")
                                .migrationSafeSaved(db)
                        }
                        .toNot(throwError())
                        
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test19"])
                            )
                            
                            return try MutableTestType(id: 2, columnA: "Test19", columnB: "Test19B")
                                .migrationSafeSaved(db)
                                .id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        let types: [MutableTestType]? = try MutableTestType.fetchAll(db)
                        
                        expect(types).toNot(beNil())
                        expect(types?.compactMap { $0.id }.count).to(equal(types?.count))
                    }
                }
                
                it("succeeds when using the migration safe upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            try TestType(columnA: "Test20", columnB: "Test20B").migrationSafeUpsert(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the migration safe mutable upsert and the item does not already exist") {
                    mockStorage.write { db in
                        expect {
                            var result = MutableTestType(columnA: "Test21", columnB: "Test21B")
                            try result.migrationSafeUpsert(db)
                            return result
                        }
                        .toNot(throwError())
                    }
                }
                
                it("succeeds when using the migration safe upsert and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO TestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test22"])
                            )
                            
                            try TestType(columnA: "Test22", columnB: "Test22B").migrationSafeUpsert(db)
                        }
                        .toNot(throwError())
                    }
                }
                
                // Note: The built-in 'update' method won't update the id as that only happens on
                // insert
                it("succeeds when using the migration safe mutable upsert and the item already exists") {
                    mockStorage.write { db in
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test23"])
                            )
                            
                            var result = MutableTestType(id: 1, columnA: "Test23", columnB: "Test23B")
                            try result.migrationSafeUpsert(db)
                            return result
                        }
                        .toNot(throwError())
                        
                        expect {
                            try db.execute(
                                sql: "INSERT INTO MutableTestType (columnA) VALUES (?)",
                                arguments: StatementArguments(["Test24"])
                            )
                            
                            var result = MutableTestType(id: 2, columnA: "Test24", columnB: "Test24B")
                            try result.migrationSafeUpsert(db)
                            return result.id
                        }
                        .toNot(beNil())
                    }
                    
                    mockStorage.read { db in
                        let types: [MutableTestType]? = try MutableTestType.fetchAll(db)
                        
                        expect(types).toNot(beNil())
                        expect(types?.compactMap { $0.id }.count).to(equal(types?.count))
                    }
                }
            }
        }
    }
}

View File

@ -1,9 +1,9 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionMessagingKit
import SessionUtilitiesKit
import UIKit
import SessionUIKit
public enum AppSetup {
@ -12,7 +12,7 @@ public enum AppSetup {
public static func setupEnvironment(
appSpecificBlock: @escaping () -> (),
migrationProgressChanged: ((CGFloat, TimeInterval) -> ())? = nil,
migrationsCompletion: @escaping (Error?, Bool) -> ()
migrationsCompletion: @escaping (Result<Database, Error>, Bool) -> ()
) {
guard !AppSetup.hasRun else { return }
@ -61,7 +61,7 @@ public enum AppSetup {
public static func runPostSetupMigrations(
backgroundTask: OWSBackgroundTask? = nil,
migrationProgressChanged: ((CGFloat, TimeInterval) -> ())? = nil,
migrationsCompletion: @escaping (Error?, Bool) -> ()
migrationsCompletion: @escaping (Result<Database, Error>, Bool) -> ()
) {
var backgroundTask: OWSBackgroundTask? = (backgroundTask ?? OWSBackgroundTask(labelStr: #function))
@ -73,9 +73,9 @@ public enum AppSetup {
SNUIKit.migrations()
],
onProgressUpdate: migrationProgressChanged,
onComplete: { error, needsConfigSync in
onComplete: { result, needsConfigSync in
DispatchQueue.main.async {
migrationsCompletion(error, needsConfigSync)
migrationsCompletion(result, needsConfigSync)
// The 'if' is only there to prevent the "variable never read" warning from showing
if backgroundTask != nil { backgroundTask = nil }