Updated to the latest lib, started handling UserGroups

Added unit tests for the UserGroup config type
Updated the logic to use 'pinnedPriority' and deprecated 'isPinned' (not sorting by it yet, but using the value; a short sorting sketch follows the commit details below)
Updated the code to use the libSession community URL parsing instead of custom parsing
Fixed an issue where initialising Data with a libSession value wasn't returning nil when the data had no actual value
Fixed an issue where the OpenGroupPoller could use an incorrect failure count when handling poll responses
Fixed the UpdateExpiryRequest signature
Morgan Pretty 2023-02-28 17:23:56 +11:00
parent ff36b3eeab
commit 8eed08b5b4
74 changed files with 3923 additions and 1734 deletions
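A minimal sketch (not part of the commit) of the ordering change described above: once `pinnedPriority` replaces the boolean `isPinned`, pinned conversations sort by priority first and fall back to recency, mirroring the comparator added to HomeViewModel further down in this diff. The `ThreadSummary` type and `sortThreads` helper are illustrative stand-ins, not names from the codebase.

import Foundation

// Stand-in for the view model data used when ordering the conversation list
struct ThreadSummary {
    let id: String
    let pinnedPriority: Int32    // 0 = not pinned, larger values appear earlier
    let lastInteractionDate: Date
}

// Pinned threads (higher priority) come first; ties fall back to most recent activity
func sortThreads(_ threads: [ThreadSummary]) -> [ThreadSummary] {
    return threads.sorted { lhs, rhs in
        guard lhs.pinnedPriority == rhs.pinnedPriority else {
            return lhs.pinnedPriority > rhs.pinnedPriority
        }
        return lhs.lastInteractionDate > rhs.lastInteractionDate
    }
}

With this shape, unpinning a thread simply clears the value back to 0, while pinning re-numbers the currently pinned threads and appends the new one at the end (see the refreshPinnedPriorities SQL added to SessionThread later in this diff).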

View File

@ -635,7 +635,7 @@
FD432432299C6933008A0213 /* _011_AddPendingReadReceipts.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD432431299C6933008A0213 /* _011_AddPendingReadReceipts.swift */; };
FD432434299C6985008A0213 /* PendingReadReceipt.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD432433299C6985008A0213 /* PendingReadReceipt.swift */; };
FD432437299DEA38008A0213 /* TypeConversion+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD432436299DEA38008A0213 /* TypeConversion+Utilities.swift */; };
FD43EE9D297A5190009C87C5 /* SessionUtil+Groups.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD43EE9C297A5190009C87C5 /* SessionUtil+Groups.swift */; };
FD43EE9D297A5190009C87C5 /* SessionUtil+UserGroups.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD43EE9C297A5190009C87C5 /* SessionUtil+UserGroups.swift */; };
FD43EE9F297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD43EE9E297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift */; };
FD4B200E283492210034334B /* InsetLockableTableView.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD4B200D283492210034334B /* InsetLockableTableView.swift */; };
FD52090028AF6153006098F6 /* OWSBackgroundTask.m in Sources */ = {isa = PBXBuildFile; fileRef = C33FDC1B255A581F00E217F9 /* OWSBackgroundTask.m */; };
@ -749,6 +749,10 @@
FD9004152818B46300ABAAF6 /* JobRunner.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDF0B7432804EF1B004C14C5 /* JobRunner.swift */; };
FD9004162818B46700ABAAF6 /* JobRunnerError.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDE77F68280F9EDA002CFC5D /* JobRunnerError.swift */; };
FD9B30F3293EA0BF008DEE3E /* BatchResponseSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD9B30F2293EA0BF008DEE3E /* BatchResponseSpec.swift */; };
FDA1E83629A5748F00C5C3BD /* ConfigUserGroupsSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA1E83529A5748F00C5C3BD /* ConfigUserGroupsSpec.swift */; };
FDA1E83929A5771A00C5C3BD /* LibSessionSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA1E83829A5771A00C5C3BD /* LibSessionSpec.swift */; };
FDA1E83B29A5F2D500C5C3BD /* SessionUtil+Shared.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA1E83A29A5F2D500C5C3BD /* SessionUtil+Shared.swift */; };
FDA1E83D29AC71A800C5C3BD /* SessionUtilSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA1E83C29AC71A800C5C3BD /* SessionUtilSpec.swift */; };
FDA8EAFE280E8B78002B68E5 /* FailedMessageSendsJob.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA8EAFD280E8B78002B68E5 /* FailedMessageSendsJob.swift */; };
FDA8EB00280E8D58002B68E5 /* FailedAttachmentDownloadsJob.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA8EAFF280E8D58002B68E5 /* FailedAttachmentDownloadsJob.swift */; };
FDA8EB10280F8238002B68E5 /* Codable+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDA8EB0F280F8238002B68E5 /* Codable+Utilities.swift */; };
@ -810,7 +814,7 @@
FDD2506E283711D600198BDA /* DifferenceKit+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDD2506D283711D600198BDA /* DifferenceKit+Utilities.swift */; };
FDD250702837199200198BDA /* GarbageCollectionJob.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDD2506F2837199200198BDA /* GarbageCollectionJob.swift */; };
FDD250722837234B00198BDA /* MediaGalleryNavigationController.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDD250712837234B00198BDA /* MediaGalleryNavigationController.swift */; };
FDDC08F229A300E800BF9681 /* TypeConversionUtilitiesSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDDC08F129A300E800BF9681 /* TypeConversionUtilitiesSpec.swift */; };
FDDC08F229A300E800BF9681 /* LibSessionTypeConversionUtilitiesSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDDC08F129A300E800BF9681 /* LibSessionTypeConversionUtilitiesSpec.swift */; };
FDE658A129418C7900A33BC1 /* CryptoKit+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDE658A029418C7900A33BC1 /* CryptoKit+Utilities.swift */; };
FDE658A329418E2F00A33BC1 /* KeyPair.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDE658A229418E2F00A33BC1 /* KeyPair.swift */; };
FDE77F6B280FEB28002CFC5D /* ControlMessageProcessRecord.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDE77F6A280FEB28002CFC5D /* ControlMessageProcessRecord.swift */; };
@ -1765,7 +1769,7 @@
FD432431299C6933008A0213 /* _011_AddPendingReadReceipts.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = _011_AddPendingReadReceipts.swift; sourceTree = "<group>"; };
FD432433299C6985008A0213 /* PendingReadReceipt.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PendingReadReceipt.swift; sourceTree = "<group>"; };
FD432436299DEA38008A0213 /* TypeConversion+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "TypeConversion+Utilities.swift"; sourceTree = "<group>"; };
FD43EE9C297A5190009C87C5 /* SessionUtil+Groups.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+Groups.swift"; sourceTree = "<group>"; };
FD43EE9C297A5190009C87C5 /* SessionUtil+UserGroups.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+UserGroups.swift"; sourceTree = "<group>"; };
FD43EE9E297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+ConvoInfoVolatile.swift"; sourceTree = "<group>"; };
FD4B200D283492210034334B /* InsetLockableTableView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = InsetLockableTableView.swift; sourceTree = "<group>"; };
FD52090228B4680F006098F6 /* RadioButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RadioButton.swift; sourceTree = "<group>"; };
@ -1871,6 +1875,10 @@
FD8ECF93293856AF00C0D1BB /* Randomness.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Randomness.swift; sourceTree = "<group>"; };
FD9004132818AD0B00ABAAF6 /* _002_SetupStandardJobs.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = _002_SetupStandardJobs.swift; sourceTree = "<group>"; };
FD9B30F2293EA0BF008DEE3E /* BatchResponseSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BatchResponseSpec.swift; sourceTree = "<group>"; };
FDA1E83529A5748F00C5C3BD /* ConfigUserGroupsSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ConfigUserGroupsSpec.swift; sourceTree = "<group>"; };
FDA1E83829A5771A00C5C3BD /* LibSessionSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibSessionSpec.swift; sourceTree = "<group>"; };
FDA1E83A29A5F2D500C5C3BD /* SessionUtil+Shared.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+Shared.swift"; sourceTree = "<group>"; };
FDA1E83C29AC71A800C5C3BD /* SessionUtilSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SessionUtilSpec.swift; sourceTree = "<group>"; };
FDA8EAFD280E8B78002B68E5 /* FailedMessageSendsJob.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FailedMessageSendsJob.swift; sourceTree = "<group>"; };
FDA8EAFF280E8D58002B68E5 /* FailedAttachmentDownloadsJob.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FailedAttachmentDownloadsJob.swift; sourceTree = "<group>"; };
FDA8EB0F280F8238002B68E5 /* Codable+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Codable+Utilities.swift"; sourceTree = "<group>"; };
@ -1931,7 +1939,7 @@
FDD2506D283711D600198BDA /* DifferenceKit+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "DifferenceKit+Utilities.swift"; sourceTree = "<group>"; };
FDD2506F2837199200198BDA /* GarbageCollectionJob.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GarbageCollectionJob.swift; sourceTree = "<group>"; };
FDD250712837234B00198BDA /* MediaGalleryNavigationController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MediaGalleryNavigationController.swift; sourceTree = "<group>"; };
FDDC08F129A300E800BF9681 /* TypeConversionUtilitiesSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TypeConversionUtilitiesSpec.swift; sourceTree = "<group>"; };
FDDC08F129A300E800BF9681 /* LibSessionTypeConversionUtilitiesSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibSessionTypeConversionUtilitiesSpec.swift; sourceTree = "<group>"; };
FDE658A029418C7900A33BC1 /* CryptoKit+Utilities.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "CryptoKit+Utilities.swift"; sourceTree = "<group>"; };
FDE658A229418E2F00A33BC1 /* KeyPair.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = KeyPair.swift; sourceTree = "<group>"; };
FDE7214F287E50D50093DF33 /* ProtoWrappers.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = ProtoWrappers.py; sourceTree = "<group>"; };
@ -4061,10 +4069,10 @@
FD8ECF802934385900C0D1BB /* LibSessionUtil */ = {
isa = PBXGroup;
children = (
FDA1E83729A5770C00C5C3BD /* Configs */,
FDDC08F029A300D500BF9681 /* Utilities */,
FD2B4AFA29429D1000AB4848 /* ConfigContactsSpec.swift */,
FD8ECF812934387A00C0D1BB /* ConfigUserProfileSpec.swift */,
FDBB25E02983909300F1508E /* ConfigConvoInfoVolatileSpec.swift */,
FDA1E83829A5771A00C5C3BD /* LibSessionSpec.swift */,
FDA1E83C29AC71A800C5C3BD /* SessionUtilSpec.swift */,
);
path = LibSessionUtil;
sourceTree = "<group>";
@ -4075,7 +4083,8 @@
FD8ECF8F29381FC200C0D1BB /* SessionUtil+UserProfile.swift */,
FD2B4AFC294688D000AB4848 /* SessionUtil+Contacts.swift */,
FD43EE9E297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift */,
FD43EE9C297A5190009C87C5 /* SessionUtil+Groups.swift */,
FD43EE9C297A5190009C87C5 /* SessionUtil+UserGroups.swift */,
FDA1E83A29A5F2D500C5C3BD /* SessionUtil+Shared.swift */,
);
path = "Config Handling";
sourceTree = "<group>";
@ -4097,6 +4106,17 @@
path = Networking;
sourceTree = "<group>";
};
FDA1E83729A5770C00C5C3BD /* Configs */ = {
isa = PBXGroup;
children = (
FD2B4AFA29429D1000AB4848 /* ConfigContactsSpec.swift */,
FD8ECF812934387A00C0D1BB /* ConfigUserProfileSpec.swift */,
FDBB25E02983909300F1508E /* ConfigConvoInfoVolatileSpec.swift */,
FDA1E83529A5748F00C5C3BD /* ConfigUserGroupsSpec.swift */,
);
path = Configs;
sourceTree = "<group>";
};
FDC2909227D710A9005DAE71 /* Types */ = {
isa = PBXGroup;
children = (
@ -4228,7 +4248,7 @@
FDDC08F029A300D500BF9681 /* Utilities */ = {
isa = PBXGroup;
children = (
FDDC08F129A300E800BF9681 /* TypeConversionUtilitiesSpec.swift */,
FDDC08F129A300E800BF9681 /* LibSessionTypeConversionUtilitiesSpec.swift */,
);
path = Utilities;
sourceTree = "<group>";
@ -5738,7 +5758,7 @@
FD245C692850666800B966DD /* ExpirationTimerUpdate.swift in Sources */,
FD42F9A8285064B800A0C77D /* PushNotificationAPI.swift in Sources */,
FD245C6C2850669200B966DD /* MessageReceiveJob.swift in Sources */,
FD43EE9D297A5190009C87C5 /* SessionUtil+Groups.swift in Sources */,
FD43EE9D297A5190009C87C5 /* SessionUtil+UserGroups.swift in Sources */,
FD83B9CC27D179BC005E1583 /* FSEndpoint.swift in Sources */,
FD7115F228C6CB3900B47552 /* _010_AddThreadIdToFTS.swift in Sources */,
FD716E6428502DDD00C96BF4 /* CallManagerProtocol.swift in Sources */,
@ -5774,6 +5794,7 @@
C32C598A256D0664003C73A2 /* SNProtoEnvelope+Conversion.swift in Sources */,
FDC438CB27BB7DB100C60D73 /* UpdateMessageRequest.swift in Sources */,
FD8ECF7F2934298100C0D1BB /* SharedConfigDump.swift in Sources */,
FDA1E83B29A5F2D500C5C3BD /* SessionUtil+Shared.swift in Sources */,
C352A2FF25574B6300338F3E /* MessageSendJob.swift in Sources */,
FDC438C327BB512200C60D73 /* SodiumProtocols.swift in Sources */,
B8856D11256F112A001CE70E /* OWSAudioSession.swift in Sources */,
@ -6058,7 +6079,7 @@
buildActionMask = 2147483647;
files = (
FD3C905C27E3FBEF00CD579F /* BatchRequestInfoSpec.swift in Sources */,
FDDC08F229A300E800BF9681 /* TypeConversionUtilitiesSpec.swift in Sources */,
FDDC08F229A300E800BF9681 /* LibSessionTypeConversionUtilitiesSpec.swift in Sources */,
FD859EFA27C2F5C500510D0C /* MockGenericHash.swift in Sources */,
FDC2909427D710B4005DAE71 /* SOGSEndpointSpec.swift in Sources */,
FDC290B327DFF9F5005DAE71 /* TestOnionRequestAPI.swift in Sources */,
@ -6080,9 +6101,11 @@
FD859EF427C2F49200510D0C /* MockSodium.swift in Sources */,
FD078E4D27E17156000769AF /* MockOGMCache.swift in Sources */,
FD078E5227E1760A000769AF /* OGMDependencyExtensions.swift in Sources */,
FDA1E83629A5748F00C5C3BD /* ConfigUserGroupsSpec.swift in Sources */,
FD859EFC27C2F60700510D0C /* MockEd25519.swift in Sources */,
FDC290A627D860CE005DAE71 /* Mock.swift in Sources */,
FD2B4AFB29429D1000AB4848 /* ConfigContactsSpec.swift in Sources */,
FDA1E83D29AC71A800C5C3BD /* SessionUtilSpec.swift in Sources */,
FD83B9C027CF2294005E1583 /* TestConstants.swift in Sources */,
FD3C906F27E43E8700CD579F /* MockBox.swift in Sources */,
FDC4389A27BA002500C60D73 /* OpenGroupAPISpec.swift in Sources */,
@ -6093,6 +6116,7 @@
FDC290A827D9B46D005DAE71 /* NimbleExtensions.swift in Sources */,
FDC2908F27D70938005DAE71 /* SendDirectMessageRequestSpec.swift in Sources */,
FDC438BD27BB2AB400C60D73 /* Mockable.swift in Sources */,
FDA1E83929A5771A00C5C3BD /* LibSessionSpec.swift in Sources */,
FD859EF627C2F52C00510D0C /* MockSign.swift in Sources */,
FDC2908927D70656005DAE71 /* RoomPollInfoSpec.swift in Sources */,
FDFD645D27F273F300808CA1 /* MockGeneralCache.swift in Sources */,
@ -7577,7 +7601,7 @@
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
SWIFT_VERSION = 5.0;
TARGETED_DEVICE_FAMILY = "1,2";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/Session.app/Session";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/Session.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Session";
};
name = Debug;
};
@ -7642,7 +7666,7 @@
SWIFT_OPTIMIZATION_LEVEL = "-O";
SWIFT_VERSION = 5.0;
TARGETED_DEVICE_FAMILY = "1,2";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/Session.app/Session";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/Session.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/Session";
VALIDATE_PRODUCT = YES;
};
name = "App Store Release";

View File

@ -334,7 +334,7 @@ final class NewClosedGroupVC: BaseVC, UITableViewDataSource, UITableViewDelegate
ModalActivityIndicatorViewController.present(fromViewController: navigationController!, message: message) { [weak self] _ in
Storage.shared
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
MessageSender.createClosedGroup(db, name: name, members: selectedContacts)
try MessageSender.createClosedGroup(db, name: name, members: selectedContacts)
}
.receive(on: DispatchQueue.main)
.sinkUntilComplete(
@ -357,7 +357,6 @@ final class NewClosedGroupVC: BaseVC, UITableViewDataSource, UITableViewDelegate
}
},
receiveValue: { thread in
ConfigurationSyncJob.enqueue()
self?.presentingViewController?.dismiss(animated: true, completion: nil)
SessionApp.presentConversation(for: thread.id, action: .compose, animated: false)
}

View File

@ -1375,6 +1375,7 @@ extension ConversationVC:
)
),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread).defaultNamespace,
interactionId: cellViewModel.id
)
@ -1571,7 +1572,8 @@ extension ConversationVC:
guard let presentingViewController: UIViewController = modal.presentingViewController else {
return
}
guard let (room, server, publicKey) = OpenGroupManager.parseOpenGroup(from: url) else {
guard let (room, server, publicKey) = SessionUtil.parseCommunity(url: url) else {
let errorModal: ConfirmationModal = ConfirmationModal(
info: ConfirmationModal.Info(
title: "COMMUNITY_ERROR_GENERIC".localized(),
@ -1589,7 +1591,8 @@ extension ConversationVC:
db,
roomToken: room,
server: server,
publicKey: publicKey
publicKey: publicKey,
calledFromConfigHandling: false
)
}
.receive(on: DispatchQueue.main)

View File

@ -334,7 +334,7 @@ class ThreadSettingsViewModel: SessionTableViewModel<ThreadSettingsViewModel.Nav
let publicKey: String = threadViewModel.openGroupPublicKey
else { return }
UIPasteboard.general.string = OpenGroup.urlFor(
UIPasteboard.general.string = SessionUtil.communityUrlFor(
server: server,
roomToken: roomToken,
publicKey: publicKey
@ -706,18 +706,18 @@ class ThreadSettingsViewModel: SessionTableViewModel<ThreadSettingsViewModel.Nav
let publicKey: String = threadViewModel.openGroupPublicKey
else { return }
let communityUrl: String = SessionUtil.communityUrlFor(
server: server,
roomToken: roomToken,
publicKey: publicKey
)
dependencies.storage.writeAsync { db in
let urlString: String = OpenGroup.urlFor(
server: server,
roomToken: roomToken,
publicKey: publicKey
)
try selectedUsers.forEach { userId in
let thread: SessionThread = try SessionThread.fetchOrCreate(db, id: userId, variant: .contact)
try LinkPreview(
url: urlString,
url: communityUrl,
variant: .openGroupInvitation,
title: name
)
@ -734,7 +734,7 @@ class ThreadSettingsViewModel: SessionTableViewModel<ThreadSettingsViewModel.Nav
.filter(DisappearingMessagesConfiguration.Columns.isEnabled == true)
.asRequest(of: TimeInterval.self)
.fetchOne(db),
linkPreviewUrl: urlString
linkPreviewUrl: communityUrl
)
.inserted(db)

View File

@ -747,11 +747,11 @@ final class HomeVC: BaseVC, UITableViewDataSource, UITableViewDelegate, SeedRemi
}
let pin: UIContextualAction = UIContextualAction(
title: (threadViewModel.threadIsPinned ?
title: (threadViewModel.threadPinnedPriority > 0 ?
"UNPIN_BUTTON_TEXT".localized() :
"PIN_BUTTON_TEXT".localized()
),
icon: (threadViewModel.threadIsPinned ?
icon: (threadViewModel.threadPinnedPriority > 0 ?
UIImage(systemName: "pin.slash") :
UIImage(systemName: "pin")
),
@ -763,16 +763,27 @@ final class HomeVC: BaseVC, UITableViewDataSource, UITableViewDelegate, SeedRemi
tableView: tableView
) { _, _, completionHandler in
(tableView.cellForRow(at: indexPath) as? FullConversationCell)?.optimisticUpdate(
isPinned: !threadViewModel.threadIsPinned
isPinned: !(threadViewModel.threadPinnedPriority > 0)
)
completionHandler(true)
// Delay the change to give the cell "unswipe" animation some time to complete
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + unswipeAnimationDelay) {
Storage.shared.writeAsync { db in
try SessionThread
.filter(id: threadViewModel.threadId)
.updateAll(db, SessionThread.Columns.isPinned.set(to: !threadViewModel.threadIsPinned))
// If we are unpinning then just clear the value
guard threadViewModel.threadPinnedPriority == 0 else {
try SessionThread
.filter(id: threadViewModel.threadId)
.updateAllAndConfig(
db,
SessionThread.Columns.pinnedPriority.set(to: 0)
)
return
}
// Otherwise we want to reset the priority values for all of the currently
// pinned threads (adding the newly pinned one at the end)
try SessionThread.refreshPinnedPriorities(db, adding: threadViewModel.threadId)
}
}
}

View File

@ -62,7 +62,7 @@ public class HomeViewModel {
columns: [
.id,
.shouldBeVisible,
.isPinned,
.pinnedPriority,
.mutedUntilTimestamp,
.onlyNotifyForMentions,
.markedAsUnread
@ -326,8 +326,9 @@ public class HomeViewModel {
elements: data
.filter { $0.id != SessionThreadViewModel.invalidId }
.sorted { lhs, rhs -> Bool in
if lhs.threadIsPinned && !rhs.threadIsPinned { return true }
if !lhs.threadIsPinned && rhs.threadIsPinned { return false }
guard lhs.threadPinnedPriority == rhs.threadPinnedPriority else {
return lhs.threadPinnedPriority > rhs.threadPinnedPriority
}
return lhs.lastInteractionDate > rhs.lastInteractionDate
}
@ -373,7 +374,11 @@ public class HomeViewModel {
.sinkUntilComplete()
case .community:
OpenGroupManager.shared.delete(db, openGroupId: threadId)
OpenGroupManager.shared.delete(
db,
openGroupId: threadId,
calledFromConfigHandling: false
)
default: break
}

View File

@ -490,7 +490,8 @@ class MessageRequestsViewController: BaseVC, UITableViewDelegate, UITableViewDat
try ClosedGroup.removeKeysAndUnsubscribe(
db,
threadIds: closedGroupThreadIds,
removeGroupData: true
removeGroupData: true,
calledFromConfigHandling: false
)
}
})

View File

@ -195,11 +195,12 @@ public class MessageRequestsViewModel {
try ClosedGroup.removeKeysAndUnsubscribe(
db,
threadId: threadId,
removeGroupData: true
removeGroupData: true,
calledFromConfigHandling: false
)
// Trigger a config sync
ConfigurationSyncJob.enqueue(db)
ConfigurationSyncJob.enqueue(db, publicKey: getUserHexEncodedPublicKey(db))
}
}

View File

@ -295,7 +295,7 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
appVersion.lastAppVersion != appVersion.currentAppVersion
)
{
ConfigurationSyncJob.enqueue(db)
ConfigurationSyncJob.enqueue(db, publicKey: getUserHexEncodedPublicKey(db))
}
}
}
@ -666,7 +666,9 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
Storage.shared
.writeAsync(
updates: { db in ConfigurationSyncJob.enqueue(db) },
updates: { db in
ConfigurationSyncJob.enqueue(db, publicKey: getUserHexEncodedPublicKey(db))
},
completion: { _, result in
switch result {
case .failure: break

View File

@ -151,7 +151,7 @@ final class JoinOpenGroupVC: BaseVC, UIPageViewControllerDataSource, UIPageViewC
fileprivate func joinOpenGroup(with urlString: String) {
// A V2 open group URL will look like: <optional scheme> + <host> + <optional port> + <room> + <public key>
// The host doesn't parse if no explicit scheme is provided
guard let (room, server, publicKey) = OpenGroupManager.parseOpenGroup(from: urlString) else {
guard let (room, server, publicKey) = SessionUtil.parseCommunity(url: urlString) else {
showError(
title: "invalid_url".localized(),
message: "COMMUNITY_ERROR_INVALID_URL".localized()
@ -169,12 +169,13 @@ final class JoinOpenGroupVC: BaseVC, UIPageViewControllerDataSource, UIPageViewC
ModalActivityIndicatorViewController.present(fromViewController: navigationController, canCancel: false) { [weak self] _ in
Storage.shared
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { db in
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
OpenGroupManager.shared.add(
db,
roomToken: roomToken,
server: server,
publicKey: publicKey
publicKey: publicKey,
calledFromConfigHandling: false
)
}
.receive(on: DispatchQueue.main)

View File

@ -397,7 +397,7 @@ public final class FullConversationCell: UITableViewCell {
accentLineView.alpha = (unreadCount > 0 ? 1 : 0.0001) // Setting the alpha to exactly 0 causes an issue on iOS 12
}
isPinnedIcon.isHidden = !cellViewModel.threadIsPinned
isPinnedIcon.isHidden = (cellViewModel.threadPinnedPriority == 0)
unreadCountView.isHidden = (unreadCount <= 0)
unreadImageView.isHidden = (!unreadCountView.isHidden || !threadIsUnread)
unreadCountLabel.text = (unreadCount <= 0 ?

View File

@ -135,6 +135,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
db,
message: message,
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread).defaultNamespace,
interactionId: interactionId
)
)
@ -194,6 +195,8 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
sentTimestampMs: UInt64(SnodeAPI.currentOffsetTimestampMs())
),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread)
.defaultNamespace,
interactionId: nil
)
}
@ -257,6 +260,8 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
sdps: [ sdp.sdp ]
),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread)
.defaultNamespace,
interactionId: nil
)
}
@ -314,6 +319,8 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
sdps: candidates.map { $0.sdp }
),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread)
.defaultNamespace,
interactionId: nil
)
)
@ -338,6 +345,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
sdps: []
),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread).defaultNamespace,
interactionId: nil
)

View File

@ -17,8 +17,15 @@ enum _012_SharedUtilChanges: Migration {
// Add `markedAsUnread` to the thread table
try db.alter(table: SessionThread.self) { t in
t.add(.markedAsUnread, .boolean)
t.add(.pinnedPriority, .integer)
}
// Add an index for the 'ClosedGroupKeyPair' so we can lookup existing keys
try db.createIndex(
on: ClosedGroupKeyPair.self,
columns: [.threadId, .publicKey, .secretKey]
)
// New table for storing the latest config dump for each type
try db.create(table: ConfigDump.self) { t in
t.column(.variant, .text)
@ -28,11 +35,18 @@ enum _012_SharedUtilChanges: Migration {
.indexed()
t.column(.data, .blob)
.notNull()
t.column(.combinedMessageHashes, .text)
t.primaryKey([.variant, .publicKey])
}
// Migrate the 'isPinned' value to 'pinnedPriority'
try SessionThread
.filter(SessionThread.Columns.isPinned == true)
.updateAll(
db,
SessionThread.Columns.pinnedPriority.set(to: 1)
)
// If we don't have an ed25519 key then no need to create cached dump data
let userPublicKey: String = getUserHexEncodedPublicKey(db)
@ -41,7 +55,17 @@ enum _012_SharedUtilChanges: Migration {
return
}
// Create a dump for the user profile data
// MARK: - Shared Data
let pinnedThreadIds: [String] = try SessionThread
.select(SessionThread.Columns.id)
.filter(SessionThread.Columns.isPinned)
.order(Column.rowID)
.asRequest(of: String.self)
.fetchAll(db)
// MARK: - UserProfile Config Dump
let userProfileConf: UnsafeMutablePointer<config_object>? = try SessionUtil.loadState(
for: .userProfile,
secretKey: secretKey,
@ -49,7 +73,7 @@ enum _012_SharedUtilChanges: Migration {
)
let userProfileConfResult: SessionUtil.ConfResult = try SessionUtil.update(
profile: Profile.fetchOrCreateCurrentUser(db),
in: Atomic(userProfileConf)
in: userProfileConf
)
if userProfileConfResult.needsDump {
@ -57,23 +81,13 @@ enum _012_SharedUtilChanges: Migration {
.createDump(
conf: userProfileConf,
for: .userProfile,
publicKey: userPublicKey,
messageHashes: nil
publicKey: userPublicKey
)?
.save(db)
}
// Create a dump for the contacts data
struct ContactInfo: FetchableRecord, Decodable, ColumnExpressible {
typealias Columns = CodingKeys
enum CodingKeys: String, CodingKey, ColumnExpression, CaseIterable {
case contact
case profile
}
let contact: Contact
let profile: Profile?
}
// MARK: - Contact Config Dump
let contactsData: [ContactInfo] = try Contact
.including(optional: Contact.profile)
.asRequest(of: ContactInfo.self)
@ -85,8 +99,17 @@ enum _012_SharedUtilChanges: Migration {
cachedData: nil
)
let contactsConfResult: SessionUtil.ConfResult = try SessionUtil.upsert(
contactData: contactsData.map { ($0.contact.id, $0.contact, $0.profile) },
in: Atomic(contactsConf)
contactData: contactsData
.map { data in
(
data.contact.id,
data.contact,
data.profile,
Int32(pinnedThreadIds.firstIndex(of: data.contact.id) ?? 0),
false
)
},
in: contactsConf
)
if contactsConfResult.needsDump {
@ -94,13 +117,13 @@ enum _012_SharedUtilChanges: Migration {
.createDump(
conf: contactsConf,
for: .contacts,
publicKey: userPublicKey,
messageHashes: nil
publicKey: userPublicKey
)?
.save(db)
}
// Create a dump for the convoInfoVolatile data
// MARK: - ConvoInfoVolatile Config Dump
let volatileThreadInfo: [SessionUtil.VolatileThreadInfo] = SessionUtil.VolatileThreadInfo.fetchAll(db)
let convoInfoVolatileConf: UnsafeMutablePointer<config_object>? = try SessionUtil.loadState(
for: .convoInfoVolatile,
@ -109,7 +132,7 @@ enum _012_SharedUtilChanges: Migration {
)
let convoInfoVolatileConfResult: SessionUtil.ConfResult = try SessionUtil.upsert(
convoInfoVolatileChanges: volatileThreadInfo,
in: Atomic(convoInfoVolatileConf)
in: convoInfoVolatileConf
)
if convoInfoVolatileConfResult.needsDump {
@ -117,11 +140,93 @@ enum _012_SharedUtilChanges: Migration {
.createDump(
conf: contactsConf,
for: .convoInfoVolatile,
publicKey: userPublicKey,
messageHashes: nil
publicKey: userPublicKey
)?
.save(db)
}
// MARK: - UserGroups Config Dump
let legacyGroupData: [SessionUtil.LegacyGroupInfo] = try SessionUtil.LegacyGroupInfo.fetchAll(db)
let communityData: [SessionUtil.OpenGroupUrlInfo] = try SessionUtil.OpenGroupUrlInfo.fetchAll(db)
let userGroupsConf: UnsafeMutablePointer<config_object>? = try SessionUtil.loadState(
for: .userGroups,
secretKey: secretKey,
cachedData: nil
)
let userGroupConfResult1: SessionUtil.ConfResult = try SessionUtil.upsert(
legacyGroups: legacyGroupData,
in: userGroupsConf
)
let userGroupConfResult2: SessionUtil.ConfResult = try SessionUtil.upsert(
communities: communityData.map { ($0, nil) },
in: userGroupsConf
)
if userGroupConfResult1.needsDump || userGroupConfResult2.needsDump {
try SessionUtil
.createDump(
conf: userGroupsConf,
for: .userGroups,
publicKey: userPublicKey
)?
.save(db)
}
// MARK: - Pinned thread priorities
struct PinnedTeadInfo: Decodable, FetchableRecord {
let id: String
let creationDateTimestamp: TimeInterval
let maxInteractionTimestampMs: Int64?
var targetTimestamp: Int64 {
(maxInteractionTimestampMs ?? Int64(creationDateTimestamp * 1000))
}
}
// At the time of writing the thread sorting was 'pinned (flag), most recent interaction
// timestamp, thread creation timestamp'
let thread: TypedTableAlias<SessionThread> = TypedTableAlias()
let interaction: TypedTableAlias<Interaction> = TypedTableAlias()
let pinnedThreads: [PinnedTeadInfo] = try SessionThread
.select(.id, .creationDateTimestamp)
.filter(SessionThread.Columns.isPinned == true)
.annotated(with: SessionThread.interactions.max(Interaction.Columns.timestampMs))
.asRequest(of: PinnedTeadInfo.self)
.fetchAll(db)
.sorted { lhs, rhs in lhs.targetTimestamp > rhs.targetTimestamp }
// Update the pinned thread priorities
try SessionUtil
.updateThreadPrioritiesIfNeeded(db, [SessionThread.Columns.pinnedPriority.set(to: 0)], [])
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration
}
// MARK: Fetchable Types
struct ContactInfo: FetchableRecord, Decodable, ColumnExpressible {
typealias Columns = CodingKeys
enum CodingKeys: String, CodingKey, ColumnExpression, CaseIterable {
case contact
case profile
}
let contact: Contact
let profile: Profile?
}
struct GroupInfo: FetchableRecord, Decodable, ColumnExpressible {
typealias Columns = CodingKeys
enum CodingKeys: String, CodingKey, ColumnExpression, CaseIterable {
case closedGroup
case disappearingMessagesConfiguration
case groupMembers
}
let closedGroup: ClosedGroup
let disappearingMessagesConfiguration: DisappearingMessagesConfiguration?
let groupMembers: [GroupMember]
}
}

View File

@ -8,7 +8,7 @@ import SessionUtilitiesKit
public struct ClosedGroup: Codable, Identifiable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
public static var databaseTableName: String { "closedGroup" }
internal static let threadForeignKey = ForeignKey([Columns.threadId], to: [SessionThread.Columns.id])
private static let thread = belongsTo(SessionThread.self, using: threadForeignKey)
public static let thread = belongsTo(SessionThread.self, using: threadForeignKey)
internal static let keyPairs = hasMany(
ClosedGroupKeyPair.self,
using: ClosedGroupKeyPair.closedGroupForeignKey
@ -22,6 +22,12 @@ public struct ClosedGroup: Codable, Identifiable, FetchableRecord, PersistableRe
case formationTimestamp
}
/// The Group public key takes up 32 bytes
static let pubkeyByteLength: Int = 32
/// The Group secret key takes up 32 bytes
static let secretKeyByteLength: Int = 32
public var id: String { threadId } // Identifiable
public var publicKey: String { threadId }
@ -95,22 +101,32 @@ public extension ClosedGroup {
static func removeKeysAndUnsubscribe(
_ db: Database? = nil,
threadId: String,
removeGroupData: Bool = false
removeGroupData: Bool,
calledFromConfigHandling: Bool
) throws {
try removeKeysAndUnsubscribe(db, threadIds: [threadId], removeGroupData: removeGroupData)
try removeKeysAndUnsubscribe(
db,
threadIds: [threadId],
removeGroupData: removeGroupData,
calledFromConfigHandling: calledFromConfigHandling
)
}
static func removeKeysAndUnsubscribe(
_ db: Database? = nil,
threadIds: [String],
removeGroupData: Bool = false
removeGroupData: Bool,
calledFromConfigHandling: Bool
) throws {
guard !threadIds.isEmpty else { return }
guard let db: Database = db else {
Storage.shared.write { db in
try ClosedGroup.removeKeysAndUnsubscribe(
db,
threadIds: threadIds,
removeGroupData: removeGroupData)
removeGroupData: removeGroupData,
calledFromConfigHandling: calledFromConfigHandling
)
}
return
}
@ -135,6 +151,17 @@ public extension ClosedGroup {
.filter(threadIds.contains(ClosedGroupKeyPair.Columns.threadId))
.deleteAll(db)
struct ThreadIdVariant: Decodable, FetchableRecord {
let id: String
let variant: SessionThread.Variant
}
let threadVariants: [ThreadIdVariant] = try SessionThread
.select(.id, .variant)
.filter(ids: threadIds)
.asRequest(of: ThreadIdVariant.self)
.fetchAll(db)
// Remove the remaining group data if desired
if removeGroupData {
try SessionThread
@ -149,5 +176,23 @@ public extension ClosedGroup {
.filter(threadIds.contains(GroupMember.Columns.groupId))
.deleteAll(db)
}
// If we weren't called from config handling then we need to remove the group
// data from the config
if !calledFromConfigHandling {
try SessionUtil.remove(
db,
legacyGroupIds: threadVariants
.filter { $0.variant == .legacyGroup }
.map { $0.id }
)
try SessionUtil.remove(
db,
groupIds: threadVariants
.filter { $0.variant == .group }
.map { $0.id }
)
}
}
}

View File

@ -8,7 +8,7 @@ public struct OpenGroup: Codable, Identifiable, FetchableRecord, PersistableReco
public static var databaseTableName: String { "openGroup" }
internal static let threadForeignKey = ForeignKey([Columns.threadId], to: [SessionThread.Columns.id])
private static let thread = belongsTo(SessionThread.self, using: threadForeignKey)
private static let members = hasMany(GroupMember.self, using: GroupMember.openGroupForeignKey)
public static let members = hasMany(GroupMember.self, using: GroupMember.openGroupForeignKey)
public typealias Columns = CodingKeys
public enum CodingKeys: String, CodingKey, ColumnExpression {
@ -61,6 +61,9 @@ public struct OpenGroup: Codable, Identifiable, FetchableRecord, PersistableReco
static let all: Permissions = [ .read, .write, .upload ]
}
/// The Community public key takes up 32 bytes
static let pubkeyByteLength: Int = 32
public var id: String { threadId } // Identifiable
/// The id for the thread this open group belongs to
@ -219,10 +222,6 @@ public extension OpenGroup {
// Always force the server to lowercase
return "\(server.lowercased()).\(roomToken)"
}
static func urlFor(server: String, roomToken: String, publicKey: String) -> String {
return "\(server)/\(roomToken)?public_key=\(publicKey)"
}
}
extension OpenGroup: CustomStringConvertible, CustomDebugStringConvertible {

View File

@ -11,7 +11,7 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
public static let contact = hasOne(Contact.self, using: Contact.threadForeignKey)
public static let closedGroup = hasOne(ClosedGroup.self, using: ClosedGroup.threadForeignKey)
public static let openGroup = hasOne(OpenGroup.self, using: OpenGroup.threadForeignKey)
private static let disappearingMessagesConfiguration = hasOne(
public static let disappearingMessagesConfiguration = hasOne(
DisappearingMessagesConfiguration.self,
using: DisappearingMessagesConfiguration.threadForeignKey
)
@ -33,6 +33,7 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
case mutedUntilTimestamp
case onlyNotifyForMentions
case markedAsUnread
case pinnedPriority
}
public enum Variant: Int, Codable, Hashable, DatabaseValueConvertible {
@ -60,7 +61,8 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
public let shouldBeVisible: Bool
/// A flag indicating whether the thread is pinned
public let isPinned: Bool
@available(*, unavailable, message: "use 'pinnedPriority' instead")
public let isPinned: Bool = false
/// The value the user started entering into the input field before they left the conversation screen
public let messageDraft: String?
@ -79,6 +81,9 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
/// A flag indicating whether this thread has been manually marked as unread by the user
public let markedAsUnread: Bool?
/// A value indicating the priority of this conversation within the pinned conversations
public let pinnedPriority: Int32?
// MARK: - Relationships
public var contact: QueryInterfaceRequest<Contact> {
@ -117,18 +122,21 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
notificationSound: Preferences.Sound? = nil,
mutedUntilTimestamp: TimeInterval? = nil,
onlyNotifyForMentions: Bool = false,
markedAsUnread: Bool? = false
markedAsUnread: Bool? = false,
pinnedPriority: Int32? = nil
) {
self.id = id
self.variant = variant
self.creationDateTimestamp = creationDateTimestamp
self.shouldBeVisible = shouldBeVisible
self.isPinned = isPinned
self.messageDraft = messageDraft
self.notificationSound = notificationSound
self.mutedUntilTimestamp = mutedUntilTimestamp
self.onlyNotifyForMentions = onlyNotifyForMentions
self.markedAsUnread = markedAsUnread
self.pinnedPriority = ((pinnedPriority ?? 0) > 0 ? pinnedPriority :
(isPinned ? 1 : 0)
)
}
// MARK: - Custom Database Interaction
@ -143,19 +151,19 @@ public struct SessionThread: Codable, Identifiable, Equatable, FetchableRecord,
public extension SessionThread {
func with(
shouldBeVisible: Bool? = nil,
isPinned: Bool? = nil
pinnedPriority: Int32? = nil
) -> SessionThread {
return SessionThread(
id: id,
variant: variant,
creationDateTimestamp: creationDateTimestamp,
shouldBeVisible: (shouldBeVisible ?? self.shouldBeVisible),
isPinned: (isPinned ?? self.isPinned),
messageDraft: messageDraft,
notificationSound: notificationSound,
mutedUntilTimestamp: mutedUntilTimestamp,
onlyNotifyForMentions: onlyNotifyForMentions,
markedAsUnread: markedAsUnread
markedAsUnread: markedAsUnread,
pinnedPriority: (pinnedPriority ?? self.pinnedPriority)
)
}
}
@ -190,6 +198,43 @@ public extension SessionThread {
.defaulting(to: false) == false
)
}
static func refreshPinnedPriorities(_ db: Database, adding threadId: String) throws {
struct PinnedPriority: TableRecord, ColumnExpressible {
public typealias Columns = CodingKeys
public enum CodingKeys: String, CodingKey, ColumnExpression {
case id
case rowIndex
}
}
let thread: TypedTableAlias<SessionThread> = TypedTableAlias()
let pinnedPriority: TypedTableAlias<PinnedPriority> = TypedTableAlias()
let rowIndexLiteral: SQL = SQL(stringLiteral: PinnedPriority.Columns.rowIndex.name)
let pinnedPriorityLiteral: SQL = SQL(stringLiteral: SessionThread.Columns.pinnedPriority.name)
try db.execute(literal: """
WITH \(PinnedPriority.self) AS (
SELECT
\(thread[.id]),
ROW_NUMBER() OVER (
ORDER BY \(SQL("\(thread[.id]) != \(threadId)")),
\(thread[.pinnedPriority]) ASC
) AS \(rowIndexLiteral)
FROM \(SessionThread.self)
WHERE
\(thread[.pinnedPriority]) > 0 OR
\(SQL("\(thread[.id]) = \(threadId)"))
)
UPDATE \(SessionThread.self)
SET \(pinnedPriorityLiteral) = (
SELECT \(pinnedPriority[.rowIndex])
FROM \(PinnedPriority.self)
WHERE \(pinnedPriority[.id]) = \(thread[.id])
)
""")
}
}
// MARK: - Convenience

View File

@ -13,7 +13,6 @@ public struct ConfigDump: Codable, Equatable, Hashable, FetchableRecord, Persist
case variant
case publicKey
case data
case combinedMessageHashes
}
public enum Variant: String, Codable, DatabaseValueConvertible {
@ -34,37 +33,19 @@ public struct ConfigDump: Codable, Equatable, Hashable, FetchableRecord, Persist
/// The data for this dump
public let data: Data
/// A comma delimited array of message hashes for previously stored messages on the server
private let combinedMessageHashes: String?
/// An array of message hashes for previously stored messages on the server
var messageHashes: [String]? { ConfigDump.messageHashes(from: combinedMessageHashes) }
internal init(
variant: Variant,
publicKey: String,
data: Data,
messageHashes: [String]?
data: Data
) {
self.variant = variant
self.publicKey = publicKey
self.data = data
self.combinedMessageHashes = ConfigDump.combinedMessageHashes(from: messageHashes)
}
}
// MARK: - Convenience
public extension ConfigDump {
static func combinedMessageHashes(from messageHashes: [String]?) -> String? {
return messageHashes?.joined(separator: ",")
}
static func messageHashes(from combinedMessageHashes: String?) -> [String]? {
return combinedMessageHashes?.components(separatedBy: ",")
}
}
public extension ConfigDump.Variant {
static let userVariants: [ConfigDump.Variant] = [
.userProfile, .contacts, .convoInfoVolatile, .userGroups
@ -89,13 +70,15 @@ public extension ConfigDump.Variant {
}
/// This value defines the order that the SharedConfigMessages should be processed in, while we re-process config
/// messages every time we poll this will prevent an edge-case where we have `convoInfoVolatile` data related
/// to a new conversation which hasn't been created yet because it's associated `contacts`/`userGroups` message
/// hasn't yet been processed (without this we would have to wait until the next poll for it to be processed correctly)
/// messages every time we poll this will prevent an edge-case where data/logic between different config messages
/// could be dependent on each other (eg. there could be `convoInfoVolatile` data related
/// which hasn't been created yet because it's associated `contacts`/`userGroups` message hasn't yet been
/// processed (without this we would have to wait until the next poll for it to be processed correctly)
var processingOrder: Int {
switch self {
case .userProfile, .contacts, .userGroups: return 0
case .convoInfoVolatile: return 1
case .userProfile, .contacts: return 0
case .userGroups: return 1
case .convoInfoVolatile: return 2
}
}
}

View File

@ -38,19 +38,9 @@ public enum ConfigurationSyncJob: JobExecutor {
// as the user doesn't exist yet (this will get triggered on the first launch of a
// fresh install due to the migrations getting run)
guard
let pendingSwarmConfigChanges: [SingleDestinationChanges] = Storage.shared
.read({ db in try SessionUtil.pendingChanges(db) })?
.grouped(by: { $0.destination })
.map({ (destination: Message.Destination, value: [SessionUtil.OutgoingConfResult]) -> SingleDestinationChanges in
SingleDestinationChanges(
destination: destination,
messages: value,
allOldHashes: value
.map { ($0.oldMessageHashes ?? []) }
.reduce([], +)
.asSet()
)
})
let publicKey: String = job.threadId,
let pendingConfigChanges: [SessionUtil.OutgoingConfResult] = Storage.shared
.read({ db in try SessionUtil.pendingChanges(db, publicKey: publicKey) })
else {
failure(job, StorageError.generic, false)
return
@ -58,137 +48,69 @@ public enum ConfigurationSyncJob: JobExecutor {
// If there are no pending changes then the job can just complete (next time something
// is updated we want to try and run immediately so don't schedule another run in this case)
guard !pendingSwarmConfigChanges.isEmpty else {
guard !pendingConfigChanges.isEmpty else {
success(job, true)
return
}
// Identify the destination and merge all obsolete hashes into a single set
let destination: Message.Destination = (publicKey == getUserHexEncodedPublicKey() ?
Message.Destination.contact(publicKey: publicKey) :
Message.Destination.closedGroup(groupPublicKey: publicKey)
)
let allObsoleteHashes: Set<String> = pendingConfigChanges
.map { $0.obsoleteHashes }
.reduce([], +)
.asSet()
Storage.shared
.readPublisher(receiveOn: queue) { db in
try pendingSwarmConfigChanges
.map { (change: SingleDestinationChanges) -> (messages: [TargetedMessage], allOldHashes: Set<String>) in
(
messages: try change.messages
.map { (outgoingConf: SessionUtil.OutgoingConfResult) -> TargetedMessage in
TargetedMessage(
sendData: try MessageSender.preparedSendData(
db,
message: outgoingConf.message,
to: change.destination,
interactionId: nil
),
namespace: outgoingConf.namespace,
oldHashes: (outgoingConf.oldMessageHashes ?? [])
)
},
allOldHashes: change.allOldHashes
)
}
}
.flatMap { (pendingSwarmChange: [(messages: [TargetedMessage], allOldHashes: Set<String>)]) -> AnyPublisher<[HTTP.BatchResponse], Error> in
Publishers
.MergeMany(
pendingSwarmChange
.map { (messages: [TargetedMessage], oldHashes: Set<String>) in
// Note: We do custom sending logic here because we want to batch the
// sending and deletion of messages within the same swarm
SnodeAPI
.sendConfigMessages(
messages
.compactMap { targetedMessage -> SnodeAPI.TargetedMessage? in
targetedMessage.sendData.snodeMessage
.map { ($0, targetedMessage.namespace) }
},
oldHashes: Array(oldHashes)
)
}
try pendingConfigChanges.map { change -> MessageSender.PreparedSendData in
try MessageSender.preparedSendData(
db,
message: change.message,
to: destination,
namespace: change.namespace,
interactionId: nil
)
}
}
.flatMap { (changes: [MessageSender.PreparedSendData]) -> AnyPublisher<HTTP.BatchResponse, Error> in
SnodeAPI
.sendConfigMessages(
changes.compactMap { change in
guard
let namespace: SnodeAPI.Namespace = change.namespace,
let snodeMessage: SnodeMessage = change.snodeMessage
else { return nil }
return (snodeMessage, namespace)
},
allObsoleteHashes: Array(allObsoleteHashes)
)
.collect()
.eraseToAnyPublisher()
}
.receive(on: queue)
.tryMap { (responses: [HTTP.BatchResponse]) -> [SuccessfulChange] in
// We make a sequence call for this so it's possible to get fewer responses than
// expected so if that happens fail and re-run later
guard responses.count == pendingSwarmConfigChanges.count else {
throw HTTPError.invalidResponse
}
// Process the response data into an easy to understand for (this isn't strictly
// needed but the code gets convoluted without this)
return zip(responses, pendingSwarmConfigChanges)
.compactMap { (batchResponse: HTTP.BatchResponse, pendingSwarmChange: SingleDestinationChanges) -> [SuccessfulChange]? in
let maybePublicKey: String? = {
switch pendingSwarmChange.destination {
case .contact(let publicKey), .closedGroup(let publicKey):
return publicKey
default: return nil
}
}()
.map { (response: HTTP.BatchResponse) -> [ConfigDump] in
/// The number of responses returned might not match the number of changes sent but they will be returned
/// in the same order, this means we can just `zip` the two arrays as it will take the smaller of the two and
/// correctly align the response to the change
zip(response.responses, pendingConfigChanges)
.compactMap { (subResponse: Codable, change: SessionUtil.OutgoingConfResult) in
/// If the request wasn't successful then just ignore it (the next time we sync this config we will try
/// to send the changes again)
guard
let typedResponse: HTTP.BatchSubResponse<SendMessagesResponse> = (subResponse as? HTTP.BatchSubResponse<SendMessagesResponse>),
200...299 ~= typedResponse.code,
!typedResponse.failedToParseBody,
let sendMessageResponse: SendMessagesResponse = typedResponse.body
else { return nil }
// If we don't have a publicKey then this is an invalid config
guard let publicKey: String = maybePublicKey else { return nil }
// Need to know if we successfully deleted old messages (if we didn't then
// we want to keep the old hashes so we can delete them the next time)
let didDeleteOldConfigMessages: Bool = {
guard
let subResponse: HTTP.BatchSubResponse<DeleteMessagesResponse> = (batchResponse.responses.last as? HTTP.BatchSubResponse<DeleteMessagesResponse>),
200...299 ~= subResponse.code
else { return false }
return true
}()
return zip(batchResponse.responses, pendingSwarmChange.messages)
.reduce(into: []) { (result: inout [SuccessfulChange], next: ResponseChange) in
// If the request wasn't successful then just ignore it (the next
// config sync will try make the changes again
guard
let subResponse: HTTP.BatchSubResponse<SendMessagesResponse> = (next.response as? HTTP.BatchSubResponse<SendMessagesResponse>),
200...299 ~= subResponse.code,
!subResponse.failedToParseBody,
let sendMessageResponse: SendMessagesResponse = subResponse.body
else { return }
result.append(
SuccessfulChange(
message: next.change.message,
publicKey: publicKey,
updatedHashes: (didDeleteOldConfigMessages ?
[sendMessageResponse.hash] :
(next.change.oldMessageHashes ?? [])
.appending(sendMessageResponse.hash)
)
)
)
}
}
.flatMap { $0 }
}
.map { (successfulChanges: [SuccessfulChange]) -> [ConfigDump] in
// Now that we have the successful changes, we need to mark them as pushed and
// generate any config dumps which need to be stored
successfulChanges
.compactMap { successfulChange -> ConfigDump? in
// Updating the pushed state returns a flag indicating whether the config
// needs to be dumped
guard SessionUtil.markAsPushed(message: successfulChange.message, publicKey: successfulChange.publicKey) else {
return nil
}
let variant: ConfigDump.Variant = successfulChange.message.kind.configDumpVariant
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: variant,
publicKey: successfulChange.publicKey
)
return try? SessionUtil.createDump(
conf: atomicConf.wrappedValue,
for: variant,
publicKey: successfulChange.publicKey,
messageHashes: successfulChange.updatedHashes
/// Since this change was successful we need to mark it as pushed and generate any config dumps
/// which need to be stored
return SessionUtil.markingAsPushed(
message: change.message,
serverHash: sendMessageResponse.hash,
publicKey: publicKey
)
}
}
@ -219,6 +141,7 @@ public enum ConfigurationSyncJob: JobExecutor {
let existingJob: Job = try? Job
.filter(Job.Columns.id != job.id)
.filter(Job.Columns.variant == Job.Variant.configurationSync)
.filter(Job.Columns.threadId == publicKey)
.fetchOne(db)
{
// If the next job isn't currently running then delay its start time
@ -245,39 +168,11 @@ public enum ConfigurationSyncJob: JobExecutor {
}
}
// MARK: - Convenience Types
public extension ConfigurationSyncJob {
fileprivate struct SingleDestinationChanges {
let destination: Message.Destination
let messages: [SessionUtil.OutgoingConfResult]
let allOldHashes: Set<String>
}
fileprivate struct TargetedMessage {
let sendData: MessageSender.PreparedSendData
let namespace: SnodeAPI.Namespace
let oldHashes: [String]
}
typealias ResponseChange = (response: Codable, change: SessionUtil.OutgoingConfResult)
fileprivate struct SuccessfulChange {
let message: SharedConfigMessage
let publicKey: String
let updatedHashes: [String]
}
}
// MARK: - Convenience
public extension ConfigurationSyncJob {
static func enqueue(_ db: Database? = nil) {
guard let db: Database = db else {
Storage.shared.writeAsync { ConfigurationSyncJob.enqueue($0) }
return
}
static func enqueue(_ db: Database, publicKey: String) {
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
guard Features.useSharedUtilForUserConfig else {
// If we don't have a userKeyPair (or name) yet then there is no need to sync the
@ -310,15 +205,16 @@ public extension ConfigurationSyncJob {
// to add another one)
JobRunner.upsert(
db,
job: ConfigurationSyncJob.createOrUpdateIfNeeded(db)
job: ConfigurationSyncJob.createOrUpdateIfNeeded(db, publicKey: publicKey)
)
}
@discardableResult static func createOrUpdateIfNeeded(_ db: Database) -> Job {
@discardableResult static func createOrUpdateIfNeeded(_ db: Database, publicKey: String) -> Job {
// Try to get an existing job (if there is one that's not running)
if
let existingJobs: [Job] = try? Job
.filter(Job.Columns.variant == Job.Variant.configurationSync)
.filter(Job.Columns.threadId == publicKey)
.fetchAll(db),
let existingJob: Job = existingJobs.first(where: { !JobRunner.isCurrentlyRunning($0) })
{
@ -328,7 +224,8 @@ public extension ConfigurationSyncJob {
// Otherwise create a new job
return Job(
variant: .configurationSync,
behaviour: .recurring
behaviour: .recurring,
threadId: publicKey
)
}
@ -348,6 +245,7 @@ public extension ConfigurationSyncJob {
db,
message: try ConfigurationMessage.getCurrent(db),
to: Message.Destination.contact(publicKey: publicKey),
namespace: .default,
interactionId: nil
)
}

View File

@ -116,6 +116,8 @@ public enum GarbageCollectionJob: JobExecutor {
LEFT JOIN \(SessionThread.self) ON \(thread[.id]) = \(job[.threadId])
LEFT JOIN \(Interaction.self) ON \(interaction[.id]) = \(job[.interactionId])
WHERE (
-- Never delete config sync jobs, even if their threads were deleted
\(SQL("\(job[.variant]) != \(Job.Variant.configurationSync)")) AND
(
\(job[.threadId]) IS NOT NULL AND
\(thread[.id]) IS NULL

View File

@ -170,6 +170,7 @@ public enum MessageSendJob: JobExecutor {
db,
message: details.message,
to: details.destination,
namespace: details.destination.defaultNamespace,
interactionId: job.interactionId
)
}

View File

@ -43,6 +43,7 @@ public enum SendReadReceiptsJob: JobExecutor {
timestamps: details.timestampMsValues.map { UInt64($0) }
),
to: details.destination,
namespace: details.destination.defaultNamespace,
interactionId: nil,
isSyncMessage: false
)

View File

@ -10,9 +10,9 @@ internal extension SessionUtil {
static func handleContactsUpdate(
_ db: Database,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>,
mergeResult: ConfResult
) throws -> ConfResult {
in conf: UnsafeMutablePointer<config_object>?,
mergeNeedsDump: Bool
) throws {
typealias ContactData = [
String: (
contact: Contact,
@ -21,60 +21,53 @@ internal extension SessionUtil {
)
]
guard mergeResult.needsDump else { return mergeResult }
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
guard mergeNeedsDump else { return }
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in it's `mutate` closure
let contactData: ContactData = atomicConf.mutate { conf -> ContactData in
var contactData: ContactData = [:]
var contact: contacts_contact = contacts_contact()
let contactIterator: UnsafeMutablePointer<contacts_iterator> = contacts_iterator_new(conf)
while !contacts_iterator_done(contactIterator, &contact) {
let contactId: String = String(cString: withUnsafeBytes(of: contact.session_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
let contactResult: Contact = Contact(
id: contactId,
isApproved: contact.approved,
isBlocked: contact.blocked,
didApproveMe: contact.approved_me
)
let profilePictureUrl: String? = String(libSessionVal: contact.profile_pic.url, nullIfEmpty: true)
let profileResult: Profile = Profile(
id: contactId,
name: (String(libSessionVal: contact.name) ?? ""),
nickname: String(libSessionVal: contact.nickname, nullIfEmpty: true),
profilePictureUrl: profilePictureUrl,
profileEncryptionKey: (profilePictureUrl == nil ? nil :
Data(
libSessionVal: contact.profile_pic.key,
count: ProfileManager.avatarAES256KeyByteLength
)
var contactData: ContactData = [:]
var contact: contacts_contact = contacts_contact()
let contactIterator: UnsafeMutablePointer<contacts_iterator> = contacts_iterator_new(conf)
while !contacts_iterator_done(contactIterator, &contact) {
let contactId: String = String(cString: withUnsafeBytes(of: contact.session_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
let contactResult: Contact = Contact(
id: contactId,
isApproved: contact.approved,
isBlocked: contact.blocked,
didApproveMe: contact.approved_me
)
let profilePictureUrl: String? = String(libSessionVal: contact.profile_pic.url, nullIfEmpty: true)
let profileResult: Profile = Profile(
id: contactId,
name: String(libSessionVal: contact.name),
nickname: String(libSessionVal: contact.nickname, nullIfEmpty: true),
profilePictureUrl: profilePictureUrl,
profileEncryptionKey: (profilePictureUrl == nil ? nil :
Data(
libSessionVal: contact.profile_pic.key,
count: ProfileManager.avatarAES256KeyByteLength
)
)
contactData[contactId] = (
contactResult,
profileResult,
false
)
contacts_iterator_advance(contactIterator)
}
contacts_iterator_free(contactIterator) // Need to free the iterator
)
return contactData
contactData[contactId] = (
contactResult,
profileResult,
)
contacts_iterator_advance(contactIterator)
}
contacts_iterator_free(contactIterator) // Need to free the iterator
// The current user's contact data is handled separately so exclude it if it's present (as that's
// actually a bug)
let userPublicKey: String = getUserHexEncodedPublicKey()
let userPublicKey: String = getUserHexEncodedPublicKey(db)
let targetContactData: ContactData = contactData.filter { $0.key != userPublicKey }
// If we only updated the current user contact then no need to continue
guard !targetContactData.isEmpty else { return mergeResult }
guard !targetContactData.isEmpty else { return }
// Since we don't sync 100% of the data stored against the contact and profile objects we
// need to only update the data we do have to ensure we don't overwrite anything that doesn't
@ -141,79 +134,100 @@ internal extension SessionUtil {
].compactMap { $0 }
)
}
/// If the contact's `hidden` flag doesn't match the visibility of their conversation then create/delete the
/// associated contact conversation accordingly
let threadExists: Bool = try SessionThread.exists(db, id: contact.id)
if data.isHiddenConversation && threadExists {
try SessionThread
.deleteOne(db, id: contact.id)
}
else if !data.isHiddenConversation && !threadExists {
try SessionThread(id: contact.id, variant: .contact)
.save(db)
}
}
return mergeResult
}
// MARK: - Outgoing Changes
typealias ContactData = (
id: String,
contact: Contact?,
profile: Profile?,
priority: Int32?,
hidden: Bool?
)
static func upsert(
contactData: [(id: String, contact: Contact?, profile: Profile?)],
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>
contactData: [ContactData],
in conf: UnsafeMutablePointer<config_object>?
) throws -> ConfResult {
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// The current user's contact data doesn't need to sync so exclude it
let userPublicKey: String = getUserHexEncodedPublicKey()
let targetContacts: [(id: String, contact: Contact?, profile: Profile?)] = contactData
.filter { $0.id != userPublicKey }
let targetContacts: [(id: String, contact: Contact?, profile: Profile?, priority: Int32?, hidden: Bool?)] = contactData
// If we only updated the current user contact then no need to continue
guard !targetContacts.isEmpty else { return ConfResult(needsPush: false, needsDump: false) }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
return atomicConf.mutate { conf in
// Update the name
targetContacts
.forEach { (id, maybeContact, maybeProfile) in
var sessionId: [CChar] = id.cArray
var contact: contacts_contact = contacts_contact()
guard contacts_get_or_construct(conf, &contact, &sessionId) else {
SNLog("Unable to upsert contact from Config Message")
return
}
// Assign all properties to match the updated contact (if there is one)
if let updatedContact: Contact = maybeContact {
contact.approved = updatedContact.isApproved
contact.approved_me = updatedContact.didApproveMe
contact.blocked = updatedContact.isBlocked
// Store the updated contact (needs to happen before variables go out of scope)
contacts_set(conf, &contact)
}
// Update the profile data (if there is one - users we have sent a message request to may
// not have profile info in certain situations)
if let updatedProfile: Profile = maybeProfile {
let oldAvatarUrl: String? = String(libSessionVal: contact.profile_pic.url)
let oldAvatarKey: Data? = Data(
libSessionVal: contact.profile_pic.key,
count: ProfileManager.avatarAES256KeyByteLength
)
contact.name = updatedProfile.name.toLibSession()
contact.nickname = updatedProfile.nickname.toLibSession()
contact.profile_pic.url = updatedProfile.profilePictureUrl.toLibSession()
contact.profile_pic.key = updatedProfile.profileEncryptionKey.toLibSession()
// Download the profile picture if needed
if oldAvatarUrl != updatedProfile.profilePictureUrl || oldAvatarKey != updatedProfile.profileEncryptionKey {
ProfileManager.downloadAvatar(for: updatedProfile)
}
// Store the updated contact (needs to happen before variables go out of scope)
contacts_set(conf, &contact)
}
// Update the name
targetContacts
.forEach { (id, maybeContact, maybeProfile, priority, hidden) in
var sessionId: [CChar] = id.cArray
var contact: contacts_contact = contacts_contact()
guard contacts_get_or_construct(conf, &contact, &sessionId) else {
SNLog("Unable to upsert contact from Config Message")
return
}
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
// Assign all properties to match the updated contact (if there is one)
if let updatedContact: Contact = maybeContact {
contact.approved = updatedContact.isApproved
contact.approved_me = updatedContact.didApproveMe
contact.blocked = updatedContact.isBlocked
// Store the updated contact (needs to happen before variables go out of scope)
contacts_set(conf, &contact)
}
// Update the profile data (if there is one - users we have sent a message request to may
// not have profile info in certain situations)
if let updatedProfile: Profile = maybeProfile {
let oldAvatarUrl: String? = String(libSessionVal: contact.profile_pic.url)
let oldAvatarKey: Data? = Data(
libSessionVal: contact.profile_pic.key,
count: ProfileManager.avatarAES256KeyByteLength
)
contact.name = updatedProfile.name.toLibSession()
contact.nickname = updatedProfile.nickname.toLibSession()
contact.profile_pic.url = updatedProfile.profilePictureUrl.toLibSession()
contact.profile_pic.key = updatedProfile.profileEncryptionKey.toLibSession()
// Download the profile picture if needed
if oldAvatarUrl != updatedProfile.profilePictureUrl || oldAvatarKey != updatedProfile.profileEncryptionKey {
ProfileManager.downloadAvatar(for: updatedProfile)
}
// Store the updated contact (needs to happen before variables go out of scope)
contacts_set(conf, &contact)
}
// Store the updated contact (can't be sure if we made any changes above)
contact.hidden = (hidden ?? contact.hidden)
contact.priority = (priority ?? contact.priority)
contacts_set(conf, &contact)
}
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
}
@ -230,38 +244,34 @@ internal extension SessionUtil {
// If we only updated the current user contact then no need to continue
guard !targetContacts.isEmpty else { return updated }
db.afterNextTransactionNested { db in
do {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .contacts,
publicKey: userPublicKey
)
do {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .contacts,
publicKey: userPublicKey
)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
try atomicConf.mutate { conf in
let result: ConfResult = try SessionUtil
.upsert(
contactData: targetContacts.map { (id: $0.id, contact: $0, profile: nil) },
in: atomicConf
contactData: targetContacts.map { ($0.id, $0, nil, nil, nil) },
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey,
messageHashes: nil
)
}
)
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey
)?.save(db)
}
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
return updated
}
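Because the removed and added variants are interleaved above, here is the new contacts flow consolidated into one clean sketch: acquire the config once, work with the raw `conf` pointer inside a single `mutate` block, and dump/save inline. All names come from the hunks in this file; the surrounding module context (`db`, `userPublicKey`, `targetContacts`) is assumed.
try SessionUtil
    .config(for: .contacts, publicKey: userPublicKey)
    .mutate { conf in
        let result: ConfResult = try SessionUtil.upsert(
            contactData: targetContacts.map { ($0.id, $0, nil, nil, nil) }, // (id, contact, profile, priority, hidden)
            in: conf
        )
        // Only persist a new dump when the config actually changed
        guard result.needsDump else { return }
        try SessionUtil.createDump(
            conf: conf,
            for: .contacts,
            publicKey: userPublicKey
        )?.save(db)
    }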
@ -285,68 +295,64 @@ internal extension SessionUtil {
// Get the user public key (updating their profile is handled separately)
let userPublicKey: String = getUserHexEncodedPublicKey(db)
let targetProfiles: [Profile] = updatedProfiles
.filter {
$0.id != userPublicKey &&
existingContactIds.contains($0.id)
}
db.afterNextTransactionNested { db in
do {
// Update the user profile first (if needed)
if let updatedUserProfile: Profile = updatedProfiles.first(where: { $0.id == userPublicKey }) {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
do {
// Update the user profile first (if needed)
if let updatedUserProfile: Profile = updatedProfiles.first(where: { $0.id == userPublicKey }) {
try SessionUtil
.config(
for: .userProfile,
publicKey: userPublicKey
)
let result: ConfResult = try SessionUtil.update(
profile: updatedUserProfile,
in: atomicConf
)
if result.needsDump {
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .userProfile,
publicKey: userPublicKey,
messageHashes: nil
)
}
.mutate { conf in
let result: ConfResult = try SessionUtil.update(
profile: updatedUserProfile,
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.createDump(
conf: conf,
for: .userProfile,
publicKey: userPublicKey
)?.save(db)
}
}
// Then update other contacts
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
}
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
try SessionUtil
.config(
for: .contacts,
publicKey: userPublicKey
)
let result: ConfResult = try SessionUtil
.upsert(
contactData: updatedProfiles
.filter { $0.id != userPublicKey }
.map { (id: $0.id, contact: nil, profile: $0) },
in: atomicConf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey,
messageHashes: nil
.mutate { conf in
let result: ConfResult = try SessionUtil
.upsert(
contactData: targetProfiles
.map { ($0.id, nil, $0, nil, nil) },
in: conf
)
}
)
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey
)?.save(db)
}
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
return updated

View File

@ -6,103 +6,88 @@ import SessionUtil
import SessionUtilitiesKit
internal extension SessionUtil {
static let columnsRelatedToConvoInfoVolatile: [ColumnExpression] = [
// Note: We intentionally exclude 'Interaction.Columns.wasRead' from here as we want to
// manually manage triggering config updates from marking as read
SessionThread.Columns.markedAsUnread
]
// MARK: - Incoming Changes
static func handleConvoInfoVolatileUpdate(
_ db: Database,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>,
mergeResult: ConfResult
) throws -> ConfResult {
guard mergeResult.needsDump else { return mergeResult }
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
in conf: UnsafeMutablePointer<config_object>?,
mergeNeedsDump: Bool
) throws {
guard mergeNeedsDump else { return }
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let volatileThreadInfo: [VolatileThreadInfo] = atomicConf.mutate { conf -> [VolatileThreadInfo] in
var volatileThreadInfo: [VolatileThreadInfo] = []
var oneToOne: convo_info_volatile_1to1 = convo_info_volatile_1to1()
var community: convo_info_volatile_community = convo_info_volatile_community()
var legacyGroup: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
let convoIterator: OpaquePointer = convo_info_volatile_iterator_new(conf)
var volatileThreadInfo: [VolatileThreadInfo] = []
var oneToOne: convo_info_volatile_1to1 = convo_info_volatile_1to1()
var community: convo_info_volatile_community = convo_info_volatile_community()
var legacyGroup: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
let convoIterator: OpaquePointer = convo_info_volatile_iterator_new(conf)
while !convo_info_volatile_iterator_done(convoIterator) {
if convo_info_volatile_it_is_1to1(convoIterator, &oneToOne) {
let sessionId: String = String(cString: withUnsafeBytes(of: oneToOne.session_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
while !convo_info_volatile_iterator_done(convoIterator) {
if convo_info_volatile_it_is_1to1(convoIterator, &oneToOne) {
volatileThreadInfo.append(
VolatileThreadInfo(
threadId: String(libSessionVal: oneToOne.session_id),
variant: .contact,
changes: [
.markedAsUnread(oneToOne.unread),
.lastReadTimestampMs(oneToOne.last_read)
]
)
volatileThreadInfo.append(
VolatileThreadInfo(
threadId: sessionId,
variant: .contact,
changes: [
.markedAsUnread(oneToOne.unread),
.lastReadTimestampMs(oneToOne.last_read)
]
)
)
}
else if convo_info_volatile_it_is_community(convoIterator, &community) {
let server: String = String(cString: withUnsafeBytes(of: community.base_url) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
let roomToken: String = String(cString: withUnsafeBytes(of: community.room) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
let publicKey: String = withUnsafePointer(to: community.pubkey, { pubkeyBytes in
Data(bytes: pubkeyBytes, count: 32).toHexString()
})
volatileThreadInfo.append(
VolatileThreadInfo(
threadId: OpenGroup.idFor(roomToken: roomToken, server: server),
variant: .community,
openGroupUrlInfo: VolatileThreadInfo.OpenGroupUrlInfo(
threadId: OpenGroup.idFor(roomToken: roomToken, server: server),
server: server,
roomToken: roomToken,
publicKey: publicKey
),
changes: [
.markedAsUnread(community.unread),
.lastReadTimestampMs(community.last_read)
]
)
)
}
else if convo_info_volatile_it_is_legacy_group(convoIterator, &legacyGroup) {
let groupId: String = String(cString: withUnsafeBytes(of: legacyGroup.group_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
volatileThreadInfo.append(
VolatileThreadInfo(
threadId: groupId,
variant: .legacyGroup,
changes: [
.markedAsUnread(legacyGroup.unread),
.lastReadTimestampMs(legacyGroup.last_read)
]
)
)
}
else {
SNLog("Ignoring unknown conversation type when iterating through volatile conversation info update")
}
convo_info_volatile_iterator_advance(convoIterator)
)
}
else if convo_info_volatile_it_is_community(convoIterator, &community) {
let server: String = String(libSessionVal: community.base_url)
let roomToken: String = String(libSessionVal: community.room)
let publicKey: String = Data(
libSessionVal: community.pubkey,
count: OpenGroup.pubkeyByteLength
).toHexString()
volatileThreadInfo.append(
VolatileThreadInfo(
threadId: OpenGroup.idFor(roomToken: roomToken, server: server),
variant: .community,
openGroupUrlInfo: OpenGroupUrlInfo(
threadId: OpenGroup.idFor(roomToken: roomToken, server: server),
server: server,
roomToken: roomToken,
publicKey: publicKey
),
changes: [
.markedAsUnread(community.unread),
.lastReadTimestampMs(community.last_read)
]
)
)
}
else if convo_info_volatile_it_is_legacy_group(convoIterator, &legacyGroup) {
volatileThreadInfo.append(
VolatileThreadInfo(
threadId: String(libSessionVal: legacyGroup.group_id),
variant: .legacyGroup,
changes: [
.markedAsUnread(legacyGroup.unread),
.lastReadTimestampMs(legacyGroup.last_read)
]
)
)
}
else {
SNLog("Ignoring unknown conversation type when iterating through volatile conversation info update")
}
convo_info_volatile_iterator_free(convoIterator) // Need to free the iterator
return volatileThreadInfo
convo_info_volatile_iterator_advance(convoIterator)
}
convo_info_volatile_iterator_free(convoIterator) // Need to free the iterator
// If we don't have any conversations then no need to continue
guard !volatileThreadInfo.isEmpty else { return mergeResult }
guard !volatileThreadInfo.isEmpty else { return }
// Get the local volatile thread info from all conversations
let localVolatileThreadInfo: [String: VolatileThreadInfo] = VolatileThreadInfo.fetchAll(db)
@ -171,109 +156,105 @@ internal extension SessionUtil {
}
// If there are no newer local last read timestamps then just return the mergeResult
guard !newerLocalChanges.isEmpty else { return mergeResult }
guard !newerLocalChanges.isEmpty else { return }
return try upsert(
try upsert(
convoInfoVolatileChanges: newerLocalChanges,
in: atomicConf
in: conf
)
}
static func upsert(
@discardableResult static func upsert(
convoInfoVolatileChanges: [VolatileThreadInfo],
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>
in conf: UnsafeMutablePointer<config_object>?
) throws -> ConfResult {
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
return atomicConf.mutate { conf in
convoInfoVolatileChanges.forEach { threadInfo in
var cThreadId: [CChar] = threadInfo.threadId.cArray
switch threadInfo.variant {
case .contact:
var oneToOne: convo_info_volatile_1to1 = convo_info_volatile_1to1()
guard convo_info_volatile_get_or_construct_1to1(conf, &oneToOne, &cThreadId) else {
SNLog("Unable to create contact conversation when updating last read timestamp")
return
}
threadInfo.changes.forEach { change in
switch change {
case .lastReadTimestampMs(let lastReadMs):
oneToOne.last_read = lastReadMs
case .markedAsUnread(let unread):
oneToOne.unread = unread
}
}
convo_info_volatile_set_1to1(conf, &oneToOne)
case .legacyGroup:
var legacyGroup: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
guard convo_info_volatile_get_or_construct_legacy_group(conf, &legacyGroup, &cThreadId) else {
SNLog("Unable to create legacy group conversation when updating last read timestamp")
return
}
threadInfo.changes.forEach { change in
switch change {
case .lastReadTimestampMs(let lastReadMs):
legacyGroup.last_read = lastReadMs
case .markedAsUnread(let unread):
legacyGroup.unread = unread
}
}
convo_info_volatile_set_legacy_group(conf, &legacyGroup)
case .community:
guard
var cBaseUrl: [CChar] = threadInfo.openGroupUrlInfo?.server.cArray,
var cRoomToken: [CChar] = threadInfo.openGroupUrlInfo?.roomToken.cArray,
var cPubkey: [UInt8] = threadInfo.openGroupUrlInfo?.publicKey.bytes
else {
SNLog("Unable to create community conversation when updating last read timestamp due to missing URL info")
return
}
var community: convo_info_volatile_community = convo_info_volatile_community()
guard convo_info_volatile_get_or_construct_community(conf, &community, &cBaseUrl, &cRoomToken, &cPubkey) else {
SNLog("Unable to create legacy group conversation when updating last read timestamp")
return
}
threadInfo.changes.forEach { change in
switch change {
case .lastReadTimestampMs(let lastReadMs):
community.last_read = lastReadMs
case .markedAsUnread(let unread):
community.unread = unread
}
}
convo_info_volatile_set_community(conf, &community)
case .group: return // TODO: Need to add when the type is added to the lib
}
}
convoInfoVolatileChanges.forEach { threadInfo in
var cThreadId: [CChar] = threadInfo.threadId.cArray
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
switch threadInfo.variant {
case .contact:
var oneToOne: convo_info_volatile_1to1 = convo_info_volatile_1to1()
guard convo_info_volatile_get_or_construct_1to1(conf, &oneToOne, &cThreadId) else {
SNLog("Unable to create contact conversation when updating last read timestamp")
return
}
threadInfo.changes.forEach { change in
switch change {
case .lastReadTimestampMs(let lastReadMs):
oneToOne.last_read = lastReadMs
case .markedAsUnread(let unread):
oneToOne.unread = unread
}
}
convo_info_volatile_set_1to1(conf, &oneToOne)
case .legacyGroup:
var legacyGroup: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
guard convo_info_volatile_get_or_construct_legacy_group(conf, &legacyGroup, &cThreadId) else {
SNLog("Unable to create legacy group conversation when updating last read timestamp")
return
}
threadInfo.changes.forEach { change in
switch change {
case .lastReadTimestampMs(let lastReadMs):
legacyGroup.last_read = lastReadMs
case .markedAsUnread(let unread):
legacyGroup.unread = unread
}
}
convo_info_volatile_set_legacy_group(conf, &legacyGroup)
case .community:
guard
var cBaseUrl: [CChar] = threadInfo.openGroupUrlInfo?.server.cArray,
var cRoomToken: [CChar] = threadInfo.openGroupUrlInfo?.roomToken.cArray,
var cPubkey: [UInt8] = threadInfo.openGroupUrlInfo?.publicKey.bytes
else {
SNLog("Unable to create community conversation when updating last read timestamp due to missing URL info")
return
}
var community: convo_info_volatile_community = convo_info_volatile_community()
guard convo_info_volatile_get_or_construct_community(conf, &community, &cBaseUrl, &cRoomToken, &cPubkey) else {
SNLog("Unable to create legacy group conversation when updating last read timestamp")
return
}
threadInfo.changes.forEach { change in
switch change {
case .lastReadTimestampMs(let lastReadMs):
community.last_read = lastReadMs
case .markedAsUnread(let unread):
community.unread = unread
}
}
convo_info_volatile_set_community(conf, &community)
case .group: return // TODO: Need to add when the type is added to the lib.
}
}
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
}
// MARK: - Convenience
internal extension SessionUtil {
static func updatingThreads<T>(_ db: Database, _ updated: [T]) throws -> [T] {
@discardableResult static func updatingThreadsConvoInfoVolatile<T>(_ db: Database, _ updated: [T]) throws -> [T] {
guard let updatedThreads: [SessionThread] = updated as? [SessionThread] else {
throw StorageError.generic
}
@ -287,42 +268,38 @@ internal extension SessionUtil {
threadId: thread.id,
variant: thread.variant,
openGroupUrlInfo: (thread.variant != .community ? nil :
try VolatileThreadInfo.OpenGroupUrlInfo.fetchOne(db, id: thread.id)
try OpenGroupUrlInfo.fetchOne(db, id: thread.id)
),
changes: [.markedAsUnread(thread.markedAsUnread ?? false)]
)
}
db.afterNextTransactionNested { db in
do {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
do {
try SessionUtil
.config(
for: .convoInfoVolatile,
publicKey: userPublicKey
)
let result: ConfResult = try upsert(
convoInfoVolatileChanges: changes,
in: atomicConf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .convoInfoVolatile,
publicKey: userPublicKey,
messageHashes: nil
)
}
)
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
.mutate { conf in
guard conf != nil else { throw SessionUtilError.nilConfigObject }
let result: ConfResult = try upsert(
convoInfoVolatileChanges: changes,
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.createDump(
conf: conf,
for: .convoInfoVolatile,
publicKey: userPublicKey
)?.save(db)
}
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
return updated
@ -335,44 +312,45 @@ internal extension SessionUtil {
lastReadTimestampMs: Int64
) throws {
let userPublicKey: String = getUserHexEncodedPublicKey(db)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .convoInfoVolatile,
publicKey: userPublicKey
)
let change: VolatileThreadInfo = VolatileThreadInfo(
threadId: threadId,
variant: threadVariant,
openGroupUrlInfo: (threadVariant != .community ? nil :
try VolatileThreadInfo.OpenGroupUrlInfo.fetchOne(db, id: threadId)
try OpenGroupUrlInfo.fetchOne(db, id: threadId)
),
changes: [.lastReadTimestampMs(lastReadTimestampMs)]
)
// Update the conf
let result: ConfResult = try upsert(
convoInfoVolatileChanges: [change],
in: atomicConf
)
// If we need to dump then do so here
if result.needsDump {
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .convoInfoVolatile,
publicKey: userPublicKey,
messageHashes: nil
)
}
let needsPush: Bool = try SessionUtil
.config(
for: .convoInfoVolatile,
publicKey: userPublicKey
)
}
.mutate { conf in
guard conf != nil else { throw SessionUtilError.nilConfigObject }
let result: ConfResult = try upsert(
convoInfoVolatileChanges: [change],
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return result.needsPush }
try SessionUtil.createDump(
conf: conf,
for: .convoInfoVolatile,
publicKey: userPublicKey
)?.save(db)
return result.needsPush
}
// If we need to push then enqueue a 'ConfigurationSyncJob'
if result.needsPush {
ConfigurationSyncJob.enqueue(db)
guard needsPush else { return }
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
}
}
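Since the old and new bodies are interleaved above, this is the new read-timestamp flow consolidated into one sketch: build the `VolatileThreadInfo` change, upsert it against the `convoInfoVolatile` config, dump only when needed, and enqueue a sync only when the config reports a pending push. Names are taken from the hunks; `db`, `threadId`, `threadVariant` and `lastReadTimestampMs` are assumed from the enclosing function.
let change: VolatileThreadInfo = VolatileThreadInfo(
    threadId: threadId,
    variant: threadVariant,
    openGroupUrlInfo: (threadVariant != .community ? nil :
        try OpenGroupUrlInfo.fetchOne(db, id: threadId)
    ),
    changes: [.lastReadTimestampMs(lastReadTimestampMs)]
)
let needsPush: Bool = try SessionUtil
    .config(for: .convoInfoVolatile, publicKey: userPublicKey)
    .mutate { conf in
        guard conf != nil else { throw SessionUtilError.nilConfigObject }
        let result: ConfResult = try SessionUtil.upsert(convoInfoVolatileChanges: [change], in: conf)
        // Dump and save only when the config actually changed
        guard result.needsDump else { return result.needsPush }
        try SessionUtil.createDump(conf: conf, for: .convoInfoVolatile, publicKey: userPublicKey)?.save(db)
        return result.needsPush
    }
if needsPush {
    db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
        ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
    }
}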
@ -434,27 +412,42 @@ internal extension SessionUtil {
// MARK: - VolatileThreadInfo
public extension SessionUtil {
internal struct OpenGroupUrlInfo: FetchableRecord, Codable, Hashable {
let threadId: String
let server: String
let roomToken: String
let publicKey: String
static func fetchOne(_ db: Database, id: String) throws -> OpenGroupUrlInfo? {
return try OpenGroup
.filter(id: id)
.select(.threadId, .server, .roomToken, .publicKey)
.asRequest(of: OpenGroupUrlInfo.self)
.fetchOne(db)
}
static func fetchAll(_ db: Database, ids: [String]) throws -> [OpenGroupUrlInfo] {
return try OpenGroup
.filter(ids: ids)
.select(.threadId, .server, .roomToken, .publicKey)
.asRequest(of: OpenGroupUrlInfo.self)
.fetchAll(db)
}
static func fetchAll(_ db: Database) throws -> [OpenGroupUrlInfo] {
return try OpenGroup
.select(.threadId, .server, .roomToken, .publicKey)
.asRequest(of: OpenGroupUrlInfo.self)
.fetchAll(db)
}
}
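The struct above is what both the volatile-info and user-groups handlers use to resolve community URL details from the database. A small usage sketch, assuming a GRDB `Database` handle and call sites inside the same module:
// Resolve the URL info for a single community thread, or for several at once
let urlInfo: OpenGroupUrlInfo? = try OpenGroupUrlInfo.fetchOne(db, id: threadId)
let urlInfoById: [String: OpenGroupUrlInfo] = try OpenGroupUrlInfo
    .fetchAll(db, ids: threadIds)
    .reduce(into: [:]) { result, next in result[next.threadId] = next }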
struct VolatileThreadInfo {
enum Change {
case markedAsUnread(Bool)
case lastReadTimestampMs(Int64)
}
fileprivate struct OpenGroupUrlInfo: FetchableRecord, Codable, Hashable {
let threadId: String
let server: String
let roomToken: String
let publicKey: String
static func fetchOne(_ db: Database, id: String) throws -> OpenGroupUrlInfo? {
return try OpenGroup
.filter(id: id)
.select(.threadId, .server, .roomToken, .publicKey)
.asRequest(of: OpenGroupUrlInfo.self)
.fetchOne(db)
}
}
let threadId: String
let variant: SessionThread.Variant
fileprivate let openGroupUrlInfo: OpenGroupUrlInfo?
@ -550,7 +543,7 @@ public extension SessionUtil {
let publicKey: String = threadInfo.publicKey
else { return nil }
return VolatileThreadInfo.OpenGroupUrlInfo(
return OpenGroupUrlInfo(
threadId: threadInfo.id,
server: server,
roomToken: roomToken,

View File

@ -1,19 +0,0 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionUtil
import SessionUtilitiesKit
internal extension SessionUtil {
// MARK: - Incoming Changes
static func handleGroupsUpdate(
_ db: Database,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>,
mergeResult: ConfResult
) throws -> ConfResult {
// TODO: This
return mergeResult
}
}

View File

@ -0,0 +1,195 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionUtil
import SessionUtilitiesKit
// MARK: - Convenience
internal extension SessionUtil {
static func assignmentsRequireConfigUpdate(_ assignments: [ConfigColumnAssignment]) -> Bool {
let targetColumns: Set<ColumnKey> = Set(assignments.map { ColumnKey($0.column) })
let allColumnsThatTriggerConfigUpdate: Set<ColumnKey> = []
.appending(contentsOf: columnsRelatedToUserProfile)
.appending(contentsOf: columnsRelatedToContacts)
.appending(contentsOf: columnsRelatedToConvoInfoVolatile)
.map { ColumnKey($0) }
.asSet()
return !allColumnsThatTriggerConfigUpdate.isDisjoint(with: targetColumns)
}
/// This function assumes that the `pinnedPriority` values get set correctly elsewhere rather than trying to enforce
/// uniqueness in here (this means if we eventually allow for "priority grouping" this logic wouldn't change - just where the
/// priorities get updated in the HomeVC)
static func updateThreadPrioritiesIfNeeded<T>(
_ db: Database,
_ assignments: [ConfigColumnAssignment],
_ updated: [T]
) throws {
// Note: This logic assumes that the 'pinnedPriority' values get set correctly elsewhere
// rather than trying to enforce uniqueness in here (this means if we eventually allow for
// "priority grouping" this logic wouldn't change - just where the priorities get updated
// in the HomeVC)
let userPublicKey: String = getUserHexEncodedPublicKey(db)
let pinnedThreadInfo: [PriorityInfo] = try SessionThread
.select(.id, .variant, .pinnedPriority)
.asRequest(of: PriorityInfo.self)
.fetchAll(db)
let groupedPriorityInfo: [SessionThread.Variant: [PriorityInfo]] = pinnedThreadInfo
.grouped(by: \.variant)
let pinnedCommunities: [String: OpenGroupUrlInfo] = try OpenGroupUrlInfo
.fetchAll(db, ids: pinnedThreadInfo.map { $0.id })
.reduce(into: [:]) { result, next in result[next.threadId] = next }
do {
try groupedPriorityInfo.forEach { variant, priorityInfo in
switch variant {
case .contact:
// If the 'Note to Self' conversation is pinned then we need to custom handle it
// first as it's part of the UserProfile config
if let noteToSelfPriority: PriorityInfo = priorityInfo.first(where: { $0.id == userPublicKey }) {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .userProfile,
publicKey: userPublicKey
)
try SessionUtil.updateNoteToSelfPriority(
db,
priority: Int32(noteToSelfPriority.pinnedPriority ?? 0),
in: atomicConf
)
}
// Remove the 'Note to Self' convo from the list for updating contact priorities
let targetPriorities: [PriorityInfo] = priorityInfo.filter { $0.id != userPublicKey }
guard !targetPriorities.isEmpty else { return }
// Since we are doing direct memory manipulation we are using an `Atomic`
// type which has blocking access in its `mutate` closure
try SessionUtil
.config(
for: .contacts,
publicKey: userPublicKey
)
.mutate { conf in
let result: ConfResult = try SessionUtil.upsert(
contactData: targetPriorities
.map { ($0.id, nil, nil, Int32($0.pinnedPriority ?? 0), nil) },
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey
)?.save(db)
}
case .community:
// Since we are doing direct memory manipulation we are using an `Atomic`
// type which has blocking access in its `mutate` closure
try SessionUtil
.config(
for: .userGroups,
publicKey: userPublicKey
)
.mutate { conf in
let result: ConfResult = try SessionUtil.upsert(
communities: priorityInfo
.compactMap { info in
guard let communityInfo: OpenGroupUrlInfo = pinnedCommunities[info.id] else {
return nil
}
return (communityInfo, info.pinnedPriority)
},
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.createDump(
conf: conf,
for: .userGroups,
publicKey: userPublicKey
)?.save(db)
}
case .legacyGroup:
// Since we are doing direct memory manipulation we are using an `Atomic`
// type which has blocking access in its `mutate` closure
try SessionUtil
.config(
for: .userGroups,
publicKey: userPublicKey
)
.mutate { conf in
let result: ConfResult = try SessionUtil.upsert(
legacyGroups: priorityInfo
.map { LegacyGroupInfo(id: $0.id, priority: $0.pinnedPriority) },
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.createDump(
conf: conf,
for: .userGroups,
publicKey: userPublicKey
)?.save(db)
}
case .group:
// TODO: Add this
break
}
}
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
}
}
// MARK: - ColumnKey
internal extension SessionUtil {
struct ColumnKey: Equatable, Hashable {
let sourceType: Any.Type
let columnName: String
init(_ column: ColumnExpression) {
self.sourceType = type(of: column)
self.columnName = column.name
}
func hash(into hasher: inout Hasher) {
ObjectIdentifier(sourceType).hash(into: &hasher)
columnName.hash(into: &hasher)
}
static func == (lhs: ColumnKey, rhs: ColumnKey) -> Bool {
return (
lhs.sourceType == rhs.sourceType &&
lhs.columnName == rhs.columnName
)
}
}
}
// MARK: - Pinned Priority
extension SessionUtil {
struct PriorityInfo: Codable, FetchableRecord, Identifiable {
let id: String
let variant: SessionThread.Variant
let pinnedPriority: Int32?
}
}
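A short sketch of how the pieces in this file fit together: `ConfigColumnAssignment` values (built with the `.set(to:)` helper added later in this commit) are checked against the config-related column sets before any priority/sync handling runs. Names come from the hunks; `db` and `updatedThreads` are assumed stand-ins for the caller's database handle and fetched rows.
let assignments: [ConfigColumnAssignment] = [
    SessionThread.Columns.pinnedPriority.set(to: 1),
    SessionThread.Columns.markedAsUnread.set(to: false)
]
// Only columns listed in the 'columnsRelatedTo...' sets trigger config handling
if SessionUtil.assignmentsRequireConfigUpdate(assignments) {
    try SessionUtil.updateThreadPrioritiesIfNeeded(db, assignments, updatedThreads)
}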

View File

@ -0,0 +1,651 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import Sodium
import SessionUtil
import SessionUtilitiesKit
import SessionSnodeKit
// TODO: Expose 'GROUP_NAME_MAX_LENGTH', 'COMMUNITY_URL_MAX_LENGTH' & 'COMMUNITY_ROOM_MAX_LENGTH'
internal extension SessionUtil {
// MARK: - Incoming Changes
static func handleGroupsUpdate(
_ db: Database,
in conf: UnsafeMutablePointer<config_object>?,
mergeNeedsDump: Bool,
latestConfigUpdateSentTimestamp: TimeInterval
) throws {
guard mergeNeedsDump else { return }
guard conf != nil else { throw SessionUtilError.nilConfigObject }
var communities: [PrioritisedData<OpenGroupUrlInfo>] = []
var legacyGroups: [PrioritisedData<LegacyGroupInfo>] = []
var groups: [PrioritisedData<String>] = []
var community: ugroups_community_info = ugroups_community_info()
var legacyGroup: ugroups_legacy_group_info = ugroups_legacy_group_info()
let groupsIterator: OpaquePointer = user_groups_iterator_new(conf)
while !user_groups_iterator_done(groupsIterator) {
if user_groups_it_is_community(groupsIterator, &community) {
let server: String = String(libSessionVal: community.base_url)
let roomToken: String = String(libSessionVal: community.room)
communities.append(
PrioritisedData(
data: OpenGroupUrlInfo(
threadId: OpenGroup.idFor(roomToken: roomToken, server: server),
server: server,
roomToken: roomToken,
publicKey: Data(
libSessionVal: community.pubkey,
count: OpenGroup.pubkeyByteLength
).toHexString()
),
priority: community.priority
)
)
}
else if user_groups_it_is_legacy_group(groupsIterator, &legacyGroup) {
let groupId: String = String(libSessionVal: legacyGroup.session_id)
legacyGroups.append(
PrioritisedData(
data: LegacyGroupInfo(
id: groupId,
name: String(libSessionVal: legacyGroup.name),
lastKeyPair: ClosedGroupKeyPair(
threadId: groupId,
publicKey: Data(
libSessionVal: legacyGroup.enc_pubkey,
count: ClosedGroup.pubkeyByteLength
),
secretKey: Data(
libSessionVal: legacyGroup.enc_seckey,
count: ClosedGroup.secretKeyByteLength
),
receivedTimestamp: (TimeInterval(SnodeAPI.currentOffsetTimestampMs()) / 1000)
),
disappearingConfig: DisappearingMessagesConfiguration
.defaultWith(groupId)
.with(
// TODO: double check the 'isEnabled' flag
isEnabled: (legacyGroup.disappearing_timer > 0),
durationSeconds: (legacyGroup.disappearing_timer == 0 ? nil :
TimeInterval(legacyGroup.disappearing_timer)
)
),
groupMembers: [], //[GroupMember] // TODO: This
hidden: legacyGroup.hidden
),
priority: legacyGroup.priority
)
)
}
else {
SNLog("Ignoring unknown conversation type when iterating through volatile conversation info update")
}
user_groups_iterator_advance(groupsIterator)
}
user_groups_iterator_free(groupsIterator) // Need to free the iterator
// If we don't have any conversations then no need to continue
guard !communities.isEmpty || !legacyGroups.isEmpty || !groups.isEmpty else { return }
// Extract all community/legacyGroup/group thread priorities
let existingThreadPriorities: [String: PriorityInfo] = (try? SessionThread
.select(.id, .variant, .pinnedPriority)
.filter(
[
SessionThread.Variant.community,
SessionThread.Variant.legacyGroup,
SessionThread.Variant.group
].contains(SessionThread.Columns.variant)
)
.asRequest(of: PriorityInfo.self)
.fetchAll(db))
.defaulting(to: [])
.reduce(into: [:]) { result, next in result[next.id] = next }
// MARK: -- Handle Community Changes
// Add any new communities (via the OpenGroupManager)
communities.forEach { community in
OpenGroupManager.shared
.add(
db,
roomToken: community.data.roomToken,
server: community.data.server,
publicKey: community.data.publicKey,
calledFromConfigHandling: true
)
.sinkUntilComplete()
// Set the priority if it's changed (new communities will have already been inserted at
// this stage)
if existingThreadPriorities[community.data.threadId]?.pinnedPriority != community.priority {
_ = try? SessionThread
.filter(id: community.data.threadId)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
SessionThread.Columns.pinnedPriority.set(to: community.priority)
)
}
}
// Remove any communities which are no longer in the config
let communityIdsToRemove: Set<String> = Set(existingThreadPriorities
.filter { $0.value.variant == .community }
.keys)
.subtracting(communities.map { $0.data.threadId })
communityIdsToRemove.forEach { threadId in
OpenGroupManager.shared.delete(
db,
openGroupId: threadId,
calledFromConfigHandling: true
)
}
// MARK: -- Handle Legacy Group Changes
let existingLegacyGroupIds: Set<String> = Set(existingThreadPriorities
.filter { $0.value.variant == .legacyGroup }
.keys)
try legacyGroups.forEach { group in
guard
let name: String = group.data.name,
let lastKeyPair: ClosedGroupKeyPair = group.data.lastKeyPair,
let members: [GroupMember] = group.data.groupMembers
else { return }
if !existingLegacyGroupIds.contains(group.data.id) {
// Add a new group if it doesn't already exist
try MessageReceiver.handleNewClosedGroup(
db,
groupPublicKey: group.data.id,
name: name,
encryptionKeyPair: Box.KeyPair(
publicKey: lastKeyPair.publicKey.bytes,
secretKey: lastKeyPair.secretKey.bytes
),
members: members
.filter { $0.role == .standard }
.map { $0.profileId },
admins: members
.filter { $0.role == .admin }
.map { $0.profileId },
expirationTimer: UInt32(group.data.disappearingConfig?.durationSeconds ?? 0),
messageSentTimestamp: UInt64(latestConfigUpdateSentTimestamp * 1000)
)
}
else {
// Otherwise update the existing group
_ = try? ClosedGroup
.filter(id: group.data.id)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
ClosedGroup.Columns.name.set(to: name)
)
// Update the lastKey
let keyPairExists: Bool = ClosedGroupKeyPair
.filter(
ClosedGroupKeyPair.Columns.threadId == lastKeyPair.threadId &&
ClosedGroupKeyPair.Columns.publicKey == lastKeyPair.publicKey &&
ClosedGroupKeyPair.Columns.secretKey == lastKeyPair.secretKey
)
.isNotEmpty(db)
if !keyPairExists {
try lastKeyPair.insert(db)
}
// Update the disappearing messages timer
_ = try DisappearingMessagesConfiguration
.fetchOne(db, id: group.data.id)
.defaulting(to: DisappearingMessagesConfiguration.defaultWith(group.data.id))
.with(
// TODO: double check the 'isEnabled' flag
isEnabled: (group.data.disappearingConfig?.isEnabled == true),
durationSeconds: group.data.disappearingConfig?.durationSeconds
)
.saved(db)
// Update the members
// TODO: This
// TODO: Going to need to decide whether we want to update the 'GroupMember' records in the database based on this config message changing
// let members: [String]
// let admins: [String]
}
// TODO: 'hidden' flag - just toggle the 'shouldBeVisible' flag? Delete messages as well???
// Set the priority if it's changed
if existingThreadPriorities[group.data.id]?.pinnedPriority != group.priority {
_ = try? SessionThread
.filter(id: group.data.id)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
SessionThread.Columns.pinnedPriority.set(to: group.priority)
)
}
}
// Remove any legacy groups which are no longer in the config
let legacyGroupIdsToRemove: Set<String> = existingLegacyGroupIds
.subtracting(legacyGroups.map { $0.data.id })
if !legacyGroupIdsToRemove.isEmpty {
try ClosedGroup.removeKeysAndUnsubscribe(
db,
threadIds: Array(legacyGroupIdsToRemove),
removeGroupData: true,
calledFromConfigHandling: true
)
}
// MARK: -- Handle Group Changes
// TODO: Add this
}
// MARK: - Outgoing Changes
static func upsert(
legacyGroups: [LegacyGroupInfo],
in conf: UnsafeMutablePointer<config_object>?
) throws -> ConfResult {
guard conf != nil else { throw SessionUtilError.nilConfigObject }
guard !legacyGroups.isEmpty else { return ConfResult(needsPush: false, needsDump: false) }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
legacyGroups
.forEach { legacyGroup in
var cGroupId: [CChar] = legacyGroup.id.cArray
let userGroup: UnsafeMutablePointer<ugroups_legacy_group_info> = user_groups_get_or_construct_legacy_group(conf, &cGroupId)
// Assign all properties to match the updated group (if there is one)
if let updatedName: String = legacyGroup.name {
userGroup.pointee.name = updatedName.toLibSession()
// Store the updated group (needs to happen before variables go out of scope)
user_groups_set_legacy_group(conf, &userGroup)
}
if let lastKeyPair: ClosedGroupKeyPair = legacyGroup.lastKeyPair {
userGroup.pointee.enc_pubkey = lastKeyPair.publicKey.toLibSession()
userGroup.pointee.enc_seckey = lastKeyPair.secretKey.toLibSession()
// Store the updated group (needs to happen before variables go out of scope)
user_groups_set_legacy_group(conf, &userGroup)
}
// Assign all properties to match the updated disappearing messages config (if there is one)
if let updatedConfig: DisappearingMessagesConfiguration = legacyGroup.disappearingConfig {
// TODO: double check the 'isEnabled' flag
userGroup.pointee.disappearing_timer = (!updatedConfig.isEnabled ? 0 :
Int64(floor(updatedConfig.durationSeconds))
)
}
// TODO: Need to add members/admins
// Store the updated group (can't be sure if we made any changes above)
userGroup.pointee.hidden = (legacyGroup.hidden ?? userGroup.pointee.hidden)
userGroup.pointee.priority = (legacyGroup.priority ?? userGroup.pointee.priority)
// Note: Need to free the legacy group pointer
user_groups_set_free_legacy_group(conf, userGroup)
}
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
static func upsert(
communities: [(info: OpenGroupUrlInfo, priority: Int32?)],
in conf: UnsafeMutablePointer<config_object>?
) throws -> ConfResult {
guard conf != nil else { throw SessionUtilError.nilConfigObject }
guard !communities.isEmpty else { return ConfResult.init(needsPush: false, needsDump: false) }
communities
.forEach { info, priority in
var cBaseUrl: [CChar] = info.server.cArray
var cRoom: [CChar] = info.roomToken.cArray
var cPubkey: [UInt8] = Data(hex: info.publicKey).cArray
var userCommunity: ugroups_community_info = ugroups_community_info()
guard user_groups_get_or_construct_community(conf, &userCommunity, &cBaseUrl, &cRoom, &cPubkey) else {
SNLog("Unable to upsert community conversation to Config Message")
return
}
userCommunity.priority = (priority ?? userCommunity.priority)
user_groups_set_community(conf, &userCommunity)
}
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
// MARK: -- Communities
static func add(
_ db: Database,
server: String,
rootToken: String,
publicKey: String
) throws {
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let needsPush: Bool = try SessionUtil
.config(
for: .userGroups,
publicKey: userPublicKey
)
.mutate { conf in
guard conf != nil else { throw SessionUtilError.nilConfigObject }
let result: ConfResult = try SessionUtil.upsert(
communities: [
(
OpenGroupUrlInfo(
threadId: OpenGroup.idFor(roomToken: rootToken, server: server),
server: server,
roomToken: rootToken,
publicKey: publicKey
),
nil
)
],
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return result.needsPush }
try SessionUtil.createDump(
conf: conf,
for: .userGroups,
publicKey: userPublicKey
)?.save(db)
return result.needsPush
}
// Make sure we need a push before scheduling one
guard needsPush else { return }
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
}
}
static func remove(_ db: Database, server: String, roomToken: String) throws {
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let needsPush: Bool = try SessionUtil
.config(
for: .userGroups,
publicKey: userPublicKey
)
.mutate { conf in
guard conf != nil else { throw SessionUtilError.nilConfigObject }
var cBaseUrl: [CChar] = server.cArray
var cRoom: [CChar] = roomToken.cArray
// Don't care if the community doesn't exist
user_groups_erase_community(conf, &cBaseUrl, &cRoom)
let needsPush: Bool = config_needs_push(conf)
// If we don't need to dump the data then we can finish early
guard config_needs_dump(conf) else { return needsPush }
try SessionUtil.createDump(
conf: conf,
for: .userGroups,
publicKey: userPublicKey
)?.save(db)
return needsPush
}
// Make sure we need a push before scheduling one
guard needsPush else { return }
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
}
}
// MARK: -- Legacy Group Changes
static func add(
_ db: Database,
groupPublicKey: String,
name: String,
latestKeyPairPublicKey: Data,
latestKeyPairSecretKey: Data,
latestKeyPairReceivedTimestamp: TimeInterval,
members: Set<String>,
admins: Set<String>
) throws {
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let needsPush: Bool = try SessionUtil
.config(
for: .userGroups,
publicKey: userPublicKey
)
.mutate { conf in
guard conf != nil else { throw SessionUtilError.nilConfigObject }
let result: ConfResult = try SessionUtil.upsert(
legacyGroups: [
LegacyGroupInfo(
id: groupPublicKey,
name: name,
lastKeyPair: ClosedGroupKeyPair(
threadId: groupPublicKey,
publicKey: latestKeyPairPublicKey,
secretKey: latestKeyPairSecretKey,
receivedTimestamp: latestKeyPairReceivedTimestamp
),
groupMembers: members
.map { memberId in
GroupMember(
groupId: groupPublicKey,
profileId: memberId,
role: .standard,
isHidden: false
)
}
.appending(
contentsOf: admins
.map { memberId in
GroupMember(
groupId: groupPublicKey,
profileId: memberId,
role: .admin,
isHidden: false
)
}
)
)
],
in: conf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return result.needsPush }
try SessionUtil.createDump(
conf: conf,
for: .userGroups,
publicKey: userPublicKey
)?.save(db)
return result.needsPush
}
// Make sure we need a push before scheduling one
guard needsPush else { return }
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
}
}
static func hide(_ db: Database, legacyGroupIds: [String]) throws {
}
static func remove(_ db: Database, legacyGroupIds: [String]) throws {
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let needsPush: Bool = try SessionUtil
.config(
for: .userGroups,
publicKey: userPublicKey
)
.mutate { conf in
guard conf != nil else { throw SessionUtilError.nilConfigObject }
legacyGroupIds.forEach { threadId in
var cGroupId: [CChar] = threadId.cArray
// Don't care if the group doesn't exist
user_groups_erase_legacy_group(conf, &cGroupId)
}
let needsPush: Bool = config_needs_push(conf)
// If we don't need to dump the data then we can finish early
guard config_needs_dump(conf) else { return needsPush }
try SessionUtil.createDump(
conf: conf,
for: .userGroups,
publicKey: userPublicKey
)?.save(db)
return needsPush
}
// Make sure we need a push before scheduling one
guard needsPush else { return }
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
}
}
// MARK: -- Group Changes
static func hide(_ db: Database, groupIds: [String]) throws {
}
static func remove(_ db: Database, groupIds: [String]) throws {
}
}
}
return updated
}
}
// MARK: - LegacyGroupInfo
extension SessionUtil {
struct LegacyGroupInfo: Decodable, FetchableRecord, ColumnExpressible {
typealias Columns = CodingKeys
enum CodingKeys: String, CodingKey, ColumnExpression, CaseIterable {
case threadId
case name
case lastKeyPair
case disappearingConfig
case groupMembers
case hidden
case priority
}
var id: String { threadId }
let threadId: String
let name: String?
let lastKeyPair: ClosedGroupKeyPair?
let disappearingConfig: DisappearingMessagesConfiguration?
let groupMembers: [GroupMember]?
let hidden: Bool?
let priority: Int32?
init(
id: String,
name: String? = nil,
lastKeyPair: ClosedGroupKeyPair? = nil,
disappearingConfig: DisappearingMessagesConfiguration? = nil,
groupMembers: [GroupMember]? = nil,
hidden: Bool? = nil,
priority: Int32? = nil
) {
self.threadId = id
self.name = name
self.lastKeyPair = lastKeyPair
self.disappearingConfig = disappearingConfig
self.groupMembers = groupMembers
self.hidden = hidden
self.priority = priority
}
static func fetchAll(_ db: Database) throws -> [LegacyGroupInfo] {
return try ClosedGroup
.filter(ClosedGroup.Columns.threadId.like("\(SessionId.Prefix.standard.rawValue)%"))
.including(
required: ClosedGroup.keyPairs
.order(ClosedGroupKeyPair.Columns.receivedTimestamp.desc)
.forKey(Columns.lastKeyPair.name)
)
.including(all: ClosedGroup.members)
.joining(
optional: ClosedGroup.thread
.including(
optional: SessionThread.disappearingMessagesConfiguration
.forKey(Columns.disappearingConfig.name)
)
)
.asRequest(of: LegacyGroupInfo.self)
.fetchAll(db)
}
}
fileprivate struct GroupThreadData {
let communities: [PrioritisedData<SessionUtil.OpenGroupUrlInfo>]
let legacyGroups: [PrioritisedData<LegacyGroupInfo>]
let groups: [PrioritisedData<String>]
}
fileprivate struct PrioritisedData<T> {
let data: T
let priority: Int32
}
}
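Callers interact with the new user-groups config through the small `add`/`remove` helpers above. A condensed usage sketch for the community variants, using placeholder values and the parameter labels exactly as they appear in the hunks (including the `rootToken:` label on `add`):
let exampleServer: String = "https://open.example.org"            // placeholder
let exampleRoom: String = "example-room"                          // placeholder
let examplePublicKey: String = String(repeating: "0", count: 64)  // placeholder hex key
// Track a newly joined community in the userGroups config
try SessionUtil.add(db, server: exampleServer, rootToken: exampleRoom, publicKey: examplePublicKey)
// Stop tracking it (e.g. when the user leaves the community)
try SessionUtil.remove(db, server: exampleServer, roomToken: exampleRoom)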

View File

@ -6,63 +6,47 @@ import SessionUtil
import SessionUtilitiesKit
internal extension SessionUtil {
static let columnsRelatedToUserProfile: [Profile.Columns] = [
Profile.Columns.name,
Profile.Columns.profilePictureUrl,
Profile.Columns.profileEncryptionKey
]
// MARK: - Incoming Changes
static func handleUserProfileUpdate(
_ db: Database,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>,
mergeResult: ConfResult,
in conf: UnsafeMutablePointer<config_object>?,
mergeNeedsDump: Bool,
latestConfigUpdateSentTimestamp: TimeInterval
) throws -> ConfResult {
) throws {
typealias ProfileData = (profileName: String, profilePictureUrl: String?, profilePictureKey: Data?)
guard mergeResult.needsDump else { return mergeResult }
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
guard mergeNeedsDump else { return }
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// A profile must have a name so if this is null then it's invalid and can be ignored
guard let profileNamePtr: UnsafePointer<CChar> = user_profile_get_name(conf) else { return }
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let maybeProfileData: ProfileData? = atomicConf.mutate { conf -> ProfileData? in
// A profile must have a name so if this is null then it's invalid and can be ignored
guard let profileNamePtr: UnsafePointer<CChar> = user_profile_get_name(conf) else {
return nil
}
let profileName: String = String(cString: profileNamePtr)
let profilePic: user_profile_pic = user_profile_get_pic(conf)
let profilePictureUrl: String? = String(libSessionVal: profilePic.url, nullIfEmpty: true)
// Make sure the url and key exist before reading the memory
return (
profileName: profileName,
profilePictureUrl: profilePictureUrl,
profilePictureKey: (profilePictureUrl == nil ? nil :
Data(
libSessionVal: profilePic.key,
count: ProfileManager.avatarAES256KeyByteLength
)
)
)
}
// Only save the data in the database if it's valid
guard let profileData: ProfileData = maybeProfileData else { return mergeResult }
let profileName: String = String(cString: profileNamePtr)
let profilePic: user_profile_pic = user_profile_get_pic(conf)
let profilePictureUrl: String? = String(libSessionVal: profilePic.url, nullIfEmpty: true)
// Handle user profile changes
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: userPublicKey,
name: profileData.profileName,
name: profileName,
avatarUpdate: {
guard
let profilePictureUrl: String = profileData.profilePictureUrl,
let profileKey: Data = profileData.profilePictureKey
else { return .remove }
guard let profilePictureUrl: String = profilePictureUrl else { return .remove }
return .updateTo(
url: profilePictureUrl,
key: profileKey,
key: Data(
libSessionVal: profilePic.key,
count: ProfileManager.avatarAES256KeyByteLength
),
fileName: nil
)
}(),
@ -85,35 +69,54 @@ internal extension SessionUtil {
Contact.Columns.didApproveMe.set(to: true)
)
}
return mergeResult
}
// MARK: - Outgoing Changes
static func update(
profile: Profile,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>
in conf: UnsafeMutablePointer<config_object>?
) throws -> ConfResult {
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// Update the name
var updatedName: [CChar] = profile.name.cArray
user_profile_set_name(conf, &updatedName)
// Either assign the updated profile pic, or send a blank profile pic (to remove the current one)
var profilePic: user_profile_pic = user_profile_pic()
profilePic.url = profile.profilePictureUrl.toLibSession()
profilePic.key = profile.profileEncryptionKey.toLibSession()
user_profile_set_pic(conf, profilePic)
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
static func updateNoteToSelfPriority(
_ db: Database,
priority: Int32,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>
) throws {
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
return atomicConf.mutate { conf in
// Update the name
var updatedName: [CChar] = profile.name.cArray
user_profile_set_name(conf, &updatedName)
try atomicConf.mutate { conf in
user_profile_set_nts_priority(conf, priority)
// Either assign the updated profile pic, or send a blank profile pic (to remove the current one)
var profilePic: user_profile_pic = user_profile_pic()
profilePic.url = profile.profilePictureUrl.toLibSession()
profilePic.key = profile.profileEncryptionKey.toLibSession()
user_profile_set_pic(conf, profilePic)
// If we don't need to dump the data then we can finish early
guard config_needs_dump(conf) else { return }
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
try SessionUtil.createDump(
conf: conf,
for: .userProfile,
publicKey: userPublicKey
)?.save(db)
}
}
}
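One new capability in this file is pinning 'Note to Self' through the userProfile config. A minimal sketch of the call, mirroring how SessionUtil+Shared invokes it above; `priority: 1` is a placeholder value and `db`/`userPublicKey` are assumed from the caller:
// Pin 'Note to Self' by giving it a non-zero priority in the userProfile config
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
    for: .userProfile,
    publicKey: userPublicKey
)
try SessionUtil.updateNoteToSelfPriority(db, priority: 1, in: atomicConf)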

View File

@ -4,13 +4,52 @@ import Foundation
import GRDB
import SessionUtilitiesKit
// MARK: - GRDB
// MARK: - ConfigColumnAssignment
public struct ConfigColumnAssignment {
var column: ColumnExpression
var assignment: ColumnAssignment
init(
column: ColumnExpression,
assignment: ColumnAssignment
) {
self.column = column
self.assignment = assignment
}
}
// MARK: - ColumnExpression
extension ColumnExpression {
public func set(to value: (any SQLExpressible)?) -> ConfigColumnAssignment {
ConfigColumnAssignment(column: self, assignment: self.set(to: value))
}
}
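With `.set(to:)` now returning a `ConfigColumnAssignment`, call sites keep the familiar GRDB-style syntax while routing through the config-aware update path shown below. A small usage sketch (the thread id is a placeholder):
// Routes through updateAndFetchAllAndUpdateConfig for SessionThread,
// since pinnedPriority is one of the config-related columns
try SessionThread
    .filter(id: threadId)
    .updateAllAndConfig(
        db,
        SessionThread.Columns.pinnedPriority.set(to: 1)
    )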
// MARK: - QueryInterfaceRequest
public extension QueryInterfaceRequest {
@discardableResult
func updateAll(
_ db: Database,
_ assignments: ConfigColumnAssignment...
) throws -> Int {
return try updateAll(db, assignments)
}
@discardableResult
func updateAll(
_ db: Database,
_ assignments: [ConfigColumnAssignment]
) throws -> Int {
return try self.updateAll(db, assignments.map { $0.assignment })
}
@discardableResult
func updateAllAndConfig(
_ db: Database,
_ assignments: ColumnAssignment...
_ assignments: ConfigColumnAssignment...
) throws -> Int {
return try updateAllAndConfig(db, assignments)
}
@ -18,8 +57,15 @@ public extension QueryInterfaceRequest {
@discardableResult
func updateAllAndConfig(
_ db: Database,
_ assignments: [ColumnAssignment]
_ assignments: [ConfigColumnAssignment]
) throws -> Int {
let targetAssignments: [ColumnAssignment] = assignments.map { $0.assignment }
// Before we do anything make sure the changes actually need to be synced
guard SessionUtil.assignmentsRequireConfigUpdate(assignments) else {
return try self.updateAll(db, targetAssignments)
}
switch self {
case let contactRequest as QueryInterfaceRequest<Contact>:
return try contactRequest.updateAndFetchAllAndUpdateConfig(db, assignments).count
@ -29,9 +75,11 @@ public extension QueryInterfaceRequest {
case let threadRequest as QueryInterfaceRequest<SessionThread>:
return try threadRequest.updateAndFetchAllAndUpdateConfig(db, assignments).count
case let threadRequest as QueryInterfaceRequest<ClosedGroup>:
return try threadRequest.updateAndFetchAllAndUpdateConfig(db, assignments).count
default: return try self.updateAll(db, assignments)
default: return try self.updateAll(db, targetAssignments)
}
}
}
@ -40,7 +88,7 @@ public extension QueryInterfaceRequest where RowDecoder: FetchableRecord & Table
@discardableResult
func updateAndFetchAllAndUpdateConfig(
_ db: Database,
_ assignments: ColumnAssignment...
_ assignments: ConfigColumnAssignment...
) throws -> [RowDecoder] {
return try updateAndFetchAllAndUpdateConfig(db, assignments)
}
@ -48,41 +96,45 @@ public extension QueryInterfaceRequest where RowDecoder: FetchableRecord & Table
@discardableResult
func updateAndFetchAllAndUpdateConfig(
_ db: Database,
_ assignments: [ColumnAssignment]
_ assignments: [ConfigColumnAssignment]
) throws -> [RowDecoder] {
// First perform the actual updates
let updatedData: [RowDecoder] = try self.updateAndFetchAll(db, assignments.map { $0.assignment })
// Then check if any of the changes could affect the config
guard
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
Features.useSharedUtilForUserConfig,
SessionUtil.assignmentsRequireConfigUpdate(assignments)
else { return updatedData }
defer {
// If we change one of these types then we may as well automatically enqueue
// a new config sync job once the transaction completes (but only enqueue it
// once per transaction - doing it more than once is pointless)
if
self is QueryInterfaceRequest<Contact> ||
self is QueryInterfaceRequest<Profile> ||
self is QueryInterfaceRequest<SessionThread> ||
self is QueryInterfaceRequest<ClosedGroup>
{
db.afterNextTransactionNestedOnce(dedupeIdentifier: "EnqueueConfigurationSyncJob") { db in
ConfigurationSyncJob.enqueue(db)
}
// If we changed a column that requires a config update then we may as well automatically
// enqueue a new config sync job once the transaction completes (but only enqueue it once
// per transaction - doing it more than once is pointless)
let userPublicKey: String = getUserHexEncodedPublicKey(db)
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(userPublicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: userPublicKey)
}
}
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
guard Features.useSharedUtilForUserConfig else {
return try self.updateAndFetchAll(db, assignments)
}
// Update the config dump state where needed
try SessionUtil.updateThreadPrioritiesIfNeeded(db, assignments, updatedData)
switch self {
case is QueryInterfaceRequest<Contact>:
return try SessionUtil.updatingContacts(db, try updateAndFetchAll(db, assignments))
return try SessionUtil.updatingContacts(db, updatedData)
case is QueryInterfaceRequest<Profile>:
return try SessionUtil.updatingProfiles(db, try updateAndFetchAll(db, assignments))
return try SessionUtil.updatingProfiles(db, updatedData)
case is QueryInterfaceRequest<SessionThread>:
return try SessionUtil.updatingThreads(db, try updateAndFetchAll(db, assignments))
return updatedData
default: return try self.updateAndFetchAll(db, assignments)
default: return updatedData
}
}
}
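A hedged illustration of the flow above (the `sessionId` value is assumed to be in scope, and whether `Contact` supports `filter(id:)` in exactly this form is an assumption): the update runs first, and because `didApproveMe` is config-relevant the helper also hands the updated rows to `SessionUtil.updatingContacts` and schedules a deduplicated ConfigurationSyncJob after the transaction.

    let updatedContacts: [Contact] = try Contact
        .filter(id: sessionId)
        .updateAndFetchAllAndUpdateConfig(
            db,
            Contact.Columns.didApproveMe.set(to: true)
        )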

View File

@ -24,8 +24,7 @@ public enum SessionUtil {
public struct OutgoingConfResult {
let message: SharedConfigMessage
let namespace: SnodeAPI.Namespace
let destination: Message.Destination
let oldMessageHashes: [String]?
let obsoleteHashes: [String]
}
// MARK: - Configs
@ -43,6 +42,10 @@ public enum SessionUtil {
// MARK: - Variables
internal static func syncDedupeId(_ publicKey: String) -> String {
return "EnqueueConfigurationSyncJob-\(publicKey)"
}
/// Returns `true` if there is a config which needs to be pushed, but returns `false` if the configs are all up to date or haven't been
/// loaded yet (eg. fresh install)
public static var needsSync: Bool {
@ -150,44 +153,10 @@ public enum SessionUtil {
return conf
}
internal static func saveState(
_ db: Database,
keepingExistingMessageHashes: Bool,
configDump: ConfigDump?
) throws {
guard let configDump: ConfigDump = configDump else { return }
// If we want to keep the existing message hashes then we need
// to fetch them from the db and create a new 'ConfigDump' instance
let targetDump: ConfigDump = try {
guard keepingExistingMessageHashes else { return configDump }
let existingCombinedMessageHashes: String? = try ConfigDump
.filter(
ConfigDump.Columns.variant == configDump.variant &&
ConfigDump.Columns.publicKey == configDump.publicKey
)
.select(.combinedMessageHashes)
.asRequest(of: String.self)
.fetchOne(db)
return ConfigDump(
variant: configDump.variant,
publicKey: configDump.publicKey,
data: configDump.data,
messageHashes: ConfigDump.messageHashes(from: existingCombinedMessageHashes)
)
}()
// Actually save the dump
try targetDump.save(db)
}
internal static func createDump(
conf: UnsafeMutablePointer<config_object>?,
for variant: ConfigDump.Variant,
publicKey: String,
messageHashes: [String]?
publicKey: String
) throws -> ConfigDump? {
guard conf != nil else { throw SessionUtilError.nilConfigObject }
@ -206,103 +175,131 @@ public enum SessionUtil {
return ConfigDump(
variant: variant,
publicKey: publicKey,
data: dumpData,
messageHashes: messageHashes
data: dumpData
)
}
// MARK: - Pushes
public static func pendingChanges(_ db: Database) throws -> [OutgoingConfResult] {
public static func pendingChanges(
_ db: Database,
publicKey: String
) throws -> [OutgoingConfResult] {
guard Identity.userExists(db) else { throw SessionUtilError.userDoesNotExist }
let userPublicKey: String = getUserHexEncodedPublicKey(db)
let existingDumpInfo: Set<DumpInfo> = try ConfigDump
.select(.variant, .publicKey, .combinedMessageHashes)
.asRequest(of: DumpInfo.self)
var existingDumpVariants: Set<ConfigDump.Variant> = try ConfigDump
.select(.variant)
.filter(ConfigDump.Columns.publicKey == publicKey)
.asRequest(of: ConfigDump.Variant.self)
.fetchSet(db)
// Ensure we always check the required user config types for changes even if there is no dump
// data yet (to deal with first launch cases)
return existingDumpInfo
.inserting(
contentsOf: DumpInfo.requiredUserConfigDumpInfo(userPublicKey: userPublicKey)
.filter { requiredInfo -> Bool in
!existingDumpInfo.contains(where: {
$0.variant == requiredInfo.variant &&
$0.publicKey == requiredInfo.publicKey
})
if publicKey == userPublicKey {
ConfigDump.Variant.userVariants.forEach { existingDumpVariants.insert($0) }
}
// Ensure we always check the required user config types for changes even if there is no dump
// data yet (to deal with first launch cases)
return existingDumpVariants
.compactMap { variant -> OutgoingConfResult? in
SessionUtil
.config(for: variant, publicKey: publicKey)
.mutate { conf in
// Check if the config needs to be pushed
guard conf != nil && config_needs_push(conf) else { return nil }
let cPushData: UnsafeMutablePointer<config_push_data> = config_push(conf)
let pushData: Data = Data(
bytes: cPushData.pointee.config,
count: cPushData.pointee.config_len
)
let hashesToRemove: [String] = [String](
pointer: cPushData.pointee.obsolete,
count: cPushData.pointee.obsolete_len,
defaultValue: []
)
let seqNo: Int64 = cPushData.pointee.seqno
cPushData.deallocate()
return OutgoingConfResult(
message: SharedConfigMessage(
kind: variant.configMessageKind,
seqNo: seqNo,
data: pushData
),
namespace: variant.namespace,
obsoleteHashes: hashesToRemove
)
}
)
.compactMap { dumpInfo -> OutgoingConfResult? in
let key: ConfigKey = ConfigKey(variant: dumpInfo.variant, publicKey: dumpInfo.publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
// Check if the config needs to be pushed
guard
atomicConf.wrappedValue != nil &&
config_needs_push(atomicConf.wrappedValue)
else { return nil }
var toPush: UnsafeMutablePointer<UInt8>? = nil
var toPushLen: Int = 0
let seqNo: Int64 = atomicConf.mutate { config_push($0, &toPush, &toPushLen) }
guard let toPush: UnsafeMutablePointer<UInt8> = toPush else { return nil }
let pushData: Data = Data(bytes: toPush, count: toPushLen)
toPush.deallocate()
return OutgoingConfResult(
message: SharedConfigMessage(
kind: dumpInfo.variant.configMessageKind,
seqNo: seqNo,
data: pushData
),
namespace: dumpInfo.variant.namespace,
destination: (dumpInfo.publicKey == userPublicKey ?
Message.Destination.contact(publicKey: userPublicKey) :
Message.Destination.closedGroup(groupPublicKey: dumpInfo.publicKey)
),
oldMessageHashes: dumpInfo.messageHashes
)
}
}
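As a rough usage sketch (assuming an open `db: Database` and that the caller has access to the internal types): each `OutgoingConfResult` pairs the `SharedConfigMessage` to store with its target namespace and the hashes that become obsolete once the store succeeds.

    let changes: [SessionUtil.OutgoingConfResult] = try SessionUtil.pendingChanges(
        db,
        publicKey: getUserHexEncodedPublicKey(db)
    )

    changes.forEach { change in
        // The message gets stored in `change.namespace`; after a successful store the
        // hashes in `change.obsoleteHashes` can be deleted from the swarm
        print(change.namespace, change.message.seqNo, change.obsoleteHashes.count)
    }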
public static func markAsPushed(
public static func markingAsPushed(
message: SharedConfigMessage,
serverHash: String,
publicKey: String
) -> Bool {
let key: ConfigKey = ConfigKey(variant: message.kind.configDumpVariant, publicKey: publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
guard atomicConf.wrappedValue != nil else { return false }
// Mark the config as pushed
config_confirm_pushed(atomicConf.wrappedValue, message.seqNo)
// Update the result to indicate whether the config needs to be dumped
return config_needs_dump(atomicConf.wrappedValue)
) -> ConfigDump? {
return SessionUtil
.config(
for: message.kind.configDumpVariant,
publicKey: publicKey
)
.mutate { conf in
guard conf != nil else { return nil }
// Mark the config as pushed
var cHash: [CChar] = serverHash.cArray
config_confirm_pushed(conf, message.seqNo, &cHash)
// Update the result to indicate whether the config needs to be dumped
guard config_needs_dump(conf) else { return nil }
return try? SessionUtil.createDump(
conf: conf,
for: message.kind.configDumpVariant,
publicKey: publicKey
)
}
}
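A minimal, hedged sketch of the intended call site (the `configMessage`, `storedMessageHash`, `userPublicKey` and `db` values are assumed to be in scope): once the swarm confirms the store, the config is marked as pushed and, if libSession reports a dump is needed, the returned dump is persisted.

    if let dump: ConfigDump = SessionUtil.markingAsPushed(
        message: configMessage,
        serverHash: storedMessageHash,
        publicKey: userPublicKey
    ) {
        try dump.save(db)
    }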
public static func configHashes(for publicKey: String) -> [String] {
return Storage.shared
.read { db in
try ConfigDump
.read { db -> [String] in
guard Identity.userExists(db) else { return [] }
let existingDumpVariants: Set<ConfigDump.Variant> = (try? ConfigDump
.select(.variant)
.filter(ConfigDump.Columns.publicKey == publicKey)
.select(.combinedMessageHashes)
.asRequest(of: String.self)
.fetchAll(db)
.asRequest(of: ConfigDump.Variant.self)
.fetchSet(db))
.defaulting(to: [])
/// Extract all existing hashes for any dumps associated with the given `publicKey`
return existingDumpVariants
.map { variant -> [String] in
guard
let conf = SessionUtil
.config(for: variant, publicKey: publicKey)
.wrappedValue,
let hashList: UnsafeMutablePointer<config_string_list> = config_current_hashes(conf)
else {
return []
}
let result: [String] = [String](
pointer: hashList.pointee.value,
count: hashList.pointee.len,
defaultValue: []
)
hashList.deallocate()
return result
}
.reduce([], +)
}
.defaulting(to: [])
.compactMap { ConfigDump.messageHashes(from: $0) }
.flatMap { $0 }
}
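For example (illustrative only, with `userPublicKey` assumed to be in scope), this can be used to know which stored config messages are still current before cleaning up old ones:

    // Hashes reported by libSession for every dump variant stored for this public key;
    // variants whose hash is unknown (e.g. a push is still pending) contribute nothing
    let currentHashes: [String] = SessionUtil.configHashes(for: userPublicKey)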
// MARK: - Receiving
@ -319,143 +316,87 @@ public enum SessionUtil {
let groupedMessages: [ConfigDump.Variant: [SharedConfigMessage]] = messages
.grouped(by: \.kind.configDumpVariant)
// Merge the config messages into the current state
let mergeResults: [ConfigDump.Variant: IncomingConfResult] = groupedMessages
let needsPush: Bool = try groupedMessages
.sorted { lhs, rhs in lhs.key.processingOrder < rhs.key.processingOrder }
.reduce(into: [:]) { result, next in
let key: ConfigKey = ConfigKey(variant: next.key, publicKey: publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
var needsPush: Bool = false
var needsDump: Bool = false
let messageHashes: [String] = next.value.compactMap { $0.serverHash }
.reduce(false) { prevNeedsPush, next -> Bool in
let messageSentTimestamp: TimeInterval = TimeInterval(
(next.value.compactMap { $0.sentTimestamp }.max() ?? 0) / 1000
)
let needsPush: Bool = try SessionUtil
.config(for: next.key, publicKey: publicKey)
.mutate { conf in
// Merge the messages
var mergeHashes: [UnsafePointer<CChar>?] = next.value
.map { message in
(message.serverHash ?? "").cArray
.nullTerminated()
}
.unsafeCopy()
var mergeData: [UnsafePointer<UInt8>?] = next.value
.map { message -> [UInt8] in message.data.bytes }
.unsafeCopy()
var mergeSize: [Int] = next.value.map { $0.data.count }
config_merge(conf, &mergeHashes, &mergeData, &mergeSize, next.value.count)
mergeHashes.forEach { $0?.deallocate() }
mergeData.forEach { $0?.deallocate() }
// Apply the updated states to the database
switch next.key {
case .userProfile:
try SessionUtil.handleUserProfileUpdate(
db,
in: conf,
mergeNeedsDump: config_needs_dump(conf),
latestConfigUpdateSentTimestamp: messageSentTimestamp
)
case .contacts:
try SessionUtil.handleContactsUpdate(
db,
in: conf,
mergeNeedsDump: config_needs_dump(conf)
)
case .convoInfoVolatile:
try SessionUtil.handleConvoInfoVolatileUpdate(
db,
in: conf,
mergeNeedsDump: config_needs_dump(conf)
)
case .userGroups:
try SessionUtil.handleGroupsUpdate(
db,
in: conf,
mergeNeedsDump: config_needs_dump(conf),
latestConfigUpdateSentTimestamp: messageSentTimestamp
)
}
// Need to check if the config needs to be dumped (this might have changed
// after handling the merge changes)
guard config_needs_dump(conf) else { return config_needs_push(conf) }
try SessionUtil.createDump(
conf: conf,
for: next.key,
publicKey: publicKey
)?.save(db)
// Block the config while we are merging
atomicConf.mutate { conf in
var mergeData: [UnsafePointer<UInt8>?] = next.value
.map { message -> [UInt8] in message.data.bytes }
.unsafeCopy()
var mergeSize: [Int] = next.value.map { $0.data.count }
config_merge(conf, &mergeData, &mergeSize, next.value.count)
mergeData.forEach { $0?.deallocate() }
// Get the state of this variant
needsPush = config_needs_push(conf)
needsDump = config_needs_dump(conf)
}
return config_needs_push(conf)
}
// Return the current state of the config
result[next.key] = IncomingConfResult(
needsPush: needsPush,
needsDump: needsDump,
messageHashes: messageHashes,
latestSentTimestamp: messageSentTimestamp
)
// Update the 'needsPush' state as needed
return (prevNeedsPush || needsPush)
}
// Process the results from the merging
let finalResults: [ConfResult] = try mergeResults.map { variant, mergeResult in
let key: ConfigKey = ConfigKey(variant: variant, publicKey: publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
// Apply the updated states to the database
let postHandlingResult: ConfResult = try {
switch variant {
case .userProfile:
return try SessionUtil.handleUserProfileUpdate(
db,
in: atomicConf,
mergeResult: mergeResult.result,
latestConfigUpdateSentTimestamp: mergeResult.latestSentTimestamp
)
case .contacts:
return try SessionUtil.handleContactsUpdate(
db,
in: atomicConf,
mergeResult: mergeResult.result
)
case .convoInfoVolatile:
return try SessionUtil.handleConvoInfoVolatileUpdate(
db,
in: atomicConf,
mergeResult: mergeResult.result
)
case .userGroups:
return try SessionUtil.handleGroupsUpdate(
db,
in: atomicConf,
mergeResult: mergeResult.result
)
}
}()
// We need to get the existing message hashes and combine them with the latest from the
// service node to ensure the next push will properly clean up old messages
let oldMessageHashes: Set<String> = try ConfigDump
.filter(
ConfigDump.Columns.variant == variant &&
ConfigDump.Columns.publicKey == publicKey
)
.select(.combinedMessageHashes)
.asRequest(of: String.self)
.fetchOne(db)
.map { ConfigDump.messageHashes(from: $0) }
.defaulting(to: [])
.asSet()
let allMessageHashes: [String] = Array(oldMessageHashes
.inserting(contentsOf: mergeResult.messageHashes.asSet()))
let messageHashesChanged: Bool = (oldMessageHashes != mergeResult.messageHashes.asSet())
// Now that the changes are applied, update the cached dumps
switch (postHandlingResult.needsDump, messageHashesChanged) {
case (true, _):
// The config data had changes so regenerate the dump and save it
try atomicConf
.mutate { conf -> ConfigDump? in
try SessionUtil.createDump(
conf: conf,
for: variant,
publicKey: publicKey,
messageHashes: allMessageHashes
)
}?
.save(db)
case (false, true):
// The config data didn't change but there were different messages on the service node
// so just update the message hashes so the next sync can properly remove any old ones
try ConfigDump
.filter(
ConfigDump.Columns.variant == variant &&
ConfigDump.Columns.publicKey == publicKey
)
.updateAll(
db,
ConfigDump.Columns.combinedMessageHashes
.set(to: ConfigDump.combinedMessageHashes(from: allMessageHashes))
)
default: break
}
return postHandlingResult
}
// Now that the local state has been updated, schedule a config sync if needed (this will
// push any pending updates and properly update the state)
guard needsPush else { return }
// Now that the local state has been updated, trigger a config sync (this will push any
// pending updates and properly update the state)
if finalResults.contains(where: { $0.needsPush }) {
ConfigurationSyncJob.enqueue(db)
db.afterNextTransactionNestedOnce(dedupeId: SessionUtil.syncDedupeId(publicKey)) { db in
ConfigurationSyncJob.enqueue(db, publicKey: publicKey)
}
}
}
@ -467,20 +408,41 @@ fileprivate extension SessionUtil {
let variant: ConfigDump.Variant
let publicKey: String
}
}
// MARK: - Convenience
public extension SessionUtil {
static func parseCommunity(url: String) -> (room: String, server: String, publicKey: String)? {
var cFullUrl: [CChar] = url.cArray
var cBaseUrl: [CChar] = [CChar](repeating: 0, count: COMMUNITY_BASE_URL_MAX_LENGTH)
var cRoom: [CChar] = [CChar](repeating: 0, count: COMMUNITY_ROOM_MAX_LENGTH)
var cPubkey: [UInt8] = [UInt8](repeating: 0, count: OpenGroup.pubkeyByteLength)
guard
community_parse_full_url(&cFullUrl, &cBaseUrl, &cRoom, &cPubkey) &&
!String(cString: cRoom).isEmpty &&
!String(cString: cBaseUrl).isEmpty &&
cPubkey.contains(where: { $0 != 0 })
else { return nil }
// Note: Need to store them in variables instead of returning directly to ensure they
// don't get freed from memory early (was seeing this happen intermittently during
// unit tests...)
let room: String = String(cString: cRoom)
let baseUrl: String = String(cString: cBaseUrl)
let pubkeyHex: String = Data(cPubkey).toHexString()
return (room, baseUrl, pubkeyHex)
}
struct DumpInfo: FetchableRecord, Decodable, Hashable {
let variant: ConfigDump.Variant
let publicKey: String
private let combinedMessageHashes: String?
static func communityUrlFor(server: String, roomToken: String, publicKey: String) -> String {
var cBaseUrl: [CChar] = server.cArray
var cRoom: [CChar] = roomToken.cArray
var cPubkey: [UInt8] = Data(hex: publicKey).cArray
var cFullUrl: [CChar] = [CChar](repeating: 0, count: COMMUNITY_FULL_URL_MAX_LENGTH)
community_make_full_url(&cBaseUrl, &cRoom, &cPubkey, &cFullUrl)
var messageHashes: [String]? { ConfigDump.messageHashes(from: combinedMessageHashes) }
// MARK: - Convenience
static func requiredUserConfigDumpInfo(userPublicKey: String) -> Set<DumpInfo> {
return ConfigDump.Variant.userVariants
.map { DumpInfo(variant: $0, publicKey: userPublicKey, combinedMessageHashes: nil) }
.asSet()
}
return String(cString: cFullUrl)
}
}
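A hedged example of the two helpers above round-tripping a community URL (the server, room and pubkey values are placeholders):

    let placeholderPubkey: String = String(repeating: "12", count: 32)   // 32-byte hex placeholder

    // Build a canonical full URL from its parts (Session-compatible ?public_key=... form)...
    let fullUrl: String = SessionUtil.communityUrlFor(
        server: "http://example.com",
        roomToken: "lobby",
        publicKey: placeholderPubkey
    )

    // ...and split a full URL back into (room, canonical server, pubkey hex)
    if let (room, server, publicKey) = SessionUtil.parseCommunity(url: fullUrl) {
        print(room, server, publicKey)   // e.g. "lobby", "http://example.com", the placeholder hex
    }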

View File

@ -17,27 +17,41 @@ public extension String {
self = result
}
init<T>(
libSessionVal: T,
fixedLength: Int? = .none
) {
guard let fixedLength: Int = fixedLength else {
// Note: The `String(cString:)` function requires that the value is null-terminated
// so add a null-termination character if needed
self = String(
cString: withUnsafeBytes(of: libSessionVal) { [UInt8]($0) }
.nullTerminated()
)
return
}
guard
let fixedLengthData: Data = Data(
libSessionVal: libSessionVal,
count: fixedLength,
nullIfEmpty: true
),
let result: String = String(data: fixedLengthData, encoding: .utf8)
else {
self = ""
return
}
self = result
}
init?<T>(
libSessionVal: T,
fixedLength: Int? = .none,
nullIfEmpty: Bool = false
nullIfEmpty: Bool
) {
let result: String = {
guard let fixedLength: Int = fixedLength else {
// Note: The `String(cString:)` function requires that the value is null-terminated
// so add a null-termination character if needed
return String(
cString: withUnsafeBytes(of: libSessionVal) { [UInt8]($0) }
.nullTerminated()
)
}
return String(
data: Data(libSessionVal: libSessionVal, count: fixedLength),
encoding: .utf8
)
.defaulting(to: "")
}()
let result = String(libSessionVal: libSessionVal, fixedLength: fixedLength)
guard !nullIfEmpty || !result.isEmpty else { return nil }
@ -46,13 +60,15 @@ public extension String {
func toLibSession<T>() -> T {
let targetSize: Int = MemoryLayout<T>.stride
let result: UnsafeMutableRawPointer = UnsafeMutableRawPointer.allocate(
byteCount: targetSize,
alignment: MemoryLayout<T>.alignment
var dataMatchingDestinationSize: [CChar] = [CChar](repeating: 0, count: targetSize)
dataMatchingDestinationSize.replaceSubrange(
0..<Swift.min(targetSize, self.utf8CString.count),
with: self.utf8CString
)
self.utf8CString.withUnsafeBytes { result.copyMemory(from: $0.baseAddress!, byteCount: $0.count) }
return result.withMemoryRebound(to: T.self, capacity: targetSize) { $0.pointee }
return dataMatchingDestinationSize.withUnsafeBytes { ptr in
ptr.baseAddress!.assumingMemoryBound(to: T.self).pointee
}
}
}
@ -71,21 +87,33 @@ public extension Data {
var cArray: [UInt8] { [UInt8](self) }
init<T>(libSessionVal: T, count: Int) {
self = Data(
bytes: Swift.withUnsafeBytes(of: libSessionVal) { [UInt8]($0) },
count: count
)
let result: Data = Swift.withUnsafePointer(to: libSessionVal) {
Data(bytes: $0, count: count)
}
self = result
}
init?<T>(libSessionVal: T, count: Int, nullIfEmpty: Bool) {
let result: Data = Data(libSessionVal: libSessionVal, count: count)
// If all of the values are 0 then return nil instead of the data
guard !nullIfEmpty || result.contains(where: { $0 != 0 }) else { return nil }
self = result
}
func toLibSession<T>() -> T {
let targetSize: Int = MemoryLayout<T>.stride
let result: UnsafeMutableRawPointer = UnsafeMutableRawPointer.allocate(
byteCount: targetSize,
alignment: MemoryLayout<T>.alignment
var dataMatchingDestinationSize: Data = Data(count: targetSize)
dataMatchingDestinationSize.replaceSubrange(
0..<Swift.min(targetSize, self.count),
with: self
)
self.withUnsafeBytes { result.copyMemory(from: $0.baseAddress!, byteCount: $0.count) }
return result.withMemoryRebound(to: T.self, capacity: targetSize) { $0.pointee }
return dataMatchingDestinationSize.withUnsafeBytes { ptr in
ptr.baseAddress!.assumingMemoryBound(to: T.self).pointee
}
}
}
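As a hedged illustration of the conversions above (the URL string is hypothetical), a Swift value can be written into a fixed-size libSession struct field and read back with the same helpers:

    var pic: user_profile_pic = user_profile_pic()
    pic.url = "http://example.com/avatar.png".toLibSession()

    // With no `fixedLength` the value is read up to its null terminator; `nullIfEmpty` turns an
    // all-zero value into nil rather than an empty string
    let url: String? = String(libSessionVal: pic.url, nullIfEmpty: true)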
@ -100,6 +128,38 @@ public extension Optional<Data> {
// MARK: - Array
public extension Array where Element == String {
init?(
pointer: UnsafeMutablePointer<UnsafeMutablePointer<CChar>?>?,
count: Int?
) {
guard
let pointee: UnsafeMutablePointer<CChar> = pointer?.pointee,
let count: Int = count
else { return nil }
self = (0..<count)
.reduce(into: []) { result, index in
/// We need to calculate the start position of each of the hashes in memory which will
/// be at the end of the previous hash plus one (due to the null termination character
/// which isn't included in Swift strings so isn't included in `count`)
let prevLength: Int = (result.isEmpty ? 0 :
result.map { ($0.count + 1) }.reduce(0, +)
)
result.append(String(cString: pointee.advanced(by: prevLength)))
}
}
init(
pointer: UnsafeMutablePointer<UnsafeMutablePointer<CChar>?>?,
count: Int?,
defaultValue: [String]
) {
self = ([String](pointer: pointer, count: count) ?? defaultValue)
}
}
public extension Array where Element == CChar {
func nullTerminated() -> [Element] {
guard self.last != CChar(0) else { return self }

View File

@ -4,6 +4,7 @@ module SessionUtil {
header "session/export.h"
header "session/config.h"
header "session/config/error.h"
header "session/config/community.h"
header "session/config/expiring.h"
header "session/config/user_groups.h"
header "session/config/convo_info_volatile.h"

View File

@ -89,7 +89,11 @@ class ConfigMessage {
bool verified_signature_ = false;
bool merged_ = false;
// This will be set during construction from configs based on the merge result:
// -1 means we had to merge one or more configs together into a new merged config
// >= 0 indicates the index of the config we used if we did not merge (i.e. there was only one
// config, or there were multiple but one of them referenced all the others).
int unmerged_ = -1;
public:
constexpr static int DEFAULT_DIFF_LAGS = 5;
@ -147,19 +151,20 @@ class ConfigMessage {
/// set, thus allowing unsigned messages (though messages with an invalid signature are still
/// not allowed). This option is ignored when verifier is not set.
///
/// error_callback - if set then any config message parsing error will be passed to this
/// function for handling: the callback typically warns and, if the overall construction should
/// abort, rethrows the error. If this function is omitted then the default skips (without
/// failing) individual parse errors and only aborts construction if *all* messages fail to
/// parse. A simple handler such as `[](const auto& e) { throw e; }` can be used to make any
/// parse error of any message fatal.
/// error_handler - if set then any config message parsing error will be passed to this function
/// for handling with the index of `configs` that failed and the error exception: the callback
/// typically warns and, if the overall construction should abort, rethrows the error. If this
/// function is omitted then the default skips (without failing) individual parse errors and
/// only aborts construction if *all* messages fail to parse. A simple handler such as
/// `[](size_t, const auto& e) { throw e; }` can be used to make any parse error of any message
/// fatal.
explicit ConfigMessage(
const std::vector<ustring_view>& configs,
verify_callable verifier = nullptr,
sign_callable signer = nullptr,
int lag = DEFAULT_DIFF_LAGS,
bool signature_optional = false,
std::function<void(const config_error&)> error_handler = nullptr);
std::function<void(size_t, const config_error&)> error_handler = nullptr);
/// Returns a read-only reference to the contained data. (To get a mutable config object use
/// MutableConfigMessage).
@ -198,7 +203,13 @@ class ConfigMessage {
/// After loading multiple config files this flag indicates whether or not we had to produce a
/// new, merged configuration message (true) or did not need to merge (false). (For config
/// messages that were not loaded from serialized data this is always true).
bool merged() const { return merged_; }
bool merged() const { return unmerged_ == -1; }
/// After loading multiple config files this field contains the index of the single config we
/// used if we didn't need to merge (that is: there was only one config or one config that
/// superseded all the others). If we had to merge (or this wasn't loaded from serialized
/// data), this will return -1.
int unmerged_index() const { return unmerged_; }
/// Returns true if this message contained a valid, verified signature when it was parsed.
/// Returns false otherwise (e.g. not loaded from verification at all; loaded without a
@ -273,7 +284,7 @@ class MutableConfigMessage : public ConfigMessage {
sign_callable signer = nullptr,
int lag = DEFAULT_DIFF_LAGS,
bool signature_optional = false,
std::function<void(const config_error&)> error_handler = nullptr);
std::function<void(size_t, const config_error&)> error_handler = nullptr);
/// Wrapper around the above that takes a single string view to load a single message, doesn't
/// take an error handler and instead always throws on parse errors (the above also throws for

View File

@ -55,31 +55,50 @@ int16_t config_storage_namespace(const config_object* conf);
/// config object may be unchanged, completely replaced, or updated and needing a push, depending on
/// the messages that are merged; the caller should check config_needs_push().
///
/// `configs` is an array of pointers to the start of the strings; `lengths` is an array of string
/// lengths; `count` is the length of those two arrays.
/// `msg_hashes` is an array of null-terminated C strings containing the hashes of the configs being
/// provided.
/// `configs` is an array of pointers to the start of the (binary) data.
/// `lengths` is an array of lengths of the binary data
/// `count` is the length of all three arrays.
int config_merge(
config_object* conf, const unsigned char** configs, const size_t* lengths, size_t count);
config_object* conf,
const char** msg_hashes,
const unsigned char** configs,
const size_t* lengths,
size_t count);
/// Returns true if this config object contains updated data that has not yet been confirmed stored
/// on the server.
bool config_needs_push(const config_object* conf);
/// Obtains the configuration data that needs to be pushed to the server. The output is written to
/// a new malloc'ed buffer of the appropriate size; the buffer and the output length are set in the
/// `out` and `outlen` parameters. Note that this is binary data, *not* a null-terminated C string.
/// Returned struct of config push data.
typedef struct config_push_data {
// The config seqno (to be provided later in `config_confirm_pushed`).
seqno_t seqno;
// The config message to push (binary data, not null-terminated).
unsigned char* config;
// The length of `config`
size_t config_len;
// Array of obsolete message hashes to delete; each element is a null-terminated C string
char** obsolete;
// length of `obsolete`
size_t obsolete_len;
} config_push_data;
/// Obtains the configuration data that needs to be pushed to the server.
///
/// Generally this call should be guarded by a call to `config_needs_push`, however it can be used
/// to re-obtain the current serialized config even if no push is needed (for example, if the client
/// wants to re-submit it after a network error).
///
/// NB: The returned buffer belongs to the caller: that is, the caller *MUST* free() it when done
/// with it.
seqno_t config_push(config_object* conf, unsigned char** out, size_t* outlen);
/// NB: The returned pointer belongs to the caller: that is, the caller *MUST* free() it when
/// done with it.
config_push_data* config_push(config_object* conf);
/// Reports that data obtained from `config_push` has been successfully stored on the server. The
/// seqno value is the one returned by the config_push call that yielded the config data.
void config_confirm_pushed(config_object* conf, seqno_t seqno);
/// Reports that data obtained from `config_push` has been successfully stored on the server with
/// message hash `msg_hash`. The seqno value is the one returned by the config_push call that
/// yielded the config data.
void config_confirm_pushed(config_object* conf, seqno_t seqno, const char* msg_hash);
/// Returns a binary dump of the current state of the config object. This dump can be used to
/// resurrect the object at a later point (e.g. after a restart). Allocates a new buffer and sets
@ -96,6 +115,20 @@ void config_dump(config_object* conf, unsigned char** out, size_t* outlen);
/// and saving the `config_dump()` data again.
bool config_needs_dump(const config_object* conf);
/// Struct containing a list of C strings. Typically where this is returned by this API it must be
/// freed (via `free()`) when done with it.
typedef struct config_string_list {
char** value; // array of null-terminated C strings
size_t len; // length of `value`
} config_string_list;
/// Obtains the current active hashes. Note that this will be empty if the current hash is unknown
/// or not yet determined (for example, because the current state is dirty or because the most
/// recent push is still pending and we don't know the hash yet).
///
/// The returned pointer belongs to the caller and must be freed via `free()` when done with it.
config_string_list* config_current_hashes(const config_object* conf);
/// Config key management; see the corresponding method docs in base.hpp. All `key` arguments here
/// are 32-byte binary buffers (and since fixed-length, there is no keylen argument).
void config_add_key(config_object* conf, const unsigned char* key);

View File

@ -4,6 +4,7 @@
#include <memory>
#include <session/config.hpp>
#include <type_traits>
#include <unordered_set>
#include <variant>
#include <vector>
@ -64,6 +65,15 @@ class ConfigBase {
size_t _keys_size = 0;
size_t _keys_capacity = 0;
// Contains the current active message hash, as fed into us in `confirm_pushed()`. Empty if we
// don't know it yet. When we dirty the config this value gets moved into `_old_hashes` to be
// removed by the next push.
std::string _curr_hash;
// Contains known message hashes that have been obsoleted by the most recent merge or push;
// these are returned (and cleared) when `push` is called.
std::unordered_set<std::string> _old_hashes;
protected:
// Constructs a base config by loading the data from a dump as produced by `dump()`. If the
// dump is nullopt then an empty base config is constructed with no config settings and seqno
@ -74,11 +84,10 @@ class ConfigBase {
// calling set_state, which sets it to true implicitly).
bool _needs_dump = false;
// Sets the current state; this also sets _needs_dump to true.
void set_state(ConfigState s) {
_state = s;
_needs_dump = true;
}
// Sets the current state; this also sets _needs_dump to true. If transitioning to a dirty
// state and we know our current message hash, that hash gets added to `_old_hashes` to be
// deleted at the next push.
void set_state(ConfigState s);
// Invokes the `logger` callback if set, does nothing if there is no logger.
void log(LogLevel lvl, std::string msg) {
@ -445,18 +454,21 @@ class ConfigBase {
// This takes all of the messages pulled down from the server and does whatever is necessary to
// merge (or replace) the current values.
//
// Values are pairs of the message hash (as provided by the server) and the raw message body.
//
// After this call the caller should check `needs_push()` to see if the data on hand was updated
// and needs to be pushed to the server again.
// and needs to be pushed to the server again (for example, because the data contained conflicts
// that required another update to resolve).
//
// Returns the number of the given config messages that were successfully parsed.
//
// Will throw on serious error (i.e. if neither the current nor any of the given configs are
// parseable). This should not happen (the current config, at least, should always be
// re-parseable).
virtual int merge(const std::vector<ustring_view>& configs);
virtual int merge(const std::vector<std::pair<std::string, ustring_view>>& configs);
// Same as above but takes a vector of ustring's as sometimes that is more convenient.
int merge(const std::vector<ustring>& configs);
// Same as above but takes the values as ustring's as sometimes that is more convenient.
int merge(const std::vector<std::pair<std::string, ustring>>& configs);
// Returns true if we are currently dirty (i.e. have made changes that haven't been serialized
// yet).
@ -466,31 +478,51 @@ class ConfigBase {
// unmodified).
bool is_clean() const { return _state == ConfigState::Clean; }
// The current config hash(es); this can be empty if the current hash is unknown or the current
// state is not clean (i.e. a push is needed or pending).
std::vector<std::string> current_hashes() const;
// Returns true if this object contains updated data that has not yet been confirmed stored on
// the server. This will be true whenever `is_clean()` is false: that is, if we are currently
// "dirty" (i.e. have changes that haven't been pushed) or are still awaiting confirmation of
// storage of the most recent serialized push data.
virtual bool needs_push() const;
// Returns the data messages to push to the server along with the seqno value of the data. If
// the config is currently dirty (i.e. has previously unsent modifications) then this marks it
// as awaiting-confirmation instead of dirty so that any future change immediately increments
// the seqno.
// Returns a tuple of three elements:
// - the seqno value of the data
// - the data message to push to the server
// - a list of known message hashes that are obsoleted by this push.
//
// Additionally, if the internal state is currently dirty (i.e. there are unpushed changes), the
// internal state will be marked as awaiting-confirmation. Any further data changes made after
// this call will re-dirty the data (incrementing seqno and requiring another push).
//
// The client is expected to send a sequence request to the server that stores the message and
// deletes the hashes (if any). It is strongly recommended to use a sequence rather than a
// batch so that the deletions won't happen if the store fails for some reason.
//
// Upon successful completion of the store+deletion requests the client should call
// `confirm_pushed` with the seqno value to confirm that the message has been stored.
//
// Subclasses that need to perform pre-push tasks (such as pruning stale data) can override this
// to prune and then call the base method to perform the actual push generation.
virtual std::pair<ustring, seqno_t> push();
virtual std::tuple<seqno_t, ustring, std::vector<std::string>> push();
// Should be called after the push is confirmed stored on the storage server swarm to let the
// object know the data is stored. (Once this is called `needs_push` will start returning false
// until something changes). Takes the seqno that was pushed so that the object can ensure that
// the latest version was pushed (i.e. in case there have been other changes since the `push()`
// call that returned this seqno).
// object know the config message has been stored and, ideally, that the obsolete messages
// returned by `push()` are deleted. Once this is called `needs_push` will start returning
// false until something changes. Takes the seqno that was pushed so that the object can ensure
// that the latest version was pushed (i.e. in case there have been other changes since the
// `push()` call that returned this seqno).
//
// Ideally the caller should have both stored the returned message and deleted the given
// messages. The deletion step isn't critical (it is just cleanup) and callers should call this
// as long as the store succeeded even if there were errors in the deletions.
//
// It is safe to call this multiple times with the same seqno value, and with out-of-order
// seqnos (e.g. calling with seqno 122 after having called with 123; the duplicates and earlier
// ones will just be ignored).
virtual void confirm_pushed(seqno_t seqno);
virtual void confirm_pushed(seqno_t seqno, std::string msg_hash);
// Returns a dump of the current state for storage in the database; this value would get passed
// into the constructor to reconstitute the object (including the push/not pushed status). This

View File

@ -0,0 +1,44 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#include <stddef.h>
// Maximum string length of a community base URL
extern const size_t COMMUNITY_BASE_URL_MAX_LENGTH;
// Maximum string length of a community room token
extern const size_t COMMUNITY_ROOM_MAX_LENGTH;
// Maximum string length of a full URL as produced by the community_make_full_url() function.
// Unlike the above constants, this *includes* space for a NULL string terminator.
extern const size_t COMMUNITY_FULL_URL_MAX_LENGTH;
// Parses a community URL. Writes the canonical base url, room token, and pubkey bytes into the
// given pointers. base_url must be at least COMMUNITY_BASE_URL_MAX_LENGTH+1; room_token must be
// at least COMMUNITY_ROOM_MAX_LENGTH+1; and pubkey must be (at least) 32 bytes.
//
// Returns true if the url was parsed successfully, false if parsing failed (i.e. an invalid URL).
bool community_parse_full_url(
const char* full_url, char* base_url, char* room_token, unsigned char* pubkey);
// Similar to the above, but allows a URL to omit the pubkey. If no pubkey is found, `pubkey` is
// left unchanged and `has_pubkey` is set to false; otherwise `pubkey` is written and `has_pubkey`
// is set to true. `pubkey` may be set to NULL, in which case it is never written. `has_pubkey`
// may be NULL in which case it is not set (typically both pubkey arguments would be null for cases
// where you don't care at all about the pubkey).
bool community_parse_partial_url(
const char* full_url, char* base_url, char* room_token, unsigned char* pubkey, bool* has_pubkey);
// Produces a standard full URL from a given base_url (c string), room token (c string), and pubkey
// (fixed-length 32 byte buffer). The full URL is written to `full_url`, which must be at least
// COMMUNITY_FULL_URL_MAX_LENGTH in size.
void community_make_full_url(
const char* base_url, const char* room, const unsigned char* pubkey, char* full_url);
#ifdef __cplusplus
}
#endif

View File

@ -16,7 +16,7 @@ namespace session::config {
struct community {
// 267 = len('https://') + 253 (max valid DNS name length) + len(':XXXXX')
static constexpr size_t URL_MAX_LENGTH = 267;
static constexpr size_t BASE_URL_MAX_LENGTH = 267;
static constexpr size_t ROOM_MAX_LENGTH = 64;
community() = default;
@ -86,6 +86,16 @@ struct community {
std::string pubkey_b64() const; // Accesses the server pubkey as unpadded base64 (43 from
// alphanumeric, '+', and '/').
// Constructs and returns the full URL for this room. See below.
std::string full_url() const;
// Constructs and returns the full URL for a given base, room, and pubkey. Currently this
// returns it in a Session-compatibility form (https://server.com/RoomName?public_key=....), but
// future versions are expected to change to use (https://server.com/r/RoomName?public_key=...),
// which this library also accepts.
static std::string full_url(
std::string_view base_url, std::string_view room, ustring_view pubkey);
// Takes a base URL as input and returns it in canonical form. This involves doing things
// like lower casing it and removing redundant ports (e.g. :80 when using http://). Throws
// std::invalid_argument if given an invalid base URL.
@ -115,6 +125,11 @@ struct community {
// Throw std::invalid_argument if anything in the URL is unparseable or invalid.
static std::tuple<std::string, std::string, ustring> parse_full_url(std::string_view full_url);
// Takes a full or partial room URL (partial here meaning missing the ?public_key=...) and
// splits it up into canonical url, room, and (if present) pubkey.
static std::tuple<std::string, std::string, std::optional<ustring>> parse_partial_url(
std::string_view url);
protected:
// The canonical base url and room (i.e. lower-cased, URL cleaned up):
std::string base_url_, room_;

View File

@ -28,7 +28,7 @@ typedef struct contacts_contact {
int priority;
CONVO_EXPIRATION_MODE exp_mode;
int exp_minutes;
int exp_seconds;
} contacts_contact;

View File

@ -38,7 +38,7 @@ namespace session::config {
/// e - Disappearing messages expiration type. Omitted if disappearing messages are not enabled
/// for the conversation with this contact; 1 for delete-after-send, and 2 for
/// delete-after-read.
/// E - Disappearing message timer, in minutes. Omitted when `e` is omitted.
/// E - Disappearing message timer, in seconds. Omitted when `e` is omitted.
/// Struct containing contact info.
struct contact_info {
@ -56,7 +56,7 @@ struct contact_info {
int priority = 0; // If >0 then this message is pinned; higher values mean higher priority
// (i.e. pinned earlier in the pinned list).
expiration_mode exp_mode = expiration_mode::none; // The expiry time; none if not expiring.
std::chrono::minutes exp_timer{0}; // The expiration timer (in minutes)
std::chrono::seconds exp_timer{0}; // The expiration timer (in seconds)
explicit contact_info(std::string sid);
@ -132,7 +132,7 @@ class Contacts : public ConfigBase {
void set_expiry(
std::string_view session_id,
expiration_mode exp_mode,
std::chrono::minutes expiration_timer = 0min);
std::chrono::seconds expiration_timer = 0min);
/// Removes a contact, if present. Returns true if it was found and removed, false otherwise.
/// Note that this removes all fields related to a contact, even fields we do not know about.

View File

@ -136,7 +136,7 @@ class ConvoInfoVolatile : public ConfigBase {
static constexpr auto PRUNE_HIGH = 45 * 24h;
/// Overrides push() to prune stale last-read values before we do the push.
std::pair<ustring, seqno_t> push() override;
std::tuple<seqno_t, ustring, std::vector<std::string>> push() override;
/// Looks up and returns a contact by session ID (hex). Returns nullopt if the session ID was
/// not found, otherwise returns a filled out `convo::one_to_one`.
@ -148,6 +148,10 @@ class ConvoInfoVolatile : public ConfigBase {
std::optional<convo::community> get_community(
std::string_view base_url, std::string_view room) const;
/// Shortcut for calling community::parse_partial_url then calling the above with the base url
/// and room. The URL is not required to contain the pubkey (if present it will be ignored).
std::optional<convo::community> get_community(std::string_view partial_url) const;
/// Looks up and returns a legacy group conversation by ID. The ID looks like a hex Session ID,
/// but isn't really a Session ID. Returns nullopt if there is no record of the group
/// conversation.
@ -172,6 +176,9 @@ class ConvoInfoVolatile : public ConfigBase {
convo::community get_or_construct_community(
std::string_view base_url, std::string_view room, ustring_view pubkey) const;
// Shortcut for calling community::parse_full_url then calling the above
convo::community get_or_construct_community(std::string_view full_url) const;
/// Inserts or replaces existing conversation info. For example, to update a 1-to-1
/// conversation last read time you would do:
///

View File

@ -10,12 +10,9 @@ extern "C" {
// Maximum length of a group name, in bytes
extern const size_t GROUP_NAME_MAX_LENGTH;
// Maximum length of a community full URL
extern const size_t COMMUNITY_URL_MAX_LENGTH;
// Maximum length of a community room token
extern const size_t COMMUNITY_ROOM_MAX_LENGTH;
/// Struct holding legacy group info; this struct owns allocated memory and *must* be freed via
/// either `ugroups_legacy_group_free()` or `user_groups_set_free_legacy_group()` when finished with
/// it.
typedef struct ugroups_legacy_group_info {
char session_id[67]; // in hex; 66 hex chars + null terminator.
@ -32,6 +29,10 @@ typedef struct ugroups_legacy_group_info {
bool hidden; // true if hidden from the convo list
int priority; // pinned message priority; 0 = unpinned, larger means pinned higher (i.e. higher
// priority conversations come first).
// For members use the ugroups_legacy_group_members and associated calls.
void* _internal; // Internal storage, do not touch.
} ugroups_legacy_group_info;
typedef struct ugroups_community_info {
@ -82,35 +83,99 @@ bool user_groups_get_or_construct_community(
const char* room,
unsigned const char* pubkey) __attribute__((warn_unused_result));
/// Fills `group` with the conversation info given a legacy group ID (specified as a null-terminated
/// hex string), if the conversation exists, and returns true. If the conversation does not exist
/// then `group` is left unchanged and false is returned.
bool user_groups_get_legacy_group(
const config_object* conf, ugroups_legacy_group_info* group, const char* id)
/// Returns a ugroups_legacy_group_info pointer containing the conversation info for a given legacy
/// group ID (specified as a null-terminated hex string), if the conversation exists. If the
/// conversation does not exist, returns NULL.
///
/// The returned pointer *must* be freed either by calling `ugroups_legacy_group_free()` when done
/// with it, or by passing it to `user_groups_set_free_legacy_group()`.
ugroups_legacy_group_info* user_groups_get_legacy_group(const config_object* conf, const char* id)
__attribute__((warn_unused_result));
/// Same as the above except that when the conversation does not exist, this sets all the group
/// fields to defaults and loads it with the given id.
///
/// Returns true as long as it is given a valid legacy group id (i.e. same format as a session
/// id). A false return is considered an error, and means the id was not a valid session id.
/// Returns a ugroups_legacy_group_info as long as it is given a valid legacy group id (i.e. same
/// format as a session id); it will return NULL only if the given id is invalid (and so the caller
/// needs to either pre-validate the id, or post-validate the return value).
///
/// The returned pointer *must* be freed either by calling `ugroups_legacy_group_free()` when done
/// with it, or by passing it to `user_groups_set_free_legacy_group()`.
///
/// This is the method that should usually be used to create or update a conversation, followed by
/// setting fields in the group, and then giving it to user_groups_set().
bool user_groups_get_or_construct_legacy_group(
const config_object* conf, ugroups_legacy_group_info* group, const char* id)
__attribute__((warn_unused_result));
ugroups_legacy_group_info* user_groups_get_or_construct_legacy_group(
const config_object* conf, const char* id) __attribute__((warn_unused_result));
/// Adds or updates a conversation from the given group info
/// Properly frees memory associated with a ugroups_legacy_group_info pointer (as returned by
/// get_legacy_group/get_or_construct_legacy_group).
void ugroups_legacy_group_free(ugroups_legacy_group_info* group);
/// Adds or updates a community conversation from the given group info
void user_groups_set_community(config_object* conf, const ugroups_community_info* group);
/// Adds or updates a legacy group conversation from the given info. This version of the method should
/// only be used when you explicitly want the `group` to remain valid; if the set is the last thing
/// you need to do with it (which is common) it is more efficient to call the freeing version,
/// below.
void user_groups_set_legacy_group(config_object* conf, const ugroups_legacy_group_info* group);
/// Same as above, except that this also frees the pointer for you, which is commonly what is wanted
/// when updating fields. This is equivalent to, but more efficient than, setting and then freeing.
void user_groups_set_free_legacy_group(config_object* conf, ugroups_legacy_group_info* group);
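To make the ownership rules above concrete, a hedged Swift-side sketch (the `conf` pointer and `legacyGroupId` hex string are assumed to be in scope, and only fields visible in this header are touched):

    // The returned pointer is owned by the caller...
    guard let group: UnsafeMutablePointer<ugroups_legacy_group_info> =
        user_groups_get_or_construct_legacy_group(conf, legacyGroupId)
    else { return }

    group.pointee.priority = 1      // pin the conversation
    group.pointee.hidden = false

    // ...and handing it to the "free" variant both stores the update and releases the memory
    user_groups_set_free_legacy_group(conf, group)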
/// Erases a conversation from the conversation list. Returns true if the conversation was found
/// and removed, false if the conversation was not present. You must not call this during
/// iteration; see details below.
bool user_groups_erase_community(config_object* conf, const char* base_url, const char* room);
bool user_groups_erase_legacy_group(config_object* conf, const char* group_id);
typedef struct ugroups_legacy_members_iterator ugroups_legacy_members_iterator;
/// Group member iteration; this lets you walk through the full group member list. Example usage:
///
/// const char* session_id;
/// bool admin;
/// ugroups_legacy_members_iterator* it = ugroups_legacy_members_begin(legacy_info);
/// while (ugroups_legacy_members_next(it, &session_id, &admin)) {
/// if (admin)
/// printf("ADMIN: %s", session_id);
/// }
/// ugroups_legacy_members_free(it);
///
ugroups_legacy_members_iterator* ugroups_legacy_members_begin(ugroups_legacy_group_info* group);
bool ugroups_legacy_members_next(
ugroups_legacy_members_iterator* it, const char** session_id, bool* admin);
void ugroups_legacy_members_free(ugroups_legacy_members_iterator* it);
/// This erases the group member at the current iteration location during a member iteration,
/// allowing iteration to continue.
///
/// Example:
///
/// while (ugroups_legacy_members_next(it, &sid, &admin)) {
/// if (should_remove(sid))
/// ugroups_legacy_members_erase(it);
/// }
void ugroups_legacy_members_erase(ugroups_legacy_members_iterator* it);
/// Adds a member (by session id and admin status) to this group. Returns true if the member was
/// inserted or had the admin status changed, false if the member already existed with the given
/// status, or if the session_id is not valid.
bool ugroups_legacy_member_add(
ugroups_legacy_group_info* group, const char* session_id, bool admin);
/// Removes a member (including admins) from the group given the member's session id. This is not
/// safe to use on the current member during member iteration; for that see the above method
/// instead. Returns true if the session id was found and removed, false if not found.
bool ugroups_legacy_member_remove(ugroups_legacy_group_info* group, const char* session_id);
/// Accesses the number of members in the group. The overall number is returned (both admins and
/// non-admins); if the given variables are not NULL, they will be populated with the individual
/// counts of members/admins.
size_t ugroups_legacy_members_count(
const ugroups_legacy_group_info* group, size_t* members, size_t* admins);
/// Returns the number of conversations.
size_t user_groups_size(const config_object* conf);
/// Returns the number of conversations of the specific type.

View File

@ -27,7 +27,7 @@ namespace session::config {
/// K - encryption secret key (32 bytes). Optional.
/// m - set of member session ids (each 33 bytes).
/// a - set of admin session ids (each 33 bytes).
/// E - disappearing messages duration, in minutes, > 0. Omitted if disappearing messages is
/// E - disappearing messages duration, in seconds, > 0. Omitted if disappearing messages is
/// disabled. (Note that legacy groups only support expire after-read)
/// h - hidden: 1 if the conversation has been removed from the conversation list, omitted if
/// visible.
@ -60,7 +60,7 @@ struct legacy_group_info {
// set to an empty string.
ustring enc_pubkey; // bytes (32 or empty)
ustring enc_seckey; // bytes (32 or empty)
std::chrono::minutes disappearing_timer{0}; // 0 == disabled.
std::chrono::seconds disappearing_timer{0}; // 0 == disabled.
bool hidden = false; // true if the conversation is hidden from the convo list
int priority = 0; // The priority; 0 means unpinned, larger means pinned higher (i.e.
// higher priority conversations come first).
@ -88,7 +88,9 @@ struct legacy_group_info {
// Internal ctor/method for C API implementations:
legacy_group_info(const struct ugroups_legacy_group_info& c); // From c struct
void into(struct ugroups_legacy_group_info& c) const; // Into c struct
legacy_group_info(struct ugroups_legacy_group_info&& c); // From c struct
void into(struct ugroups_legacy_group_info& c) const&; // Copy into c struct
void into(struct ugroups_legacy_group_info& c) &&; // Move into c struct
private:
// session_id => (is admin)
@ -96,6 +98,12 @@ struct legacy_group_info {
friend class UserGroups;
// Private implementations of the to/from C struct methods
struct impl_t {};
static constexpr inline impl_t impl{};
legacy_group_info(const struct ugroups_legacy_group_info& c, impl_t);
void into(struct ugroups_legacy_group_info& c, impl_t) const;
void load(const dict& info_dict);
};
@ -154,6 +162,10 @@ class UserGroups : public ConfigBase {
std::optional<community_info> get_community(
std::string_view base_url, std::string_view room) const;
/// Looks up a community from a full URL. It is permitted for the URL to omit the pubkey (it
/// is not used or needed by this call).
std::optional<community_info> get_community(std::string_view partial_url) const;
/// Looks up and returns a legacy group by group ID (hex, looks like a Session ID). Returns
/// nullopt if the group was not found, otherwise returns a filled out `legacy_group_info`.
std::optional<legacy_group_info> get_legacy_group(std::string_view pubkey_hex) const;
@ -177,6 +189,8 @@ class UserGroups : public ConfigBase {
std::string_view pubkey_encoded) const;
community_info get_or_construct_community(
std::string_view base_url, std::string_view room, ustring_view pubkey) const;
/// Shortcut to pass the url through community::parse_full_url, then call the above.
community_info get_or_construct_community(std::string_view full_url) const;
/// Gets or constructs a blank legacy_group_info for the given group id.
legacy_group_info get_or_construct_legacy_group(std::string_view pubkey_hex) const;

View File

@ -50,6 +50,12 @@ user_profile_pic user_profile_get_pic(const config_object* conf);
// Sets the user profile pic
int user_profile_set_pic(config_object* conf, user_profile_pic pic);
// Gets the current note-to-self priority level. Will always be >= 0.
int user_profile_get_nts_priority(const config_object* conf);
// Sets the current note-to-self priority level. Should be >= 0 (negatives will be set to 0).
void user_profile_set_nts_priority(config_object* conf, int priority);
#ifdef __cplusplus
} // extern "C"
#endif
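A minimal sketch of how these two accessors would be exercised from Swift, assuming a user-profile `conf` that has already been initialised (the wrapper function is hypothetical; only the two C calls are from this change):

import SessionUtil

// Hypothetical wrapper; `conf` must already point at an initialised user profile config.
func bumpNoteToSelfPriority(_ conf: UnsafeMutablePointer<config_object>?) -> Int32 {
    // Negative values are clamped to 0 by the setter (per the comment above)
    user_profile_set_nts_priority(conf, 1)

    // Always returns >= 0
    return user_profile_get_nts_priority(conf)
}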

View File

@ -14,6 +14,8 @@ namespace session::config {
/// n - user profile name
/// p - user profile url
/// q - user profile decryption key (binary)
/// + - the priority value for the "Note to Self" pseudo-conversation (higher = higher in the
/// conversation list). Omitted when 0.
class UserProfile final : public ConfigBase {
@ -52,6 +54,12 @@ class UserProfile final : public ConfigBase {
/// one is empty.
void set_profile_pic(std::string_view url, ustring_view key);
void set_profile_pic(profile_pic pic);
/// Gets/sets the Note-to-self conversation priority. Will always be >= 0.
int get_nts_priority() const;
/// Sets the Note-to-self conversation priority. Should be >= 0 (negatives will be set to 0).
void set_nts_priority(int priority);
};
} // namespace session::config

View File

@ -42,8 +42,8 @@ extension ConfigurationMessage {
.filter(OpenGroup.Columns.roomToken != "")
.filter(OpenGroup.Columns.isActive)
.fetchAll(db)
.map { openGroup in
OpenGroup.urlFor(
.compactMap { openGroup in
SessionUtil.communityUrlFor(
server: openGroup.server,
roomToken: openGroup.roomToken,
publicKey: openGroup.publicKey

View File

@ -18,6 +18,14 @@ public extension Message {
)
case openGroupInbox(server: String, openGroupPublicKey: String, blindedPublicKey: String)
public var defaultNamespace: SnodeAPI.Namespace? {
switch self {
case .contact: return .`default`
case .closedGroup: return .legacyClosedGroup
default: return nil
}
}
public static func from(
_ db: Database,
thread: SessionThread,
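The new `defaultNamespace` is what the updated send paths later in this diff pass alongside the destination; community destinations have no snode namespace, which is why the value is optional. A small sketch of the resolution step (the wrapper function and imports are assumptions; the `from(_:thread:)`/`defaultNamespace` calls are the ones shown at the updated call sites):

import GRDB
import SessionMessagingKit
import SessionSnodeKit

// Hypothetical helper mirroring the pattern used at the updated send call sites.
func resolveDefaultNamespace(_ db: Database, thread: SessionThread) throws -> SnodeAPI.Namespace? {
    // .contact -> .default, .closedGroup -> .legacyClosedGroup, everything else -> nil
    return try Message.Destination.from(db, thread: thread).defaultNamespace
}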

View File

@ -207,7 +207,7 @@ public final class OpenGroupManager {
roomToken: String,
server: String,
publicKey: String,
calledFromConfigHandling: Bool = false,
calledFromConfigHandling: Bool,
dependencies: OGMDependencies = OGMDependencies()
) -> AnyPublisher<Void, Error> {
// If we are currently polling for this server and already have a TSGroupThread for this room then do nothing
@ -231,7 +231,15 @@ public final class OpenGroupManager {
// Optionally try to insert a new version of the OpenGroup (it will fail if there is already an
// inactive one but that won't matter as we then activate it)
_ = try? SessionThread.fetchOrCreate(db, id: threadId, variant: .community)
_ = try? SessionThread.filter(id: threadId).updateAll(db, SessionThread.Columns.shouldBeVisible.set(to: true))
// If we didn't add this open group via config handling then flag it to be visible (if it did
// come via config handling then we want to wait until it actually has messages before making
// it visible)
if !calledFromConfigHandling {
_ = try? SessionThread
.filter(id: threadId)
.updateAll(db, SessionThread.Columns.shouldBeVisible.set(to: true))
}
if (try? OpenGroup.exists(db, id: threadId)) == false {
try? OpenGroup
@ -241,18 +249,36 @@ public final class OpenGroupManager {
// Set the group to active and reset the sequenceNumber (handle groups which have
// been deactivated)
_ = try? OpenGroup
.filter(id: OpenGroup.idFor(roomToken: roomToken, server: targetServer))
.updateAll(
db,
OpenGroup.Columns.isActive.set(to: true),
OpenGroup.Columns.sequenceNumber.set(to: 0)
)
if calledFromConfigHandling {
_ = try? OpenGroup
.filter(id: OpenGroup.idFor(roomToken: roomToken, server: targetServer))
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
OpenGroup.Columns.isActive.set(to: true),
OpenGroup.Columns.sequenceNumber.set(to: 0)
)
}
else {
_ = try? OpenGroup
.filter(id: OpenGroup.idFor(roomToken: roomToken, server: targetServer))
.updateAllAndConfig(
db,
OpenGroup.Columns.isActive.set(to: true),
OpenGroup.Columns.sequenceNumber.set(to: 0)
)
}
// We want to avoid blocking the db write thread so we dispatch the API call to a different thread
//
// Note: We don't do this after the db commit as it can fail (resulting in endless loading)
return Deferred {
/// We want to avoid blocking the db write thread so we return a future which resolves once the db transaction completes
/// and dispatches the result to another queue; this means that the caller can respond to errors resulting from attempting to
/// join the community
return Future<Void, Error> { resolver in
db.afterNextTransactionNested { _ in
OpenGroupAPI.workQueue.async {
resolver(Result.success(()))
}
}
}
.flatMap { _ in
dependencies.storage
.readPublisherFlatMap(receiveOn: OpenGroupAPI.workQueue) { db in
// Note: The initial request for room info and its capabilities should NOT be
@ -272,9 +298,14 @@ public final class OpenGroupManager {
.flatMap { response -> Future<Void, Error> in
Future<Void, Error> { resolver in
dependencies.storage.write { db in
// Enqueue a config sync job (have a newly added open group to sync)
// Add the new open group to libSession
if !calledFromConfigHandling {
ConfigurationSyncJob.enqueue(db)
try SessionUtil.add(
db,
server: server,
rootToken: roomToken,
publicKey: publicKey
)
}
// Store the capabilities first
@ -309,12 +340,22 @@ public final class OpenGroupManager {
.eraseToAnyPublisher()
}
public func delete(_ db: Database, openGroupId: String, dependencies: OGMDependencies = OGMDependencies()) {
public func delete(
_ db: Database,
openGroupId: String,
calledFromConfigHandling: Bool,
dependencies: OGMDependencies = OGMDependencies()
) {
let server: String? = try? OpenGroup
.select(.server)
.filter(id: openGroupId)
.asRequest(of: String.self)
.fetchOne(db)
let roomToken: String? = try? OpenGroup
.select(.roomToken)
.filter(id: openGroupId)
.asRequest(of: String.self)
.fetchOne(db)
// Stop the poller if needed
//
@ -348,13 +389,17 @@ public final class OpenGroupManager {
// If it's a session-run room then just set it to inactive
_ = try? OpenGroup
.filter(id: openGroupId)
.updateAll(db, OpenGroup.Columns.isActive.set(to: false))
.updateAllAndConfig(db, OpenGroup.Columns.isActive.set(to: false))
}
// Remove the thread and associated data
_ = try? SessionThread
.filter(id: openGroupId)
.deleteAll(db)
if !calledFromConfigHandling, let server: String = server, let roomToken: String = roomToken {
try? SessionUtil.remove(db, server: server, roomToken: roomToken)
}
}
// MARK: - Response Processing
@ -405,43 +450,34 @@ public final class OpenGroupManager {
// Only update the database columns which have changed (this is to prevent the UI from triggering
// updates due to changing database columns to the existing value)
let permissions = OpenGroup.Permissions(roomInfo: pollInfo)
let hasDetails: Bool = (pollInfo.details != nil)
let permissions: OpenGroup.Permissions = OpenGroup.Permissions(roomInfo: pollInfo)
let changes: [ConfigColumnAssignment] = []
.appending(openGroup.publicKey == maybePublicKey ? nil :
maybePublicKey.map { OpenGroup.Columns.publicKey.set(to: $0) }
)
.appending(openGroup.userCount == pollInfo.activeUsers ? nil :
OpenGroup.Columns.userCount.set(to: pollInfo.activeUsers)
)
.appending(openGroup.permissions == permissions ? nil :
OpenGroup.Columns.permissions.set(to: permissions)
)
.appending(!hasDetails || openGroup.name == pollInfo.details?.name ? nil :
OpenGroup.Columns.name.set(to: pollInfo.details?.name)
)
.appending(!hasDetails || openGroup.roomDescription == pollInfo.details?.roomDescription ? nil :
OpenGroup.Columns.roomDescription.set(to: pollInfo.details?.roomDescription)
)
.appending(!hasDetails || openGroup.imageId == pollInfo.details?.imageId ? nil :
OpenGroup.Columns.imageId.set(to: pollInfo.details?.imageId)
)
.appending(!hasDetails || openGroup.infoUpdates == pollInfo.details?.infoUpdates ? nil :
OpenGroup.Columns.infoUpdates.set(to: pollInfo.details?.infoUpdates)
)
try OpenGroup
.filter(id: openGroup.id)
.updateAll(
db,
[
(openGroup.publicKey != maybePublicKey ?
maybePublicKey.map { OpenGroup.Columns.publicKey.set(to: $0) } :
nil
),
(pollInfo.details != nil && openGroup.name != pollInfo.details?.name ?
(pollInfo.details?.name).map { OpenGroup.Columns.name.set(to: $0) } :
nil
),
(pollInfo.details != nil && openGroup.roomDescription != pollInfo.details?.roomDescription ?
(pollInfo.details?.roomDescription).map { OpenGroup.Columns.roomDescription.set(to: $0) } :
nil
),
(pollInfo.details != nil && openGroup.imageId != pollInfo.details?.imageId ?
(pollInfo.details?.imageId).map { OpenGroup.Columns.imageId.set(to: $0) } :
nil
),
(openGroup.userCount != pollInfo.activeUsers ?
OpenGroup.Columns.userCount.set(to: pollInfo.activeUsers) :
nil
),
(pollInfo.details != nil && openGroup.infoUpdates != pollInfo.details?.infoUpdates ?
(pollInfo.details?.infoUpdates).map { OpenGroup.Columns.infoUpdates.set(to: $0) } :
nil
),
(openGroup.permissions != permissions ?
OpenGroup.Columns.permissions.set(to: permissions) :
nil
)
].compactMap { $0 }
)
.updateAllAndConfig(db, changes)
// Update the admin/moderator group members
if let roomDetails: OpenGroupAPI.Room = pollInfo.details {
@ -1120,34 +1156,6 @@ public final class OpenGroupManager {
return publisher
}
public static func parseOpenGroup(from string: String) -> (room: String, server: String, publicKey: String)? {
guard let url = URL(string: string), let host = url.host ?? given(string.split(separator: "/").first, { String($0) }), let query = url.query else { return nil }
// Inputs that should work:
// https://sessionopengroup.co/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// https://sessionopengroup.co/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// http://sessionopengroup.co/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// http://sessionopengroup.co/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// sessionopengroup.co/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c (does NOT go to HTTPS)
// sessionopengroup.co/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c (does NOT go to HTTPS)
// https://143.198.213.225:443/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// https://143.198.213.225:443/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// 143.198.213.255:80/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
// 143.198.213.255:80/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c
let useTLS = (url.scheme == "https")
// If there is no scheme then the host is included in the path (so handle that case)
let hostFreePath = (url.host != nil || !url.path.starts(with: host) ? url.path : url.path.substring(from: host.count))
let updatedPath = (hostFreePath.starts(with: "/r/") ? hostFreePath.substring(from: 2) : hostFreePath)
let room = String(updatedPath.dropFirst()) // Drop the leading slash
let queryParts = query.split(separator: "=")
guard !room.isEmpty && !room.contains("/"), queryParts.count == 2, queryParts[0] == "public_key" else { return nil }
let publicKey = String(queryParts[1])
guard publicKey.count == 64 && Hex.isValid(publicKey) else { return nil }
var server = (useTLS ? "https://" : "http://") + host
if let port = url.port { server += ":\(port)" }
return (room: room, server: server, publicKey: publicKey)
}
}
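Both `add` and `delete` now take an explicit `calledFromConfigHandling` flag: when the change originates from a libSession config merge the flag is `true`, which skips the visibility update, uses plain `updateAll` instead of `updateAllAndConfig`, and suppresses the `SessionUtil.add`/`remove` calls so the merge is not echoed straight back into the config. A rough sketch of the two call shapes (the wrapper functions are hypothetical; the `add`/`delete` signatures are the ones shown above):

import Combine
import GRDB
import SessionMessagingKit

// Hypothetical wrappers assuming the caller already holds a Database write connection
// and a parsed community URL (room/server/publicKey).
func joinCommunity(
    _ db: Database,
    room: String,
    server: String,
    publicKey: String
) -> AnyPublisher<Void, Error> {
    // User-initiated join: not from config handling, so the room is flagged visible
    // immediately and the addition is written back into libSession
    return OpenGroupManager.shared.add(
        db,
        roomToken: room,
        server: server,
        publicKey: publicKey,
        calledFromConfigHandling: false
    )
}

func removeCommunityFromConfigMerge(_ db: Database, openGroupId: String) {
    // Config-driven removal: pass `true` so the delete isn't pushed back into the
    // user groups config (the merge that triggered it is the source of truth)
    OpenGroupManager.shared.delete(
        db,
        openGroupId: openGroupId,
        calledFromConfigHandling: true
    )
}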

View File

@ -206,6 +206,7 @@ extension MessageReceiver {
sentTimestampMs: nil // Explicitly nil as it's a separate message from above
),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread).defaultNamespace,
interactionId: nil // Explicitly nil as it's a separate message from above
)
)

View File

@ -196,7 +196,7 @@ extension MessageReceiver {
// Open groups
for openGroupURL in message.openGroups {
if let (room, server, publicKey) = OpenGroupManager.parseOpenGroup(from: openGroupURL) {
if let (room, server, publicKey) = SessionUtil.parseCommunity(url: openGroupURL) {
OpenGroupManager.shared
.add(
db,

View File

@ -604,7 +604,12 @@ extension MessageSender {
receiveCompletion: { result in
switch result {
case .failure: break
case .finished: try? ClosedGroup.removeKeysAndUnsubscribe(threadId: groupPublicKey)
case .finished:
try? ClosedGroup.removeKeysAndUnsubscribe(
threadId: groupPublicKey,
removeGroupData: false,
calledFromConfigHandling: false
)
}
}
)

View File

@ -80,6 +80,7 @@ extension MessageSender {
db,
message: VisibleMessage.from(db, interaction: interaction),
to: try Message.Destination.from(db, thread: thread),
namespace: try Message.Destination.from(db, thread: thread).defaultNamespace,
interactionId: interactionId
)
}
@ -95,14 +96,8 @@ extension MessageSender {
.eraseToAnyPublisher()
}
// Ensure we have the rest of the required data
guard let destination: Message.Destination = preparedSendData.destination else {
return Fail<PreparedSendData, Error>(error: MessageSenderError.invalidMessage)
.eraseToAnyPublisher()
}
let threadId: String = {
switch destination {
switch preparedSendData.destination {
case .contact(let publicKey): return publicKey
case .closedGroup(let groupPublicKey): return groupPublicKey
case .openGroup(let roomToken, let server, _, _, _):

View File

@ -12,9 +12,10 @@ public final class MessageSender {
public struct PreparedSendData {
let shouldSend: Bool
let destination: Message.Destination
let namespace: SnodeAPI.Namespace?
let message: Message?
let destination: Message.Destination?
let interactionId: Int64?
let isSyncMessage: Bool?
let totalAttachmentsUploaded: Int
@ -26,7 +27,8 @@ public final class MessageSender {
private init(
shouldSend: Bool,
message: Message?,
destination: Message.Destination?,
destination: Message.Destination,
namespace: SnodeAPI.Namespace?,
interactionId: Int64?,
isSyncMessage: Bool?,
totalAttachmentsUploaded: Int = 0,
@ -38,6 +40,7 @@ public final class MessageSender {
self.message = message
self.destination = destination
self.namespace = namespace
self.interactionId = interactionId
self.isSyncMessage = isSyncMessage
self.totalAttachmentsUploaded = totalAttachmentsUploaded
@ -47,25 +50,11 @@ public final class MessageSender {
self.ciphertext = ciphertext
}
// The default constructor creates an instance that doesn't actually send a message
fileprivate init() {
self.shouldSend = false
self.message = nil
self.destination = nil
self.interactionId = nil
self.isSyncMessage = nil
self.totalAttachmentsUploaded = 0
self.snodeMessage = nil
self.plaintext = nil
self.ciphertext = nil
}
/// This should be used to send a message to one-to-one or closed group conversations
fileprivate init(
message: Message,
destination: Message.Destination,
namespace: SnodeAPI.Namespace,
interactionId: Int64?,
isSyncMessage: Bool?,
snodeMessage: SnodeMessage
@ -74,6 +63,7 @@ public final class MessageSender {
self.message = message
self.destination = destination
self.namespace = namespace
self.interactionId = interactionId
self.isSyncMessage = isSyncMessage
self.totalAttachmentsUploaded = 0
@ -94,6 +84,7 @@ public final class MessageSender {
self.message = message
self.destination = destination
self.namespace = nil
self.interactionId = interactionId
self.isSyncMessage = false
self.totalAttachmentsUploaded = 0
@ -114,6 +105,7 @@ public final class MessageSender {
self.message = message
self.destination = destination
self.namespace = nil
self.interactionId = interactionId
self.isSyncMessage = false
self.totalAttachmentsUploaded = 0
@ -129,7 +121,8 @@ public final class MessageSender {
return PreparedSendData(
shouldSend: shouldSend,
message: message,
destination: destination?.with(fileIds: fileIds),
destination: destination.with(fileIds: fileIds),
namespace: namespace,
interactionId: interactionId,
isSyncMessage: isSyncMessage,
totalAttachmentsUploaded: fileIds.count,
@ -144,6 +137,7 @@ public final class MessageSender {
_ db: Database,
message: Message,
to destination: Message.Destination,
namespace: SnodeAPI.Namespace?,
interactionId: Int64?,
isSyncMessage: Bool = false,
using dependencies: SMKDependencies = SMKDependencies()
@ -165,6 +159,7 @@ public final class MessageSender {
db,
message: updatedMessage,
to: destination,
namespace: namespace,
interactionId: interactionId,
userPublicKey: currentUserPublicKey,
messageSendTimestamp: messageSendTimestamp,
@ -199,6 +194,7 @@ public final class MessageSender {
_ db: Database,
message: Message,
to destination: Message.Destination,
namespace: SnodeAPI.Namespace?,
interactionId: Int64?,
userPublicKey: String,
messageSendTimestamp: Int64,
@ -215,7 +211,7 @@ public final class MessageSender {
}()
// Validate the message
guard message.isValid else {
guard message.isValid, let namespace: SnodeAPI.Namespace = namespace else {
throw MessageSender.handleFailedMessageSend(
db,
message: message,
@ -225,8 +221,8 @@ public final class MessageSender {
)
}
// Attach the user's profile if needed (no need to do so for 'Note to Self' or sync messages as they
// will be managed by the user config handling
// Attach the user's profile if needed (no need to do so for 'Note to Self' or sync
// messages as they will be managed by the user config handling
let isSelfSend: Bool = (message.recipient == userPublicKey)
if !isSelfSend, !isSyncMessage, var messageWithProfile: MessageWithProfile = message as? MessageWithProfile {
@ -356,6 +352,7 @@ public final class MessageSender {
return PreparedSendData(
message: message,
destination: destination,
namespace: namespace,
interactionId: interactionId,
isSyncMessage: isSyncMessage,
snodeMessage: snodeMessage
@ -616,9 +613,6 @@ public final class MessageSender {
case .contact, .closedGroup: return sendToSnodeDestination(data: preparedSendData, using: dependencies)
case .openGroup: return sendToOpenGroupDestination(data: preparedSendData, using: dependencies)
case .openGroupInbox: return sendToOpenGroupInbox(data: preparedSendData, using: dependencies)
case .none:
return Fail(error: MessageSenderError.invalidMessage)
.eraseToAnyPublisher()
}
}
@ -630,7 +624,7 @@ public final class MessageSender {
) -> AnyPublisher<Void, Error> {
guard
let message: Message = data.message,
let destination: Message.Destination = data.destination,
let namespace: SnodeAPI.Namespace = data.namespace,
let isSyncMessage: Bool = data.isSyncMessage,
let snodeMessage: SnodeMessage = data.snodeMessage
else {
@ -641,12 +635,7 @@ public final class MessageSender {
return SnodeAPI
.sendMessage(
snodeMessage,
in: {
switch destination {
case .closedGroup: return .legacyClosedGroup
default: return .`default`
}
}()
in: namespace
)
.subscribe(on: DispatchQueue.global(qos: .default))
.flatMap { response -> AnyPublisher<Bool, Error> in
@ -676,7 +665,7 @@ public final class MessageSender {
try MessageSender.handleSuccessfulMessageSend(
db,
message: updatedMessage,
to: destination,
to: data.destination,
interactionId: data.interactionId,
isSyncMessage: isSyncMessage,
using: dependencies
@ -755,8 +744,7 @@ public final class MessageSender {
) -> AnyPublisher<Void, Error> {
guard
let message: Message = data.message,
let destination: Message.Destination = data.destination,
case .openGroup(let roomToken, let server, let whisperTo, let whisperMods, let fileIds) = destination,
case .openGroup(let roomToken, let server, let whisperTo, let whisperMods, let fileIds) = data.destination,
let plaintext: Data = data.plaintext
else {
return Fail(error: MessageSenderError.invalidMessage)
@ -789,7 +777,7 @@ public final class MessageSender {
try MessageSender.handleSuccessfulMessageSend(
db,
message: updatedMessage,
to: destination,
to: data.destination,
interactionId: data.interactionId,
serverTimestampMs: serverTimestampMs,
using: dependencies
@ -824,8 +812,7 @@ public final class MessageSender {
) -> AnyPublisher<Void, Error> {
guard
let message: Message = data.message,
let destination: Message.Destination = data.destination,
case .openGroupInbox(let server, _, let recipientBlindedPublicKey) = destination,
case .openGroupInbox(let server, _, let recipientBlindedPublicKey) = data.destination,
let ciphertext: Data = data.ciphertext
else {
return Fail(error: MessageSenderError.invalidMessage)
@ -854,7 +841,7 @@ public final class MessageSender {
try MessageSender.handleSuccessfulMessageSend(
db,
message: updatedMessage,
to: destination,
to: data.destination,
interactionId: data.interactionId,
serverTimestampMs: UInt64(floor(responseData.posted * 1000)),
using: dependencies

View File

@ -93,6 +93,7 @@ extension OpenGroupAPI {
return dependencies.storage
.readPublisherFlatMap(receiveOn: Threading.pollerQueue) { db -> AnyPublisher<(Int64, PollResponse), Error> in
let failureCount: Int64 = (try? OpenGroup
.filter(OpenGroup.Columns.server == server)
.select(max(OpenGroup.Columns.pollFailureCount))
.asRequest(of: Int64.self)
.fetchOne(db))
@ -176,17 +177,73 @@ extension OpenGroupAPI {
.fetchOne(db)
}
.defaulting(to: 0)
var prunedIds: [String] = []
Storage.shared.writeAsync { db in
struct Info: Decodable, FetchableRecord {
let id: String
let shouldBeVisible: Bool
}
let rooms: [String] = try OpenGroup
.filter(
OpenGroup.Columns.server == server &&
OpenGroup.Columns.isActive == true
)
.select(.roomToken)
.asRequest(of: String.self)
.fetchAll(db)
let roomsAreVisible: [Info] = try SessionThread
.select(.id, .shouldBeVisible)
.filter(
ids: rooms.map {
OpenGroup.idFor(roomToken: $0, server: server)
}
)
.asRequest(of: Info.self)
.fetchAll(db)
// Increase the failure count
try OpenGroup
.filter(OpenGroup.Columns.server == server)
.updateAll(
db,
OpenGroup.Columns.pollFailureCount.set(to: (pollFailureCount + 1))
OpenGroup.Columns.pollFailureCount
.set(to: (pollFailureCount + 1))
)
/// If the polling has failed 10+ times then try to prune any invalid rooms that
/// aren't visible (they would have been added via config messages and will
/// likely always fail but the user has no way to delete them)
guard pollFailureCount > 10 else { return }
prunedIds = roomsAreVisible
.filter { !$0.shouldBeVisible }
.map { $0.id }
prunedIds.forEach { id in
OpenGroupManager.shared.delete(
db,
openGroupId: id,
/// **Note:** We pass `calledFromConfigHandling` as `true`
/// here because we want to avoid syncing this deletion as the room might
/// not be in an invalid state on other devices - one of the other devices
/// will eventually trigger a new config update which will re-add this room
/// and hopefully at that time it'll work again
calledFromConfigHandling: true
)
}
}
SNLog("Open group polling failed due to error: \(error). Setting failure count to \(pollFailureCount).")
// Add a note to the logs that this happened
if !prunedIds.isEmpty {
let rooms: String = prunedIds
.compactMap { $0.components(separatedBy: server).last }
.joined(separator: ", ")
SNLog("Hidden open group failure count surpassed 10, removed hidden rooms \(rooms).")
}
}
self?.isPolling = false

View File

@ -24,7 +24,7 @@ public struct SessionThreadViewModel: FetchableRecordWithRowId, Decodable, Equat
public static let threadIsMessageRequestKey: SQL = SQL(stringLiteral: CodingKeys.threadIsMessageRequest.stringValue)
public static let threadRequiresApprovalKey: SQL = SQL(stringLiteral: CodingKeys.threadRequiresApproval.stringValue)
public static let threadShouldBeVisibleKey: SQL = SQL(stringLiteral: CodingKeys.threadShouldBeVisible.stringValue)
public static let threadIsPinnedKey: SQL = SQL(stringLiteral: CodingKeys.threadIsPinned.stringValue)
public static let threadPinnedPriorityKey: SQL = SQL(stringLiteral: CodingKeys.threadPinnedPriority.stringValue)
public static let threadIsBlockedKey: SQL = SQL(stringLiteral: CodingKeys.threadIsBlocked.stringValue)
public static let threadMutedUntilTimestampKey: SQL = SQL(stringLiteral: CodingKeys.threadMutedUntilTimestamp.stringValue)
public static let threadOnlyNotifyForMentionsKey: SQL = SQL(stringLiteral: CodingKeys.threadOnlyNotifyForMentions.stringValue)
@ -89,7 +89,7 @@ public struct SessionThreadViewModel: FetchableRecordWithRowId, Decodable, Equat
/// This flag indicates whether the thread is an incoming message request
public let threadRequiresApproval: Bool?
public let threadShouldBeVisible: Bool?
public let threadIsPinned: Bool
public let threadPinnedPriority: Int32
public let threadIsBlocked: Bool?
public let threadMutedUntilTimestamp: TimeInterval?
public let threadOnlyNotifyForMentions: Bool?
@ -346,7 +346,7 @@ public extension SessionThreadViewModel {
self.threadIsMessageRequest = false
self.threadRequiresApproval = false
self.threadShouldBeVisible = false
self.threadIsPinned = false
self.threadPinnedPriority = 0
self.threadIsBlocked = nil
self.threadMutedUntilTimestamp = nil
self.threadOnlyNotifyForMentions = nil
@ -412,7 +412,7 @@ public extension SessionThreadViewModel {
threadIsMessageRequest: self.threadIsMessageRequest,
threadRequiresApproval: self.threadRequiresApproval,
threadShouldBeVisible: self.threadShouldBeVisible,
threadIsPinned: self.threadIsPinned,
threadPinnedPriority: self.threadPinnedPriority,
threadIsBlocked: self.threadIsBlocked,
threadMutedUntilTimestamp: self.threadMutedUntilTimestamp,
threadOnlyNotifyForMentions: self.threadOnlyNotifyForMentions,
@ -467,7 +467,7 @@ public extension SessionThreadViewModel {
threadIsMessageRequest: self.threadIsMessageRequest,
threadRequiresApproval: self.threadRequiresApproval,
threadShouldBeVisible: self.threadShouldBeVisible,
threadIsPinned: self.threadIsPinned,
threadPinnedPriority: self.threadPinnedPriority,
threadIsBlocked: self.threadIsBlocked,
threadMutedUntilTimestamp: self.threadMutedUntilTimestamp,
threadOnlyNotifyForMentions: self.threadOnlyNotifyForMentions,
@ -570,7 +570,7 @@ public extension SessionThreadViewModel {
\(thread[.creationDateTimestamp]) AS \(ViewModel.threadCreationDateTimestampKey),
(\(SQL("\(thread[.id]) = \(userPublicKey)"))) AS \(ViewModel.threadIsNoteToSelfKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(contact[.isBlocked]) AS \(ViewModel.threadIsBlockedKey),
\(thread[.mutedUntilTimestamp]) AS \(ViewModel.threadMutedUntilTimestampKey),
\(thread[.onlyNotifyForMentions]) AS \(ViewModel.threadOnlyNotifyForMentionsKey),
@ -840,7 +840,7 @@ public extension SessionThreadViewModel {
) AS \(ViewModel.threadRequiresApprovalKey),
\(thread[.shouldBeVisible]) AS \(ViewModel.threadShouldBeVisibleKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(contact[.isBlocked]) AS \(ViewModel.threadIsBlockedKey),
\(thread[.mutedUntilTimestamp]) AS \(ViewModel.threadMutedUntilTimestampKey),
\(thread[.onlyNotifyForMentions]) AS \(ViewModel.threadOnlyNotifyForMentionsKey),
@ -938,7 +938,7 @@ public extension SessionThreadViewModel {
(\(SQL("\(thread[.id]) = \(userPublicKey)"))) AS \(ViewModel.threadIsNoteToSelfKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(contact[.isBlocked]) AS \(ViewModel.threadIsBlockedKey),
\(thread[.mutedUntilTimestamp]) AS \(ViewModel.threadMutedUntilTimestampKey),
\(thread[.onlyNotifyForMentions]) AS \(ViewModel.threadOnlyNotifyForMentionsKey),
@ -1106,7 +1106,7 @@ public extension SessionThreadViewModel {
\(thread[.creationDateTimestamp]) AS \(ViewModel.threadCreationDateTimestampKey),
(\(SQL("\(thread[.id]) = \(userPublicKey)"))) AS \(ViewModel.threadIsNoteToSelfKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(ViewModel.contactProfileKey).*,
\(ViewModel.closedGroupProfileFrontKey).*,
@ -1243,7 +1243,7 @@ public extension SessionThreadViewModel {
\(groupMemberInfoLiteral).\(ViewModel.threadMemberNamesKey),
(\(SQL("\(thread[.id]) = \(userPublicKey)"))) AS \(ViewModel.threadIsNoteToSelfKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(ViewModel.contactProfileKey).*,
\(ViewModel.closedGroupProfileFrontKey).*,
@ -1590,7 +1590,7 @@ public extension SessionThreadViewModel {
'' AS \(ViewModel.threadMemberNamesKey),
true AS \(ViewModel.threadIsNoteToSelfKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(ViewModel.contactProfileKey).*,
@ -1647,7 +1647,7 @@ public extension SessionThreadViewModel {
(\(SQL("\(thread[.id]) = \(userPublicKey)"))) AS \(ViewModel.threadIsNoteToSelfKey),
\(thread[.isPinned]) AS \(ViewModel.threadIsPinnedKey),
IFNULL(\(thread[.pinnedPriority]), 0) AS \(ViewModel.threadPinnedPriorityKey),
\(contact[.isBlocked]) AS \(ViewModel.threadIsBlockedKey),
\(ViewModel.contactProfileKey).*,
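With `isPinned` deprecated in favour of the new value, the view model now surfaces `threadPinnedPriority` (defaulted to 0 via `IFNULL`); per the priority comment in the user_groups header earlier in this diff, 0 means unpinned and larger values mean pinned higher. A tiny sketch of how a consumer could derive the old boolean from the new value (the extension, property name and import are hypothetical):

import SessionMessagingKit

// Hypothetical convenience: 0 = unpinned, larger = pinned higher.
extension SessionThreadViewModel {
    var derivedThreadIsPinned: Bool { threadPinnedPriority > 0 }
}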

View File

@ -507,7 +507,7 @@ public struct ProfileManager {
) throws {
let isCurrentUser = (publicKey == getUserHexEncodedPublicKey(db, dependencies: dependencies))
let profile: Profile = Profile.fetchOrCreate(id: publicKey)
var profileChanges: [ColumnAssignment] = []
var profileChanges: [ConfigColumnAssignment] = []
// Name
if let name: String = name, !name.isEmpty, name != profile.name {
@ -622,7 +622,7 @@ public struct ProfileManager {
let dedupeIdentifier: String = "AvatarDownload-\(publicKey)-\(targetAvatarUrl ?? "remove")"
db.afterNextTransactionNestedOnce(dedupeIdentifier: dedupeIdentifier) { db in
db.afterNextTransactionNestedOnce(dedupeId: dedupeIdentifier) { db in
// Need to refetch to ensure the db changes have occurred
ProfileManager.downloadAvatar(for: Profile.fetchOrCreate(db, id: publicKey))
}

View File

@ -9,13 +9,13 @@ import Quick
import Nimble
/// This spec is designed to replicate the initial test cases for the libSession-util to ensure the behaviour matches
class ConfigContactsSpec: QuickSpec {
class ConfigContactsSpec {
// MARK: - Spec
override func spec() {
static func spec() {
it("generates Contact configs correctly") {
let seed: Data = Data(hex: "0123456789abcdef0123456789abcdef")
// FIXME: Would be good to move these into the libSession-util instead of using Sodium separately
let identity = try! Identity.generate(from: seed)
var edSK: [UInt8] = identity.ed25519KeyPair.secretKey
@ -24,7 +24,7 @@ class ConfigContactsSpec: QuickSpec {
expect(identity.x25519KeyPair.publicKey.toHexString())
.to(equal("d2ad010eeb72d72e561d9de7bd7b6989af77dcabffa03a5111a6c859ae5c3a72"))
expect(String(edSK.toHexString().prefix(32))).to(equal(seed.toHexString()))
// Initialize a brand new, empty config because we have no dump data to deal with.
let error: UnsafeMutablePointer<CChar>? = nil
var conf: UnsafeMutablePointer<config_object>? = nil
@ -48,21 +48,13 @@ class ConfigContactsSpec: QuickSpec {
expect(contact2.blocked).to(beFalse())
expect(contact2.profile_pic).toNot(beNil()) // Creates an empty instance apparently
expect(String(libSessionVal: contact2.profile_pic.url)).to(beEmpty())
// We don't need to push anything, since this is a default contact
expect(config_needs_push(conf)).to(beFalse())
// And we haven't changed anything so don't need to dump to db
expect(config_needs_dump(conf)).to(beFalse())
var toPush: UnsafeMutablePointer<UInt8>? = nil
var toPushLen: Int = 0
// We don't need to push since we haven't changed anything, so this call is mainly just for
// testing:
let seqno: Int64 = config_push(conf, &toPush, &toPushLen)
expect(toPush).toNot(beNil())
expect(seqno).to(equal(0))
expect(toPushLen).to(equal(256))
toPush?.deallocate()
let pushData1: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData1.pointee.seqno).to(equal(0))
pushData1.deallocate()
// Update the contact data
contact2.name = "Joe".toLibSession()
@ -90,18 +82,21 @@ class ConfigContactsSpec: QuickSpec {
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_dump(conf)).to(beTrue())
var toPush2: UnsafeMutablePointer<UInt8>? = nil
var toPush2Len: Int = 0
let seqno2: Int64 = config_push(conf, &toPush2, &toPush2Len);
// incremented since we made changes (this only increments once between
// dumps; even though we changed multiple fields here).
expect(seqno2).to(equal(1))
toPush2?.deallocate()
let pushData2: UnsafeMutablePointer<config_push_data> = config_push(conf)
// incremented since we made changes (this only increments once between
// dumps; even though we changed multiple fields here).
expect(pushData2.pointee.seqno).to(equal(1))
// Pretend we uploaded it
config_confirm_pushed(conf, seqno2)
let fakeHash1: String = "fakehash1"
var cFakeHash1: [CChar] = fakeHash1.cArray
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash1)
expect(config_needs_push(conf)).to(beFalse())
expect(config_needs_dump(conf)).to(beTrue())
pushData2.deallocate()
// NB: Not going to check encrypted data and decryption here because that's general (not
// specific to contacts) and is covered already in the user profile tests.
@ -118,11 +113,9 @@ class ConfigContactsSpec: QuickSpec {
expect(config_needs_push(conf2)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
var toPush3: UnsafeMutablePointer<UInt8>? = nil
var toPush3Len: Int = 0
let seqno3: Int64 = config_push(conf, &toPush3, &toPush3Len);
expect(seqno3).to(equal(1))
toPush3?.deallocate()
let pushData3: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData3.pointee.seqno).to(equal(1))
pushData3.deallocate()
// Because we just called dump() above, to load up contacts2
expect(config_needs_dump(conf)).to(beFalse())
@ -154,26 +147,26 @@ class ConfigContactsSpec: QuickSpec {
contacts_set(conf2, &contact5)
expect(config_needs_push(conf2)).to(beTrue())
var toPush4: UnsafeMutablePointer<UInt8>? = nil
var toPush4Len: Int = 0
let seqno4: Int64 = config_push(conf2, &toPush4, &toPush4Len);
expect(seqno4).to(equal(2))
let pushData4: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData4.pointee.seqno).to(equal(2))
// Check the merging
var mergeData: [UnsafePointer<UInt8>?] = [UnsafePointer(toPush4)]
var mergeSize: [Int] = [toPush4Len]
expect(config_merge(conf, &mergeData, &mergeSize, 1)).to(equal(1))
config_confirm_pushed(conf2, seqno4)
toPush4?.deallocate()
let fakeHash2: String = "fakehash2"
var cFakeHash2: [CChar] = fakeHash2.cArray
var mergeHashes: [UnsafePointer<CChar>?] = [cFakeHash2].unsafeCopy()
var mergeData: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData4.pointee.config)]
var mergeSize: [Int] = [pushData4.pointee.config_len]
expect(config_merge(conf, &mergeHashes, &mergeData, &mergeSize, 1)).to(equal(1))
config_confirm_pushed(conf2, pushData4.pointee.seqno, &cFakeHash2)
mergeHashes.forEach { $0?.deallocate() }
pushData4.deallocate()
expect(config_needs_push(conf)).to(beFalse())
var toPush5: UnsafeMutablePointer<UInt8>? = nil
var toPush5Len: Int = 0
let seqno5: Int64 = config_push(conf2, &toPush5, &toPush5Len);
expect(seqno5).to(equal(2))
toPush5?.deallocate()
let pushData5: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData5.pointee.seqno).to(equal(2))
pushData5.deallocate()
// Iterate through and make sure we got everything we expected
var sessionIds: [String] = []
var nicknames: [String] = []
@ -182,7 +175,7 @@ class ConfigContactsSpec: QuickSpec {
var contact6: contacts_contact = contacts_contact()
let contactIterator: UnsafeMutablePointer<contacts_iterator> = contacts_iterator_new(conf)
while !contacts_iterator_done(contactIterator, &contact6) {
sessionIds.append(String(libSessionVal: contact6.session_id) ?? "(N/A)")
sessionIds.append(String(libSessionVal: contact6.session_id))
nicknames.append(String(libSessionVal: contact6.nickname, nullIfEmpty: true) ?? "(N/A)")
contacts_iterator_advance(contactIterator)
}
@ -194,9 +187,9 @@ class ConfigContactsSpec: QuickSpec {
expect(sessionIds.last).to(equal(anotherId))
expect(nicknames.first).to(equal("Joey"))
expect(nicknames.last).to(equal("(N/A)"))
// Conflict! Oh no!
// On client 1 delete a contact:
contacts_erase(conf, definitelyRealId)
@ -215,51 +208,63 @@ class ConfigContactsSpec: QuickSpec {
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_push(conf2)).to(beTrue())
var toPush6: UnsafeMutablePointer<UInt8>? = nil
var toPush6Len: Int = 0
let seqno6: Int64 = config_push(conf, &toPush6, &toPush6Len);
expect(seqno6).to(equal(3))
let pushData6: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData6.pointee.seqno).to(equal(3))
var toPush7: UnsafeMutablePointer<UInt8>? = nil
var toPush7Len: Int = 0
let seqno7: Int64 = config_push(conf2, &toPush7, &toPush7Len);
expect(seqno7).to(equal(3))
expect(String(pointer: toPush6, length: toPush6Len, encoding: .ascii))
.toNot(equal(String(pointer: toPush7, length: toPush7Len, encoding: .ascii)))
let pushData7: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData7.pointee.seqno).to(equal(3))
config_confirm_pushed(conf, seqno6)
config_confirm_pushed(conf2, seqno7)
let pushData6Str: String = String(pointer: pushData6.pointee.config, length: pushData6.pointee.config_len, encoding: .ascii)!
let pushData7Str: String = String(pointer: pushData7.pointee.config, length: pushData7.pointee.config_len, encoding: .ascii)!
expect(pushData6Str).toNot(equal(pushData7Str))
expect([String](pointer: pushData6.pointee.obsolete, count: pushData6.pointee.obsolete_len))
.to(equal([fakeHash2]))
expect([String](pointer: pushData7.pointee.obsolete, count: pushData7.pointee.obsolete_len))
.to(equal([fakeHash2]))
var mergeData2: [UnsafePointer<UInt8>?] = [UnsafePointer(toPush7)]
var mergeSize2: [Int] = [toPush7Len]
expect(config_merge(conf, &mergeData2, &mergeSize2, 1)).to(equal(1))
let fakeHash3a: String = "fakehash3a"
var cFakeHash3a: [CChar] = fakeHash3a.cArray
let fakeHash3b: String = "fakehash3b"
var cFakeHash3b: [CChar] = fakeHash3b.cArray
config_confirm_pushed(conf, pushData6.pointee.seqno, &cFakeHash3a)
config_confirm_pushed(conf2, pushData7.pointee.seqno, &cFakeHash3b)
var mergeHashes2: [UnsafePointer<CChar>?] = [cFakeHash3b].unsafeCopy()
var mergeData2: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData7.pointee.config)]
var mergeSize2: [Int] = [pushData7.pointee.config_len]
expect(config_merge(conf, &mergeHashes2, &mergeData2, &mergeSize2, 1)).to(equal(1))
expect(config_needs_push(conf)).to(beTrue())
var mergeData3: [UnsafePointer<UInt8>?] = [UnsafePointer(toPush6)]
var mergeSize3: [Int] = [toPush6Len]
expect(config_merge(conf2, &mergeData3, &mergeSize3, 1)).to(equal(1))
var mergeHashes3: [UnsafePointer<CChar>?] = [cFakeHash3a].unsafeCopy()
var mergeData3: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData6.pointee.config)]
var mergeSize3: [Int] = [pushData6.pointee.config_len]
expect(config_merge(conf2, &mergeHashes3, &mergeData3, &mergeSize3, 1)).to(equal(1))
expect(config_needs_push(conf2)).to(beTrue())
toPush6?.deallocate()
toPush7?.deallocate()
mergeHashes2.forEach { $0?.deallocate() }
mergeHashes3.forEach { $0?.deallocate() }
pushData6.deallocate()
pushData7.deallocate()
var toPush8: UnsafeMutablePointer<UInt8>? = nil
var toPush8Len: Int = 0
let seqno8: Int64 = config_push(conf, &toPush8, &toPush8Len);
expect(seqno8).to(equal(4))
let pushData8: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData8.pointee.seqno).to(equal(4))
var toPush9: UnsafeMutablePointer<UInt8>? = nil
var toPush9Len: Int = 0
let seqno9: Int64 = config_push(conf2, &toPush9, &toPush9Len);
expect(seqno9).to(equal(seqno8))
let pushData9: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData9.pointee.seqno).to(equal(pushData8.pointee.seqno))
expect(String(pointer: toPush8, length: toPush8Len, encoding: .ascii))
.to(equal(String(pointer: toPush9, length: toPush9Len, encoding: .ascii)))
toPush8?.deallocate()
toPush9?.deallocate()
let pushData8Str: String = String(pointer: pushData8.pointee.config, length: pushData8.pointee.config_len, encoding: .ascii)!
let pushData9Str: String = String(pointer: pushData9.pointee.config, length: pushData9.pointee.config_len, encoding: .ascii)!
expect(pushData8Str).to(equal(pushData9Str))
expect([String](pointer: pushData8.pointee.obsolete, count: pushData8.pointee.obsolete_len))
.to(equal([fakeHash3b, fakeHash3a]))
expect([String](pointer: pushData9.pointee.obsolete, count: pushData9.pointee.obsolete_len))
.to(equal([fakeHash3a, fakeHash3b]))
config_confirm_pushed(conf, seqno8)
config_confirm_pushed(conf2, seqno9)
let fakeHash4: String = "fakeHash4"
var cFakeHash4: [CChar] = fakeHash4.cArray
config_confirm_pushed(conf, pushData8.pointee.seqno, &cFakeHash4)
config_confirm_pushed(conf2, pushData9.pointee.seqno, &cFakeHash4)
pushData8.deallocate()
pushData9.deallocate()
expect(config_needs_push(conf)).to(beFalse())
expect(config_needs_push(conf2)).to(beFalse())
@ -268,11 +273,11 @@ class ConfigContactsSpec: QuickSpec {
var sessionIds2: [String] = []
var nicknames2: [String] = []
expect(contacts_size(conf)).to(equal(2))
var contact8: contacts_contact = contacts_contact()
let contactIterator2: UnsafeMutablePointer<contacts_iterator> = contacts_iterator_new(conf)
while !contacts_iterator_done(contactIterator2, &contact8) {
sessionIds2.append(String(libSessionVal: contact8.session_id) ?? "(N/A)")
sessionIds2.append(String(libSessionVal: contact8.session_id))
nicknames2.append(String(libSessionVal: contact8.nickname, nullIfEmpty: true) ?? "(N/A)")
contacts_iterator_advance(contactIterator2)
}
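The rewritten assertions above all follow the same updated push cycle: `config_push` now returns a `config_push_data` pointer carrying the seqno, the config bytes and any now-obsolete message hashes, and `config_confirm_pushed`/`config_merge` take the server message hashes. A condensed sketch of that cycle, using only calls exercised in the specs (the wrapper function and the upload step are placeholders; imports mirror the spec files):

import Foundation
import SessionUtil
import SessionUtilitiesKit

// Condensed from the spec flow above; `conf` is an already-initialised config.
func pushAndConfirm(_ conf: UnsafeMutablePointer<config_object>?, messageHash: String) {
    guard config_needs_push(conf) else { return }

    // Snapshot the data to upload (the caller owns the returned pointer)
    let pushData: UnsafeMutablePointer<config_push_data> = config_push(conf)
    let configBytes: Data = Data(bytes: pushData.pointee.config, count: pushData.pointee.config_len)
    let obsoleteHashes: [String] = [String](
        pointer: pushData.pointee.obsolete,
        count: pushData.pointee.obsolete_len
    )
    // Placeholder for the real upload/delete round trip
    print("Uploading \(configBytes.count) bytes; will obsolete \(obsoleteHashes)")

    // Once the upload succeeds, confirm with the seqno and the stored message hash
    var cMessageHash: [CChar] = messageHash.cArray
    config_confirm_pushed(conf, pushData.pointee.seqno, &cMessageHash)
    pushData.deallocate()
}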

View File

@ -9,11 +9,11 @@ import Quick
import Nimble
/// This spec is designed to replicate the initial test cases for the libSession-util to ensure the behaviour matches
class ConfigConvoInfoVolatileSpec: QuickSpec {
class ConfigConvoInfoVolatileSpec {
// MARK: - Spec
override func spec() {
it("generates ConvoInfoVolatileS configs correctly") {
static func spec() {
it("generates ConvoInfoVolatile configs correctly") {
let seed: Data = Data(hex: "0123456789abcdef0123456789abcdef")
// FIXME: Would be good to move these into the libSession-util instead of using Sodium separately
@ -62,26 +62,26 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
.to(beFalse())
expect(convo_info_volatile_get_1to1(conf, &oneToOne3, &cDefinitelyRealId)).to(beTrue())
expect(oneToOne3.last_read).to(equal(nowTimestampMs))
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_dump(conf)).to(beTrue())
let openGroupBaseUrl: String = "http://Example.ORG:5678"
var cOpenGroupBaseUrl: [CChar] = openGroupBaseUrl.cArray
let openGroupBaseUrlResult: String = openGroupBaseUrl.lowercased()
// ("http://Example.ORG:5678"
// .lowercased()
// .cArray +
// [CChar](repeating: 0, count: (268 - openGroupBaseUrl.count))
// )
// ("http://Example.ORG:5678"
// .lowercased()
// .cArray +
// [CChar](repeating: 0, count: (268 - openGroupBaseUrl.count))
// )
let openGroupRoom: String = "SudokuRoom"
var cOpenGroupRoom: [CChar] = openGroupRoom.cArray
let openGroupRoomResult: String = openGroupRoom.lowercased()
// ("SudokuRoom"
// .lowercased()
// .cArray +
// [CChar](repeating: 0, count: (65 - openGroupRoom.count))
// )
// ("SudokuRoom"
// .lowercased()
// .cArray +
// [CChar](repeating: 0, count: (65 - openGroupRoom.count))
// )
var cOpenGroupPubkey: [UInt8] = Data(hex: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
.bytes
var community1: convo_info_volatile_community = convo_info_volatile_community()
@ -95,19 +95,18 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
// The new data doesn't get stored until we call this:
convo_info_volatile_set_community(conf, &community1);
var toPush: UnsafeMutablePointer<UInt8>? = nil
var toPushLen: Int = 0
// We don't need to push since we haven't changed anything, so this call is mainly just for
// testing:
let seqno: Int64 = config_push(conf, &toPush, &toPushLen)
expect(toPush).toNot(beNil())
expect(seqno).to(equal(1))
toPush?.deallocate()
var pushData1: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData1.pointee.seqno).to(equal(1))
// Pretend we uploaded it
config_confirm_pushed(conf, seqno)
let fakeHash1: String = "fakehash1"
var cFakeHash1: [CChar] = fakeHash1.cArray
config_confirm_pushed(conf, pushData1.pointee.seqno, &cFakeHash1)
expect(config_needs_dump(conf)).to(beTrue())
expect(config_needs_push(conf)).to(beFalse())
pushData1.deallocate()
var dump1: UnsafeMutablePointer<UInt8>? = nil
var dump1Len: Int = 0
@ -121,7 +120,7 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
expect(config_needs_dump(conf2)).to(beFalse())
expect(config_needs_push(conf2)).to(beFalse())
var oneToOne4: convo_info_volatile_1to1 = convo_info_volatile_1to1()
expect(convo_info_volatile_get_1to1(conf2, &oneToOne4, &cDefinitelyRealId)).to(equal(true))
expect(oneToOne4.last_read).to(equal(nowTimestampMs))
@ -135,14 +134,14 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
expect(Data(libSessionVal: community2.pubkey, count: 32).toHexString())
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
community2.unread = true
let anotherId: String = "051111111111111111111111111111111111111111111111111111111111111111"
var cAnotherId: [CChar] = anotherId.cArray
var oneToOne5: convo_info_volatile_1to1 = convo_info_volatile_1to1()
expect(convo_info_volatile_get_or_construct_1to1(conf2, &oneToOne5, &cAnotherId)).to(beTrue())
oneToOne5.unread = true
convo_info_volatile_set_1to1(conf2, &oneToOne5)
let thirdId: String = "05cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
var cThirdId: [CChar] = thirdId.cArray
var legacyGroup2: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
@ -150,21 +149,22 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
legacyGroup2.last_read = (nowTimestampMs - 50)
convo_info_volatile_set_legacy_group(conf2, &legacyGroup2)
expect(config_needs_push(conf2)).to(beTrue())
var toPush2: UnsafeMutablePointer<UInt8>? = nil
var toPush2Len: Int = 0
let seqno2: Int64 = config_push(conf2, &toPush2, &toPush2Len)
expect(seqno2).to(equal(2))
var pushData2: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData2.pointee.seqno).to(equal(2))
// Check the merging
var mergeData: [UnsafePointer<UInt8>?] = [UnsafePointer(toPush2)]
var mergeSize: [Int] = [toPush2Len]
expect(config_merge(conf, &mergeData, &mergeSize, 1)).to(equal(1))
config_confirm_pushed(conf, seqno)
toPush2?.deallocate()
let fakeHash2: String = "fakehash2"
var cFakeHash2: [CChar] = fakeHash2.cArray
var mergeHashes: [UnsafePointer<CChar>?] = [cFakeHash2].unsafeCopy()
var mergeData: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData2.pointee.config)]
var mergeSize: [Int] = [pushData2.pointee.config_len]
expect(config_merge(conf, &mergeHashes, &mergeData, &mergeSize, 1)).to(equal(1))
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash2)
pushData2.deallocate()
expect(config_needs_push(conf)).to(beFalse())
for targetConf in [conf, conf2] {
// Iterate through and make sure we got everything we expected
var seen: [String] = []
@ -180,30 +180,13 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
while !convo_info_volatile_iterator_done(it) {
if convo_info_volatile_it_is_1to1(it, &c1) {
let sessionId: String = String(cString: withUnsafeBytes(of: c1.session_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
seen.append("1-to-1: \(sessionId)")
seen.append("1-to-1: \(String(libSessionVal: c1.session_id))")
}
else if convo_info_volatile_it_is_community(it, &c2) {
let baseUrl: String = String(cString: withUnsafeBytes(of: c2.base_url) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
let room: String = String(cString: withUnsafeBytes(of: c2.room) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
seen.append("og: \(baseUrl)/r/\(room)")
seen.append("og: \(String(libSessionVal: c2.base_url))/r/\(String(libSessionVal: c2.room))")
}
else if convo_info_volatile_it_is_legacy_group(it, &c3) {
let groupId: String = String(cString: withUnsafeBytes(of: c3.group_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
seen.append("cl: \(groupId)")
seen.append("cl: \(String(libSessionVal: c3.group_id))")
}
convo_info_volatile_iterator_advance(it)
@ -219,17 +202,16 @@ class ConfigConvoInfoVolatileSpec: QuickSpec {
]))
}
var fourthId: [CChar] = "052000000000000000000000000000000000000000000000000000000000000000"
.bytes
.map { CChar(bitPattern: $0) }
let fourthId: String = "052000000000000000000000000000000000000000000000000000000000000000"
var cFourthId: [CChar] = fourthId.cArray
expect(config_needs_push(conf)).to(beFalse())
convo_info_volatile_erase_1to1(conf, &fourthId)
convo_info_volatile_erase_1to1(conf, &cFourthId)
expect(config_needs_push(conf)).to(beFalse())
convo_info_volatile_erase_1to1(conf, &cDefinitelyRealId)
expect(config_needs_push(conf)).to(beTrue())
expect(convo_info_volatile_size(conf)).to(equal(3))
expect(convo_info_volatile_size_1to1(conf)).to(equal(1))
// Check the single-type iterators:
var seen1: [String?] = []
var c1: convo_info_volatile_1to1 = convo_info_volatile_1to1()
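These spec changes also lean on the `String(libSessionVal:)` conveniences in place of the previous `withUnsafeBytes`/`nullTerminated` conversions: the plain initialiser yields a (possibly empty) String, while passing `nullIfEmpty: true` yields nil for empty values, hence the `?? "(N/A)"` fallbacks kept in the iterator loops. A small sketch of reading a contact with both forms (the function itself is hypothetical; imports mirror the spec files):

import SessionUtil
import SessionUtilitiesKit

// Hypothetical helper showing the two initialiser behaviours used above.
func describeContact(_ contact: contacts_contact) -> String {
    // Non-optional form: an empty value comes back as ""
    let sessionId: String = String(libSessionVal: contact.session_id)

    // `nullIfEmpty: true` form: an empty value comes back as nil
    let nickname: String? = String(libSessionVal: contact.nickname, nullIfEmpty: true)
    let profileUrl: String? = String(libSessionVal: contact.profile_pic.url, nullIfEmpty: true)

    return "\(sessionId) (nickname: \(nickname ?? "(N/A)"), pic: \(profileUrl ?? "none"))"
}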

View File

@ -0,0 +1,576 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import Sodium
import SessionUtil
import SessionUtilitiesKit
import SessionMessagingKit
import Quick
import Nimble
/// This spec is designed to replicate the initial test cases for the libSession-util to ensure the behaviour matches
class ConfigUserGroupsSpec {
// MARK: - Spec
static func spec() {
it("parses community URLs correctly") {
let result1 = SessionUtil.parseCommunity(url: [
"https://example.com/",
"SomeRoom?public_key=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
].joined())
let result2 = SessionUtil.parseCommunity(url: [
"HTTPS://EXAMPLE.COM/",
"sOMErOOM?public_key=0123456789aBcdEF0123456789abCDEF0123456789ABCdef0123456789ABCDEF"
].joined())
let result3 = SessionUtil.parseCommunity(url: [
"HTTPS://EXAMPLE.COM/r/",
"someroom?public_key=0123456789aBcdEF0123456789abCDEF0123456789ABCdef0123456789ABCDEF"
].joined())
let result4 = SessionUtil.parseCommunity(url: [
"http://example.com/r/",
"someroom?public_key=0123456789aBcdEF0123456789abCDEF0123456789ABCdef0123456789ABCDEF"
].joined())
let result5 = SessionUtil.parseCommunity(url: [
"HTTPS://EXAMPLE.com:443/r/",
"someroom?public_key=0123456789aBcdEF0123456789abCDEF0123456789ABCdef0123456789ABCDEF"
].joined())
let result6 = SessionUtil.parseCommunity(url: [
"HTTP://EXAMPLE.com:80/r/",
"someroom?public_key=0123456789aBcdEF0123456789abCDEF0123456789ABCdef0123456789ABCDEF"
].joined())
let result7 = SessionUtil.parseCommunity(url: [
"http://example.com:80/r/",
"someroom?public_key=ASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8"
].joined())
let result8 = SessionUtil.parseCommunity(url: [
"http://example.com:80/r/",
"someroom?public_key=yrtwk3hjixg66yjdeiuauk6p7hy1gtm8tgih55abrpnsxnpm3zzo"
].joined())
expect(result1?.server).to(equal("https://example.com"))
expect(result1?.server).to(equal(result2?.server))
expect(result1?.server).to(equal(result3?.server))
expect(result1?.server).toNot(equal(result4?.server))
expect(result4?.server).to(equal("http://example.com"))
expect(result1?.server).to(equal(result5?.server))
expect(result4?.server).to(equal(result6?.server))
expect(result4?.server).to(equal(result7?.server))
expect(result4?.server).to(equal(result8?.server))
expect(result1?.room).to(equal("SomeRoom"))
expect(result2?.room).to(equal("sOMErOOM"))
expect(result3?.room).to(equal("someroom"))
expect(result4?.room).to(equal("someroom"))
expect(result5?.room).to(equal("someroom"))
expect(result6?.room).to(equal("someroom"))
expect(result7?.room).to(equal("someroom"))
expect(result8?.room).to(equal("someroom"))
expect(result1?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result2?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result3?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result4?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result5?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result6?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result7?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(result8?.publicKey)
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
}
it("generates UserGroup configs correctly") {
let seed: Data = Data(hex: "0123456789abcdef0123456789abcdef")
// FIXME: Would be good to move these into the libSession-util instead of using Sodium separately
let identity = try! Identity.generate(from: seed)
var edSK: [UInt8] = identity.ed25519KeyPair.secretKey
expect(edSK.toHexString().suffix(64))
.to(equal("4cb76fdc6d32278e3f83dbf608360ecc6b65727934b85d2fb86862ff98c46ab7"))
expect(identity.x25519KeyPair.publicKey.toHexString())
.to(equal("d2ad010eeb72d72e561d9de7bd7b6989af77dcabffa03a5111a6c859ae5c3a72"))
expect(String(edSK.toHexString().prefix(32))).to(equal(seed.toHexString()))
// Initialize a brand new, empty config because we have no dump data to deal with.
let error: UnsafeMutablePointer<CChar>? = nil
var conf: UnsafeMutablePointer<config_object>? = nil
expect(user_groups_init(&conf, &edSK, nil, 0, error)).to(equal(0))
error?.deallocate()
// Empty contacts shouldn't have an existing contact
let definitelyRealId: String = "055000000000000000000000000000000000000000000000000000000000000000"
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray
let legacyGroup1: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf, &cDefinitelyRealId)
expect(legacyGroup1?.pointee).to(beNil())
expect(user_groups_size(conf)).to(equal(0))
let legacyGroup2: UnsafeMutablePointer<ugroups_legacy_group_info> = user_groups_get_or_construct_legacy_group(conf, &cDefinitelyRealId)
expect(legacyGroup2.pointee).toNot(beNil())
expect(String(libSessionVal: legacyGroup2.pointee.session_id))
.to(equal(definitelyRealId))
expect(legacyGroup2.pointee.hidden).to(beFalse())
expect(legacyGroup2.pointee.disappearing_timer).to(equal(0))
expect(String(libSessionVal: legacyGroup2.pointee.enc_pubkey, fixedLength: 32)).to(equal(""))
expect(String(libSessionVal: legacyGroup2.pointee.enc_seckey, fixedLength: 32)).to(equal(""))
expect(legacyGroup2.pointee.priority).to(equal(0))
expect(String(libSessionVal: legacyGroup2.pointee.name)).to(equal(""))
// Iterate through and make sure we got everything we expected
var membersSeen1: [String: Bool] = [:]
var memberSessionId1: UnsafePointer<CChar>? = nil
var memberAdmin1: Bool = false
let membersIt1: OpaquePointer = ugroups_legacy_members_begin(legacyGroup2)
while ugroups_legacy_members_next(membersIt1, &memberSessionId1, &memberAdmin1) {
membersSeen1[String(cString: memberSessionId1!)] = memberAdmin1
}
ugroups_legacy_members_free(membersIt1)
expect(membersSeen1).to(beEmpty())
// No need to sync a conversation with a default state
expect(config_needs_push(conf)).to(beFalse())
expect(config_needs_dump(conf)).to(beFalse())
// We don't need to push since we haven't changed anything, so this call is mainly just for
// testing:
let pushData1: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData1.pointee.seqno).to(equal(0))
expect([String](pointer: pushData1.pointee.obsolete, count: pushData1.pointee.obsolete_len))
.to(beEmpty())
expect(pushData1.pointee.config_len).to(equal(256))
pushData1.deallocate()
let users: [String] = [
"050000000000000000000000000000000000000000000000000000000000000000",
"051111111111111111111111111111111111111111111111111111111111111111",
"052222222222222222222222222222222222222222222222222222222222222222",
"053333333333333333333333333333333333333333333333333333333333333333",
"054444444444444444444444444444444444444444444444444444444444444444",
"055555555555555555555555555555555555555555555555555555555555555555",
"056666666666666666666666666666666666666666666666666666666666666666"
]
var cUsers: [[CChar]] = users.map { $0.cArray }
legacyGroup2.pointee.name = "Englishmen".toLibSession()
legacyGroup2.pointee.disappearing_timer = 60
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[0], false)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[1], true)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[2], false)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[4], true)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[5], false)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[2], false)).to(beFalse())
// Flip to and from admin
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[2], true)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[1], false)).to(beTrue())
expect(ugroups_legacy_member_remove(legacyGroup2, &cUsers[5])).to(beTrue())
expect(ugroups_legacy_member_remove(legacyGroup2, &cUsers[4])).to(beTrue())
var membersSeen2: [String: Bool] = [:]
var memberSessionId2: UnsafePointer<CChar>? = nil
var memberAdmin2: Bool = false
let membersIt2: OpaquePointer = ugroups_legacy_members_begin(legacyGroup2)
while ugroups_legacy_members_next(membersIt2, &memberSessionId2, &memberAdmin2) {
membersSeen2[String(cString: memberSessionId2!)] = memberAdmin2
}
ugroups_legacy_members_free(membersIt2)
expect(membersSeen2).to(equal([
"050000000000000000000000000000000000000000000000000000000000000000": false,
"051111111111111111111111111111111111111111111111111111111111111111": false,
"052222222222222222222222222222222222222222222222222222222222222222": true
]))
// FIXME: Would be good to move these into libSession-util itself instead of using Sodium separately
let groupSeed: Data = Data(hex: "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff")
let groupEd25519KeyPair = Sodium().sign.keyPair(seed: groupSeed.bytes)!
let groupX25519PublicKey = Sodium().sign.toX25519(ed25519PublicKey: groupEd25519KeyPair.publicKey)!
// Note: this isn't exactly what Session actually does here for legacy closed
// groups (rather it uses X25519 keys) but for this test the distinction doesn't matter.
legacyGroup2.pointee.enc_pubkey = Data(groupX25519PublicKey).toLibSession()
legacyGroup2.pointee.enc_seckey = Data(groupEd25519KeyPair.secretKey).toLibSession()
legacyGroup2.pointee.priority = 3
expect(Data(libSessionVal: legacyGroup2.pointee.enc_pubkey, count: 32).toHexString())
.to(equal("c5ba413c336f2fe1fb9a2c525f8a86a412a1db128a7841b4e0e217fa9eb7fd5e"))
expect(Data(libSessionVal: legacyGroup2.pointee.enc_seckey, count: 32).toHexString())
.to(equal("00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"))
// The new data doesn't get stored until we call this:
user_groups_set_legacy_group(conf, legacyGroup2)
let legacyGroup3: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf, &cDefinitelyRealId)
expect(legacyGroup3?.pointee).toNot(beNil())
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_dump(conf)).to(beTrue())
let communityPubkey: String = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
var cCommunityPubkey: [UInt8] = Data(hex: communityPubkey).cArray
var cCommunityBaseUrl: [CChar] = "http://Example.ORG:5678".cArray
var cCommunityRoom: [CChar] = "SudokuRoom".cArray
var community1: ugroups_community_info = ugroups_community_info()
expect(user_groups_get_or_construct_community(conf, &community1, &cCommunityBaseUrl, &cCommunityRoom, &cCommunityPubkey))
.to(beTrue())
expect(String(libSessionVal: community1.base_url)).to(equal("http://example.org:5678")) // Note: lower-case
expect(String(libSessionVal: community1.room)).to(equal("SudokuRoom")) // Note: case-preserving
expect(Data(libSessionVal: community1.pubkey, count: 32).toHexString())
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
community1.priority = 14
// The new data doesn't get stored until we call this:
user_groups_set_community(conf, &community1)
// The seqno should be incremented since we made changes (it only increments once between
// dumps, even though we changed two fields here)
let pushData2: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData2.pointee.seqno).to(equal(1))
expect([String](pointer: pushData2.pointee.obsolete, count: pushData2.pointee.obsolete_len))
.to(beEmpty())
// Pretend we uploaded it
let fakeHash1: String = "fakehash1"
var cFakeHash1: [CChar] = fakeHash1.cArray
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash1)
expect(config_needs_dump(conf)).to(beTrue())
expect(config_needs_push(conf)).to(beFalse())
var dump1: UnsafeMutablePointer<UInt8>? = nil
var dump1Len: Int = 0
config_dump(conf, &dump1, &dump1Len)
let error2: UnsafeMutablePointer<CChar>? = nil
var conf2: UnsafeMutablePointer<config_object>? = nil
expect(user_groups_init(&conf2, &edSK, dump1, dump1Len, error2)).to(equal(0))
error2?.deallocate()
dump1?.deallocate()
expect(config_needs_dump(conf)).to(beFalse()) // Because we just called dump() above, to load up conf2
expect(config_needs_push(conf)).to(beFalse())
let pushData3: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData3.pointee.seqno).to(equal(1))
expect([String](pointer: pushData3.pointee.obsolete, count: pushData3.pointee.obsolete_len))
.to(beEmpty())
pushData3.deallocate()
let currentHashes1: UnsafeMutablePointer<config_string_list>? = config_current_hashes(conf)
expect([String](pointer: currentHashes1?.pointee.value, count: currentHashes1?.pointee.len))
.to(equal(["fakehash1"]))
currentHashes1?.deallocate()
expect(config_needs_push(conf2)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
let pushData4: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData4.pointee.seqno).to(equal(1))
expect(config_needs_dump(conf2)).to(beFalse())
expect([String](pointer: pushData4.pointee.obsolete, count: pushData4.pointee.obsolete_len))
.to(beEmpty())
pushData4.deallocate()
let currentHashes2: UnsafeMutablePointer<config_string_list>? = config_current_hashes(conf2)
expect([String](pointer: currentHashes2?.pointee.value, count: currentHashes2?.pointee.len))
.to(equal(["fakehash1"]))
currentHashes2?.deallocate()
expect(user_groups_size(conf2)).to(equal(2))
expect(user_groups_size_communities(conf2)).to(equal(1))
expect(user_groups_size_legacy_groups(conf2)).to(equal(1))
let legacyGroup4: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf2, &cDefinitelyRealId)
expect(legacyGroup4?.pointee).toNot(beNil())
expect(String(libSessionVal: legacyGroup4?.pointee.enc_pubkey, fixedLength: 32)).to(equal(""))
expect(String(libSessionVal: legacyGroup4?.pointee.enc_seckey, fixedLength: 32)).to(equal(""))
expect(legacyGroup4?.pointee.disappearing_timer).to(equal(60))
expect(String(libSessionVal: legacyGroup4?.pointee.session_id)).to(equal(definitelyRealId))
expect(legacyGroup4?.pointee.hidden).to(beFalse())
expect(legacyGroup4?.pointee.priority).to(equal(3))
expect(String(libSessionVal: legacyGroup4?.pointee.name)).to(equal("Englishmen"))
var membersSeen3: [String: Bool] = [:]
var memberSessionId3: UnsafePointer<CChar>? = nil
var memberAdmin3: Bool = false
let membersIt3: OpaquePointer = ugroups_legacy_members_begin(legacyGroup4)
while ugroups_legacy_members_next(membersIt3, &memberSessionId3, &memberAdmin3) {
membersSeen3[String(cString: memberSessionId3!)] = memberAdmin3
}
ugroups_legacy_members_free(membersIt3)
expect(membersSeen3).to(equal([
"050000000000000000000000000000000000000000000000000000000000000000": false,
"051111111111111111111111111111111111111111111111111111111111111111": false,
"052222222222222222222222222222222222222222222222222222222222222222": true
]))
expect(config_needs_push(conf2)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
let pushData5: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData5.pointee.seqno).to(equal(1))
expect(config_needs_dump(conf2)).to(beFalse())
pushData5.deallocate()
for targetConf in [conf, conf2] {
// Iterate through and make sure we got everything we expected
var seen: [String] = []
var c1: ugroups_legacy_group_info = ugroups_legacy_group_info()
var c2: ugroups_community_info = ugroups_community_info()
let it: OpaquePointer = user_groups_iterator_new(targetConf)
while !user_groups_iterator_done(it) {
if user_groups_it_is_legacy_group(it, &c1) {
var memberCount: Int = 0
var adminCount: Int = 0
ugroups_legacy_members_count(&c1, &memberCount, &adminCount)
seen.append("legacy: \(String(libSessionVal: c1.name)), \(adminCount) admins, \(memberCount) members")
}
else if user_groups_it_is_community(it, &c2) {
seen.append("community: \(String(libSessionVal: c2.base_url))/r/\(String(libSessionVal: c2.room))")
}
else {
seen.append("unknown")
}
user_groups_iterator_advance(it)
}
user_groups_iterator_free(it)
expect(seen).to(equal([
"community: http://example.org:5678/r/SudokuRoom",
"legacy: Englishmen, 1 admins, 2 members"
]))
}
var cCommunity2BaseUrl: [CChar] = "http://example.org:5678".cArray
var cCommunity2Room: [CChar] = "sudokuRoom".cArray
var community2: ugroups_community_info = ugroups_community_info()
expect(user_groups_get_community(conf2, &community2, &cCommunity2BaseUrl, &cCommunity2Room))
.to(beTrue())
expect(String(libSessionVal: community2.base_url)).to(equal("http://example.org:5678"))
expect(String(libSessionVal: community2.room)).to(equal("SudokuRoom")) // Case preserved from the stored value, not the input value
expect(Data(libSessionVal: community2.pubkey, count: 32).toHexString())
.to(equal("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
expect(community2.priority).to(equal(14))
expect(config_needs_push(conf2)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
let pushData6: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData6.pointee.seqno).to(equal(1))
expect(config_needs_dump(conf2)).to(beFalse())
pushData6.deallocate()
community2.room = "sudokuRoom".toLibSession() // Change capitalization
user_groups_set_community(conf2, &community2)
expect(config_needs_push(conf2)).to(beTrue())
expect(config_needs_dump(conf2)).to(beTrue())
let fakeHash2: String = "fakehash2"
var cFakeHash2: [CChar] = fakeHash2.cArray
let pushData7: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData7.pointee.seqno).to(equal(2))
config_confirm_pushed(conf2, pushData7.pointee.seqno, &cFakeHash2)
expect([String](pointer: pushData7.pointee.obsolete, count: pushData7.pointee.obsolete_len))
.to(equal([fakeHash1]))
let currentHashes3: UnsafeMutablePointer<config_string_list>? = config_current_hashes(conf2)
expect([String](pointer: currentHashes3?.pointee.value, count: currentHashes3?.pointee.len))
.to(equal([fakeHash2]))
currentHashes3?.deallocate()
var dump2: UnsafeMutablePointer<UInt8>? = nil
var dump2Len: Int = 0
config_dump(conf2, &dump2, &dump2Len)
expect(config_needs_push(conf2)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
let pushData8: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData8.pointee.seqno).to(equal(2))
config_confirm_pushed(conf2, pushData8.pointee.seqno, &cFakeHash2)
expect(config_needs_dump(conf2)).to(beFalse())
var mergeHashes1: [UnsafePointer<CChar>?] = [cFakeHash2].unsafeCopy()
var mergeData1: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData8.pointee.config)]
var mergeSize1: [Int] = [pushData8.pointee.config_len]
expect(config_merge(conf, &mergeHashes1, &mergeData1, &mergeSize1, 1)).to(equal(1))
pushData8.deallocate()
var cCommunity3BaseUrl: [CChar] = "http://example.org:5678".cArray
var cCommunity3Room: [CChar] = "SudokuRoom".cArray
var community3: ugroups_community_info = ugroups_community_info()
expect(user_groups_get_community(conf, &community3, &cCommunity3BaseUrl, &cCommunity3Room))
.to(beTrue())
expect(String(libSessionVal: community3.room)).to(equal("sudokuRoom")) // We picked up the capitalization change
expect(user_groups_size(conf)).to(equal(2))
expect(user_groups_size_communities(conf)).to(equal(1))
expect(user_groups_size_legacy_groups(conf)).to(equal(1))
let legacyGroup5: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf2, &cDefinitelyRealId)
expect(ugroups_legacy_member_add(legacyGroup5, &cUsers[4], false)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup5, &cUsers[5], true)).to(beTrue())
expect(ugroups_legacy_member_add(legacyGroup5, &cUsers[6], true)).to(beTrue())
expect(ugroups_legacy_member_remove(legacyGroup5, &cUsers[1])).to(beTrue())
expect(config_needs_push(conf2)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
let pushData9: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData9.pointee.seqno).to(equal(2))
expect(config_needs_dump(conf2)).to(beFalse())
pushData9.deallocate()
user_groups_set_legacy_group(conf2, legacyGroup5)
expect(config_needs_push(conf2)).to(beTrue())
expect(config_needs_dump(conf2)).to(beTrue())
var cCommunity4BaseUrl: [CChar] = "http://exAMple.ORG:5678".cArray
var cCommunity4Room: [CChar] = "sudokuROOM".cArray
user_groups_erase_community(conf2, &cCommunity4BaseUrl, &cCommunity4Room)
let fakeHash3: String = "fakehash3"
var cFakeHash3: [CChar] = fakeHash3.cArray
let pushData10: UnsafeMutablePointer<config_push_data> = config_push(conf2)
config_confirm_pushed(conf2, pushData10.pointee.seqno, &cFakeHash3)
expect(pushData10.pointee.seqno).to(equal(3))
expect([String](pointer: pushData10.pointee.obsolete, count: pushData10.pointee.obsolete_len))
.to(equal([fakeHash2]))
let currentHashes4: UnsafeMutablePointer<config_string_list>? = config_current_hashes(conf2)
expect([String](pointer: currentHashes4?.pointee.value, count: currentHashes4?.pointee.len))
.to(equal([fakeHash3]))
currentHashes4?.deallocate()
var mergeHashes2: [UnsafePointer<CChar>?] = [cFakeHash3].unsafeCopy()
var mergeData2: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData10.pointee.config)]
var mergeSize2: [Int] = [pushData10.pointee.config_len]
expect(config_merge(conf, &mergeHashes2, &mergeData2, &mergeSize2, 1)).to(equal(1))
expect(user_groups_size(conf)).to(equal(1))
expect(user_groups_size_communities(conf)).to(equal(0))
expect(user_groups_size_legacy_groups(conf)).to(equal(1))
var prio: Int32 = 0
var cBeanstalkBaseUrl: [CChar] = "http://jacksbeanstalk.org".cArray
var cBeanstalkPubkey: [UInt8] = Data(
hex: "0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff"
).cArray
["fee", "fi", "fo", "fum"].forEach { room in
var cRoom: [CChar] = room.cArray
prio += 1
var community4: ugroups_community_info = ugroups_community_info()
expect(user_groups_get_or_construct_community(conf, &community4, &cBeanstalkBaseUrl, &cRoom, &cBeanstalkPubkey))
.to(beTrue())
community4.priority = prio
user_groups_set_community(conf, &community4)
}
expect(user_groups_size(conf)).to(equal(5))
expect(user_groups_size_communities(conf)).to(equal(4))
expect(user_groups_size_legacy_groups(conf)).to(equal(1))
let fakeHash4: String = "fakehash4"
var cFakeHash4: [CChar] = fakeHash4.cArray
let pushData11: UnsafeMutablePointer<config_push_data> = config_push(conf)
config_confirm_pushed(conf, pushData11.pointee.seqno, &cFakeHash4)
expect(pushData11.pointee.seqno).to(equal(4))
expect([String](pointer: pushData11.pointee.obsolete, count: pushData11.pointee.obsolete_len))
.to(equal([fakeHash3, fakeHash2, fakeHash1]))
// Load some obsolete ones in just to check that they get immediately obsoleted
let fakeHash10: String = "fakehash10"
let cFakeHash10: [CChar] = fakeHash10.cArray
let fakeHash11: String = "fakehash11"
let cFakeHash11: [CChar] = fakeHash11.cArray
let fakeHash12: String = "fakehash12"
let cFakeHash12: [CChar] = fakeHash12.cArray
var mergeHashes3: [UnsafePointer<CChar>?] = [cFakeHash10, cFakeHash11, cFakeHash12, cFakeHash4].unsafeCopy()
var mergeData3: [UnsafePointer<UInt8>?] = [
UnsafePointer(pushData10.pointee.config),
UnsafePointer(pushData2.pointee.config),
UnsafePointer(pushData7.pointee.config),
UnsafePointer(pushData11.pointee.config)
]
var mergeSize3: [Int] = [
pushData10.pointee.config_len,
pushData2.pointee.config_len,
pushData7.pointee.config_len,
pushData11.pointee.config_len
]
expect(config_merge(conf2, &mergeHashes3, &mergeData3, &mergeSize3, 4)).to(equal(4))
expect(config_needs_dump(conf2)).to(beTrue())
expect(config_needs_push(conf2)).to(beFalse())
pushData2.deallocate()
pushData7.deallocate()
pushData10.deallocate()
pushData11.deallocate()
let currentHashes5: UnsafeMutablePointer<config_string_list>? = config_current_hashes(conf2)
expect([String](pointer: currentHashes5?.pointee.value, count: currentHashes5?.pointee.len))
.to(equal([fakeHash4]))
currentHashes5?.deallocate()
let pushData12: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData12.pointee.seqno).to(equal(4))
expect([String](pointer: pushData12.pointee.obsolete, count: pushData12.pointee.obsolete_len))
.to(equal([fakeHash11, fakeHash12, fakeHash10, fakeHash3]))
pushData12.deallocate()
for targetConf in [conf, conf2] {
// Iterate through and make sure we got everything we expected
var seen: [String] = []
var c1: ugroups_legacy_group_info = ugroups_legacy_group_info()
var c2: ugroups_community_info = ugroups_community_info()
let it: OpaquePointer = user_groups_iterator_new(targetConf)
while !user_groups_iterator_done(it) {
if user_groups_it_is_legacy_group(it, &c1) {
var memberCount: Int = 0
var adminCount: Int = 0
ugroups_legacy_members_count(&c1, &memberCount, &adminCount)
seen.append("legacy: \(String(libSessionVal: c1.name)), \(adminCount) admins, \(memberCount) members")
}
else if user_groups_it_is_community(it, &c2) {
seen.append("community: \(String(libSessionVal: c2.base_url))/r/\(String(libSessionVal: c2.room))")
}
else {
seen.append("unknown")
}
user_groups_iterator_advance(it)
}
user_groups_iterator_free(it)
expect(seen).to(equal([
"community: http://jacksbeanstalk.org/r/fee",
"community: http://jacksbeanstalk.org/r/fi",
"community: http://jacksbeanstalk.org/r/fo",
"community: http://jacksbeanstalk.org/r/fum",
"legacy: Englishmen, 3 admins, 2 members"
]))
}
}
}
}
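// A minimal sketch of the push/confirm/dump lifecycle the spec above exercises, assuming
// `conf` is an already-initialised `UnsafeMutablePointer<config_object>` and `uploadedHash`
// is the message hash the swarm returned for the pushed data (both are placeholders here):
func userGroupsSyncSketch(_ conf: UnsafeMutablePointer<config_object>, uploadedHash: String) {
    if config_needs_push(conf) {
        let pushData: UnsafeMutablePointer<config_push_data> = config_push(conf)
        defer { pushData.deallocate() }

        // ... upload pushData.pointee.config (pushData.pointee.config_len bytes) to the swarm
        // and delete any hashes listed in pushData.pointee.obsolete ...

        var cUploadedHash: [CChar] = uploadedHash.cArray
        config_confirm_pushed(conf, pushData.pointee.seqno, &cUploadedHash)
    }

    if config_needs_dump(conf) {
        var dump: UnsafeMutablePointer<UInt8>? = nil
        var dumpLen: Int = 0
        config_dump(conf, &dump, &dumpLen)
        // ... persist Data(bytes: dump!, count: dumpLen) locally before freeing ...
        dump?.deallocate()
    }
}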

View File

@ -10,10 +10,10 @@ import Quick
import Nimble
/// This spec is designed to replicate the initial test cases for libSession-util to ensure the behaviour matches
class ConfigUserProfileSpec: QuickSpec {
class ConfigUserProfileSpec {
// MARK: - Spec
override func spec() {
static func spec() {
it("generates UserProfile configs correctly") {
let seed: Data = Data(hex: "0123456789abcdef0123456789abcdef")
@ -41,14 +41,12 @@ class ConfigUserProfileSpec: QuickSpec {
let namePtr: UnsafePointer<CChar>? = user_profile_get_name(conf)
expect(namePtr).to(beNil())
var toPush: UnsafeMutablePointer<UInt8>? = nil
var toPushLen: Int = 0
// We don't need to push since we haven't changed anything, so this call is mainly just for
// testing:
let seqno: Int64 = config_push(conf, &toPush, &toPushLen)
expect(toPush).toNot(beNil())
expect(seqno).to(equal(0))
expect(toPushLen).to(equal(256))
let pushData1: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData1.pointee).toNot(beNil())
expect(pushData1.pointee.seqno).to(equal(0))
expect(pushData1.pointee.config_len).to(equal(256))
let encDomain: [CChar] = "UserProfile"
.bytes
@ -56,7 +54,7 @@ class ConfigUserProfileSpec: QuickSpec {
expect(String(cString: config_encryption_domain(conf))).to(equal("UserProfile"))
var toPushDecSize: Int = 0
let toPushDecrypted: UnsafeMutablePointer<UInt8>? = config_decrypt(toPush, toPushLen, edSK, encDomain, &toPushDecSize)
let toPushDecrypted: UnsafeMutablePointer<UInt8>? = config_decrypt(pushData1.pointee.config, pushData1.pointee.config_len, edSK, encDomain, &toPushDecSize)
let prefixPadding: String = (0..<193)
.map { _ in "\0" }
.joined()
@ -64,7 +62,7 @@ class ConfigUserProfileSpec: QuickSpec {
expect(toPushDecSize).to(equal(216)) // 256 - 40 overhead
expect(String(pointer: toPushDecrypted, length: toPushDecSize))
.to(equal("\(prefixPadding)d1:#i0e1:&de1:<le1:=dee"))
toPush?.deallocate()
pushData1.deallocate()
toPushDecrypted?.deallocate()
// This should also be unset:
@ -78,6 +76,7 @@ class ConfigUserProfileSpec: QuickSpec {
key: "secret78901234567890123456789012".data(using: .utf8)!.toLibSession()
)
expect(user_profile_set_pic(conf, p)).to(equal(0))
user_profile_set_nts_priority(conf, 9)
// Retrieve them just to make sure they set properly:
let namePtr2: UnsafePointer<CChar>? = user_profile_get_name(conf)
@ -88,18 +87,17 @@ class ConfigUserProfileSpec: QuickSpec {
expect(String(libSessionVal: pic2.url)).to(equal("http://example.org/omg-pic-123.bmp"))
expect(Data(libSessionVal: pic2.key, count: ProfileManager.avatarAES256KeyByteLength))
.to(equal("secret78901234567890123456789012".data(using: .utf8)))
expect(user_profile_get_nts_priority(conf)).to(equal(9))
// Since we've made changes, we should need to push new config to the swarm, *and* should need
// to dump the updated state:
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_dump(conf)).to(beTrue())
var toPush2: UnsafeMutablePointer<UInt8>? = nil
var toPush2Len: Int = 0
let seqno2: Int64 = config_push(conf, &toPush2, &toPush2Len);
// The seqno should be incremented since we made changes (it only increments once between
// dumps, even though we changed two fields here)
expect(seqno2).to(equal(1))
let pushData2: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData2.pointee.seqno).to(equal(1))
// Note: This hex value differs from the value in the library tests because
// it looks like the library has an "end of cell mark" character added at the
@ -111,6 +109,7 @@ class ConfigUserProfileSpec: QuickSpec {
d
1:#i1e
1:& d
1:+ i9e
1:n 6:Kallie
1:p 34:http://example.org/omg-pic-123.bmp
1:q 32:secret78901234567890123456789012
@ -124,6 +123,7 @@ class ConfigUserProfileSpec: QuickSpec {
de e
e
1:= d
1:+ 0:
1:n 0:
1:p 0:
1:q 0:
@ -131,38 +131,40 @@ class ConfigUserProfileSpec: QuickSpec {
e
""".removeCharacters(characterSet: CharacterSet.whitespacesAndNewlines) // For readability
.bytes
]
.flatMap { $0 }
].flatMap { $0 }
let expPush1Encrypted: [UInt8] = Data(hex: [
"877c8e0f5d33f5fffa5a4e162785a9a89918e95de1c4b925201f1f5c29d9ee4f8c36e2b278fce1e6",
"b9d999689dd86ff8e79e0a04004fa54d24da89bc2604cb1df8c1356da8f14710543ecec44f2d57fc",
"56ea8b7e73d119c69d755f4d513d5d069f02396b8ec0cbed894169836f57ca4b782ce705895c593b",
"4230d50c175d44a08045388d3f4160bacb617b9ae8de3ebc8d9024245cd09ce102627cab2acf1b91",
"26159211359606611ca5814de320d1a7099a65c99b0eebbefb92a115f5efa6b9132809300ac010c6",
"857cfbd62af71b0fa97eccec75cb95e67edf40b35fdb9cad125a6976693ab085c6bba96a2e51826e",
"81e16b9ec1232af5680f2ced55310486"
"9693a69686da3055f1ecdfb239c3bf8e746951a36d888c2fb7c02e856a5c2091b24e39a7e1af828f",
"1fa09fe8bf7d274afde0a0847ba143c43ffb8722301b5ae32e2f078b9a5e19097403336e50b18c84",
"aade446cd2823b011f97d6ad2116a53feb814efecc086bc172d31f4214b4d7c630b63bbe575b0868",
"2d146da44915063a07a78556ab5eff4f67f6aa26211e8d330b53d28567a931028c393709a325425d",
"e7486ccde24416a7fd4a8ba5fa73899c65f4276dfaddd5b2100adcf0f793104fb235b31ce32ec656",
"056009a9ebf58d45d7d696b74e0c7ff0499c4d23204976f19561dc0dba6dc53a2497d28ce03498ea",
"49bf122762d7bc1d6d9c02f6d54f8384"
].joined()).bytes
expect(String(pointer: toPush2, length: toPush2Len, encoding: .ascii))
.to(equal(String(pointer: expPush1Encrypted, length: expPush1Encrypted.count, encoding: .ascii)))
let pushData2Str: String = String(pointer: pushData2.pointee.config, length: pushData2.pointee.config_len, encoding: .ascii)!
let expPush1EncryptedStr: String = String(pointer: expPush1Encrypted, length: expPush1Encrypted.count, encoding: .ascii)!
expect(pushData2Str).to(equal(expPush1EncryptedStr))
// Raw decryption doesn't unpad (i.e. the padding is part of the encrypted data)
var toPush2DecSize: Int = 0
let toPush2Decrypted: UnsafeMutablePointer<UInt8>? = config_decrypt(
toPush2,
toPush2Len,
var pushData2DecSize: Int = 0
let pushData2Decrypted: UnsafeMutablePointer<UInt8>? = config_decrypt(
pushData2.pointee.config,
pushData2.pointee.config_len,
edSK,
encDomain,
&toPush2DecSize
&pushData2DecSize
)
let prefixPadding2: String = (0..<(256 - 40 - expPush1Decrypted.count))
.map { _ in "\0" }
.joined()
expect(toPush2DecSize).to(equal(216)) // 256 - 40 overhead
expect(String(pointer: toPush2Decrypted, length: toPush2DecSize, encoding: .ascii))
.to(equal(String(pointer: expPush1Decrypted, length: expPush1Decrypted.count, encoding: .ascii).map { "\(prefixPadding2)\($0)" }))
toPush2?.deallocate()
toPush2Decrypted?.deallocate()
expect(pushData2DecSize).to(equal(216)) // 256 - 40 overhead
let pushData2DecryptedStr: String = String(pointer: pushData2Decrypted, length: pushData2DecSize, encoding: .ascii)!
let expPush1DecryptedStr: String = String(pointer: expPush1Decrypted, length: expPush1Decrypted.count, encoding: .ascii)
.map { "\(prefixPadding2)\($0)" }!
expect(pushData2DecryptedStr).to(equal(expPush1DecryptedStr))
pushData2Decrypted?.deallocate()
// We haven't dumped, so still need to dump:
expect(config_needs_dump(conf)).to(beTrue())
@ -189,18 +191,22 @@ class ConfigUserProfileSpec: QuickSpec {
expPush1Decrypted
.map { CChar(bitPattern: $0) },
"""
1:(0:
1:)le
e
""".removeCharacters(characterSet: CharacterSet.whitespacesAndNewlines)
.bytes
.map { CChar(bitPattern: $0) }
]
.flatMap { $0 }
].flatMap { $0 }
expect(String(pointer: dump1, length: dump1Len, encoding: .ascii))
.to(equal(String(pointer: expDump1, length: expDump1.count, encoding: .ascii)))
dump1?.deallocate()
// So now imagine we got back confirmation from the swarm that the push has been stored:
config_confirm_pushed(conf, seqno2)
let fakeHash1: String = "fakehash1"
var cFakeHash1: [CChar] = fakeHash1.cArray
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash1)
pushData2.deallocate()
expect(config_needs_push(conf)).to(beFalse())
expect(config_needs_dump(conf)).to(beTrue()) // The confirmation changes state, so this makes us need a dump
@ -208,12 +214,34 @@ class ConfigUserProfileSpec: QuickSpec {
var dump2: UnsafeMutablePointer<UInt8>? = nil
var dump2Len: Int = 0
config_dump(conf, &dump2, &dump2Len)
let expDump2: [CChar] = [
"""
d
1:! i0e
1:$ \(expPush1Decrypted.count):
"""
.removeCharacters(characterSet: CharacterSet.whitespacesAndNewlines)
.bytes
.map { CChar(bitPattern: $0) },
expPush1Decrypted
.map { CChar(bitPattern: $0) },
"""
1:(9:fakehash1
1:)le
e
""".removeCharacters(characterSet: CharacterSet.whitespacesAndNewlines)
.bytes
.map { CChar(bitPattern: $0) }
].flatMap { $0 }
expect(String(pointer: dump2, length: dump2Len, encoding: .ascii))
.to(equal(String(pointer: expDump2, length: expDump2.count, encoding: .ascii)))
dump2?.deallocate()
expect(config_needs_dump(conf)).to(beFalse())
// Now we're going to set up a second, competing config object (in the real world this would be
// another Session client somewhere).
// Start with an empty config, as above:
let error2: UnsafeMutablePointer<CChar>? = nil
var conf2: UnsafeMutablePointer<config_object>? = nil
@ -223,11 +251,13 @@ class ConfigUserProfileSpec: QuickSpec {
// Now imagine we just pulled down the `exp_push1` string from the swarm; we merge it into
// conf2:
var mergeHashes: [UnsafePointer<CChar>?] = [cFakeHash1].unsafeCopy()
var mergeData: [UnsafePointer<UInt8>?] = [expPush1Encrypted].unsafeCopy()
var mergeSize: [Int] = [expPush1Encrypted.count]
expect(config_merge(conf2, &mergeData, &mergeSize, 1)).to(equal(1))
expect(config_merge(conf2, &mergeHashes, &mergeData, &mergeSize, 1)).to(equal(1))
mergeHashes.forEach { $0?.deallocate() }
mergeData.forEach { $0?.deallocate() }
// Our state has changed, so we need to dump:
expect(config_needs_dump(conf2)).to(beTrue())
var dump3: UnsafeMutablePointer<UInt8>? = nil
@ -240,9 +270,9 @@ class ConfigUserProfileSpec: QuickSpec {
// We *don't* need to push: even though we updated, all we did is update to the merged data (and
// didn't have any sort of merge conflict needed):
expect(config_needs_push(conf2)).to(beFalse())
// Now let's create a conflicting update:
// Change the name on both clients:
user_profile_set_name(conf, "Nibbler")
user_profile_set_name(conf2, "Raz")
@ -257,16 +287,19 @@ class ConfigUserProfileSpec: QuickSpec {
// Both have changes, so both need a push
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_push(conf2)).to(beTrue())
var toPush3: UnsafeMutablePointer<UInt8>? = nil
var toPush3Len: Int = 0
let seqno3: Int64 = config_push(conf, &toPush3, &toPush3Len)
expect(seqno3).to(equal(2)) // incremented, since we made a field change
var toPush4: UnsafeMutablePointer<UInt8>? = nil
var toPush4Len: Int = 0
let seqno4: Int64 = config_push(conf2, &toPush4, &toPush4Len)
expect(seqno4).to(equal(2)) // incremented, since we made a field change
let fakeHash2: String = "fakehash2"
var cFakeHash2: [CChar] = fakeHash2.cArray
let pushData3: UnsafeMutablePointer<config_push_data> = config_push(conf)
expect(pushData3.pointee.seqno).to(equal(2)) // incremented, since we made a field change
config_confirm_pushed(conf, pushData3.pointee.seqno, &cFakeHash2)
let fakeHash3: String = "fakehash3"
var cFakeHash3: [CChar] = fakeHash3.cArray
let pushData4: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData4.pointee.seqno).to(equal(2)) // incremented, since we made a field change
config_confirm_pushed(conf, pushData4.pointee.seqno, &cFakeHash3)
var dump4: UnsafeMutablePointer<UInt8>? = nil
var dump4Len: Int = 0
config_dump(conf, &dump4, &dump4Len);
@ -279,32 +312,34 @@ class ConfigUserProfileSpec: QuickSpec {
// Since we set different things, we're going to get back different serialized data to be
// pushed:
expect(String(pointer: toPush3, length: toPush3Len, encoding: .ascii))
.toNot(equal(String(pointer: toPush4, length: toPush4Len, encoding: .ascii)))
let pushData3Str: String? = String(pointer: pushData3.pointee.config, length: pushData3.pointee.config_len, encoding: .ascii)
let pushData4Str: String? = String(pointer: pushData4.pointee.config, length: pushData4.pointee.config_len, encoding: .ascii)
expect(pushData3Str).toNot(equal(pushData4Str))
// Now imagine that each client pushed its `seqno=2` config to the swarm, but then each client
// also fetches new messages and pulls down the other client's `seqno=2` value.
// Feed the new config into each other. (This array could hold multiple configs if we pulled
// down more than one).
var mergeData2: [UnsafePointer<UInt8>?] = [UnsafePointer(toPush3)]
var mergeSize2: [Int] = [toPush3Len]
expect(config_merge(conf2, &mergeData2, &mergeSize2, 1)).to(equal(1))
toPush3?.deallocate()
var mergeData3: [UnsafePointer<UInt8>?] = [UnsafePointer(toPush4)]
var mergeSize3: [Int] = [toPush4Len]
expect(config_merge(conf, &mergeData3, &mergeSize3, 1)).to(equal(1))
toPush4?.deallocate()
var mergeHashes2: [UnsafePointer<CChar>?] = [cFakeHash2].unsafeCopy()
var mergeData2: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData3.pointee.config)]
var mergeSize2: [Int] = [pushData3.pointee.config_len]
expect(config_merge(conf2, &mergeHashes2, &mergeData2, &mergeSize2, 1)).to(equal(1))
pushData3.deallocate()
var mergeHashes3: [UnsafePointer<CChar>?] = [cFakeHash3].unsafeCopy()
var mergeData3: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData4.pointee.config)]
var mergeSize3: [Int] = [pushData4.pointee.config_len]
expect(config_merge(conf, &mergeHashes3, &mergeData3, &mergeSize3, 1)).to(equal(1))
pushData4.deallocate()
// Now after the merge we *will* want to push from both clients, since both will have generated a
// merge conflict update (with seqno = 3).
expect(config_needs_push(conf)).to(beTrue())
expect(config_needs_push(conf2)).to(beTrue())
let seqno5: Int64 = config_push(conf, &toPush3, &toPush3Len);
let seqno6: Int64 = config_push(conf2, &toPush4, &toPush4Len);
expect(seqno5).to(equal(3))
expect(seqno6).to(equal(3))
let pushData5: UnsafeMutablePointer<config_push_data> = config_push(conf)
let pushData6: UnsafeMutablePointer<config_push_data> = config_push(conf2)
expect(pushData5.pointee.seqno).to(equal(3))
expect(pushData6.pointee.seqno).to(equal(3))
// They should have resolved the conflict to the same thing:
expect(String(cString: user_profile_get_name(conf)!)).to(equal("Nibbler"))
@ -312,7 +347,7 @@ class ConfigUserProfileSpec: QuickSpec {
// (Note that they could have also both resolved to "Raz" here, but the hash of the serialized
// message just happens to have a higher hash -- and thus gets priority -- for this particular
// test).
// Since only one of them set a profile pic there should be no conflict there:
let pic3: user_profile_pic = user_profile_get_pic(conf)
expect(pic3.url).toNot(beNil())
@ -326,9 +361,17 @@ class ConfigUserProfileSpec: QuickSpec {
expect(pic4.key).toNot(beNil())
expect(Data(libSessionVal: pic4.key, count: 32).toHexString())
.to(equal("7177657274007975696f31323334353637383930313233343536373839303132"))
config_confirm_pushed(conf, seqno5)
config_confirm_pushed(conf2, seqno6)
expect(user_profile_get_nts_priority(conf)).to(equal(9))
expect(user_profile_get_nts_priority(conf2)).to(equal(9))
let fakeHash4: String = "fakehash4"
var cFakeHash4: [CChar] = fakeHash4.cArray
let fakeHash5: String = "fakehash5"
var cFakeHash5: [CChar] = fakeHash5.cArray
config_confirm_pushed(conf, pushData5.pointee.seqno, &cFakeHash4)
config_confirm_pushed(conf2, pushData6.pointee.seqno, &cFakeHash5)
pushData5.deallocate()
pushData6.deallocate()
var dump6: UnsafeMutablePointer<UInt8>? = nil
var dump6Len: Int = 0
@ -339,7 +382,7 @@ class ConfigUserProfileSpec: QuickSpec {
// (store in db)
dump6?.deallocate()
dump7?.deallocate()
expect(config_needs_dump(conf)).to(beFalse())
expect(config_needs_dump(conf2)).to(beFalse())
expect(config_needs_push(conf)).to(beFalse())
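// A sketch of the other half of the flow shown above: merging a config message fetched from
// the swarm into a local config object (`messageHash` and `messageData` are hypothetical
// placeholders for the fetched hash and ciphertext):
func configMergeSketch(_ conf: UnsafeMutablePointer<config_object>, messageHash: String, messageData: [UInt8]) {
    let cMessageHash: [CChar] = messageHash.cArray
    var mergeHashes: [UnsafePointer<CChar>?] = [cMessageHash].unsafeCopy()
    var mergeData: [UnsafePointer<UInt8>?] = [messageData].unsafeCopy()
    var mergeSize: [Int] = [messageData.count]
    let numMerged = config_merge(conf, &mergeHashes, &mergeData, &mergeSize, 1)
    mergeHashes.forEach { $0?.deallocate() }
    mergeData.forEach { $0?.deallocate() }

    // A successful merge can still leave local changes to push (eg. after resolving a
    // conflicting update), so both flags need re-checking afterwards
    if numMerged == 1, config_needs_push(conf) {
        // Push again as above; the next push will have an incremented seqno
    }
}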

View File

@ -0,0 +1,20 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import Sodium
import SessionUtil
import SessionUtilitiesKit
import Quick
import Nimble
class LibSessionSpec: QuickSpec {
// MARK: - Spec
override func spec() {
ConfigContactsSpec.spec()
ConfigUserProfileSpec.spec()
ConfigConvoInfoVolatileSpec.spec()
ConfigUserGroupsSpec.spec()
}
}
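// A hedged illustration of the pattern used above: each config spec is a plain type exposing
// a `static func spec()` (rather than its own QuickSpec subclass) so they all run within this
// single QuickSpec. A new config spec (`ConfigExampleSpec` is a hypothetical name) would
// follow the same shape and simply be added to the list above:
class ConfigExampleSpec {
    static func spec() {
        it("covers the new config type") {
            expect(true).to(beTrue()) // placeholder assertion
        }
    }
}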

View File

@ -0,0 +1,212 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import Sodium
import SessionUtil
import SessionUtilitiesKit
import SessionMessagingKit
import Quick
import Nimble
class SessionUtilSpec: QuickSpec {
// MARK: - Spec
override func spec() {
describe("SessionUtil") {
// MARK: - Parsing URLs
context("when parsing a community url") {
it("handles the example urls correctly") {
let validUrls: [String] = [
[
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"https://sessionopengroup.co/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"http://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"http://sessionopengroup.co/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"https://143.198.213.225:443/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"https://143.198.213.225:443/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"http://143.198.213.255:80/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
],
[
"http://143.198.213.255:80/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
]
].map { $0.joined() }
let processedValues: [(room: String, server: String, publicKey: String)] = validUrls
.map { SessionUtil.parseCommunity(url: $0) }
.compactMap { $0 }
let processedRooms: [String] = processedValues.map { $0.room }
let processedServers: [String] = processedValues.map { $0.server }
let processedPublicKeys: [String] = processedValues.map { $0.publicKey }
let expectedRooms: [String] = [String](repeating: "main", count: 8)
let expectedServers: [String] = [
"https://sessionopengroup.co",
"https://sessionopengroup.co",
"http://sessionopengroup.co",
"http://sessionopengroup.co",
"https://143.198.213.225",
"https://143.198.213.225",
"http://143.198.213.255",
"http://143.198.213.255"
]
let expectedPublicKeys: [String] = [String](
repeating: "658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
count: 8
)
expect(processedValues.count).to(equal(validUrls.count))
expect(processedRooms).to(equal(expectedRooms))
expect(processedServers).to(equal(expectedServers))
expect(processedPublicKeys).to(equal(expectedPublicKeys))
}
it("handles the r prefix if present") {
let info = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)
expect(info?.room).to(equal("main"))
expect(info?.server).to(equal("https://sessionopengroup.co"))
expect(info?.publicKey).to(equal("658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"))
}
it("fails if no scheme is provided") {
let info = SessionUtil.parseCommunity(
url: [
"sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if there is no room") {
let info = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if there is no public key parameter") {
let info = SessionUtil.parseCommunity(
url: "https://sessionopengroup.co/r/main"
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if the public key parameter is not 64 characters") {
let info = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if the public key parameter is not a hex string") {
let info = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co/r/main?",
"public_key=!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("maintains the same TLS") {
let server1 = SessionUtil.parseCommunity(
url: [
"http://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
let server2 = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
expect(server1).to(equal("http://sessionopengroup.co"))
expect(server2).to(equal("https://sessionopengroup.co"))
}
it("maintains the same port") {
let server1 = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
let server2 = SessionUtil.parseCommunity(
url: [
"https://sessionopengroup.co:1234/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
expect(server1).to(equal("https://sessionopengroup.co"))
expect(server2).to(equal("https://sessionopengroup.co:1234"))
}
}
// MARK: - Generating URLs
context("when generating a url") {
it("generates the url correctly") {
expect(SessionUtil.communityUrlFor(server: "server", roomToken: "room", publicKey: "f8fec9b701000000ffffffff0400008000000000000000000000000000000000"))
.to(equal("server/room?public_key=f8fec9b701000000ffffffff0400008000000000000000000000000000000000"))
}
it("maintains the casing provided") {
expect(SessionUtil.communityUrlFor(server: "SeRVer", roomToken: "RoOM", publicKey: "f8fec9b701000000ffffffff0400008000000000000000000000000000000000"))
.to(equal("SeRVer/RoOM?public_key=f8fec9b701000000ffffffff0400008000000000000000000000000000000000"))
}
}
}
}
}
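// A small usage sketch tying the two helpers tested above together, reusing one of the
// example URLs from the spec (purely illustrative):
func communityUrlRoundTripSketch() {
    let exampleUrl: String = [
        "https://sessionopengroup.co/r/main?",
        "public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
    ].joined()

    if let info = SessionUtil.parseCommunity(url: exampleUrl) {
        // Regenerating the URL keeps the scheme, server, room and key but drops the optional
        // "/r/" prefix (ie. "https://sessionopengroup.co/main?public_key=658d29b9...")
        let regenerated: String = SessionUtil.communityUrlFor(
            server: info.server,    // "https://sessionopengroup.co"
            roomToken: info.room,   // "main"
            publicKey: info.publicKey
        )
        _ = regenerated
    }
}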

View File

@ -8,7 +8,7 @@ import Nimble
@testable import SessionMessagingKit
class TypeConversionUtilitiesSpec: QuickSpec {
class LibSessionTypeConversionUtilitiesSpec: QuickSpec {
// MARK: - Spec
override func spec() {
@ -85,6 +85,13 @@ class TypeConversionUtilitiesSpec: QuickSpec {
expect(result).to(equal("TestT"))
}
it("returns an empty string when given a value only containing null termination characters with a fixed length") {
let value: (CChar, CChar, CChar, CChar, CChar) = (0, 0, 0, 0, 0)
let result = String(libSessionVal: value, fixedLength: 5)
expect(result).to(equal(""))
}
it("defaults the fixed length value to none") {
let value: (CChar, CChar, CChar, CChar, CChar) = (84, 101, 0, 0, 0)
let result = String(libSessionVal: value)
@ -134,22 +141,26 @@ class TypeConversionUtilitiesSpec: QuickSpec {
}
context("when optional") {
context("returns null when null") {
context("returns empty when null") {
let value: String? = nil
let result: (CChar, CChar, CChar, CChar, CChar)? = value?.toLibSession()
let result: (CChar, CChar, CChar, CChar, CChar) = value.toLibSession()
expect(result).to(beNil())
expect(result.0).to(equal(0))
expect(result.1).to(equal(0))
expect(result.2).to(equal(0))
expect(result.3).to(equal(0))
expect(result.4).to(equal(0))
}
context("returns a libSession value when not null") {
let value: String? = "Test"
let result: (CChar, CChar, CChar, CChar, CChar)? = value?.toLibSession()
let result: (CChar, CChar, CChar, CChar, CChar) = value.toLibSession()
expect(result?.0).to(equal(84))
expect(result?.1).to(equal(101))
expect(result?.2).to(equal(115))
expect(result?.3).to(equal(116))
expect(result?.4).to(equal(0))
expect(result.0).to(equal(84))
expect(result.1).to(equal(101))
expect(result.2).to(equal(115))
expect(result.3).to(equal(116))
expect(result.4).to(equal(0))
}
}
}
@ -176,6 +187,20 @@ class TypeConversionUtilitiesSpec: QuickSpec {
expect(result).to(equal(Data([1, 2, 3, 4, 5])))
}
it("returns data when all bytes are zero and nullIfEmpty is false") {
let value: (UInt8, UInt8, UInt8, UInt8, UInt8) = (0, 0, 0, 0, 0)
let result = Data(libSessionVal: value, count: 5, nullIfEmpty: false)
expect(result).to(equal(Data([0, 0, 0, 0, 0])))
}
it("returns null when all bytes are zero and nullIfEmpty is true") {
let value: (UInt8, UInt8, UInt8, UInt8, UInt8) = (0, 0, 0, 0, 0)
let result = Data(libSessionVal: value, count: 5, nullIfEmpty: true)
expect(result).to(beNil())
}
}
context("when converting to a libSession value") {
@ -197,23 +222,38 @@ class TypeConversionUtilitiesSpec: QuickSpec {
expect(result.4).to(equal(1))
}
context("fills with empty data when too short") {
let value: Data? = Data([1, 2, 3])
let result: (Int8, Int8, Int8, Int8, Int8) = value.toLibSession()
expect(result.0).to(equal(1))
expect(result.1).to(equal(2))
expect(result.2).to(equal(3))
expect(result.3).to(equal(0))
expect(result.4).to(equal(0))
}
context("when optional") {
context("returns null when null") {
let value: Data? = nil
let result: (Int8, Int8, Int8, Int8, Int8)? = value?.toLibSession()
let result: (Int8, Int8, Int8, Int8, Int8) = value.toLibSession()
expect(result).to(beNil())
expect(result.0).to(equal(0))
expect(result.1).to(equal(0))
expect(result.2).to(equal(0))
expect(result.3).to(equal(0))
expect(result.4).to(equal(0))
}
context("returns a libSession value when not null") {
let value: Data? = Data([1, 2, 3, 4, 5])
let result: (Int8, Int8, Int8, Int8, Int8)? = value?.toLibSession()
let result: (Int8, Int8, Int8, Int8, Int8) = value.toLibSession()
expect(result?.0).to(equal(1))
expect(result?.1).to(equal(2))
expect(result?.2).to(equal(3))
expect(result?.3).to(equal(4))
expect(result?.4).to(equal(5))
expect(result.0).to(equal(1))
expect(result.1).to(equal(2))
expect(result.2).to(equal(3))
expect(result.3).to(equal(4))
expect(result.4).to(equal(5))
}
}
}
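// A compact sketch of the conversions covered in this spec, using the same 5-byte tuples the
// tests use to stand in for libSession's fixed-size C arrays:
func libSessionValConversionSketch() {
    let name: String? = "Test"
    let cName: (CChar, CChar, CChar, CChar, CChar) = name.toLibSession() // (84, 101, 115, 116, 0)
    _ = String(libSessionVal: cName)                                     // "Test"

    // Zeroed byte fields come back as nil when nullIfEmpty is true:
    let emptyField: (UInt8, UInt8, UInt8, UInt8, UInt8) = (0, 0, 0, 0, 0)
    _ = Data(libSessionVal: emptyField, count: 5, nullIfEmpty: true)     // nil
}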
@ -222,6 +262,69 @@ class TypeConversionUtilitiesSpec: QuickSpec {
// MARK: - Array
describe("an Array") {
context("when initialised with a 2D C array") {
it("returns the correct array") {
var test: [CChar] = (
"Test1".cArray.nullTerminated() +
"Test2".cArray.nullTerminated() +
"Test3AndExtra".cArray.nullTerminated()
)
let result = test.withUnsafeMutableBufferPointer { ptr in
var mutablePtr = UnsafeMutablePointer(ptr.baseAddress)
return [String](pointer: &mutablePtr, count: 3)
}
expect(result).to(equal(["Test1", "Test2", "Test3AndExtra"]))
}
it("returns an empty array if given one") {
var test = [CChar]()
let result = test.withUnsafeMutableBufferPointer { ptr in
var mutablePtr = UnsafeMutablePointer(ptr.baseAddress)
return [String](pointer: &mutablePtr, count: 0)
}
expect(result).to(equal([]))
}
it("handles empty strings without issues") {
var test: [CChar] = (
"Test1".cArray.nullTerminated() +
"".cArray.nullTerminated() +
"Test2".cArray.nullTerminated()
)
let result = test.withUnsafeMutableBufferPointer { ptr in
var mutablePtr = UnsafeMutablePointer(ptr.baseAddress)
return [String](pointer: &mutablePtr, count: 3)
}
expect(result).to(equal(["Test1", "", "Test2"]))
}
it("returns null when given a null pointer") {
expect([String](pointer: nil, count: 5)).to(beNil())
}
it("returns null when given a null count") {
var test: [CChar] = "Test1".cArray.nullTerminated()
let result = test.withUnsafeMutableBufferPointer { ptr in
var mutablePtr = UnsafeMutablePointer(ptr.baseAddress)
return [String](pointer: &mutablePtr, count: nil)
}
expect(result).to(beNil())
}
it("returns the default value if given null values") {
expect([String](pointer: nil, count: 5, defaultValue: ["Test"]))
.to(equal(["Test"]))
}
}
context("when adding a null terminated character") {
it("adds a null termination character when not present") {
let value: [CChar] = [1, 2, 3, 4, 5]

View File

@ -93,18 +93,6 @@ class OpenGroupSpec: QuickSpec {
expect(OpenGroup.idFor(roomToken: "RoOM", server: "server")).to(equal("server.RoOM"))
}
}
context("when generating a url") {
it("generates the url correctly") {
expect(OpenGroup.urlFor(server: "server", roomToken: "room", publicKey: "key"))
.to(equal("server/room?public_key=key"))
}
it("maintains the casing provided") {
expect(OpenGroup.urlFor(server: "SeRVer", roomToken: "RoOM", publicKey: "KEy"))
.to(equal("SeRVer/RoOM?public_key=KEy"))
}
}
}
}
}

View File

@ -822,6 +822,7 @@ class OpenGroupManagerSpec: QuickSpec {
roomToken: "testRoom",
server: "testServer",
publicKey: TestConstants.serverPublicKey,
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -852,6 +853,7 @@ class OpenGroupManagerSpec: QuickSpec {
roomToken: "testRoom",
server: "testServer",
publicKey: TestConstants.serverPublicKey,
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -890,6 +892,7 @@ class OpenGroupManagerSpec: QuickSpec {
publicKey: TestConstants.serverPublicKey
.replacingOccurrences(of: "c3", with: "00")
.replacingOccurrences(of: "b3", with: "00"),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -943,6 +946,7 @@ class OpenGroupManagerSpec: QuickSpec {
roomToken: "testRoom",
server: "testServer",
publicKey: TestConstants.serverPublicKey,
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -986,6 +990,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: "testServer"),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -1000,6 +1005,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: "testServer"),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -1017,6 +1023,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: "testServer"),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -1030,6 +1037,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: "testServer"),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -1068,6 +1076,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: "testServer"),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -1120,6 +1129,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: OpenGroupAPI.defaultServer),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -1134,6 +1144,7 @@ class OpenGroupManagerSpec: QuickSpec {
.delete(
db,
openGroupId: OpenGroup.idFor(roomToken: "testRoom", server: OpenGroupAPI.defaultServer),
calledFromConfigHandling: true, // Don't trigger SessionUtil logic
dependencies: dependencies
)
}
@ -3853,158 +3864,6 @@ class OpenGroupManagerSpec: QuickSpec {
}
}
}
// MARK: - --parseOpenGroup
context("when parsing an open group url") {
it("handles the example urls correctly") {
let validUrls: [String] = [
"https://sessionopengroup.co/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"https://sessionopengroup.co/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"http://sessionopengroup.co/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"http://sessionopengroup.co/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"sessionopengroup.co/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"sessionopengroup.co/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"https://143.198.213.225:443/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"https://143.198.213.225:443/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"143.198.213.255:80/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
"143.198.213.255:80/r/main?public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
]
let processedValues: [(room: String, server: String, publicKey: String)] = validUrls
.map { OpenGroupManager.parseOpenGroup(from: $0) }
.compactMap { $0 }
let processedRooms: [String] = processedValues.map { $0.room }
let processedServers: [String] = processedValues.map { $0.server }
let processedPublicKeys: [String] = processedValues.map { $0.publicKey }
let expectedRooms: [String] = [String](repeating: "main", count: 10)
let expectedServers: [String] = [
"https://sessionopengroup.co",
"https://sessionopengroup.co",
"http://sessionopengroup.co",
"http://sessionopengroup.co",
"http://sessionopengroup.co",
"http://sessionopengroup.co",
"https://143.198.213.225:443",
"https://143.198.213.225:443",
"http://143.198.213.255:80",
"http://143.198.213.255:80"
]
let expectedPublicKeys: [String] = [String](
repeating: "658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c",
count: 10
)
expect(processedValues.count).to(equal(validUrls.count))
expect(processedRooms).to(equal(expectedRooms))
expect(processedServers).to(equal(expectedServers))
expect(processedPublicKeys).to(equal(expectedPublicKeys))
}
it("handles the r prefix if present") {
let info = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)
expect(info?.room).to(equal("main"))
expect(info?.server).to(equal("https://sessionopengroup.co"))
expect(info?.publicKey).to(equal("658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"))
}
it("fails if there is no room") {
let info = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if there is no public key parameter") {
let info = OpenGroupManager.parseOpenGroup(
from: "https://sessionopengroup.co/r/main"
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if the public key parameter is not 64 characters") {
let info = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("fails if the public key parameter is not a hex string") {
let info = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co/r/main?",
"public_key=!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
].joined()
)
expect(info?.room).to(beNil())
expect(info?.server).to(beNil())
expect(info?.publicKey).to(beNil())
}
it("maintains the same TLS") {
let server1 = OpenGroupManager.parseOpenGroup(
from: [
"sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
let server2 = OpenGroupManager.parseOpenGroup(
from: [
"http://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
let server3 = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
expect(server1).to(equal("http://sessionopengroup.co"))
expect(server2).to(equal("http://sessionopengroup.co"))
expect(server3).to(equal("https://sessionopengroup.co"))
}
it("maintains the same port") {
let server1 = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
let server2 = OpenGroupManager.parseOpenGroup(
from: [
"https://sessionopengroup.co:1234/r/main?",
"public_key=658d29b91892a2389505596b135e76a53db6e11d613a51dbd3d0816adffb231c"
].joined()
)?.server
expect(server1).to(equal("https://sessionopengroup.co"))
expect(server2).to(equal("https://sessionopengroup.co:1234"))
}
}
}
}
}

View File

@ -218,7 +218,9 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
// If we need a config sync then trigger it now
if needsConfigSync {
ConfigurationSyncJob.enqueue()
Storage.shared.write { db in
ConfigurationSyncJob.enqueue(db, publicKey: getUserHexEncodedPublicKey(db))
}
}
checkIsAppReady()

View File

@ -92,7 +92,9 @@ final class ShareNavController: UINavigationController, ShareViewDelegate {
// If we need a config sync then trigger it now
if needsConfigSync {
ConfigurationSyncJob.enqueue()
Storage.shared.write { db in
ConfigurationSyncJob.enqueue(db, publicKey: getUserHexEncodedPublicKey(db))
}
}
checkIsAppReady()

View File

@ -72,10 +72,13 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
/// Ed25519 signature of `("expire" || expiry || messages[0] || ... || messages[N])`
/// where `expiry` is the expiry timestamp expressed as a string. The signature must be base64
/// encoded (json) or bytes (bt).
/// Ed25519 signature of `("expire" || ShortenOrExtend || expiry || messages[0] || ...`
/// ` || messages[N])` where `expiry` is the expiry timestamp expressed as a string.
/// `ShortenOrExtend` is the string "shorten" if the shorten option is given (and true),
/// "extend" if `extend` is true, and empty otherwise. The signature must be base64 encoded (json) or bytes (bt).
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.expire.rawValue.bytes
.appending(contentsOf: (shorten == true ? "shorten".bytes : []))
.appending(contentsOf: (extend == true ? "extend".bytes : []))
.appending(contentsOf: "\(expiryMs)".data(using: .ascii)?.bytes)
.appending(contentsOf: messageHashes.joined().bytes)
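// Worked example (hypothetical values): with `shorten == true`, `expiryMs == 1234` and
// `messageHashes == ["hash1", "hash2"]`, the bytes built above are the ASCII of
//     "expire" + "shorten" + "1234" + "hash1" + "hash2"  ==  "expireshorten1234hash1hash2"
// and the Ed25519 signature over them is base64 encoded for JSON bodies (or sent as raw
// bytes for bencoded requests)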

View File

@ -7,8 +7,6 @@ import GRDB
import SessionUtilitiesKit
public final class SnodeAPI {
public typealias TargetedMessage = (message: SnodeMessage, namespace: Namespace)
internal static let sodium: Atomic<Sodium> = Atomic(Sodium())
private static var hasLoadedSnodePool: Atomic<Bool> = Atomic(false)
@ -315,18 +313,14 @@ public final class SnodeAPI {
namespace: namespace,
associatedWith: publicKey
)
let maybeLastHash: String? = SnodeReceivedMessageInfo
result[namespace] = SnodeReceivedMessageInfo
.fetchLastNotExpired(
for: snode,
namespace: namespace,
associatedWith: publicKey
)?
.hash
guard let lastHash: String = maybeLastHash else { return }
result[namespace] = lastHash
}
}
.flatMap { namespaceLastHash -> AnyPublisher<[SnodeAPI.Namespace: (info: ResponseInfoType, data: (messages: [SnodeReceivedMessage], lastHash: String?)?)], Error> in
@ -625,13 +619,13 @@ public final class SnodeAPI {
}
public static func sendConfigMessages(
_ targetedMessages: [TargetedMessage],
oldHashes: [String],
_ messages: [(message: SnodeMessage, namespace: Namespace)],
allObsoleteHashes: [String],
using dependencies: SSKDependencies = SSKDependencies()
) -> AnyPublisher<HTTP.BatchResponse, Error> {
guard
!targetedMessages.isEmpty,
let recipient: String = targetedMessages.first?.message.recipient
!messages.isEmpty,
let recipient: String = messages.first?.message.recipient
else {
return Fail(error: SnodeAPIError.generic)
.eraseToAnyPublisher()
@ -644,7 +638,7 @@ public final class SnodeAPI {
let userX25519PublicKey: String = getUserHexEncodedPublicKey()
let publicKey: String = recipient
var requests: [SnodeAPI.BatchRequest.Info] = targetedMessages
var requests: [SnodeAPI.BatchRequest.Info] = messages
.map { message, namespace in
// Check if this namespace requires authentication
guard namespace.requiresWriteAuthentication else {
@ -677,13 +671,13 @@ public final class SnodeAPI {
}
// If we had any previous config messages then we should delete them
if !oldHashes.isEmpty {
if !allObsoleteHashes.isEmpty {
requests.append(
BatchRequest.Info(
request: SnodeRequest(
endpoint: .deleteMessages,
body: DeleteMessagesRequest(
messageHashes: oldHashes,
messageHashes: allObsoleteHashes,
requireSuccessfulDeletion: false,
pubkey: userX25519PublicKey,
ed25519PublicKey: userED25519KeyPair.publicKey,
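A hedged call-site sketch of the renamed parameters; the `snodeMessage` value, the `.configUserProfile` namespace case and the sink handling are placeholders for illustration.

import Combine

// Sketch only: push one updated config message and flag previously pushed hashes
// as obsolete so the same batch request also deletes them
let cancellable: AnyCancellable = SnodeAPI
    .sendConfigMessages(
        [(message: snodeMessage, namespace: .configUserProfile)],
        allObsoleteHashes: ["previouslyPushedMessageHash"]
    )
    .sink(
        receiveCompletion: { completion in print("Config push completed: \(completion)") },
        receiveValue: { _ in /* inspect the per-request responses here */ }
    )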

View File

@ -14,6 +14,7 @@ public enum StorageError: Error {
case objectNotSaved
case invalidSearchPattern
case invalidData
case devRemigrationRequired
}

View File

@ -27,6 +27,29 @@ public extension Database {
}
}
func createIndex<T>(
withCustomName customName: String? = nil,
on table: T.Type,
columns: [T.Columns],
options: IndexOptions = [],
condition: (any SQLExpressible)? = nil
) throws where T: TableRecord, T: ColumnExpressible {
guard !columns.isEmpty else { throw StorageError.invalidData }
let indexName: String = (
customName ??
"\(T.databaseTableName)_on_\(columns.map { $0.name }.joined(separator: "_and_"))"
)
try create(
index: indexName,
on: T.databaseTableName,
columns: columns.map { $0.name },
options: options,
condition: condition
)
}
func makeFTS5Pattern<T>(rawPattern: String, forTable table: T.Type) throws -> FTS5Pattern where T: TableRecord, T: ColumnExpressible {
return try makeFTS5Pattern(rawPattern: rawPattern, forTable: table.databaseTableName)
}
@ -46,27 +69,27 @@ public extension Database {
onRollback: @escaping (Database) -> Void = { _ in }
) {
afterNextTransactionNestedOnce(
dedupeIdentifier: UUID().uuidString,
dedupeId: UUID().uuidString,
onCommit: onCommit,
onRollback: onRollback
)
}
func afterNextTransactionNestedOnce(
dedupeIdentifier: String,
dedupeId: String,
onCommit: @escaping (Database) -> Void,
onRollback: @escaping (Database) -> Void = { _ in }
) {
// Only allow a single observer per `dedupeIdentifier` per transaction, this allows us to
// Only allow a single observer per `dedupeId` per transaction, this allows us to
// schedule an action to run at most once per transaction (eg. auto-scheduling a ConfigSyncJob
// when receiving messages)
guard !TransactionHandler.registeredHandlers.wrappedValue.contains(dedupeIdentifier) else {
guard !TransactionHandler.registeredHandlers.wrappedValue.contains(dedupeId) else {
return
}
add(
transactionObserver: TransactionHandler(
identifier: dedupeIdentifier,
identifier: dedupeId,
onCommit: onCommit,
onRollback: onRollback
),
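A brief usage sketch of the deduplication behaviour; the dedupeId value and the `scheduleFollowUpSync` helper are hypothetical.

Storage.shared.write { db in
    // ... process a batch of received messages ...

    // Registered at most once for this transaction, even if this code runs
    // for every message in the batch
    db.afterNextTransactionNestedOnce(dedupeId: "ExampleBatch-ScheduleSync") { _ in
        scheduleFollowUpSync()  // hypothetical follow-up action
    }
}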

View File

@ -6,23 +6,28 @@ import Foundation
/// The `Atomic<Value>` wrapper is a generic wrapper providing a thread-safe way to get and set a value
///
/// A write-up on the need for this class and its approach can be found here:
/// A write-up on the need for this class and its approaches can be found at these links:
/// https://www.vadimbulavin.com/atomic-properties/
/// https://www.vadimbulavin.com/swift-atomic-properties-with-property-wrappers/
/// there is also another approach which can be taken but it requires separate types for collections and results in
/// a somewhat inconsistent interface between different `Atomic` wrappers
///
/// We use a Read-write lock approach because the `DispatchQueue` approach means mutating the property
/// occurs on a different thread, and GRDB requires its changes to be executed on specific threads so using a lock
/// is more compatible (and Read-write locks allow for concurrent reads which shouldn't be a huge issue but could
/// help reduce cases of blocking)
@propertyWrapper
public class Atomic<Value> {
// Note: Using 'userInteractive' to ensure this can't be blocked by higher priority queues
// which could result in the main thread getting blocked
private let queue: DispatchQueue = DispatchQueue(
label: "io.oxen.\(UUID().uuidString)",
qos: .userInteractive
)
private var value: Value
private let lock: ReadWriteLock = ReadWriteLock()
/// In order to change the value you **must** use the `mutate` function
public var wrappedValue: Value {
return queue.sync { return value }
lock.readLock()
let result: Value = value
lock.unlock()
return result
}
/// For more information see https://github.com/apple/swift-evolution/blob/master/proposals/0258-property-wrappers.md#projections
@ -36,18 +41,34 @@ public class Atomic<Value> {
self.value = initialValue
}
public init(wrappedValue: Value) {
self.value = wrappedValue
}
// MARK: - Functions
@discardableResult public func mutate<T>(_ mutation: (inout Value) -> T) -> T {
return queue.sync {
return mutation(&value)
}
lock.writeLock()
let result: T = mutation(&value)
lock.unlock()
return result
}
@discardableResult public func mutate<T>(_ mutation: (inout Value) throws -> T) throws -> T {
return try queue.sync {
return try mutation(&value)
let result: T
do {
lock.writeLock()
result = try mutation(&value)
lock.unlock()
}
catch {
lock.unlock()
throw error
}
return result
}
}
@ -56,3 +77,25 @@ extension Atomic where Value: CustomDebugStringConvertible {
return value.debugDescription
}
}
// MARK: - ReadWriteLock
private class ReadWriteLock {
private var rwlock: pthread_rwlock_t = {
var rwlock = pthread_rwlock_t()
pthread_rwlock_init(&rwlock, nil)
return rwlock
}()
func writeLock() {
pthread_rwlock_wrlock(&rwlock)
}
func readLock() {
pthread_rwlock_rdlock(&rwlock)
}
func unlock() {
pthread_rwlock_unlock(&rwlock)
}
}
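A short usage sketch of the wrapper with the lock-based implementation, assuming the existing `Atomic(_:)` initialiser: reads go through `wrappedValue` (read lock, concurrent readers allowed) and all modifications go through `mutate` (exclusive write lock).

import Foundation

let requestCount: Atomic<Int> = Atomic(0)

// 100 concurrent increments, each serialised by the write lock
DispatchQueue.concurrentPerform(iterations: 100) { _ in
    requestCount.mutate { $0 += 1 }
}

print(requestCount.wrappedValue)  // 100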

View File

@ -18,7 +18,6 @@ public extension Collection {
}
}
public extension Collection where Element == [CChar] {
/// This creates an array of UnsafePointer types to access data of the C strings in memory. This array provides no automated
/// memory management of its children so after use you are responsible for handling the life cycle of the child elements and
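A sketch of the ownership pattern that comment describes; the C function is hypothetical and `strdup`/`free` stand in for whatever copying strategy the caller uses.

import Foundation

func callCApi(with values: [String]) {
    // Copy each string onto the heap so the pointers remain valid for the C call
    let cStringPointers: [UnsafeMutablePointer<CChar>?] = values.map { strdup($0) }

    // The caller owns the copies and must free every one of them afterwards
    defer { cStringPointers.forEach { free($0) } }

    // some_c_api(cStringPointers, cStringPointers.count)  // hypothetical C call
}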

View File

@ -143,7 +143,7 @@ public final class JobRunner {
guard canStartJob else { return }
// Start the job runner if needed
db.afterNextTransactionNestedOnce(dedupeIdentifier: "JobRunner-Start: \(updatedJob.variant)") { _ in
db.afterNextTransactionNestedOnce(dedupeId: "JobRunner-Start: \(updatedJob.variant)") { _ in
queues.wrappedValue[updatedJob.variant]?.start()
}
}
@ -166,7 +166,7 @@ public final class JobRunner {
guard canStartJob else { return }
// Start the job runner if needed
db.afterNextTransactionNestedOnce(dedupeIdentifier: "JobRunner-Start: \(job.variant)") { _ in
db.afterNextTransactionNestedOnce(dedupeId: "JobRunner-Start: \(job.variant)") { _ in
queues.wrappedValue[job.variant]?.start()
}
}