Sorted out a bunch of the config syncing logic (a short illustrative sketch of the new update-then-enqueue pattern follows this summary)

Updated the onboarding to attempt to retrieve the current user's profile config and skip display name collection if a display name already exists
Updated the logic to get the snode pool and build paths immediately on launch even if the user hasn't been created yet (faster onboarding)
Removed the iOS-specific concurrent dual snode '/store' behaviour
Cleaned up the profile updating logic
Fixed an issue where the pollers could end up deadlocking the main thread if too many tried to start concurrently
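
A recurring pattern across the diffs below: call sites stop invoking MessageSender.syncConfiguration(db, forceSyncNow: true) inline and instead update rows via the new `updateAllAndConfig` helper and/or enqueue a throttled ConfigurationSyncJob. The snippet that follows is a minimal, self-contained sketch of that call-site shape using GRDB only; the Contact record and enqueueConfigSync function are simplified stand-ins for the app's Contact model and ConfigurationSyncJob.enqueue(_:), not the project's actual implementation.

// Minimal sketch of the "update, then enqueue a throttled config sync" pattern.
// Assumes GRDB; `enqueueConfigSync` is a hypothetical stand-in for ConfigurationSyncJob.enqueue(_:).
import GRDB

struct Contact: Codable, FetchableRecord, PersistableRecord {
    static let databaseTableName: String = "contact"

    var id: String
    var isBlocked: Bool

    enum Columns {
        static let id: Column = Column(CodingKeys.id)
        static let isBlocked: Column = Column(CodingKeys.isBlocked)
    }
}

func enqueueConfigSync(_ db: Database) {
    // The real job upserts a single '.configurationSync' Job and lets the JobRunner
    // throttle reruns via 'nextRunTimestamp'; here we just log for illustration.
    print("configuration sync enqueued")
}

let dbQueue: DatabaseQueue = try DatabaseQueue() // in-memory database for the sketch

try dbQueue.write { db in
    try db.create(table: "contact") { t in
        t.column("id", .text).primaryKey()
        t.column("isBlocked", .boolean).notNull().defaults(to: false)
    }
    try Contact(id: "05-example-session-id", isBlocked: true).insert(db)

    // Old pattern: updateAll(...) followed by an inline forced MessageSender.syncConfiguration
    // New pattern: a plain column update, then one enqueued (throttled) config sync
    try Contact
        .filter(Contact.Columns.id == "05-example-session-id")
        .updateAll(db, Contact.Columns.isBlocked.set(to: false))
    enqueueConfigSync(db)
}

In the real job (ConfigurationSyncJob.swift further down in this commit), repeated enqueues collapse into a single pending job whose nextRunTimestamp is pushed out by maxRunFrequency (3 seconds), which is what replaces the old per-call forced syncs.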
Morgan Pretty 2022-12-16 16:51:08 +11:00
parent 893967e380
commit 8f3dcbc6be
67 changed files with 2314 additions and 1146 deletions

View file

@ -502,7 +502,6 @@
FD09797527FAB64300936362 /* ProfileManager.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09797327FAB3E200936362 /* ProfileManager.swift */; };
FD09797727FAB7A600936362 /* Data+Image.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09797627FAB7A600936362 /* Data+Image.swift */; };
FD09797927FAB7E800936362 /* ImageFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09797827FAB7E800936362 /* ImageFormat.swift */; };
FD09797B27FBB25900936362 /* Updatable.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09797A27FBB25900936362 /* Updatable.swift */; };
FD09797D27FBDB2000936362 /* Notification+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09797C27FBDB2000936362 /* Notification+Utilities.swift */; };
FD09798127FCFEE800936362 /* SessionThread.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09798027FCFEE800936362 /* SessionThread.swift */; };
FD09798327FD1A1500936362 /* ClosedGroup.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD09798227FD1A1500936362 /* ClosedGroup.swift */; };
@ -591,6 +590,9 @@
FD2AAAF128ED57B500A49611 /* SynchronousStorage.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD2AAAEF28ED57B500A49611 /* SynchronousStorage.swift */; };
FD2AAAF228ED57B500A49611 /* SynchronousStorage.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD2AAAEF28ED57B500A49611 /* SynchronousStorage.swift */; };
FD2B4AFB29429D1000AB4848 /* ConfigContactsSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD2B4AFA29429D1000AB4848 /* ConfigContactsSpec.swift */; };
FD2B4AFD294688D000AB4848 /* SessionUtil+Contacts.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD2B4AFC294688D000AB4848 /* SessionUtil+Contacts.swift */; };
FD2B4AFF2946C93200AB4848 /* ConfigurationSyncJob.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD2B4AFE2946C93200AB4848 /* ConfigurationSyncJob.swift */; };
FD2B4B042949887A00AB4848 /* QueryInterfaceRequest+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD2B4B032949887A00AB4848 /* QueryInterfaceRequest+Utilities.swift */; };
FD37E9C328A1C6F3003AE748 /* ThemeManager.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD37E9C228A1C6F3003AE748 /* ThemeManager.swift */; };
FD37E9C628A1D4EC003AE748 /* Theme+ClassicDark.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD37E9C528A1D4EC003AE748 /* Theme+ClassicDark.swift */; };
FD37E9C828A1D73F003AE748 /* Theme+Colors.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD37E9C728A1D73F003AE748 /* Theme+Colors.swift */; };
@ -1656,7 +1658,6 @@
FD09797327FAB3E200936362 /* ProfileManager.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ProfileManager.swift; sourceTree = "<group>"; };
FD09797627FAB7A600936362 /* Data+Image.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Data+Image.swift"; sourceTree = "<group>"; };
FD09797827FAB7E800936362 /* ImageFormat.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ImageFormat.swift; sourceTree = "<group>"; };
FD09797A27FBB25900936362 /* Updatable.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Updatable.swift; sourceTree = "<group>"; };
FD09797C27FBDB2000936362 /* Notification+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Notification+Utilities.swift"; sourceTree = "<group>"; };
FD09798027FCFEE800936362 /* SessionThread.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SessionThread.swift; sourceTree = "<group>"; };
FD09798227FD1A1500936362 /* ClosedGroup.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ClosedGroup.swift; sourceTree = "<group>"; };
@ -1707,6 +1708,9 @@
FD28A4F527EAD44C00FF65E7 /* Storage.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Storage.swift; sourceTree = "<group>"; };
FD2AAAEF28ED57B500A49611 /* SynchronousStorage.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SynchronousStorage.swift; sourceTree = "<group>"; };
FD2B4AFA29429D1000AB4848 /* ConfigContactsSpec.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ConfigContactsSpec.swift; sourceTree = "<group>"; };
FD2B4AFC294688D000AB4848 /* SessionUtil+Contacts.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+Contacts.swift"; sourceTree = "<group>"; };
FD2B4AFE2946C93200AB4848 /* ConfigurationSyncJob.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ConfigurationSyncJob.swift; sourceTree = "<group>"; };
FD2B4B032949887A00AB4848 /* QueryInterfaceRequest+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "QueryInterfaceRequest+Utilities.swift"; sourceTree = "<group>"; };
FD37E9C228A1C6F3003AE748 /* ThemeManager.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ThemeManager.swift; sourceTree = "<group>"; };
FD37E9C528A1D4EC003AE748 /* Theme+ClassicDark.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Theme+ClassicDark.swift"; sourceTree = "<group>"; };
FD37E9C728A1D73F003AE748 /* Theme+Colors.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Theme+Colors.swift"; sourceTree = "<group>"; };
@ -2550,7 +2554,6 @@
C33FDB22255A580900E217F9 /* OWSMediaUtils.swift */,
C33FDB1C255A580900E217F9 /* UIImage+OWS.h */,
C33FDB81255A581100E217F9 /* UIImage+OWS.m */,
FD09797A27FBB25900936362 /* Updatable.swift */,
);
path = Media;
sourceTree = "<group>";
@ -3719,6 +3722,14 @@
path = Utilities;
sourceTree = "<group>";
};
FD2B4B022949886900AB4848 /* Database */ = {
isa = PBXGroup;
children = (
FD2B4B032949887A00AB4848 /* QueryInterfaceRequest+Utilities.swift */,
);
path = Database;
sourceTree = "<group>";
};
FD37E9C428A1C701003AE748 /* Themes */ = {
isa = PBXGroup;
children = (
@ -4005,6 +4016,7 @@
FD8ECF7529340F4800C0D1BB /* LibSessionUtil */ = {
isa = PBXGroup;
children = (
FD2B4B022949886900AB4848 /* Database */,
FD8ECF8E29381FB200C0D1BB /* Config Handling */,
FD8ECF7829340F7100C0D1BB /* libsession-util.xcframework */,
FD8ECF882935AB7200C0D1BB /* SessionUtilError.swift */,
@ -4026,6 +4038,7 @@
isa = PBXGroup;
children = (
FD8ECF8F29381FC200C0D1BB /* SessionUtil+UserProfile.swift */,
FD2B4AFC294688D000AB4848 /* SessionUtil+Contacts.swift */,
);
path = "Config Handling";
sourceTree = "<group>";
@ -4199,6 +4212,7 @@
FDF0B74E28079E5E004C14C5 /* SendReadReceiptsJob.swift */,
C352A348255781F400338F3E /* AttachmentDownloadJob.swift */,
C352A35A2557824E00338F3E /* AttachmentUploadJob.swift */,
FD2B4AFE2946C93200AB4848 /* ConfigurationSyncJob.swift */,
);
path = Types;
sourceTree = "<group>";
@ -5468,7 +5482,6 @@
FD17D7EA27F6A1C600122BE0 /* SUKLegacy.swift in Sources */,
FDA8EB10280F8238002B68E5 /* Codable+Utilities.swift in Sources */,
C352A36D2557858E00338F3E /* NSTimer+Proxying.m in Sources */,
FD09797B27FBB25900936362 /* Updatable.swift in Sources */,
7B7CB192271508AD0079FF93 /* CallRingTonePlayer.swift in Sources */,
C3C2ABD22553C6C900C340D1 /* Data+SecureRandom.swift in Sources */,
FD848B8B283DC509000E298B /* PagedDatabaseObserver.swift in Sources */,
@ -5623,7 +5636,9 @@
FDC4386527B4DE7600C60D73 /* RoomPollInfo.swift in Sources */,
FD245C6B2850667400B966DD /* VisibleMessage+Profile.swift in Sources */,
FD37EA0F28AB3330003AE748 /* _006_FixHiddenModAdminSupport.swift in Sources */,
FD2B4AFD294688D000AB4848 /* SessionUtil+Contacts.swift in Sources */,
7B81682328A4C1210069F315 /* UpdateTypes.swift in Sources */,
FD2B4AFF2946C93200AB4848 /* ConfigurationSyncJob.swift in Sources */,
FDC438A627BB113A00C60D73 /* UserUnbanRequest.swift in Sources */,
FD5C72FB284F0EA10029977D /* MessageReceiver+DataExtractionNotification.swift in Sources */,
FDC4386727B4E10E00C60D73 /* Capabilities.swift in Sources */,
@ -5728,6 +5743,7 @@
C32C5DBF256DD743003C73A2 /* ClosedGroupPoller.swift in Sources */,
C352A35B2557824E00338F3E /* AttachmentUploadJob.swift in Sources */,
FD5C7305284F0FF30029977D /* MessageReceiver+VisibleMessages.swift in Sources */,
FD2B4B042949887A00AB4848 /* QueryInterfaceRequest+Utilities.swift in Sources */,
FD09797027FA6FF300936362 /* Profile.swift in Sources */,
FD245C56285065EA00B966DD /* SNProto.swift in Sources */,
FD09798B27FD1CFE00936362 /* Capability.swift in Sources */,

View file

@ -1837,7 +1837,7 @@ extension ConversationVC:
message: unsendRequest,
threadId: threadId,
interactionId: nil,
to: .contact(publicKey: userPublicKey, namespace: .default)
to: .contact(publicKey: userPublicKey)
)
}
return
@ -1856,7 +1856,7 @@ extension ConversationVC:
message: unsendRequest,
threadId: threadId,
interactionId: nil,
to: .contact(publicKey: userPublicKey, namespace: .default)
to: .contact(publicKey: userPublicKey)
)
}
self?.showInputAccessoryView()
@ -2303,8 +2303,8 @@ extension ConversationVC {
return
}
Storage.shared.writeAsync(
updates: { db in
Storage.shared
.writePublisher { db in
// If we aren't creating a new thread (ie. sending a message request) then send a
// messageRequestResponse back to the sender (this allows the sender to know that
// they have been approved and can now use this contact in closed groups)
@ -2321,21 +2321,22 @@ extension ConversationVC {
}
// Default 'didApproveMe' to true for the person approving the message request
try approvalData.contact
.with(
isApproved: true,
didApproveMe: .update(approvalData.contact.didApproveMe || !isNewThread)
try approvalData.contact.save(db)
try Contact
.filter(id: approvalData.contact.id)
.updateAllAndConfig(
db,
Contact.Columns.isApproved.set(to: true),
Contact.Columns.didApproveMe
.set(to: approvalData.contact.didApproveMe || !isNewThread)
)
.save(db)
// Update the config with the approved contact
try MessageSender
.syncConfiguration(db, forceSyncNow: true)
.sinkUntilComplete()
},
completion: { _, _ in updateNavigationBackStack() }
)
}
.sinkUntilComplete(
receiveCompletion: { _ in
// Update the UI
updateNavigationBackStack()
}
)
}
@objc func acceptMessageRequest() {

View file

@ -481,11 +481,7 @@ public class ConversationViewModel: OWSAudioPlayerDelegate {
Storage.shared.writeAsync { db in
try Contact
.filter(id: threadId)
.updateAll(db, Contact.Columns.isBlocked.set(to: false))
try MessageSender
.syncConfiguration(db, forceSyncNow: true)
.sinkUntilComplete()
.updateAllAndConfig(db, Contact.Columns.isBlocked.set(to: false))
}
}

View file

@ -152,7 +152,7 @@ class ThreadSettingsViewModel: SessionTableViewModel<ThreadSettingsViewModel.Nav
dependencies.storage.writeAsync { db in
try Profile
.filter(id: threadId)
.updateAll(
.updateAllAndConfig(
db,
Profile.Columns.nickname
.set(to: (updatedNickname.isEmpty ? nil : editedDisplayName))
@ -749,15 +749,13 @@ class ThreadSettingsViewModel: SessionTableViewModel<ThreadSettingsViewModel.Nav
dependencies.storage.writeAsync(
updates: { db in
try Contact
.fetchOrCreate(db, id: threadId)
.with(isBlocked: .updateTo(isBlocked))
.save(db)
.filter(id: threadId)
.updateAllAndConfig(
db,
Contact.Columns.isBlocked.set(to: isBlocked)
)
},
completion: { [weak self] db, _ in
try MessageSender
.syncConfiguration(db, forceSyncNow: true)
.sinkUntilComplete()
DispatchQueue.main.async {
let modal: ConfirmationModal = ConfirmationModal(
info: ConfirmationModal.Info(

View file

@ -278,9 +278,11 @@ final class HomeVC: BaseVC, UITableViewDataSource, UITableViewDelegate, SeedRemi
if Identity.userExists(), let appDelegate: AppDelegate = UIApplication.shared.delegate as? AppDelegate {
appDelegate.startPollersIfNeeded()
// Do this only if we created a new Session ID, or if we already received the initial configuration message
if UserDefaults.standard[.hasSyncedInitialConfiguration] {
appDelegate.syncConfigurationIfNeeded()
if !Features.useSharedUtilForUserConfig {
// Do this only if we created a new Session ID, or if we already received the initial configuration message
if UserDefaults.standard[.hasSyncedInitialConfiguration] {
appDelegate.syncConfigurationIfNeeded()
}
}
}
@ -709,22 +711,21 @@ final class HomeVC: BaseVC, UITableViewDataSource, UITableViewDelegate, SeedRemi
// Delay the change to give the cell "unswipe" animation some time to complete
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + unswipeAnimationDelay) {
Storage.shared.writeAsync { db in
try Contact
.filter(id: threadViewModel.threadId)
.updateAll(
db,
Contact.Columns.isBlocked.set(
to: (threadViewModel.threadIsBlocked == false ?
true:
false
Storage.shared
.writePublisher { db in
try Contact
.filter(id: threadViewModel.threadId)
.updateAllAndConfig(
db,
Contact.Columns.isBlocked.set(
to: (threadViewModel.threadIsBlocked == false ?
true:
false
)
)
)
)
try MessageSender.syncConfiguration(db, forceSyncNow: true)
.sinkUntilComplete()
}
}
.sinkUntilComplete()
}
}
block.themeBackgroundColor = .conversationButton_swipeSecondary

View file

@ -195,10 +195,8 @@ public class MessageRequestsViewModel {
removeGroupData: true
)
// Force a config sync
try MessageSender
.syncConfiguration(db, forceSyncNow: true)
.sinkUntilComplete()
// Trigger a config sync
ConfigurationSyncJob.enqueue(db)
}
}
@ -227,28 +225,26 @@ public class MessageRequestsViewModel {
Storage.shared.writeAsync(
updates: { db in
// Update the contact
_ = try Contact
try Contact
.fetchOrCreate(db, id: threadId)
.with(
isApproved: false,
isBlocked: true,
.save(db)
try Contact
.filter(id: threadId)
.updateAllAndConfig(
db,
Contact.Columns.isApproved.set(to: false),
Contact.Columns.isBlocked.set(to: true),
// Note: We set this to true so the current user will be able to send a
// message to the person who originally sent them the message request in
// the future if they unblock them
didApproveMe: true
Contact.Columns.didApproveMe.set(to: true)
)
.saved(db)
// Remove the thread
_ = try SessionThread
.filter(id: threadId)
.deleteAll(db)
// Force a config sync
try MessageSender
.syncConfiguration(db, forceSyncNow: true)
.sinkUntilComplete()
},
completion: { _, _ in completion?() }
)

View file

@ -652,17 +652,20 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
// MARK: - Config Sync
func syncConfigurationIfNeeded() {
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
guard !Features.useSharedUtilForUserConfig else { return }
let lastSync: Date = (UserDefaults.standard[.lastConfigurationSync] ?? .distantPast)
guard Date().timeIntervalSince(lastSync) > (7 * 24 * 60 * 60) else { return } // Sync at most once every 7 days
Storage.shared
.writePublisherFlatMap { db in try MessageSender.syncConfiguration(db, forceSyncNow: false) }
.sinkUntilComplete(
receiveCompletion: { result in
.writeAsync(
updates: { db in ConfigurationSyncJob.enqueue(db) },
completion: { _, result in
switch result {
case .failure: break
case .finished:
case .success:
// Only update the 'lastConfigurationSync' timestamp if we have done the
// first sync (Don't want a new device config sync to override config
// syncs from other devices)

View file

@ -6,11 +6,25 @@ import SessionMessagingKit
import SignalUtilitiesKit
final class DisplayNameVC: BaseVC {
private let flow: Onboarding.Flow
private var spacer1HeightConstraint: NSLayoutConstraint!
private var spacer2HeightConstraint: NSLayoutConstraint!
private var registerButtonBottomOffsetConstraint: NSLayoutConstraint!
private var bottomConstraint: NSLayoutConstraint!
// MARK: - Initialization
init(flow: Onboarding.Flow) {
self.flow = flow
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
// MARK: - Components
private lazy var displayNameTextField: TextField = {
@ -176,11 +190,22 @@ final class DisplayNameVC: BaseVC {
// Try to save the user name but ignore the result
ProfileManager.updateLocal(
queue: DispatchQueue.global(qos: .default),
profileName: displayName,
image: nil,
imageFilePath: nil
profileName: displayName
)
let pnModeVC = PNModeVC()
// If we are not in the registration flow then we are finished and should go straight
// to the home screen
guard self.flow == .register else {
self.flow.completeRegistration()
// Go to the home screen
let homeVC: HomeVC = HomeVC()
self.navigationController?.setViewControllers([ homeVC ], animated: true)
return
}
// Need to get the PN mode if registering
let pnModeVC = PNModeVC(flow: .register)
navigationController?.pushViewController(pnModeVC, animated: true)
}
}

View file

@ -91,10 +91,6 @@ final class LinkDeviceVC: BaseVC, UIPageViewControllerDataSource, UIPageViewCont
tabBarTopConstraint.constant = navigationController!.navigationBar.height()
}
deinit {
NotificationCenter.default.removeObserver(self)
}
// MARK: - General
func pageViewController(_ pageViewController: UIPageViewController, viewControllerBefore viewController: UIViewController) -> UIViewController? {
@ -154,32 +150,17 @@ final class LinkDeviceVC: BaseVC, UIPageViewControllerDataSource, UIPageViewCont
return
}
let (ed25519KeyPair, x25519KeyPair) = try! Identity.generate(from: seed)
Onboarding.Flow.link.preregister(with: seed, ed25519KeyPair: ed25519KeyPair, x25519KeyPair: x25519KeyPair)
Identity.didRegister()
Onboarding.Flow.link
.preregister(
with: seed,
ed25519KeyPair: ed25519KeyPair,
x25519KeyPair: x25519KeyPair
)
// Now that we have registered get the Snode pool
GetSnodePoolJob.run()
NotificationCenter.default.addObserver(self, selector: #selector(handleInitialConfigurationMessageReceived), name: .initialConfigurationMessageReceived, object: nil)
ModalActivityIndicatorViewController
.present(
// There was some crashing here due to force-unwrapping so just falling back to
// using self if there is no nav controller
fromViewController: (self.navigationController ?? self)
) { [weak self] modal in
self?.activityIndicatorModal = modal
}
}
@objc private func handleInitialConfigurationMessageReceived(_ notification: Notification) {
DispatchQueue.main.async {
self.navigationController!.dismiss(animated: true) {
let pnModeVC = PNModeVC()
self.navigationController!.setViewControllers([ pnModeVC ], animated: true)
}
}
// Otherwise continue on to request push notifications permissions
let pnModeVC: PNModeVC = PNModeVC(flow: .link)
self.navigationController?.pushViewController(pnModeVC, animated: true)
}
}

View file

@ -1,58 +1,124 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import Combine
import Sodium
import GRDB
import SessionUtilitiesKit
import SessionMessagingKit
enum Onboarding {
private static let profileNameRetrievalPublisher: Atomic<AnyPublisher<String?, Error>> = {
let userPublicKey: String = getUserHexEncodedPublicKey()
return Atomic(
SnodeAPI.getSwarm(for: userPublicKey)
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.flatMap { swarm -> AnyPublisher<Void, Error> in
guard let snode = swarm.randomElement() else {
return Fail(error: SnodeAPIError.generic)
.eraseToAnyPublisher()
}
return CurrentUserPoller.poll(
namespaces: [.configUserProfile],
from: snode,
for: userPublicKey,
on: DispatchQueue.global(qos: .userInitiated),
// Note: These values mean the received messages will be
// processed immediately rather than async as part of a Job
calledFromBackgroundPoller: true,
isBackgroundPollValid: { true }
)
}
.flatMap { _ -> AnyPublisher<String?, Error> in
Storage.shared.readPublisher { db in
try Profile
.filter(id: userPublicKey)
.select(.name)
.asRequest(of: String.self)
.fetchOne(db)
}
}
.shareReplay(1)
.eraseToAnyPublisher()
)
}()
public static var profileNamePublisher: AnyPublisher<String?, Error> {
profileNameRetrievalPublisher.wrappedValue
}
enum Flow {
case register, recover, link
func preregister(with seed: Data, ed25519KeyPair: KeyPair, x25519KeyPair: KeyPair) {
let userDefaults = UserDefaults.standard
Identity.store(seed: seed, ed25519KeyPair: ed25519KeyPair, x25519KeyPair: x25519KeyPair)
let x25519PublicKey = x25519KeyPair.hexEncodedPublicKey
// Create the initial shared util state (won't have been created on
// launch due to lack of ed25519 key)
SessionUtil.loadState(
userPublicKey: x25519PublicKey,
ed25519SecretKey: ed25519KeyPair.secretKey
)
// Store the user identity information
Storage.shared.write { db in
try Contact(id: x25519PublicKey)
.with(
isApproved: true,
didApproveMe: true
)
.save(db)
try Identity.store(
db,
seed: seed,
ed25519KeyPair: ed25519KeyPair,
x25519KeyPair: x25519KeyPair
)
// No need to show the seed again if the user is restoring or linking
db[.hasViewedSeed] = (self == .recover || self == .link)
// Create a contact for the current user and set their approval/trusted statuses so
// they don't get weird behaviours
try Contact
.fetchOrCreate(db, id: x25519PublicKey)
.save(db)
try Contact
.filter(id: x25519PublicKey)
.updateAllAndConfig(
db,
Contact.Columns.isTrusted.set(to: true), // Always trust the current user
Contact.Columns.isApproved.set(to: true),
Contact.Columns.didApproveMe.set(to: true)
)
// Create the 'Note to Self' thread (not visible by default)
try SessionThread
.fetchOrCreate(db, id: x25519PublicKey, variant: .contact)
.save(db)
// Create the initial shared util state (won't have been created on
// launch due to lack of ed25519 key)
SessionUtil.loadState(ed25519SecretKey: ed25519KeyPair.secretKey)
// No need to show the seed again if the user is restoring or linking
db[.hasViewedSeed] = (self == .recover || self == .link)
}
// Set hasSyncedInitialConfiguration to true so that when we hit the
// home screen a configuration sync is triggered (yes, the logic is a
// bit weird). This is needed so that if the user registers and
// immediately links a device, there'll be a configuration in their swarm.
userDefaults[.hasSyncedInitialConfiguration] = (self == .register)
UserDefaults.standard[.hasSyncedInitialConfiguration] = (self == .register)
switch self {
case .register, .recover:
// Set both lastDisplayNameUpdate and lastProfilePictureUpdate to the
// current date, so that we don't overwrite what the user set in the
// display name step with whatever we find in their swarm.
userDefaults[.lastDisplayNameUpdate] = Date()
userDefaults[.lastProfilePictureUpdate] = Date()
case .link: break
}
// Only continue if this isn't a new account
guard self != .register else { return }
// Fetch the user's existing profile name from their swarm
Onboarding.profileNamePublisher.sinkUntilComplete()
}
func completeRegistration() {
// Set the `lastDisplayNameUpdate` to the current date, so that we don't
// overwrite what the user set in the display name step with whatever we
// find in their swarm (otherwise the user could enter a display name and
// have it immediately overwritten due to the config request running slow)
UserDefaults.standard[.lastDisplayNameUpdate] = Date()
// Notify the app that registration is complete
Identity.didRegister()
// Now that we have registered get the Snode pool and sync push tokens
GetSnodePoolJob.run()
SyncPushTokensJob.run(uploadOnlyIfStale: false)
}
}
}

View file

@ -1,13 +1,15 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import UIKit
import Combine
import SessionUIKit
import SessionMessagingKit
import SessionSnodeKit
import SignalUtilitiesKit
final class PNModeVC: BaseVC, OptionViewDelegate {
private let flow: Onboarding.Flow
private var optionViews: [OptionView] {
[ apnsOptionView, backgroundPollingOptionView ]
}
@ -15,7 +17,19 @@ final class PNModeVC: BaseVC, OptionViewDelegate {
private var selectedOptionView: OptionView? {
return optionViews.first { $0.isSelected }
}
// MARK: - Initialization
init(flow: Onboarding.Flow) {
self.flow = flow
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
// MARK: - Components
private lazy var apnsOptionView: OptionView = {
@ -128,14 +142,68 @@ final class PNModeVC: BaseVC, OptionViewDelegate {
}
UserDefaults.standard[.isUsingFullAPNs] = (selectedOptionView == apnsOptionView)
Identity.didRegister()
// If we are registering then we can just continue on
guard flow != .register else {
self.flow.completeRegistration()
// Go to the home screen
let homeVC: HomeVC = HomeVC()
self.navigationController?.setViewControllers([ homeVC ], animated: true)
return
}
// Go to the home screen
let homeVC: HomeVC = HomeVC()
self.navigationController?.setViewControllers([ homeVC ], animated: true)
// Check if we already have a profile name (ie. profile retrieval completed while waiting on
// this screen)
let existingProfileName: String? = Storage.shared
.read { db in
try Profile
.filter(id: getUserHexEncodedPublicKey(db))
.select(.name)
.asRequest(of: String.self)
.fetchOne(db)
}
// Now that we have registered get the Snode pool and sync push tokens
GetSnodePoolJob.run()
SyncPushTokensJob.run(uploadOnlyIfStale: false)
guard existingProfileName?.isEmpty != false else {
// If we have one then we can go straight to the home screen
self.flow.completeRegistration()
// Go to the home screen
let homeVC: HomeVC = HomeVC()
self.navigationController?.setViewControllers([ homeVC ], animated: true)
return
}
// If we don't have one then show a loading indicator and try to retrieve the existing name
ModalActivityIndicatorViewController.present(fromViewController: self) { viewController in
Onboarding.profileNamePublisher
.timeout(.seconds(10), scheduler: DispatchQueue.main, customError: { HTTPError.timeout })
.catch { _ -> AnyPublisher<String?, Error> in
SNLog("Onboarding failed to retrieve existing profile information")
return Just(nil)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
.receive(on: DispatchQueue.main)
.sinkUntilComplete(
receiveValue: { [weak self, flow = self.flow] value in
// Hide the loading indicator
viewController.dismiss(animated: true)
// If we have no display name we need to collect one
guard value?.isEmpty == false else {
let displayNameVC: DisplayNameVC = DisplayNameVC(flow: flow)
self?.navigationController?.pushViewController(displayNameVC, animated: true)
return
}
// Otherwise we are done and can go to the home screen
self?.flow.completeRegistration()
// Go to the home screen
let homeVC: HomeVC = HomeVC()
self?.navigationController?.setViewControllers([ homeVC ], animated: true)
}
)
}
}
}

View file

@ -198,11 +198,18 @@ final class RegisterVC : BaseVC {
animate()
}
// MARK: Interaction
// MARK: - Interaction
@objc private func register() {
Onboarding.Flow.register.preregister(with: seed, ed25519KeyPair: ed25519KeyPair, x25519KeyPair: x25519KeyPair)
let displayNameVC = DisplayNameVC()
navigationController!.pushViewController(displayNameVC, animated: true)
Onboarding.Flow.register
.preregister(
with: seed,
ed25519KeyPair: ed25519KeyPair,
x25519KeyPair: x25519KeyPair
)
let displayNameVC: DisplayNameVC = DisplayNameVC(flow: .register)
self.navigationController?.pushViewController(displayNameVC, animated: true)
}
@objc private func copyPublicKey() {

View file

@ -194,22 +194,33 @@ final class RestoreVC: BaseVC {
present(modal, animated: true)
}
let mnemonic = mnemonicTextView.text!.lowercased()
let seed: Data
let keyPairs: (ed25519KeyPair: KeyPair, x25519KeyPair: KeyPair)
do {
let hexEncodedSeed = try Mnemonic.decode(mnemonic: mnemonic)
let seed = Data(hex: hexEncodedSeed)
let (ed25519KeyPair, x25519KeyPair) = try! Identity.generate(from: seed)
Onboarding.Flow.recover.preregister(with: seed, ed25519KeyPair: ed25519KeyPair, x25519KeyPair: x25519KeyPair)
mnemonicTextView.resignFirstResponder()
Timer.scheduledTimer(withTimeInterval: 0.25, repeats: false) { _ in
let displayNameVC = DisplayNameVC()
self.navigationController!.pushViewController(displayNameVC, animated: true)
}
} catch let error {
let mnemonic: String = mnemonicTextView.text!.lowercased()
let hexEncodedSeed: String = try Mnemonic.decode(mnemonic: mnemonic)
seed = Data(hex: hexEncodedSeed)
keyPairs = try Identity.generate(from: seed)
}
catch let error {
let error = error as? Mnemonic.DecodingError ?? Mnemonic.DecodingError.generic
showError(title: error.errorDescription!)
return
}
// Load in the user config and progress to the next screen
mnemonicTextView.resignFirstResponder()
Onboarding.Flow.recover
.preregister(
with: seed,
ed25519KeyPair: keyPairs.ed25519KeyPair,
x25519KeyPair: keyPairs.x25519KeyPair
)
let pnModeVC: PNModeVC = PNModeVC(flow: .recover)
self.navigationController?.pushViewController(pnModeVC, animated: true)
}
@objc private func handleLegalLabelTapped(_ tapGestureRecognizer: UITapGestureRecognizer) {

View file

@ -246,10 +246,7 @@ class BlockedContactsViewModel: SessionTableViewModel<NoNav, BlockedContactsView
Storage.shared.write { db in
_ = try Contact
.filter(ids: contactIds)
.updateAll(db, Contact.Columns.isBlocked.set(to: false))
// Force a config sync
try MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
.updateAllAndConfig(db, Contact.Columns.isBlocked.set(to: false))
}
self?.selectedContactIdsSubject.send([])

View file

@ -4,16 +4,19 @@ import Foundation
class ImagePickerHandler: NSObject, UIImagePickerControllerDelegate & UINavigationControllerDelegate {
private let onTransition: (UIViewController, TransitionType) -> Void
private let onImagePicked: (UIImage?, String?) -> Void
private let onImagePicked: (UIImage) -> Void
private let onImageFilePicked: (String) -> Void
// MARK: - Initialization
init(
onTransition: @escaping (UIViewController, TransitionType) -> Void,
onImagePicked: @escaping (UIImage?, String?) -> Void
onImagePicked: @escaping (UIImage) -> Void,
onImageFilePicked: @escaping (String) -> Void
) {
self.onTransition = onTransition
self.onImagePicked = onImagePicked
self.onImageFilePicked = onImageFilePicked
}
// MARK: - UIImagePickerControllerDelegate
@ -44,14 +47,14 @@ class ImagePickerHandler: NSObject, UIImagePickerControllerDelegate & UINavigati
let viewController: CropScaleImageViewController = CropScaleImageViewController(
srcImage: rawAvatar,
successCompletion: { resultImage in
self?.onImagePicked(resultImage, nil)
self?.onImagePicked(resultImage)
}
)
self?.onTransition(viewController, .present)
return
}
self?.onImagePicked(nil, imageUrl.path)
self?.onImageFilePicked(imageUrl.path)
}
}
}

View file

@ -149,8 +149,7 @@ final class NukeDataModal: Modal {
private func clearDeviceOnly() {
ModalActivityIndicatorViewController.present(fromViewController: self, canCancel: false) { [weak self] _ in
Storage.shared
.writePublisherFlatMap { db in try MessageSender.syncConfiguration(db, forceSyncNow: true) }
ConfigurationSyncJob.run()
.receive(on: DispatchQueue.main)
.sinkUntilComplete(
receiveCompletion: { _ in

View file

@ -70,13 +70,20 @@ class SettingsViewModel: SessionTableViewModel<SettingsViewModel.NavButton, Sett
private let userSessionId: String
private lazy var imagePickerHandler: ImagePickerHandler = ImagePickerHandler(
onTransition: { [weak self] in self?.transitionToScreen($0, transitionType: $1) },
onImagePicked: { [weak self] resultImage, resultImagePath in
onImagePicked: { [weak self] resultImage in
guard let oldDisplayName: String = self?.oldDisplayName else { return }
self?.updateProfile(
name: (self?.oldDisplayName ?? ""),
profilePicture: resultImage,
profilePictureFilePath: resultImagePath,
isUpdatingDisplayName: false,
isUpdatingProfilePicture: true
name: oldDisplayName,
avatarUpdate: .uploadImage(resultImage)
)
},
onImageFilePicked: { [weak self] resultImagePath in
guard let oldDisplayName: String = self?.oldDisplayName else { return }
self?.updateProfile(
name: oldDisplayName,
avatarUpdate: .uploadFilePath(resultImagePath)
)
}
)
@ -204,10 +211,7 @@ class SettingsViewModel: SessionTableViewModel<SettingsViewModel.NavButton, Sett
self?.oldDisplayName = updatedNickname
self?.updateProfile(
name: updatedNickname,
profilePicture: nil,
profilePictureFilePath: nil,
isUpdatingDisplayName: true,
isUpdatingProfilePicture: false
avatarUpdate: .none
)
}
]
@ -512,17 +516,14 @@ class SettingsViewModel: SessionTableViewModel<SettingsViewModel.NavButton, Sett
}
private func removeProfileImage() {
let oldDisplayName: String = self.oldDisplayName
let viewController = ModalActivityIndicatorViewController(canCancel: false) { [weak self] modalActivityIndicator in
ProfileManager.updateLocal(
queue: DispatchQueue.global(qos: .default),
profileName: (self?.oldDisplayName ?? ""),
image: nil,
imageFilePath: nil,
success: { db, updatedProfile in
UserDefaults.standard[.lastProfilePictureUpdate] = Date()
try MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
profileName: oldDisplayName,
avatarUpdate: .remove,
success: { db in
// Wait for the database transaction to complete before updating the UI
db.afterNextTransaction { _ in
DispatchQueue.main.async {
@ -554,33 +555,14 @@ class SettingsViewModel: SessionTableViewModel<SettingsViewModel.NavButton, Sett
fileprivate func updateProfile(
name: String,
profilePicture: UIImage?,
profilePictureFilePath: String?,
isUpdatingDisplayName: Bool,
isUpdatingProfilePicture: Bool
avatarUpdate: ProfileManager.AvatarUpdate
) {
let imageFilePath: String? = (
profilePictureFilePath ??
ProfileManager.profileAvatarFilepath(id: self.userSessionId)
)
let viewController = ModalActivityIndicatorViewController(canCancel: false) { [weak self] modalActivityIndicator in
ProfileManager.updateLocal(
queue: DispatchQueue.global(qos: .default),
profileName: name,
image: profilePicture,
imageFilePath: imageFilePath,
success: { db, updatedProfile in
if isUpdatingDisplayName {
UserDefaults.standard[.lastDisplayNameUpdate] = Date()
}
if isUpdatingProfilePicture {
UserDefaults.standard[.lastProfilePictureUpdate] = Date()
}
try MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
avatarUpdate: avatarUpdate,
success: { db in
// Wait for the database transaction to complete before updating the UI
db.afterNextTransaction { _ in
DispatchQueue.main.async {

View file

@ -43,7 +43,8 @@ public enum SNMessagingKit { // Just to make the external API nice
JobRunner.add(executor: MessageReceiveJob.self, for: .messageReceive)
JobRunner.add(executor: NotifyPushServerJob.self, for: .notifyPushServer)
JobRunner.add(executor: SendReadReceiptsJob.self, for: .sendReadReceipts)
JobRunner.add(executor: AttachmentDownloadJob.self, for: .attachmentDownload)
JobRunner.add(executor: AttachmentUploadJob.self, for: .attachmentUpload)
JobRunner.add(executor: AttachmentDownloadJob.self, for: .attachmentDownload)
JobRunner.add(executor: ConfigurationSyncJob.self, for: .configurationSync)
}
}

View file

@ -1639,10 +1639,10 @@ public enum SMKLegacy {
self.message = message
if let destString: String = _MessageSendJob.process(rawDestination, type: "contact") {
destination = .contact(publicKey: destString, namespace: .default)
destination = .contact(publicKey: destString)
}
else if let destString: String = _MessageSendJob.process(rawDestination, type: "closedGroup") {
destination = .closedGroup(groupPublicKey: destString, namespace: .legacyClosedGroup)
destination = .closedGroup(groupPublicKey: destString)
}
else if _MessageSendJob.process(rawDestination, type: "openGroup") != nil {
// We can no longer support sending messages to legacy open groups

View file

@ -525,7 +525,7 @@ enum _003_YDBToGRDBMigration: Migration {
let recipientString: String = {
if let destination: Message.Destination = destination {
switch destination {
case .contact(let publicKey, _): return publicKey
case .contact(let publicKey): return publicKey
default: break
}
}
@ -974,7 +974,7 @@ enum _003_YDBToGRDBMigration: Migration {
.map { $0 })
.defaulting(to: []),
destination: (threadVariant == .contact ?
.contact(publicKey: threadId, namespace: .default) :
.contact(publicKey: threadId) :
nil
),
variant: variant,
@ -989,7 +989,7 @@ enum _003_YDBToGRDBMigration: Migration {
.map { $0 })
.defaulting(to: []),
destination: (threadVariant == .contact ?
.contact(publicKey: threadId, namespace: .default) :
.contact(publicKey: threadId) :
nil
),
variant: variant,
@ -1278,8 +1278,8 @@ enum _003_YDBToGRDBMigration: Migration {
// Fetch the threadId and interactionId this job should be associated with
let threadId: String = {
switch legacyJob.destination {
case .contact(let publicKey, _): return publicKey
case .closedGroup(let groupPublicKey, _): return groupPublicKey
case .contact(let publicKey): return publicKey
case .closedGroup(let groupPublicKey): return groupPublicKey
case .openGroup(let roomToken, let server, _, _, _):
return OpenGroup.idFor(roomToken: roomToken, server: server)
@ -1435,7 +1435,7 @@ enum _003_YDBToGRDBMigration: Migration {
behaviour: .recurring,
threadId: threadId,
details: SendReadReceiptsJob.Details(
destination: .contact(publicKey: threadId, namespace: .default),
destination: .contact(publicKey: threadId),
timestampMsValues: timestampsMs
)
)?.migrationSafeInserted(db)

View file

@ -17,12 +17,19 @@ enum _011_SharedUtilChanges: Migration {
try db.create(table: ConfigDump.self) { t in
t.column(.variant, .text)
.notNull()
.primaryKey()
t.column(.publicKey, .text)
.notNull()
.indexed()
t.column(.data, .blob)
.notNull()
t.column(.combinedMessageHashes, .text)
t.primaryKey([.variant, .publicKey])
}
// If we don't have an ed25519 key then no need to create cached dump data
let userPublicKey: String = getUserHexEncodedPublicKey(db)
guard let secretKey: [UInt8] = Identity.fetchUserEd25519KeyPair(db)?.secretKey else {
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration
return
@ -34,13 +41,57 @@ enum _011_SharedUtilChanges: Migration {
secretKey: secretKey,
cachedData: nil
)
let confResult: SessionUtil.ConfResult = try SessionUtil.update(
let userProfileConfResult: SessionUtil.ConfResult = try SessionUtil.update(
profile: Profile.fetchOrCreateCurrentUser(db),
in: .custom(conf: Atomic(userProfileConf))
in: Atomic(userProfileConf)
)
if confResult.needsDump {
try SessionUtil.saveState(db, conf: userProfileConf, for: .userProfile)
if userProfileConfResult.needsDump {
try SessionUtil
.createDump(
conf: userProfileConf,
for: .userProfile,
publicKey: userPublicKey,
messageHashes: nil
)?
.save(db)
}
// Create a dump for the contacts data
struct ContactInfo: FetchableRecord, Decodable, ColumnExpressible {
typealias Columns = CodingKeys
enum CodingKeys: String, CodingKey, ColumnExpression, CaseIterable {
case contact
case profile
}
let contact: Contact
let profile: Profile?
}
let contactsData: [ContactInfo] = try Contact
.including(optional: Contact.profile)
.asRequest(of: ContactInfo.self)
.fetchAll(db)
let contactsConf: UnsafeMutablePointer<config_object>? = try SessionUtil.loadState(
for: .contacts,
secretKey: secretKey,
cachedData: nil
)
let contactsConfResult: SessionUtil.ConfResult = try SessionUtil.upsert(
contactData: contactsData.map { ($0.contact.id, $0.contact, $0.profile) },
in: Atomic(contactsConf)
)
if contactsConfResult.needsDump {
try SessionUtil
.createDump(
conf: contactsConf,
for: .contacts,
publicKey: userPublicKey,
messageHashes: nil
)?
.save(db)
}
Storage.update(progress: 1, for: self, in: target) // In case this is the last migration

View file

@ -130,7 +130,7 @@ public extension BlindedIdLookup {
if isCheckingForOutbox && !contact.isApproved {
try Contact
.filter(id: contact.id)
.updateAll(db, Contact.Columns.isApproved.set(to: true))
.updateAllAndConfig(db, Contact.Columns.isApproved.set(to: true))
}
break

View file

@ -4,6 +4,8 @@ import Foundation
import GRDB
import SessionUtilitiesKit
/// This type is duplicated in both the database and the SessionUtil config, so its data should only ever be changed via the
/// `updateAllAndConfig` function. Updating it elsewhere could result in issues with syncing data between devices
public struct Contact: Codable, Identifiable, Equatable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
public static var databaseTableName: String { "contact" }
internal static let threadForeignKey = ForeignKey([Columns.id], to: [SessionThread.Columns.id])
@ -66,29 +68,6 @@ public struct Contact: Codable, Identifiable, Equatable, FetchableRecord, Persis
}
}
// MARK: - Convenience
public extension Contact {
func with(
isTrusted: Updatable<Bool> = .existing,
isApproved: Updatable<Bool> = .existing,
isBlocked: Updatable<Bool> = .existing,
didApproveMe: Updatable<Bool> = .existing
) -> Contact {
return Contact(
id: id,
isTrusted: (
(isTrusted ?? self.isTrusted) ||
self.id == getUserHexEncodedPublicKey() // Always trust ourselves
),
isApproved: (isApproved ?? self.isApproved),
isBlocked: (isBlocked ?? self.isBlocked),
didApproveMe: (didApproveMe ?? self.didApproveMe),
hasBeenBlocked: ((isBlocked ?? self.isBlocked) || self.hasBeenBlocked)
)
}
}
// MARK: - GRDB Interactions
public extension Contact {

View file

@ -5,6 +5,8 @@ import GRDB
import DifferenceKit
import SessionUtilitiesKit
/// This type is duplicated in both the database and the SessionUtil config, so its data should only ever be changed via the
/// `updateAllAndConfig` function. Updating it elsewhere could result in issues with syncing data between devices
public struct Profile: Codable, Identifiable, Equatable, Hashable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible, CustomStringConvertible, Differentiable {
public static var databaseTableName: String { "profile" }
internal static let interactionForeignKey = ForeignKey([Columns.id], to: [Interaction.Columns.authorId])
@ -160,26 +162,6 @@ public extension Profile {
}
}
// MARK: - Mutation
public extension Profile {
func with(
name: String? = nil,
profilePictureUrl: Updatable<String?> = .existing,
profilePictureFileName: Updatable<String?> = .existing,
profileEncryptionKey: Updatable<Data?> = .existing
) -> Profile {
return Profile(
id: id,
name: (name ?? self.name),
nickname: self.nickname,
profilePictureUrl: (profilePictureUrl ?? self.profilePictureUrl),
profilePictureFileName: (profilePictureFileName ?? self.profilePictureFileName),
profileEncryptionKey: (profileEncryptionKey ?? self.profileEncryptionKey)
)
}
}
// MARK: - GRDB Interactions
public extension Profile {

View file

@ -2,38 +2,81 @@
import Foundation
import GRDB
import SessionSnodeKit
import SessionUtilitiesKit
public struct ConfigDump: Codable, Equatable, Hashable, Identifiable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
public struct ConfigDump: Codable, Equatable, Hashable, FetchableRecord, PersistableRecord, TableRecord, ColumnExpressible {
public static var databaseTableName: String { "configDump" }
public typealias Columns = CodingKeys
public enum CodingKeys: String, CodingKey, ColumnExpression {
case variant
case publicKey
case data
case combinedMessageHashes
}
public enum Variant: String, Codable, DatabaseValueConvertible, CaseIterable {
public enum Variant: String, Codable, DatabaseValueConvertible {
case userProfile
case contacts
}
public var id: Variant { variant }
/// The type of config this dump is for
public let variant: Variant
/// The public key for the swarm this dump is for
///
/// **Note:** For user config items this will be an empty string
public let publicKey: String
/// The data for this dump
public let data: Data
/// A comma-delimited string of message hashes for previously stored messages on the server
private let combinedMessageHashes: String?
/// An array of message hashes for previously stored messages on the server
var messageHashes: [String]? { ConfigDump.messageHashes(from: combinedMessageHashes) }
internal init(
variant: Variant,
publicKey: String,
data: Data,
messageHashes: [String]?
) {
self.variant = variant
self.publicKey = publicKey
self.data = data
self.combinedMessageHashes = ConfigDump.combinedMessageHashes(from: messageHashes)
}
}
// MARK: - Convenience
public extension ConfigDump {
static func combinedMessageHashes(from messageHashes: [String]?) -> String? {
return messageHashes?.joined(separator: ",")
}
static func messageHashes(from combinedMessageHashes: String?) -> [String]? {
return combinedMessageHashes?.components(separatedBy: ",")
}
}
public extension ConfigDump.Variant {
static let userVariants: [ConfigDump.Variant] = [ .userProfile, .contacts ]
var configMessageKind: SharedConfigMessage.Kind {
switch self {
case .userProfile: return .userProfile
case .contacts: return .contacts
}
}
var namespace: SnodeAPI.Namespace {
switch self {
case .userProfile: return SnodeAPI.Namespace.configUserProfile
case .contacts: return SnodeAPI.Namespace.configContacts
}
}
}

View file

@ -0,0 +1,355 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import Combine
import GRDB
import SessionUtil
import SessionSnodeKit
import SessionUtilitiesKit
public enum ConfigurationSyncJob: JobExecutor {
public static let maxFailureCount: Int = -1
public static let requiresThreadId: Bool = false
public static let requiresInteractionId: Bool = false
private static let maxRunFrequency: TimeInterval = 3
public static func run(
_ job: Job,
queue: DispatchQueue,
success: @escaping (Job, Bool) -> (),
failure: @escaping (Job, Error?, Bool) -> (),
deferred: @escaping (Job) -> ()
) {
guard Features.useSharedUtilForUserConfig else {
success(job, true)
return
}
// If we don't have a userKeyPair yet then there is no need to sync the configuration
// as the user doesn't exist yet (this will get triggered on the first launch of a
// fresh install due to the migrations getting run)
guard
let pendingSwarmConfigChanges: [SingleDestinationChanges] = Storage.shared
.read({ db -> [SessionUtil.OutgoingConfResult]? in
guard
Identity.userExists(db),
let ed25519SecretKey: [UInt8] = Identity.fetchUserEd25519KeyPair(db)?.secretKey
else { return nil }
return try SessionUtil.pendingChanges(
db,
userPublicKey: getUserHexEncodedPublicKey(db),
ed25519SecretKey: ed25519SecretKey
)
})?
.grouped(by: { $0.destination })
.map({ (destination: Message.Destination, value: [SessionUtil.OutgoingConfResult]) -> SingleDestinationChanges in
SingleDestinationChanges(
destination: destination,
messages: value,
allOldHashes: value
.map { ($0.oldMessageHashes ?? []) }
.reduce([], +)
.asSet()
)
})
else {
failure(job, StorageError.generic, false)
return
}
// If there are no pending changes then the job can just complete (next time something
// is updated we want to try and run immediately so don't schedule another run in this case)
guard !pendingSwarmConfigChanges.isEmpty else {
success(job, true)
return
}
Storage.shared
.readPublisher { db in
try pendingSwarmConfigChanges
.map { (change: SingleDestinationChanges) -> (messages: [TargetedMessage], allOldHashes: Set<String>) in
(
messages: try change.messages
.map { (outgoingConf: SessionUtil.OutgoingConfResult) -> TargetedMessage in
TargetedMessage(
sendData: try MessageSender.preparedSendData(
db,
message: outgoingConf.message,
to: change.destination,
interactionId: nil
),
namespace: outgoingConf.namespace,
oldHashes: (outgoingConf.oldMessageHashes ?? [])
)
},
allOldHashes: change.allOldHashes
)
}
}
.subscribe(on: queue)
.receive(on: queue)
.flatMap { (pendingSwarmChange: [(messages: [TargetedMessage], allOldHashes: Set<String>)]) -> AnyPublisher<[HTTP.BatchResponse], Error> in
Publishers
.MergeMany(
pendingSwarmChange
.map { (messages: [TargetedMessage], oldHashes: Set<String>) in
// Note: We do custom sending logic here because we want to batch the
// sending and deletion of messages within the same swarm
SnodeAPI
.sendConfigMessages(
messages
.compactMap { targetedMessage -> SnodeAPI.TargetedMessage? in
targetedMessage.sendData.snodeMessage
.map { ($0, targetedMessage.namespace) }
},
oldHashes: Array(oldHashes)
)
}
)
.collect()
.eraseToAnyPublisher()
}
.map { (responses: [HTTP.BatchResponse]) -> [SuccessfulChange] in
// Process the response data into an easy to understand form (this isn't strictly
// needed but the code gets convoluted without this)
zip(responses, pendingSwarmConfigChanges)
.compactMap { (batchResponse: HTTP.BatchResponse, pendingSwarmChange: SingleDestinationChanges) -> [SuccessfulChange]? in
let maybePublicKey: String? = {
switch pendingSwarmChange.destination {
case .contact(let publicKey), .closedGroup(let publicKey):
return publicKey
default: return nil
}
}()
// If we don't have a publicKey then this is an invalid config
guard let publicKey: String = maybePublicKey else { return nil }
// Need to know if we successfully deleted old messages (if we didn't then
// we want to keep the old hashes so we can delete them the next time)
let didDeleteOldConfigMessages: Bool = {
guard
let subResponse: HTTP.BatchSubResponse<DeleteMessagesResponse> = (batchResponse.responses.last as? HTTP.BatchSubResponse<DeleteMessagesResponse>),
200...299 ~= subResponse.code
else { return false }
return true
}()
return zip(batchResponse.responses, pendingSwarmChange.messages)
.reduce(into: []) { (result: inout [SuccessfulChange], next: ResponseChange) in
// If the request wasn't successful then just ignore it (the next
// config sync will try to make the changes again)
guard
let subResponse: HTTP.BatchSubResponse<SendMessagesResponse> = (next.response as? HTTP.BatchSubResponse<SendMessagesResponse>),
200...299 ~= subResponse.code,
let sendMessageResponse: SendMessagesResponse = subResponse.body
else { return }
result.append(
SuccessfulChange(
message: next.change.message,
publicKey: publicKey,
updatedHashes: (didDeleteOldConfigMessages ?
[sendMessageResponse.hash] :
(next.change.oldMessageHashes ?? [])
.appending(sendMessageResponse.hash)
)
)
)
}
}
.flatMap { $0 }
}
.map { (successfulChanges: [SuccessfulChange]) -> [ConfigDump] in
// Now that we have the successful changes, we need to mark them as pushed and
// generate any config dumps which need to be stored
successfulChanges
.compactMap { successfulChange -> ConfigDump? in
// Updating the pushed state returns a flag indicating whether the config
// needs to be dumped
guard SessionUtil.markAsPushed(message: successfulChange.message, publicKey: successfulChange.publicKey) else {
return nil
}
let variant: ConfigDump.Variant = successfulChange.message.kind.configDumpVariant
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: variant,
publicKey: successfulChange.publicKey
)
return try? SessionUtil.createDump(
conf: atomicConf.wrappedValue,
for: variant,
publicKey: successfulChange.publicKey,
messageHashes: successfulChange.updatedHashes
)
}
}
.sinkUntilComplete(
receiveValue: { (configDumps: [ConfigDump]) in
// Flag to indicate whether the job should be finished or will run again
var shouldFinishCurrentJob: Bool = false
// Lastly we need to save the updated dumps to the database
let updatedJob: Job? = Storage.shared.write { db in
// Save the updated dumps to the database
try configDumps.forEach { try $0.save(db) }
// When we complete the 'ConfigurationSync' job we want to immediately schedule
// another one with a 'nextRunTimestamp' set to the 'maxRunFrequency' value to
// throttle the config sync requests
let nextRunTimestamp: TimeInterval = (Date().timeIntervalSince1970 + maxRunFrequency)
// If another 'ConfigurationSync' job was scheduled then update that one
// to run at 'nextRunTimestamp' and make the current job stop
if
let existingJob: Job = try? Job
.filter(Job.Columns.id != job.id)
.filter(Job.Columns.variant == Job.Variant.configurationSync)
.fetchOne(db),
!JobRunner.isCurrentlyRunning(existingJob)
{
_ = try existingJob
.with(nextRunTimestamp: nextRunTimestamp)
.saved(db)
shouldFinishCurrentJob = true
return job
}
return try job
.with(nextRunTimestamp: nextRunTimestamp)
.saved(db)
}
success((updatedJob ?? job), shouldFinishCurrentJob)
}
)
}
}
// MARK: - Convenience Types
public extension ConfigurationSyncJob {
fileprivate struct SingleDestinationChanges {
let destination: Message.Destination
let messages: [SessionUtil.OutgoingConfResult]
let allOldHashes: Set<String>
}
fileprivate struct TargetedMessage {
let sendData: MessageSender.PreparedSendData
let namespace: SnodeAPI.Namespace
let oldHashes: [String]
}
typealias ResponseChange = (response: Codable, change: SessionUtil.OutgoingConfResult)
fileprivate struct SuccessfulChange {
let message: SharedConfigMessage
let publicKey: String
let updatedHashes: [String]
}
}
// MARK: - Convenience
public extension ConfigurationSyncJob {
static func enqueue(_ db: Database? = nil) {
guard let db: Database = db else {
Storage.shared.writeAsync { ConfigurationSyncJob.enqueue($0) }
return
}
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
guard Features.useSharedUtilForUserConfig else {
// If we don't have a userKeyPair yet then there is no need to sync the configuration
// as the user doesn't exist yet (this will get triggered on the first launch of a
// fresh install due to the migrations getting run)
guard
Identity.userExists(db),
let legacyConfigMessage: Message = try? ConfigurationMessage.getCurrent(db)
else { return }
let publicKey: String = getUserHexEncodedPublicKey(db)
JobRunner.add(
db,
job: Job(
variant: .messageSend,
threadId: publicKey,
details: MessageSendJob.Details(
destination: Message.Destination.contact(publicKey: publicKey),
message: legacyConfigMessage
)
)
)
return
}
// Upsert a config sync job (if there is already a pending one then no need
// to add another one)
JobRunner.upsert(
db,
job: ConfigurationSyncJob.createOrUpdateIfNeeded(db)
)
}
@discardableResult static func createOrUpdateIfNeeded(_ db: Database) -> Job {
// Try to get an existing job (if there is one that's not running)
if
let existingJob: Job = try? Job
.filter(Job.Columns.variant == Job.Variant.configurationSync)
.fetchOne(db),
!JobRunner.isCurrentlyRunning(existingJob)
{
return existingJob
}
// Otherwise create a new job
return Job(
variant: .configurationSync,
behaviour: .recurring
)
}
static func run() -> AnyPublisher<Void, Error> {
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
guard Features.useSharedUtilForUserConfig else {
return Storage.shared
.writePublisher { db -> MessageSender.PreparedSendData in
// If we don't have a userKeyPair yet then there is no need to sync the configuration
// as the user doesn't exist yet (this will get triggered on the first launch of a
// fresh install due to the migrations getting run)
guard Identity.userExists(db) else { throw StorageError.generic }
let publicKey: String = getUserHexEncodedPublicKey(db)
return try MessageSender.preparedSendData(
db,
message: try ConfigurationMessage.getCurrent(db),
to: Message.Destination.contact(publicKey: publicKey),
interactionId: nil
)
}
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.receive(on: DispatchQueue.global(qos: .userInitiated))
.flatMap { MessageSender.sendImmediate(preparedSendData: $0) }
.eraseToAnyPublisher()
}
// Trigger the job emitting the result when completed
return Future { resolver in
ConfigurationSyncJob.run(
Job(variant: .configurationSync),
queue: DispatchQueue.global(qos: .userInitiated),
success: { _, _ in resolver(Result.success(())) },
failure: { _, error, _ in resolver(Result.failure(error ?? HTTPError.generic)) },
deferred: { _ in }
)
}
.eraseToAnyPublisher()
}
}

View file

@ -33,7 +33,11 @@ public enum MessageReceiveJob: JobExecutor {
Storage.shared.write { db in
// Send any SharedConfigMessages to the SessionUtil to handle it
try SessionUtil.handleConfigMessages(db, messages: sharedConfigMessages)
try SessionUtil.handleConfigMessages(
db,
messages: sharedConfigMessages,
publicKey: (job.threadId ?? "")
)
// Handle the remaining messages
var remainingMessagesToProcess: [Details.MessageInfo] = []

View file

@ -9,7 +9,7 @@ public enum SendReadReceiptsJob: JobExecutor {
public static let maxFailureCount: Int = -1
public static let requiresThreadId: Bool = false
public static let requiresInteractionId: Bool = false
private static let minRunFrequency: TimeInterval = 3
private static let maxRunFrequency: TimeInterval = 3
public static func run(
_ job: Job,
@ -56,9 +56,9 @@ public enum SendReadReceiptsJob: JobExecutor {
case .finished:
// When we complete the 'SendReadReceiptsJob' we want to immediately schedule
// another one for the same thread but with a 'nextRunTimestamp' set to the
// 'minRunFrequency' value to throttle the read receipt requests
// 'maxRunFrequency' value to throttle the read receipt requests
var shouldFinishCurrentJob: Bool = false
let nextRunTimestamp: TimeInterval = (Date().timeIntervalSince1970 + minRunFrequency)
let nextRunTimestamp: TimeInterval = (Date().timeIntervalSince1970 + maxRunFrequency)
let updatedJob: Job? = Storage.shared.write { db in
// If another 'sendReadReceipts' job was scheduled then update that one
@ -163,7 +163,7 @@ public extension SendReadReceiptsJob {
behaviour: .recurring,
threadId: threadId,
details: Details(
destination: .contact(publicKey: threadId, namespace: .default),
destination: .contact(publicKey: threadId),
timestampMsValues: timestampMsValues.asSet()
)
)
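A tiny sketch of the throttle arithmetic described above (names and values come from this hunk):
// With maxRunFrequency = 3, a job finishing at time `t` is not re-run immediately; the
// follow-up job's nextRunTimestamp is set to `t + 3`, so read receipts generated within
// that window get batched into the scheduled job instead of being sent one-by-one
let nextRunTimestamp: TimeInterval = (Date().timeIntervalSince1970 + maxRunFrequency)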

View file

@ -49,11 +49,8 @@ public enum UpdateProfilePictureJob: JobExecutor {
ProfileManager.updateLocal(
queue: queue,
profileName: profile.name,
image: nil,
imageFilePath: profileFilePath,
success: { db, _ in
try MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
avatarUpdate: (profileFilePath.map { .uploadFilePath($0) } ?? .none),
success: { db in
// Need to call the 'success' closure asynchronously on the queue to prevent a reentrancy
// issue as it will write to the database and this closure is already called within
// another database write
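For reference, the avatar update cases used across this commit, collected as a rough sketch (the actual enum lives in ProfileManager and isn't part of this diff, so the shape is inferred from the call sites):
// Inferred shape only; the real definition is not shown in this commit
enum AvatarUpdate {
    case none                                                 // keep the current avatar
    case uploadFilePath(String)                               // upload a local file, as above
    case updateTo(url: String, key: Data, fileName: String?)  // point at an already-uploaded avatar
}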

View file

@ -0,0 +1,366 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionUtil
import SessionUtilitiesKit
internal extension SessionUtil {
// MARK: - Incoming Changes
static func handleContactsUpdate(
_ db: Database,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>,
needsDump: Bool
) throws {
typealias ContactData = [String: (contact: Contact, profile: Profile)]
guard needsDump else { return }
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let contactData: ContactData = atomicConf.mutate { conf -> ContactData in
var contactData: ContactData = [:]
var contact: contacts_contact = contacts_contact()
let contactIterator: UnsafeMutablePointer<contacts_iterator> = contacts_iterator_new(conf)
while !contacts_iterator_done(contactIterator, &contact) {
let contactId: String = String(cString: withUnsafeBytes(of: contact.session_id) { [UInt8]($0) }
.map { CChar($0) }
.nullTerminated()
)
let contactResult: Contact = Contact(
id: contactId,
isApproved: contact.approved,
isBlocked: contact.blocked,
didApproveMe: contact.approved_me
)
let profileResult: Profile = Profile(
id: contactId,
name: (contact.name.map { String(cString: $0) } ?? ""),
nickname: contact.nickname.map { String(cString: $0) },
profilePictureUrl: contact.profile_pic.url.map { String(cString: $0) },
profileEncryptionKey: (contact.profile_pic.key != nil && contact.profile_pic.keylen > 0 ?
Data(bytes: contact.profile_pic.key, count: contact.profile_pic.keylen) :
nil
)
)
contactData[contactId] = (contactResult, profileResult)
contacts_iterator_advance(contactIterator)
}
contacts_iterator_free(contactIterator) // Need to free the iterator
return contactData
}
// The current user's contact data is handled separately so exclude it if it's present (as that's
// actually a bug)
let userPublicKey: String = getUserHexEncodedPublicKey()
let targetContactData: ContactData = contactData.filter { $0.key != userPublicKey }
// If we only updated the current user contact then no need to continue
guard !targetContactData.isEmpty else { return }
// Since we don't sync 100% of the data stored against the contact and profile objects we
// need to only update the data we do have to ensure we don't overwrite anything that doesn't
// get synced
try targetContactData
.forEach { sessionId, data in
// Note: We only update the contact and profile records if the data has actually changed
// in order to avoid triggering UI updates for every thread on the home screen (the DB
// observation system can't differentiate between update calls which do and don't change anything)
let contact: Contact = Contact.fetchOrCreate(db, id: sessionId)
let profile: Profile = Profile.fetchOrCreate(db, id: sessionId)
if
(!data.profile.name.isEmpty && profile.name != data.profile.name) ||
profile.nickname != data.profile.nickname ||
profile.profilePictureUrl != data.profile.profilePictureUrl ||
profile.profileEncryptionKey != data.profile.profileEncryptionKey
{
try profile.save(db)
try Profile
.filter(id: sessionId)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
[
(data.profile.name.isEmpty || profile.name == data.profile.name ? nil :
Profile.Columns.name.set(to: data.profile.name)
),
(profile.nickname == data.profile.nickname ? nil :
Profile.Columns.nickname.set(to: data.profile.nickname)
),
(profile.profilePictureUrl == data.profile.profilePictureUrl ? nil :
Profile.Columns.profilePictureUrl.set(to: data.profile.profilePictureUrl)
),
(profile.profileEncryptionKey == data.profile.profileEncryptionKey ? nil :
Profile.Columns.profileEncryptionKey.set(to: data.profile.profileEncryptionKey)
)
].compactMap { $0 }
)
}
/// Since message requests have no reversal flow, we should only handle setting `isApproved`
/// and `didApproveMe` to `true`. This prevents some weird edge cases where a config message
/// could incorrectly swap `isApproved` and `didApproveMe` back to `false`
if
(contact.isApproved != data.contact.isApproved) ||
(contact.isBlocked != data.contact.isBlocked) ||
(contact.didApproveMe != data.contact.didApproveMe)
{
try contact.save(db)
try Contact
.filter(id: sessionId)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
[
(!data.contact.isApproved ? nil :
Contact.Columns.isApproved.set(to: true)
),
Contact.Columns.isBlocked.set(to: data.contact.isBlocked),
(!data.contact.didApproveMe ? nil :
Contact.Columns.didApproveMe.set(to: true)
)
].compactMap { $0 }
)
}
}
}
// MARK: - Outgoing Changes
static func upsert(
contactData: [(id: String, contact: Contact?, profile: Profile?)],
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>
) throws -> ConfResult {
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
// The current user's contact data doesn't need to sync so exclude it
let userPublicKey: String = getUserHexEncodedPublicKey()
let targetContacts: [(id: String, contact: Contact?, profile: Profile?)] = contactData
.filter { $0.id != userPublicKey }
// If we only updated the current user contact then no need to continue
guard !targetContacts.isEmpty else { return ConfResult(needsPush: false, needsDump: false) }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
return atomicConf.mutate { conf in
// Update each contact in the config
targetContacts
.forEach { (id, maybeContact, maybeProfile) in
var sessionId: [CChar] = id
.bytes
.map { CChar(bitPattern: $0) }
var contact: contacts_contact = contacts_contact()
guard contacts_get_or_create(conf, &contact, &sessionId) else {
SNLog("Unable to upsert contact from Config Message")
return
}
// Assign all properties to match the updated contact (if there is one)
if let updatedContact: Contact = maybeContact {
contact.approved = updatedContact.isApproved
contact.approved_me = updatedContact.didApproveMe
contact.blocked = updatedContact.isBlocked
}
// Update the profile data (if there is one)
if let updatedProfile: Profile = maybeProfile {
/// Users we have sent a message request to may not have profile info in certain situations
///
/// Note: We **MUST** store these in local variables rather than access them directly or they won't
/// exist in memory long enough to actually be assigned in the C type
let updatedName: [CChar]? = (updatedProfile.name.isEmpty ?
nil :
updatedProfile.name
.bytes
.map { CChar(bitPattern: $0) }
)
let updatedNickname: [CChar]? = updatedProfile.nickname?
.bytes
.map { CChar(bitPattern: $0) }
let updatedAvatarUrl: [CChar]? = updatedProfile.profilePictureUrl?
.bytes
.map { CChar(bitPattern: $0) }
let updatedAvatarKey: [UInt8]? = updatedProfile.profileEncryptionKey?
.bytes
let oldAvatarUrl: String? = contact.profile_pic.url.map { String(cString: $0) }
let oldAvatarKey: Data? = (contact.profile_pic.key != nil && contact.profile_pic.keylen > 0 ?
Data(bytes: contact.profile_pic.key, count: contact.profile_pic.keylen) :
nil
)
updatedName?.withUnsafeBufferPointer { contact.name = $0.baseAddress }
(updatedNickname == nil ?
contact.nickname = nil :
updatedNickname?.withUnsafeBufferPointer { contact.nickname = $0.baseAddress }
)
(updatedAvatarUrl == nil ?
contact.profile_pic.url = nil :
updatedAvatarUrl?.withUnsafeBufferPointer {
contact.profile_pic.url = $0.baseAddress
}
)
(updatedAvatarKey == nil ?
contact.profile_pic.key = nil :
updatedAvatarKey?.withUnsafeBufferPointer {
contact.profile_pic.key = $0.baseAddress
}
)
contact.profile_pic.keylen = (updatedAvatarKey?.count ?? 0)
// Download the profile picture if needed
if oldAvatarUrl != updatedProfile.profilePictureUrl || oldAvatarKey != updatedProfile.profileEncryptionKey {
ProfileManager.downloadAvatar(for: updatedProfile)
}
}
// Store the updated contact
contacts_set(conf, &contact)
}
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
}
}
}
// MARK: - Convenience
internal extension SessionUtil {
static func updatingContacts<T>(_ db: Database, _ updated: [T]) throws -> [T] {
guard let updatedContacts: [Contact] = updated as? [Contact] else { throw StorageError.generic }
// The current user's contact data doesn't need to sync so exclude it
let userPublicKey: String = getUserHexEncodedPublicKey(db)
let targetContacts: [Contact] = updatedContacts.filter { $0.id != userPublicKey }
// If we only updated the current user contact then no need to continue
guard !targetContacts.isEmpty else { return updated }
db.afterNextTransaction { db in
do {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .contacts,
publicKey: userPublicKey
)
let result: ConfResult = try SessionUtil
.upsert(
contactData: targetContacts.map { (id: $0.id, contact: $0, profile: nil) },
in: atomicConf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey,
messageHashes: nil
)
}
)
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
}
return updated
}
static func updatingProfiles<T>(_ db: Database, _ updated: [T]) throws -> [T] {
guard let updatedProfiles: [Profile] = updated as? [Profile] else { throw StorageError.generic }
// We should only sync profiles which are associated with contact data to avoid including profiles
// for random people in community conversations, so filter out any profiles which don't have an
// associated contact
let existingContactIds: [String] = (try? Contact
.filter(ids: updatedProfiles.map { $0.id })
.select(.id)
.asRequest(of: String.self)
.fetchAll(db))
.defaulting(to: [])
// If none of the profiles are associated with existing contacts then ignore the changes (no need
// to do a config sync)
guard !existingContactIds.isEmpty else { return updated }
// Get the user public key (updating their profile is handled separately)
let userPublicKey: String = getUserHexEncodedPublicKey(db)
db.afterNextTransaction { db in
do {
// Update the user profile first (if needed)
if let updatedUserProfile: Profile = updatedProfiles.first(where: { $0.id == userPublicKey }) {
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .userProfile,
publicKey: userPublicKey
)
let result: ConfResult = try SessionUtil.update(
profile: updatedUserProfile,
in: atomicConf
)
if result.needsDump {
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .userProfile,
publicKey: userPublicKey,
messageHashes: nil
)
}
)
}
}
// Then update other contacts
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
for: .contacts,
publicKey: userPublicKey
)
let result: ConfResult = try SessionUtil
.upsert(
contactData: updatedProfiles
.filter { $0.id != userPublicKey }
.map { (id: $0.id, contact: nil, profile: $0) },
in: atomicConf
)
// If we don't need to dump the data then we can finish early
guard result.needsDump else { return }
try SessionUtil.saveState(
db,
keepingExistingMessageHashes: true,
configDump: try atomicConf.mutate { conf in
try SessionUtil.createDump(
conf: conf,
for: .contacts,
publicKey: userPublicKey,
messageHashes: nil
)
}
)
}
catch {
SNLog("[libSession-util] Failed to dump updated data")
}
}
return updated
}
}
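A rough usage sketch tying this file together (names are taken from the hunks above; error handling and the surrounding database write are elided):
// Mirror a local contact change into the contacts config, then queue a sync if required
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = SessionUtil.config(
    for: .contacts,
    publicKey: userPublicKey
)
let result: SessionUtil.ConfResult = try SessionUtil.upsert(
    contactData: [(id: contact.id, contact: contact, profile: nil)],
    in: atomicConf
)
if result.needsPush { ConfigurationSyncJob.enqueue(db) }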

View file

@ -6,22 +6,24 @@ import SessionUtil
import SessionUtilitiesKit
internal extension SessionUtil {
// MARK: - Incoming Changes
static func handleUserProfileUpdate(
_ db: Database,
in target: Target,
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>,
needsDump: Bool,
latestConfigUpdateSentTimestamp: TimeInterval
) throws {
typealias ProfileData = (profileName: String, profilePictureUrl: String?, profilePictureKey: Data?)
guard needsDump else { return }
guard target.conf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
let userPublicKey: String = getUserHexEncodedPublicKey(db)
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
let maybeProfileData: ProfileData? = target.conf.mutate { conf -> ProfileData? in
let maybeProfileData: ProfileData? = atomicConf.mutate { conf -> ProfileData? in
// A profile must have a name so if this is null then it's invalid and can be ignored
guard let profileNamePtr: UnsafePointer<CChar> = user_profile_get_name(conf) else {
return nil
@ -52,33 +54,55 @@ internal extension SessionUtil {
// Only save the data in the database if it's valid
guard let profileData: ProfileData = maybeProfileData else { return }
// Profile (also force-approve the current user in case the account got into a weird state or
// restored directly from a migration)
try MessageReceiver.updateProfileIfNeeded(
// Handle user profile changes
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: userPublicKey,
name: profileData.profileName,
profilePictureUrl: profileData.profilePictureUrl,
profileKey: profileData.profilePictureKey,
sentTimestamp: latestConfigUpdateSentTimestamp
avatarUpdate: {
guard
let profilePictureUrl: String = profileData.profilePictureUrl,
let profileKey: Data = profileData.profilePictureKey
else { return .none }
return .updateTo(
url: profilePictureUrl,
key: profileKey,
fileName: nil
)
}(),
sentTimestamp: latestConfigUpdateSentTimestamp,
calledFromConfigHandling: true
)
try Contact(id: userPublicKey)
.with(
isApproved: true,
didApproveMe: true
)
.save(db)
// Create a contact for the current user if needed (also force-approve the current user
// in case the account got into a weird state or restored directly from a migration)
let userContact: Contact = Contact.fetchOrCreate(db, id: userPublicKey)
if !userContact.isTrusted || !userContact.isApproved || !userContact.didApproveMe {
try userContact.save(db)
try Contact
.filter(id: userPublicKey)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
Contact.Columns.isTrusted.set(to: true), // Always trust the current user
Contact.Columns.isApproved.set(to: true),
Contact.Columns.didApproveMe.set(to: true)
)
}
}
@discardableResult static func update(
// MARK: - Outgoing Changes
static func update(
profile: Profile,
in target: Target
in atomicConf: Atomic<UnsafeMutablePointer<config_object>?>
) throws -> ConfResult {
guard target.conf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
guard atomicConf.wrappedValue != nil else { throw SessionUtilError.nilConfigObject }
// Since we are doing direct memory manipulation we are using an `Atomic` type which has
// blocking access in its `mutate` closure
return target.conf.mutate { conf in
return atomicConf.mutate { conf in
// Update the name
user_profile_set_name(conf, profile.name)
@ -101,7 +125,7 @@ internal extension SessionUtil {
user_profile_set_pic(conf, profilePic)
}
return (
return ConfResult(
needsPush: config_needs_push(conf),
needsDump: config_needs_dump(conf)
)
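For the outgoing direction, a condensed sketch of how `update(profile:in:)` is driven elsewhere in this commit (see `SessionUtil.updatingProfiles`; all names come from those hunks):
let atomicConf = SessionUtil.config(for: .userProfile, publicKey: userPublicKey)
let result: SessionUtil.ConfResult = try SessionUtil.update(profile: updatedUserProfile, in: atomicConf)
if result.needsDump {
    // Persist the updated serialised state so it survives a relaunch
    try SessionUtil.saveState(
        db,
        keepingExistingMessageHashes: true,
        configDump: try atomicConf.mutate { conf in
            try SessionUtil.createDump(conf: conf, for: .userProfile, publicKey: userPublicKey, messageHashes: nil)
        }
    )
}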

View file

@ -0,0 +1,73 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionUtilitiesKit
// MARK: - GRDB
public extension QueryInterfaceRequest {
@discardableResult
func updateAllAndConfig(
_ db: Database,
_ assignments: ColumnAssignment...
) throws -> Int {
return try updateAllAndConfig(db, assignments)
}
@discardableResult
func updateAllAndConfig(
_ db: Database,
_ assignments: [ColumnAssignment]
) throws -> Int {
switch self {
case let contactRequest as QueryInterfaceRequest<Contact>:
return try contactRequest.updateAndFetchAllAndUpdateConfig(db, assignments).count
case let profileRequest as QueryInterfaceRequest<Profile>:
return try profileRequest.updateAndFetchAllAndUpdateConfig(db, assignments).count
default: return try self.updateAll(db, assignments)
}
}
}
public extension QueryInterfaceRequest where RowDecoder: FetchableRecord & TableRecord {
@discardableResult
func updateAndFetchAllAndUpdateConfig(
_ db: Database,
_ assignments: ColumnAssignment...
) throws -> [RowDecoder] {
return try updateAndFetchAllAndUpdateConfig(db, assignments)
}
@discardableResult
func updateAndFetchAllAndUpdateConfig(
_ db: Database,
_ assignments: [ColumnAssignment]
) throws -> [RowDecoder] {
defer {
db.afterNextTransaction { db in
guard
self is QueryInterfaceRequest<Contact> ||
self is QueryInterfaceRequest<Profile> ||
self is QueryInterfaceRequest<ClosedGroup>
else { return }
// If we change one of these types then we may as well automatically enqueue
// a new config sync job once the transaction completes
ConfigurationSyncJob.enqueue(db)
}
}
switch self {
case is QueryInterfaceRequest<Contact>:
return try SessionUtil.updatingContacts(db, try updateAndFetchAll(db, assignments))
case is QueryInterfaceRequest<Profile>:
return try SessionUtil.updatingProfiles(db, try updateAndFetchAll(db, assignments))
default: return try self.updateAndFetchAll(db, assignments)
}
}
}
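A short usage sketch, mirroring the `Contact` updates made elsewhere in this commit:
// Updates the matching rows, pushes the change into the contacts config via
// SessionUtil.updatingContacts, and enqueues a ConfigurationSyncJob once the
// surrounding transaction commits
try Contact
    .filter(id: sessionId)
    .updateAllAndConfig(db, Contact.Columns.isApproved.set(to: true))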

View file

@ -2,73 +2,94 @@
import Foundation
import GRDB
import SessionSnodeKit
import SessionUtil
import SessionUtilitiesKit
/*internal*/public enum SessionUtil {
public typealias ConfResult = (needsPush: Bool, needsDump: Bool)
public typealias IncomingConfResult = (needsPush: Bool, needsDump: Bool, latestSentTimestamp: TimeInterval)
public enum SessionUtil {
public struct ConfResult {
let needsPush: Bool
let needsDump: Bool
}
enum Target {
case global(variant: ConfigDump.Variant)
case custom(conf: Atomic<UnsafeMutablePointer<config_object>?>)
var conf: Atomic<UnsafeMutablePointer<config_object>?> {
switch self {
case .global(let variant): return SessionUtil.config(for: variant)
case .custom(let conf): return conf
}
}
public struct IncomingConfResult {
let needsPush: Bool
let needsDump: Bool
let messageHashes: [String]
let latestSentTimestamp: TimeInterval
}
public struct OutgoingConfResult {
let message: SharedConfigMessage
let namespace: SnodeAPI.Namespace
let destination: Message.Destination
let oldMessageHashes: [String]?
}
// MARK: - Configs
private static var userProfileConfig: Atomic<UnsafeMutablePointer<config_object>?> = Atomic(nil)
private static var contactsConfig: Atomic<UnsafeMutablePointer<config_object>?> = Atomic(nil)
fileprivate static var configStore: Atomic<[ConfigKey: Atomic<UnsafeMutablePointer<config_object>?>]> = Atomic([:])
public static func config(for variant: ConfigDump.Variant, publicKey: String) -> Atomic<UnsafeMutablePointer<config_object>?> {
let key: ConfigKey = ConfigKey(variant: variant, publicKey: publicKey)
return (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
}
// MARK: - Variables
/// Returns `true` if there is a config which needs to be pushed, but returns `false` if the configs are all up to date or haven't been
/// loaded yet (eg. fresh install)
public static var needsSync: Bool {
return ConfigDump.Variant.allCases.contains { variant in
switch variant {
case .userProfile:
return (userProfileConfig.wrappedValue.map { config_needs_push($0) } ?? false)
case .contacts:
return (contactsConfig.wrappedValue.map { config_needs_push($0) } ?? false)
}
}
}
// MARK: - Convenience
private static func config(for variant: ConfigDump.Variant) -> Atomic<UnsafeMutablePointer<config_object>?> {
switch variant {
case .userProfile: return SessionUtil.userProfileConfig
case .contacts: return SessionUtil.contactsConfig
}
return configStore
.wrappedValue
.contains { _, atomicConf in config_needs_push(atomicConf.wrappedValue) }
}
// MARK: - Loading
/*internal*/public static func loadState(ed25519SecretKey: [UInt8]?) {
public static func loadState(
userPublicKey: String,
ed25519SecretKey: [UInt8]?
) {
guard let secretKey: [UInt8] = ed25519SecretKey else { return }
SessionUtil.userProfileConfig.mutate { $0 = loadState(for: .userProfile, secretKey: secretKey) }
SessionUtil.contactsConfig.mutate { $0 = loadState(for: .contacts, secretKey: secretKey) }
}
private static func loadState(
for variant: ConfigDump.Variant,
secretKey ed25519SecretKey: [UInt8]?
) -> UnsafeMutablePointer<config_object>? {
guard let secretKey: [UInt8] = ed25519SecretKey else { return nil }
// Retrieve the existing dumps from the database
let existingDumps: Set<ConfigDump> = Storage.shared
.read { db in try ConfigDump.fetchSet(db) }
.defaulting(to: [])
let existingDumpVariants: Set<ConfigDump.Variant> = existingDumps
.map { $0.variant }
.asSet()
let missingRequiredVariants: Set<ConfigDump.Variant> = ConfigDump.Variant.userVariants
.asSet()
.subtracting(existingDumpVariants)
// Load any
let storedDump: Data? = Storage.shared
.read { db in try ConfigDump.fetchOne(db, id: variant) }?
.data
return try? loadState(for: variant, secretKey: secretKey, cachedData: storedDump)
// Create the 'config_object' records for each dump
SessionUtil.configStore.mutate { confStore in
existingDumps.forEach { dump in
confStore[ConfigKey(variant: dump.variant, publicKey: dump.publicKey)] = Atomic(
try? SessionUtil.loadState(
for: dump.variant,
secretKey: secretKey,
cachedData: dump.data
)
)
}
missingRequiredVariants.forEach { variant in
confStore[ConfigKey(variant: variant, publicKey: userPublicKey)] = Atomic(
try? SessionUtil.loadState(
for: variant,
secretKey: secretKey,
cachedData: nil
)
)
}
}
}
internal static func loadState(
@ -117,87 +138,165 @@ import SessionUtilitiesKit
internal static func saveState(
_ db: Database,
conf: UnsafeMutablePointer<config_object>?,
for variant: ConfigDump.Variant
keepingExistingMessageHashes: Bool,
configDump: ConfigDump?
) throws {
guard let configDump: ConfigDump = configDump else { return }
// If we want to keep the existing message hashes then we need
// to fetch them from the db and create a new 'ConfigDump' instance
let targetDump: ConfigDump = try {
guard keepingExistingMessageHashes else { return configDump }
let existingCombinedMessageHashes: String? = try ConfigDump
.filter(
ConfigDump.Columns.variant == configDump.variant &&
ConfigDump.Columns.publicKey == configDump.publicKey
)
.select(.combinedMessageHashes)
.asRequest(of: String.self)
.fetchOne(db)
return ConfigDump(
variant: configDump.variant,
publicKey: configDump.publicKey,
data: configDump.data,
messageHashes: ConfigDump.messageHashes(from: existingCombinedMessageHashes)
)
}()
// Actually save the dump
try targetDump.save(db)
}
internal static func createDump(
conf: UnsafeMutablePointer<config_object>?,
for variant: ConfigDump.Variant,
publicKey: String,
messageHashes: [String]?
) throws -> ConfigDump? {
guard conf != nil else { throw SessionUtilError.nilConfigObject }
// If it doesn't need a dump then do nothing
guard config_needs_dump(conf) else { return }
guard config_needs_dump(conf) else { return nil }
var dumpResult: UnsafeMutablePointer<UInt8>? = nil
var dumpResultLen: Int = 0
config_dump(conf, &dumpResult, &dumpResultLen)
guard let dumpResult: UnsafeMutablePointer<UInt8> = dumpResult else { return }
guard let dumpResult: UnsafeMutablePointer<UInt8> = dumpResult else { return nil }
let dumpData: Data = Data(bytes: dumpResult, count: dumpResultLen)
dumpResult.deallocate()
try ConfigDump(
return ConfigDump(
variant: variant,
data: dumpData
publicKey: publicKey,
data: dumpData,
messageHashes: messageHashes
)
.save(db)
}
// MARK: - Pushes
public static func getChanges(
for variants: [ConfigDump.Variant] = ConfigDump.Variant.allCases,
public static func pendingChanges(
_ db: Database,
userPublicKey: String,
ed25519SecretKey: [UInt8]
) -> [SharedConfigMessage] {
return variants
.compactMap { variant -> SharedConfigMessage? in
let conf = SessionUtil.config(for: variant)
) throws -> [OutgoingConfResult] {
let existingDumpInfo: Set<DumpInfo> = try ConfigDump
.select(.variant, .publicKey, .combinedMessageHashes)
.asRequest(of: DumpInfo.self)
.fetchSet(db)
// Ensure we always check the required user config types for changes even if there is no dump
// data yet (to deal with first launch cases)
return existingDumpInfo
.inserting(
contentsOf: DumpInfo.requiredUserConfigDumpInfo(userPublicKey: userPublicKey)
.filter { requiredInfo -> Bool in
!existingDumpInfo.contains(where: {
$0.variant == requiredInfo.variant &&
$0.publicKey == requiredInfo.publicKey
})
}
)
.compactMap { dumpInfo -> OutgoingConfResult? in
let key: ConfigKey = ConfigKey(variant: dumpInfo.variant, publicKey: dumpInfo.publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
// Check if the config needs to be pushed
guard config_needs_push(conf.wrappedValue) else { return nil }
guard config_needs_push(atomicConf.wrappedValue) else { return nil }
var toPush: UnsafeMutablePointer<UInt8>? = nil
var toPushLen: Int = 0
let seqNo: Int64 = conf.mutate { config_push($0, &toPush, &toPushLen) }
let seqNo: Int64 = atomicConf.mutate { config_push($0, &toPush, &toPushLen) }
guard let toPush: UnsafeMutablePointer<UInt8> = toPush else { return nil }
let pushData: Data = Data(bytes: toPush, count: toPushLen)
toPush.deallocate()
return SharedConfigMessage(
kind: variant.configMessageKind,
seqNo: seqNo,
data: pushData
return OutgoingConfResult(
message: SharedConfigMessage(
kind: dumpInfo.variant.configMessageKind,
seqNo: seqNo,
data: pushData
),
namespace: dumpInfo.variant.namespace,
destination: (dumpInfo.publicKey == userPublicKey ?
Message.Destination.contact(publicKey: userPublicKey) :
Message.Destination.closedGroup(groupPublicKey: dumpInfo.publicKey)
),
oldMessageHashes: dumpInfo.messageHashes
)
}
}
public static func markAsPushed(messages: [SharedConfigMessage]) -> [ConfigDump.Variant: Bool] {
messages.reduce(into: [:]) { result, message in
let conf = SessionUtil.config(for: message.kind.configDumpVariant)
// Mark the config as pushed
config_confirm_pushed(conf.wrappedValue, message.seqNo)
// Update the result to indicate whether the config needs to be dumped
result[message.kind.configDumpVariant] = config_needs_dump(conf.wrappedValue)
}
public static func markAsPushed(
message: SharedConfigMessage,
publicKey: String
) -> Bool {
let key: ConfigKey = ConfigKey(variant: message.kind.configDumpVariant, publicKey: publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
// Mark the config as pushed
config_confirm_pushed(atomicConf.wrappedValue, message.seqNo)
// Update the result to indicate whether the config needs to be dumped
return config_needs_dump(atomicConf.wrappedValue)
}
// MARK: - Receiving
public static func handleConfigMessages(
_ db: Database,
messages: [SharedConfigMessage]
messages: [SharedConfigMessage],
publicKey: String
) throws {
guard !messages.isEmpty else { return }
guard !publicKey.isEmpty else { throw MessageReceiverError.noThread }
let groupedMessages: [SharedConfigMessage.Kind: [SharedConfigMessage]] = messages
.grouped(by: \.kind)
// Merge the config messages into the current state
let results: [ConfigDump.Variant: IncomingConfResult] = groupedMessages
.reduce(into: [:]) { result, next in
let atomicConf = SessionUtil.config(for: next.key.configDumpVariant)
let key: ConfigKey = ConfigKey(variant: next.key.configDumpVariant, publicKey: publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
var needsPush: Bool = false
var needsDump: Bool = false
let messageHashes: [String] = next.value.compactMap { $0.serverHash }
let messageSentTimestamp: TimeInterval = TimeInterval(
(next.value.compactMap { $0.sentTimestamp }.max() ?? 0) / 1000
)
@ -217,25 +316,119 @@ import SessionUtilitiesKit
}
// Return the current state of the config
result[next.key.configDumpVariant] = (
result[next.key.configDumpVariant] = IncomingConfResult(
needsPush: needsPush,
needsDump: needsDump,
messageHashes: messageHashes,
latestSentTimestamp: messageSentTimestamp
)
}
// If the data needs to be dumped then apply the relevant local changes
// Process the results from the merging
try results.forEach { variant, result in
let key: ConfigKey = ConfigKey(variant: variant, publicKey: publicKey)
let atomicConf: Atomic<UnsafeMutablePointer<config_object>?> = (
SessionUtil.configStore.wrappedValue[key] ??
Atomic(nil)
)
// Apply the updated states to the database
switch variant {
case .userProfile:
try SessionUtil.handleUserProfileUpdate(
db,
in: .global(variant: variant),
in: atomicConf,
needsDump: result.needsDump,
latestConfigUpdateSentTimestamp: result.latestSentTimestamp
)
case .contacts:
try SessionUtil.handleContactsUpdate(
db,
in: atomicConf,
needsDump: result.needsDump
)
}
// We need to get the existing message hashes and combine them with the latest from the
// service node to ensure the next push will properly clean up old messages
let oldMessageHashes: Set<String> = try ConfigDump
.filter(
ConfigDump.Columns.variant == variant &&
ConfigDump.Columns.publicKey == publicKey
)
.select(.combinedMessageHashes)
.asRequest(of: String.self)
.fetchOne(db)
.map { ConfigDump.messageHashes(from: $0) }
.defaulting(to: [])
.asSet()
let allMessageHashes: [String] = Array(oldMessageHashes
.inserting(contentsOf: result.messageHashes.asSet()))
let messageHashesChanged: Bool = (oldMessageHashes != result.messageHashes.asSet())
// Now that the changes are applied, update the cached dumps
switch (result.needsDump, messageHashesChanged) {
case (true, _):
// The config data had changes so regenerate the dump and save it
try atomicConf
.mutate { conf -> ConfigDump? in
try SessionUtil.createDump(
conf: conf,
for: variant,
publicKey: publicKey,
messageHashes: allMessageHashes
)
}?
.save(db)
case (false, true):
// The config data didn't change but there were different messages on the service node
// so just update the message hashes so the next sync can properly remove any old ones
try ConfigDump
.filter(
ConfigDump.Columns.variant == variant &&
ConfigDump.Columns.publicKey == publicKey
)
.updateAll(
db,
ConfigDump.Columns.combinedMessageHashes
.set(to: ConfigDump.combinedMessageHashes(from: allMessageHashes))
)
default: break
}
// Now that the local state has been updated, trigger a config sync (this will push any
// pending updates and properly update the state)
if results.contains(where: { $0.value.needsPush }) {
ConfigurationSyncJob.enqueue(db)
}
}
}
// MARK: - Internal Convenience
fileprivate extension SessionUtil {
struct ConfigKey: Hashable {
let variant: ConfigDump.Variant
let publicKey: String
}
struct DumpInfo: FetchableRecord, Decodable, Hashable {
let variant: ConfigDump.Variant
let publicKey: String
private let combinedMessageHashes: String?
var messageHashes: [String]? { ConfigDump.messageHashes(from: combinedMessageHashes) }
// MARK: - Convenience
static func requiredUserConfigDumpInfo(userPublicKey: String) -> Set<DumpInfo> {
return ConfigDump.Variant.userVariants
.map { DumpInfo(variant: $0, publicKey: userPublicKey, combinedMessageHashes: nil) }
.asSet()
}
}
}
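A rough sketch of the push cycle these APIs are built for (presumably driven by ConfigurationSyncJob; the actual message sending is elided):
// 1. Collect any configs that need pushing
let changes: [SessionUtil.OutgoingConfResult] = try SessionUtil.pendingChanges(
    db,
    userPublicKey: userPublicKey,
    ed25519SecretKey: ed25519SecretKey
)
for change in changes {
    // 2. Send change.message to change.destination in change.namespace, then delete
    //    change.oldMessageHashes from the swarm (sending elided from this sketch)
    // 3. Once the send is confirmed, record the push; a `true` result means the config
    //    also needs to be re-dumped via createDump/saveState
    if SessionUtil.markAsPushed(message: change.message, publicKey: userPublicKey) {
        // regenerate and persist the dump here
    }
}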

View file

@ -6,15 +6,9 @@ import SessionSnodeKit
import SessionUtilitiesKit
public extension Message {
enum Destination: Codable {
case contact(
publicKey: String,
namespace: SnodeAPI.Namespace
)
case closedGroup(
groupPublicKey: String,
namespace: SnodeAPI.Namespace
)
enum Destination: Codable, Hashable {
case contact(publicKey: String)
case closedGroup(groupPublicKey: String)
case openGroup(
roomToken: String,
server: String,
@ -23,13 +17,6 @@ public extension Message {
fileIds: [String]? = nil
)
case openGroupInbox(server: String, openGroupPublicKey: String, blindedPublicKey: String)
var namespace: SnodeAPI.Namespace {
switch self {
case .contact(_, let namespace), .closedGroup(_, let namespace): return namespace
default: preconditionFailure("Attempted to retrieve namespace for invalid destination")
}
}
public static func from(
_ db: Database,
@ -50,10 +37,10 @@ public extension Message {
)
}
return .contact(publicKey: thread.id, namespace: .default)
return .contact(publicKey: thread.id)
case .closedGroup:
return .closedGroup(groupPublicKey: thread.id, namespace: .legacyClosedGroup)
return .closedGroup(groupPublicKey: thread.id)
case .openGroup:
guard let openGroup: OpenGroup = try thread.openGroup.fetchOne(db) else {
@ -79,59 +66,5 @@ public extension Message {
default: return self
}
}
// MARK: - Codable
// FIXME: Remove this custom implementation after enough time has passed (added the 'namespace' properties)
public init(from decoder: Decoder) throws {
let container: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self)
// Should only have a single root key so we can just switch on it to have cleaner code
switch container.allKeys.first {
case .contact:
let childContainer: KeyedDecodingContainer<ContactCodingKeys> = try container.nestedContainer(keyedBy: ContactCodingKeys.self, forKey: .contact)
self = .contact(
publicKey: try childContainer.decode(String.self, forKey: .publicKey),
namespace: (
(try? childContainer.decode(SnodeAPI.Namespace.self, forKey: .namespace)) ??
.default
)
)
case .closedGroup:
let childContainer: KeyedDecodingContainer<ClosedGroupCodingKeys> = try container.nestedContainer(keyedBy: ClosedGroupCodingKeys.self, forKey: .closedGroup)
self = .closedGroup(
groupPublicKey: try childContainer.decode(String.self, forKey: .groupPublicKey),
namespace: (
(try? childContainer.decode(SnodeAPI.Namespace.self, forKey: .namespace)) ??
.legacyClosedGroup
)
)
case .openGroup:
let childContainer: KeyedDecodingContainer<OpenGroupCodingKeys> = try container.nestedContainer(keyedBy: OpenGroupCodingKeys.self, forKey: .openGroup)
self = .openGroup(
roomToken: try childContainer.decode(String.self, forKey: .roomToken),
server: try childContainer.decode(String.self, forKey: .server),
whisperTo: try? childContainer.decode(String.self, forKey: .whisperTo),
whisperMods: try childContainer.decode(Bool.self, forKey: .whisperMods),
fileIds: try? childContainer.decode([String].self, forKey: .fileIds)
)
case .openGroupInbox:
let childContainer: KeyedDecodingContainer<OpenGroupInboxCodingKeys> = try container.nestedContainer(keyedBy: OpenGroupInboxCodingKeys.self, forKey: .openGroupInbox)
self = .openGroupInbox(
server: try childContainer.decode(String.self, forKey: .server),
openGroupPublicKey: try childContainer.decode(String.self, forKey: .openGroupPublicKey),
blindedPublicKey: try childContainer.decode(String.self, forKey: .blindedPublicKey)
)
default: throw MessageReceiverError.invalidMessage
}
}
}
}
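A small before/after sketch of the destination change (the namespace values shown are the defaults the removed decoding code used):
// Before: .contact(publicKey: thread.id, namespace: .default)
// After:  namespaces are resolved separately from the destination, e.g.
//         SessionUtil.pendingChanges derives them from dumpInfo.variant.namespace
let destination: Message.Destination = .contact(publicKey: thread.id)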

View file

@ -183,7 +183,7 @@ public extension Message {
default: return false
}
case is ConfigurationMessage: return true
case is ConfigurationMessage, is SharedConfigMessage: return true
case is UnsendRequest: return true
default: return false
}

View file

@ -108,15 +108,18 @@ internal extension AnyPublisher where Output == HTTP.BatchResponse, Failure == E
func map<E: EndpointType>(
requests: [OpenGroupAPI.BatchRequest.Info],
toHashMapFor endpointType: E.Type
) -> AnyPublisher<[E: (ResponseInfoType, Codable?)], Error> {
) -> AnyPublisher<(info: ResponseInfoType, data: [E: Codable]), Error> {
return self
.map { result in
result.enumerated()
.reduce(into: [:]) { prev, next in
guard let endpoint: E = requests[next.offset].endpoint as? E else { return }
prev[endpoint] = next.element
}
.map { result -> (info: ResponseInfoType, data: [E: Codable]) in
(
info: result.info,
data: result.responses.enumerated()
.reduce(into: [:]) { prev, next in
guard let endpoint: E = requests[next.offset].endpoint as? E else { return }
prev[endpoint] = next.element
}
)
}
.eraseToAnyPublisher()
}
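A sketch of consuming the new return shape (the publisher name is assumed; the cast pattern matches the OpenGroupAPI hunks below):
somePollPublisher // AnyPublisher<(info: ResponseInfoType, data: [Endpoint: Codable]), Error>
    .map { _, data -> Capabilities? in
        // Each endpoint now maps to its full BatchSubResponse rather than an (info, body) pair
        (data[.capabilities] as? HTTP.BatchSubResponse<Capabilities>)?.body
    }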

View file

@ -32,7 +32,7 @@ public enum OpenGroupAPI {
hasPerformedInitialPoll: Bool,
timeSinceLastPoll: TimeInterval,
using dependencies: SMKDependencies = SMKDependencies()
) -> AnyPublisher<[Endpoint: (ResponseInfoType, Codable?)], Error> {
) -> AnyPublisher<(info: ResponseInfoType, data: [Endpoint: Codable]), Error> {
let lastInboxMessageId: Int64 = (try? OpenGroup
.select(.inboxLatestMessageId)
.filter(OpenGroup.Columns.server == server)
@ -152,7 +152,7 @@ public enum OpenGroupAPI {
server: String,
requests: [BatchRequest.Info],
using dependencies: SMKDependencies = SMKDependencies()
) -> AnyPublisher<[Endpoint: (ResponseInfoType, Codable?)], Error> {
) -> AnyPublisher<(info: ResponseInfoType, data: [Endpoint: Codable]), Error> {
let responseTypes = requests.map { $0.responseType }
return OpenGroupAPI
@ -184,7 +184,7 @@ public enum OpenGroupAPI {
server: String,
requests: [BatchRequest.Info],
using dependencies: SMKDependencies = SMKDependencies()
) -> AnyPublisher<[Endpoint: (ResponseInfoType, Codable?)], Error> {
) -> AnyPublisher<(info: ResponseInfoType, data: [Endpoint: Codable]), Error> {
let responseTypes = requests.map { $0.responseType }
return OpenGroupAPI
@ -339,10 +339,9 @@ public enum OpenGroupAPI {
requests: requestResponseType,
using: dependencies
)
.flatMap { (response: [Endpoint: (ResponseInfoType, Codable?)]) -> AnyPublisher<(capabilities: (info: ResponseInfoType, data: Capabilities), room: (info: ResponseInfoType, data: Room)), Error> in
let maybeCapabilities: (info: ResponseInfoType, data: Capabilities?)? = response[.capabilities]
.map { info, data in (info, (data as? HTTP.BatchSubResponse<Capabilities>)?.body) }
let maybeRoomResponse: (ResponseInfoType, Codable?)? = response
.flatMap { (info: ResponseInfoType, data: [Endpoint: Codable]) -> AnyPublisher<(capabilities: (info: ResponseInfoType, data: Capabilities), room: (info: ResponseInfoType, data: Room)), Error> in
let maybeCapabilities: HTTP.BatchSubResponse<Capabilities>? = (data[.capabilities] as? HTTP.BatchSubResponse<Capabilities>)
let maybeRoomResponse: Codable? = data
.first(where: { key, _ in
switch key {
case .room: return true
@ -350,14 +349,13 @@ public enum OpenGroupAPI {
}
})
.map { _, value in value }
let maybeRoom: (info: ResponseInfoType, data: Room?)? = maybeRoomResponse
.map { info, data in (info, (data as? HTTP.BatchSubResponse<Room>)?.body) }
let maybeRoom: HTTP.BatchSubResponse<Room>? = (maybeRoomResponse as? HTTP.BatchSubResponse<Room>)
guard
let capabilitiesInfo: ResponseInfoType = maybeCapabilities?.info,
let capabilities: Capabilities = maybeCapabilities?.data,
let roomInfo: ResponseInfoType = maybeRoom?.info,
let room: Room = maybeRoom?.data
let capabilitiesInfo: ResponseInfoType = maybeCapabilities?.responseInfo,
let capabilities: Capabilities = maybeCapabilities?.body,
let roomInfo: ResponseInfoType = maybeRoom?.responseInfo,
let room: Room = maybeRoom?.body
else {
return Fail(error: HTTPError.parsingFailed)
.eraseToAnyPublisher()
@ -407,25 +405,22 @@ public enum OpenGroupAPI {
requests: requestResponseType,
using: dependencies
)
.flatMap { (response: [Endpoint: (ResponseInfoType, Codable?)]) -> AnyPublisher<(capabilities: (info: ResponseInfoType, data: Capabilities), rooms: (info: ResponseInfoType, data: [Room])), Error> in
let maybeCapabilities: (info: ResponseInfoType, data: Capabilities?)? = response[.capabilities]
.map { info, data in (info, (data as? HTTP.BatchSubResponse<Capabilities>)?.body) }
let maybeRoomResponse: (ResponseInfoType, Codable?)? = response
.flatMap { (info: ResponseInfoType, data: [Endpoint: Codable]) -> AnyPublisher<(capabilities: (info: ResponseInfoType, data: Capabilities), rooms: (info: ResponseInfoType, data: [Room])), Error> in
let maybeCapabilities: HTTP.BatchSubResponse<Capabilities>? = (data[.capabilities] as? HTTP.BatchSubResponse<Capabilities>)
let maybeRooms: HTTP.BatchSubResponse<[Room]>? = data
.first(where: { key, _ in
switch key {
case .rooms: return true
default: return false
}
})
.map { _, value in value }
let maybeRooms: (info: ResponseInfoType, data: [Room]?)? = maybeRoomResponse
.map { info, data in (info, (data as? HTTP.BatchSubResponse<[Room]>)?.body) }
.map { _, value in value as? HTTP.BatchSubResponse<[Room]> }
guard
let capabilitiesInfo: ResponseInfoType = maybeCapabilities?.info,
let capabilities: Capabilities = maybeCapabilities?.data,
let roomsInfo: ResponseInfoType = maybeRooms?.info,
let rooms: [Room] = maybeRooms?.data
let capabilitiesInfo: ResponseInfoType = maybeCapabilities?.responseInfo,
let capabilities: Capabilities = maybeCapabilities?.body,
let roomsInfo: ResponseInfoType = maybeRooms?.responseInfo,
let rooms: [Room] = maybeRooms?.body
else {
return Fail(error: HTTPError.parsingFailed)
.eraseToAnyPublisher()
@ -1263,7 +1258,9 @@ public enum OpenGroupAPI {
requests: requestResponseType,
using: dependencies
)
.map { $0.values.map { responseInfo, _ in responseInfo } }
.map { _, data -> [ResponseInfoType] in
data.values.compactMap { ($0 as? BatchSubResponseType)?.responseInfo }
}
.eraseToAnyPublisher()
}

View file

@ -68,36 +68,40 @@ public final class OpenGroupManager {
// MARK: - Polling
public func startPolling(using dependencies: OGMDependencies = OGMDependencies()) {
guard !dependencies.cache.isPolling else { return }
// Run on the 'workQueue' to ensure any 'Atomic' access doesn't block the main thread
// on startup
OpenGroupAPI.workQueue.async {
guard !dependencies.cache.isPolling else { return }
let servers: Set<String> = dependencies.storage
.read { db in
// The default room promise creates an OpenGroup with an empty `roomToken` value,
// we don't want to start a poller for this as the user hasn't actually joined a room
try OpenGroup
.select(.server)
.filter(OpenGroup.Columns.isActive == true)
.filter(OpenGroup.Columns.roomToken != "")
.distinct()
.asRequest(of: String.self)
.fetchSet(db)
}
.defaulting(to: [])
dependencies.mutableCache.mutate { cache in
cache.isPolling = true
cache.pollers = servers
.reduce(into: [:]) { result, server in
result[server.lowercased()]?.stop() // Should never occur
result[server.lowercased()] = OpenGroupAPI.Poller(for: server.lowercased())
let servers: Set<String> = dependencies.storage
.read { db in
// The default room promise creates an OpenGroup with an empty `roomToken` value,
// we don't want to start a poller for this as the user hasn't actually joined a room
try OpenGroup
.select(.server)
.filter(OpenGroup.Columns.isActive == true)
.filter(OpenGroup.Columns.roomToken != "")
.distinct()
.asRequest(of: String.self)
.fetchSet(db)
}
.defaulting(to: [])
// Note: We loop separately here because when the cache is mocked-out for tests it
// doesn't actually store the value (meaning the pollers won't be started), but if
// we do it in the 'reduce' function, the 'reduce' result will actually store the
// poller value resulting in a bunch of OpenGroup pollers running in a way that can't
// be stopped during unit tests
cache.pollers.forEach { _, poller in poller.startIfNeeded(using: dependencies) }
dependencies.mutableCache.mutate { cache in
cache.isPolling = true
cache.pollers = servers
.reduce(into: [:]) { result, server in
result[server.lowercased()]?.stop() // Should never occur
result[server.lowercased()] = OpenGroupAPI.Poller(for: server.lowercased())
}
// Note: We loop separately here because when the cache is mocked-out for tests it
// doesn't actually store the value (meaning the pollers won't be started), but if
// we do it in the 'reduce' function, the 'reduce' result will actually store the
// poller value resulting in a bunch of OpenGroup pollers running in a way that can't
// be stopped during unit tests
cache.pollers.forEach { _, poller in poller.startIfNeeded(using: dependencies) }
}
}
}

View file

@ -7,6 +7,11 @@ import SessionUtilitiesKit
extension MessageReceiver {
internal static func handleConfigurationMessage(_ db: Database, message: ConfigurationMessage) throws {
guard !Features.useSharedUtilForUserConfig else {
// TODO: Show warning prompt for X days
return
}
let userPublicKey = getUserHexEncodedPublicKey(db)
guard message.sender == userPublicKey else { return }
@ -21,22 +26,41 @@ extension MessageReceiver {
.defaulting(to: Date(timeIntervalSince1970: 0))
.timeIntervalSince1970
// Profile (also force-approve the current user in case the account got into a weird state or
// restored directly from a migration)
try MessageReceiver.updateProfileIfNeeded(
// Handle user profile changes
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: userPublicKey,
name: message.displayName,
profilePictureUrl: message.profilePictureUrl,
profileKey: message.profileKey,
avatarUpdate: {
guard
let profilePictureUrl: String = message.profilePictureUrl,
let profileKey: Data = message.profileKey
else { return .none }
return .updateTo(
url: profilePictureUrl,
key: profileKey,
fileName: nil
)
}(),
sentTimestamp: messageSentTimestamp
)
try Contact(id: userPublicKey)
.with(
isApproved: true,
didApproveMe: true
)
.save(db)
// Create a contact for the current user if needed (also force-approve the current user
// in case the account got into a weird state or restored directly from a migration)
let userContact: Contact = Contact.fetchOrCreate(db, id: userPublicKey)
if !userContact.isTrusted || !userContact.isApproved || !userContact.didApproveMe {
try userContact.save(db)
try Contact
.filter(id: userPublicKey)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
Contact.Columns.isTrusted.set(to: true),
Contact.Columns.isApproved.set(to: true),
Contact.Columns.didApproveMe.set(to: true)
)
}
if isInitialSync || messageSentTimestamp > lastConfigTimestamp {
if isInitialSync {
@ -53,11 +77,10 @@ extension MessageReceiver {
// If the contact is a blinded contact then only add them if they haven't already been
// unblinded
if SessionId.Prefix(from: sessionId) == .blinded {
let hasUnblindedContact: Bool = (try? BlindedIdLookup
let hasUnblindedContact: Bool = BlindedIdLookup
.filter(BlindedIdLookup.Columns.blindedId == sessionId)
.filter(BlindedIdLookup.Columns.sessionId != nil)
.isNotEmpty(db))
.defaulting(to: false)
.isNotEmpty(db)
if hasUnblindedContact {
return
@ -74,13 +97,21 @@ extension MessageReceiver {
profile.profilePictureUrl != contactInfo.profilePictureUrl ||
profile.profileEncryptionKey != contactInfo.profileKey
{
try profile
.with(
name: contactInfo.displayName,
profilePictureUrl: .updateIf(contactInfo.profilePictureUrl),
profileEncryptionKey: .updateIf(contactInfo.profileKey)
try profile.save(db)
try Profile
.filter(id: sessionId)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
[
Profile.Columns.name.set(to: contactInfo.displayName),
(contactInfo.profilePictureUrl == nil ? nil :
Profile.Columns.profilePictureUrl.set(to: contactInfo.profilePictureUrl)
),
(contactInfo.profileKey == nil ? nil :
Profile.Columns.profileEncryptionKey.set(to: contactInfo.profileKey)
)
].compactMap { $0 }
)
.save(db)
}
/// We only update these values if the proto actually has values for them (this is to prevent an
@ -94,22 +125,23 @@ extension MessageReceiver {
(contactInfo.hasIsBlocked && (contact.isBlocked != contactInfo.isBlocked)) ||
(contactInfo.hasDidApproveMe && (contact.didApproveMe != contactInfo.didApproveMe))
{
try contact
.with(
isApproved: (contactInfo.hasIsApproved && contactInfo.isApproved ?
true :
.existing
),
isBlocked: (contactInfo.hasIsBlocked ?
.update(contactInfo.isBlocked) :
.existing
),
didApproveMe: (contactInfo.hasDidApproveMe && contactInfo.didApproveMe ?
true :
.existing
)
try contact.save(db)
try Contact
.filter(id: sessionId)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
[
(!contactInfo.hasIsApproved || !contactInfo.isApproved ? nil :
Contact.Columns.isApproved.set(to: true)
),
(!contactInfo.hasIsBlocked ? nil :
Contact.Columns.isBlocked.set(to: contactInfo.isBlocked)
),
(!contactInfo.hasDidApproveMe || !contactInfo.didApproveMe ? nil :
Contact.Columns.didApproveMe.set(to: contactInfo.didApproveMe)
)
].compactMap { $0 }
)
.save(db)
}
// If the contact is blocked

View file

@ -23,12 +23,22 @@ extension MessageReceiver {
if let profile = message.profile {
let messageSentTimestamp: TimeInterval = (TimeInterval(message.sentTimestamp ?? 0) / 1000)
try MessageReceiver.updateProfileIfNeeded(
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: senderId,
name: profile.displayName,
profilePictureUrl: profile.profilePictureUrl,
profileKey: profile.profileKey,
avatarUpdate: {
guard
let profilePictureUrl: String = profile.profilePictureUrl,
let profileKey: Data = profile.profileKey
else { return .none }
return .updateTo(
url: profilePictureUrl,
key: profileKey,
fileName: nil
)
}(),
sentTimestamp: messageSentTimestamp
)
}
@ -88,8 +98,7 @@ extension MessageReceiver {
try updateContactApprovalStatusIfNeeded(
db,
senderSessionId: senderId,
threadId: nil,
forceConfigSync: blindedContactIds.isEmpty // Sync here if there were no blinded contacts
threadId: nil
)
// If there were blinded contacts which have now been resolved to this contact then we should remove
@ -103,8 +112,7 @@ extension MessageReceiver {
try updateContactApprovalStatusIfNeeded(
db,
senderSessionId: userPublicKey,
threadId: unblindedThread.id,
forceConfigSync: true
threadId: unblindedThread.id
)
}
@ -128,8 +136,7 @@ extension MessageReceiver {
internal static func updateContactApprovalStatusIfNeeded(
_ db: Database,
senderSessionId: String,
threadId: String?,
forceConfigSync: Bool
threadId: String?
) throws {
let userPublicKey: String = getUserHexEncodedPublicKey(db)
@ -149,9 +156,10 @@ extension MessageReceiver {
guard !contact.isApproved else { return }
_ = try? contact
.with(isApproved: true)
.saved(db)
try? contact.save(db)
_ = try? Contact
.filter(id: threadId)
.updateAllAndConfig(db, Contact.Columns.isApproved.set(to: true))
}
else {
// The message was sent to the current user so flag their 'didApproveMe' as true (can't send a message to
@ -160,14 +168,10 @@ extension MessageReceiver {
guard !contact.didApproveMe else { return }
_ = try? contact
.with(didApproveMe: true)
.saved(db)
try? contact.save(db)
_ = try? Contact
.filter(id: senderSessionId)
.updateAllAndConfig(db, Contact.Columns.didApproveMe.set(to: true))
}
// Force a config sync to ensure all devices know the contact approval state if desired
guard forceConfigSync else { return }
try MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
}
}

View file

@ -25,12 +25,22 @@ extension MessageReceiver {
// Update profile if needed (want to do this regardless of whether the message exists or
// not to ensure the profile info gets synced between a user's devices at every chance)
if let profile = message.profile {
try MessageReceiver.updateProfileIfNeeded(
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: sender,
name: profile.displayName,
profilePictureUrl: profile.profilePictureUrl,
profileKey: profile.profileKey,
avatarUpdate: {
guard
let profilePictureUrl: String = profile.profilePictureUrl,
let profileKey: Data = profile.profileKey
else { return .none }
return .updateTo(
url: profilePictureUrl,
key: profileKey,
fileName: nil
)
}(),
sentTimestamp: messageSentTimestamp
)
}
@ -272,8 +282,7 @@ extension MessageReceiver {
try MessageReceiver.updateContactApprovalStatusIfNeeded(
db,
senderSessionId: sender,
threadId: thread.id,
forceConfigSync: false
threadId: thread.id
)
}

View file

@ -103,7 +103,7 @@ extension MessageSender {
// the 'ClosedGroup' object we created
sentTimestampMs: UInt64(floor(formationTimestamp * 1000))
),
to: .contact(publicKey: memberId, namespace: .default),
to: .contact(publicKey: memberId),
interactionId: nil
)
}
@ -197,7 +197,8 @@ extension MessageSender {
ClosedGroupControlMessage.KeyPairWrapper(
publicKey: memberPublicKey,
encryptedKeyPair: try MessageSender.encryptWithSessionProtocol(
plaintext,
db,
plaintext: plaintext,
for: memberPublicKey
)
)
@ -645,7 +646,11 @@ extension MessageSender {
let plaintext = try proto.serializedData()
let thread: SessionThread = try SessionThread
.fetchOrCreate(db, id: publicKey, variant: .contact)
let ciphertext = try MessageSender.encryptWithSessionProtocol(plaintext, for: publicKey)
let ciphertext = try MessageSender.encryptWithSessionProtocol(
db,
plaintext: plaintext,
for: publicKey
)
SNLog("Sending latest encryption key pair to: \(publicKey).")
try MessageSender.send(

View file

@ -305,82 +305,4 @@ public enum MessageReceiver {
return (contactId, .contact)
}
internal static func updateProfileIfNeeded(
_ db: Database,
publicKey: String,
name: String?,
profilePictureUrl: String?,
profileKey: Data?,
sentTimestamp: TimeInterval,
dependencies: Dependencies = Dependencies()
) throws {
let isCurrentUser = (publicKey == getUserHexEncodedPublicKey(db, dependencies: dependencies))
let profile: Profile = Profile.fetchOrCreate(id: publicKey)
var updatedProfile: Profile = profile
// Name
if let name = name, name != profile.name {
let shouldUpdate: Bool
if isCurrentUser {
shouldUpdate = given(UserDefaults.standard[.lastDisplayNameUpdate]) {
sentTimestamp > $0.timeIntervalSince1970
}
.defaulting(to: true)
}
else {
shouldUpdate = true
}
if shouldUpdate {
if isCurrentUser {
UserDefaults.standard[.lastDisplayNameUpdate] = Date(timeIntervalSince1970: sentTimestamp)
}
updatedProfile = updatedProfile.with(name: name)
}
}
// Profile picture & profile key
if
let profileKey: Data = profileKey,
let profilePictureUrl: String = profilePictureUrl,
profileKey.count == ProfileManager.avatarAES256KeyByteLength,
profileKey != profile.profileEncryptionKey
{
let shouldUpdate: Bool
if isCurrentUser {
shouldUpdate = given(UserDefaults.standard[.lastProfilePictureUpdate]) {
sentTimestamp > $0.timeIntervalSince1970
}
.defaulting(to: true)
}
else {
shouldUpdate = true
}
if shouldUpdate {
if isCurrentUser {
UserDefaults.standard[.lastProfilePictureUpdate] = Date(timeIntervalSince1970: sentTimestamp)
}
updatedProfile = updatedProfile.with(
profilePictureUrl: .update(profilePictureUrl),
profileEncryptionKey: .update(profileKey)
)
}
}
// Persist any changes
if updatedProfile != profile {
try updatedProfile.save(db)
}
// Download the profile picture if needed
if updatedProfile.profilePictureUrl != profile.profilePictureUrl || updatedProfile.profileEncryptionKey != profile.profileEncryptionKey {
db.afterNextTransaction { _ in
ProfileManager.downloadAvatar(for: updatedProfile)
}
}
}
}

View file

@ -85,8 +85,8 @@ extension MessageSender {
let threadId: String = {
switch destination {
case .contact(let publicKey, _): return publicKey
case .closedGroup(let groupPublicKey, _): return groupPublicKey
case .contact(let publicKey): return publicKey
case .closedGroup(let groupPublicKey): return groupPublicKey
case .openGroup(let roomToken, let server, _, _, _):
return OpenGroup.idFor(roomToken: roomToken, server: server)
@ -152,87 +152,4 @@ extension MessageSender {
}
.eraseToAnyPublisher()
}
/// This method requires the `db` value to be passed in because if it's called within a `writeAsync` completion block
/// it will throw a "re-entrant" fatal error when attempting to write again
public static func syncConfiguration(
_ db: Database,
forceSyncNow: Bool = true
) throws -> AnyPublisher<Void, Error> {
// If we don't have a userKeyPair yet then there is no need to sync the configuration
// as the user doesn't exist yet (this will get triggered on the first launch of a
// fresh install due to the migrations getting run)
guard
Identity.userExists(db),
let ed25519SecretKey: [UInt8] = Identity.fetchUserEd25519KeyPair(db)?.secretKey
else {
return Fail(error: StorageError.generic)
.eraseToAnyPublisher()
}
let publicKey: String = getUserHexEncodedPublicKey(db)
let legacyDestination: Message.Destination = Message.Destination.contact(
publicKey: publicKey,
namespace: .default
)
let legacyConfigurationMessage = try ConfigurationMessage.getCurrent(db)
let userConfigMessageChanges: [SharedConfigMessage] = SessionUtil.getChanges(
ed25519SecretKey: ed25519SecretKey
)
let destination: Message.Destination = Message.Destination.contact(
publicKey: publicKey,
namespace: .userProfileConfig
)
guard forceSyncNow else {
JobRunner.add(
db,
job: Job(
variant: .messageSend,
threadId: publicKey,
details: MessageSendJob.Details(
destination: legacyDestination,
message: legacyConfigurationMessage
)
)
)
return Just(())
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
let sendData: PreparedSendData = try MessageSender.preparedSendData(
db,
message: legacyConfigurationMessage,
to: legacyDestination,
interactionId: nil
)
let userConfigSendData: [PreparedSendData] = try userConfigMessageChanges
.map { message in
try MessageSender.preparedSendData(
db,
message: message,
to: destination,
interactionId: nil
)
}
/// We want to avoid blocking the db write thread so we dispatch the API call to a different thread
return Just(())
.setFailureType(to: Error.self)
.receive(on: DispatchQueue.global(qos: .userInitiated))
.flatMap { _ -> AnyPublisher<Void, Error> in
Publishers
.MergeMany(
([sendData] + userConfigSendData)
.map { MessageSender.sendImmediate(preparedSendData: $0) }
)
.collect()
.map { _ in () }
.eraseToAnyPublisher()
}
.eraseToAnyPublisher()
}
}
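For context, a minimal sketch (not part of this commit) of the calling pattern the doc comment above describes: `syncConfiguration` reuses the `Database` handle it is given, so it can be called from inside an existing write block without triggering GRDB's re-entrant write error. It mirrors the call sites replaced later in this commit.
Storage.shared.write { db in
    // Re-using the caller's `db` handle (rather than opening a nested write) is what the
    // "re-entrant" warning in the doc comment above is about.
    try MessageSender.syncConfiguration(db, forceSyncNow: true)
        .sinkUntilComplete()
}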

View file

@ -1,16 +1,18 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import Sodium
import SessionUtilitiesKit
extension MessageSender {
internal static func encryptWithSessionProtocol(
_ plaintext: Data,
_ db: Database,
plaintext: Data,
for recipientHexEncodedX25519PublicKey: String,
using dependencies: SMKDependencies = SMKDependencies()
) throws -> Data {
guard let userEd25519KeyPair: Box.KeyPair = dependencies.storage.read({ db in Identity.fetchUserEd25519KeyPair(db) }) else {
guard let userEd25519KeyPair: Box.KeyPair = Identity.fetchUserEd25519KeyPair(db) else {
throw MessageSenderError.noUserED25519KeyPair
}
@ -30,13 +32,16 @@ extension MessageSender {
}
internal static func encryptWithSessionBlindingProtocol(
_ plaintext: Data,
_ db: Database,
plaintext: Data,
for recipientBlindedId: String,
openGroupPublicKey: String,
using dependencies: SMKDependencies = SMKDependencies()
) throws -> Data {
guard SessionId.Prefix(from: recipientBlindedId) == .blinded else { throw MessageSenderError.signingFailed }
guard let userEd25519KeyPair: Box.KeyPair = dependencies.storage.read({ db in Identity.fetchUserEd25519KeyPair(db) }) else {
guard SessionId.Prefix(from: recipientBlindedId) == .blinded else {
throw MessageSenderError.signingFailed
}
guard let userEd25519KeyPair: Box.KeyPair = Identity.fetchUserEd25519KeyPair(db) else {
throw MessageSenderError.noUserED25519KeyPair
}
guard let blindedKeyPair = dependencies.sodium.blindedKeyPair(serverPublicKey: openGroupPublicKey, edKeyPair: userEd25519KeyPair, genericHash: dependencies.genericHash) else {

View file

@ -206,8 +206,8 @@ public final class MessageSender {
message.sender = userPublicKey
message.recipient = {
switch destination {
case .contact(let publicKey, _): return publicKey
case .closedGroup(let groupPublicKey, _): return groupPublicKey
case .contact(let publicKey): return publicKey
case .closedGroup(let groupPublicKey): return groupPublicKey
case .openGroup, .openGroupInbox: preconditionFailure()
}
}()
@ -283,16 +283,17 @@ public final class MessageSender {
let ciphertext: Data
do {
switch destination {
case .contact(let publicKey, _):
ciphertext = try encryptWithSessionProtocol(plaintext, for: publicKey)
case .contact(let publicKey):
ciphertext = try encryptWithSessionProtocol(db, plaintext: plaintext, for: publicKey)
case .closedGroup(let groupPublicKey, _):
case .closedGroup(let groupPublicKey):
guard let encryptionKeyPair: ClosedGroupKeyPair = try? ClosedGroupKeyPair.fetchLatestKeyPair(db, threadId: groupPublicKey) else {
throw MessageSenderError.noKeyPair
}
ciphertext = try encryptWithSessionProtocol(
plaintext,
db,
plaintext: plaintext,
for: SessionId(.standard, publicKey: encryptionKeyPair.publicKey.bytes).hexString
)
@ -319,7 +320,7 @@ public final class MessageSender {
kind = .sessionMessage
senderPublicKey = ""
case .closedGroup(let groupPublicKey, _):
case .closedGroup(let groupPublicKey):
kind = .closedGroupMessage
senderPublicKey = groupPublicKey
@ -553,7 +554,8 @@ public final class MessageSender {
do {
ciphertext = try encryptWithSessionBlindingProtocol(
plaintext,
db,
plaintext: plaintext,
for: recipientBlindedPublicKey,
openGroupPublicKey: openGroupPublicKey,
using: dependencies
@ -636,107 +638,86 @@ public final class MessageSender {
let isMainAppActive: Bool = (UserDefaults.sharedLokiProject?[.isMainAppActive])
.defaulting(to: false)
var isSuccess = false
var errorCount = 0
return SnodeAPI
.sendMessage(
snodeMessage,
in: destination.namespace
in: {
switch destination {
case .closedGroup: return .legacyClosedGroup
default: return .`default`
}
}()
)
.subscribe(on: DispatchQueue.global(qos: .default))
.flatMap { result, totalCount -> AnyPublisher<Bool, Error> in
switch result {
case .success(let response):
// Don't emit if we've already succeeded
guard !isSuccess else {
return Just(false)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
isSuccess = true
let updatedMessage: Message = message
updatedMessage.serverHash = response.1.hash
let job: Job? = Job(
variant: .notifyPushServer,
behaviour: .runOnce,
details: NotifyPushServerJob.Details(message: snodeMessage)
)
let shouldNotify: Bool = {
switch updatedMessage {
case is VisibleMessage, is UnsendRequest: return !isSyncMessage
case let callMessage as CallMessage:
switch callMessage.kind {
case .preOffer: return true
default: return false
}
.flatMap { response -> AnyPublisher<Bool, Error> in
let updatedMessage: Message = message
updatedMessage.serverHash = response.1.hash
let job: Job? = Job(
variant: .notifyPushServer,
behaviour: .runOnce,
details: NotifyPushServerJob.Details(message: snodeMessage)
)
let shouldNotify: Bool = {
switch updatedMessage {
case is VisibleMessage, is UnsendRequest: return !isSyncMessage
case let callMessage as CallMessage:
switch callMessage.kind {
case .preOffer: return true
default: return false
}
}()
return dependencies.storage
.writePublisher { db -> Void in
try MessageSender.handleSuccessfulMessageSend(
db,
message: updatedMessage,
to: destination,
interactionId: data.interactionId,
isSyncMessage: isSyncMessage,
using: dependencies
)
default: return false
}
}()
guard shouldNotify && isMainAppActive else { return () }
return dependencies.storage
.writePublisher { db -> Void in
try MessageSender.handleSuccessfulMessageSend(
db,
message: updatedMessage,
to: destination,
interactionId: data.interactionId,
isSyncMessage: isSyncMessage,
using: dependencies
)
JobRunner.add(db, job: job)
return ()
}
.flatMap { _ -> AnyPublisher<Bool, Error> in
guard shouldNotify && !isMainAppActive else {
return Just(true)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
guard let job: Job = job else {
return Just(true)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
guard shouldNotify else { return () }
return Future<Bool, Error> { resolver in
NotifyPushServerJob.run(
job,
queue: DispatchQueue.global(qos: .default),
success: { _, _ in resolver(Result.success(true)) },
failure: { _, _, _ in
// Always fulfill because the notify PN server job isn't critical.
resolver(Result.success(true))
},
deferred: { _ in
// Always fulfill because the notify PN server job isn't critical.
resolver(Result.success(true))
}
)
}
JobRunner.add(db, job: job)
return ()
}
.flatMap { _ -> AnyPublisher<Bool, Error> in
guard shouldNotify && !isMainAppActive else {
return Just(true)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
.eraseToAnyPublisher()
case .failure(let error):
errorCount += 1
// Only process the error if all promises failed
guard errorCount == totalCount else {
return Just(false)
}
guard let job: Job = job else {
return Just(true)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
return Fail(error: error)
.eraseToAnyPublisher()
}
return Future<Bool, Error> { resolver in
NotifyPushServerJob.run(
job,
queue: DispatchQueue.global(qos: .default),
success: { _, _ in resolver(Result.success(true)) },
failure: { _, _, _ in
// Always fulfill because the notify PN server job isn't critical.
resolver(Result.success(true))
},
deferred: { _ in
// Always fulfill because the notify PN server job isn't critical.
resolver(Result.success(true))
}
)
}
.eraseToAnyPublisher()
}
.eraseToAnyPublisher()
}
.filter { $0 }
.handleEvents(
@ -960,8 +941,8 @@ public final class MessageSender {
try? ControlMessageProcessRecord(
threadId: {
switch destination {
case .contact(let publicKey, _): return publicKey
case .closedGroup(let groupPublicKey, _): return groupPublicKey
case .contact(let publicKey): return publicKey
case .closedGroup(let groupPublicKey): return groupPublicKey
case .openGroup(let roomToken, let server, _, _, _):
return OpenGroup.idFor(roomToken: roomToken, server: server)
@ -977,7 +958,7 @@ public final class MessageSender {
// the destination was a contact
// we didn't sync it already
let userPublicKey = getUserHexEncodedPublicKey(db)
if case .contact(let publicKey, let namespace) = destination, !isSyncMessage {
if case .contact(let publicKey) = destination, !isSyncMessage {
if let message = message as? VisibleMessage { message.syncTarget = publicKey }
if let message = message as? ExpirationTimerUpdate { message.syncTarget = publicKey }
@ -986,7 +967,7 @@ public final class MessageSender {
data: try prepareSendToSnodeDestination(
db,
message: message,
to: .contact(publicKey: userPublicKey, namespace: namespace),
to: .contact(publicKey: userPublicKey),
interactionId: interactionId,
userPublicKey: userPublicKey,
messageSendTimestamp: Int64(floor(Date().timeIntervalSince1970 * 1000)),

View file

@ -4,6 +4,7 @@ import Foundation
public extension Notification.Name {
// FIXME: Remove once `useSharedUtilForUserConfig` is permanent
static let initialConfigurationMessageReceived = Notification.Name("initialConfigurationMessageReceived")
static let missedCall = Notification.Name("missedCall")
}
@ -14,5 +15,6 @@ public extension Notification.Key {
@objc public extension NSNotification {
// FIXME: Remove once `useSharedUtilForUserConfig` is permanent
@objc static let initialConfigurationMessageReceived = Notification.Name.initialConfigurationMessageReceived.rawValue as NSString
}

View file

@ -8,7 +8,7 @@ import SessionSnodeKit
import SessionUtilitiesKit
public final class CurrentUserPoller: Poller {
public static var namespaces: [SnodeAPI.Namespace] = [.default, .userProfileConfig]
public static var namespaces: [SnodeAPI.Namespace] = [.default, .configUserProfile, .configContacts]
private var targetSnode: Atomic<Snode?> = Atomic(nil)
private var usedSnodes: Atomic<Set<Snode>> = Atomic([])

View file

@ -8,7 +8,7 @@ import SessionUtilitiesKit
extension OpenGroupAPI {
public final class Poller {
typealias PollResponse = [OpenGroupAPI.Endpoint: (info: ResponseInfoType, data: Codable?)]
typealias PollResponse = (info: ResponseInfoType, data: [OpenGroupAPI.Endpoint: Codable])
private let server: String
private var timer: Timer? = nil
@ -279,11 +279,11 @@ extension OpenGroupAPI {
using dependencies: OpenGroupManager.OGMDependencies = OpenGroupManager.OGMDependencies()
) {
let server: String = self.server
let validResponses: PollResponse = response
.filter { endpoint, endpointResponse in
let validResponses: [OpenGroupAPI.Endpoint: Codable] = response.data
.filter { endpoint, data in
switch endpoint {
case .capabilities:
guard (endpointResponse.data as? HTTP.BatchSubResponse<Capabilities>)?.body != nil else {
guard (data as? HTTP.BatchSubResponse<Capabilities>)?.body != nil else {
SNLog("Open group polling failed due to invalid capability data.")
return false
}
@ -291,8 +291,8 @@ extension OpenGroupAPI {
return true
case .roomPollInfo(let roomToken, _):
guard (endpointResponse.data as? HTTP.BatchSubResponse<RoomPollInfo>)?.body != nil else {
switch (endpointResponse.data as? HTTP.BatchSubResponse<RoomPollInfo>)?.code {
guard (data as? HTTP.BatchSubResponse<RoomPollInfo>)?.body != nil else {
switch (data as? HTTP.BatchSubResponse<RoomPollInfo>)?.code {
case 404: SNLog("Open group polling failed to retrieve info for unknown room '\(roomToken)'.")
default: SNLog("Open group polling failed due to invalid room info data.")
}
@ -303,10 +303,10 @@ extension OpenGroupAPI {
case .roomMessagesRecent(let roomToken), .roomMessagesBefore(let roomToken, _), .roomMessagesSince(let roomToken, _):
guard
let responseData: HTTP.BatchSubResponse<[Failable<Message>]> = endpointResponse.data as? HTTP.BatchSubResponse<[Failable<Message>]>,
let responseData: HTTP.BatchSubResponse<[Failable<Message>]> = data as? HTTP.BatchSubResponse<[Failable<Message>]>,
let responseBody: [Failable<Message>] = responseData.body
else {
switch (endpointResponse.data as? HTTP.BatchSubResponse<[Failable<Message>]>)?.code {
switch (data as? HTTP.BatchSubResponse<[Failable<Message>]>)?.code {
case 404: SNLog("Open group polling failed to retrieve messages for unknown room '\(roomToken)'.")
default: SNLog("Open group polling failed due to invalid messages data.")
}
@ -325,7 +325,7 @@ extension OpenGroupAPI {
case .inbox, .inboxSince, .outbox, .outboxSince:
guard
let responseData: HTTP.BatchSubResponse<[DirectMessage]?> = endpointResponse.data as? HTTP.BatchSubResponse<[DirectMessage]?>,
let responseData: HTTP.BatchSubResponse<[DirectMessage]?> = data as? HTTP.BatchSubResponse<[DirectMessage]?>,
!responseData.failedToParseBody
else {
SNLog("Open group polling failed due to invalid inbox/outbox data.")
@ -378,12 +378,12 @@ extension OpenGroupAPI {
return (capabilities, groups)
}
let changedResponses: PollResponse = validResponses
.filter { endpoint, endpointResponse in
let changedResponses: [OpenGroupAPI.Endpoint: Codable] = validResponses
.filter { endpoint, data in
switch endpoint {
case .capabilities:
guard
let responseData: HTTP.BatchSubResponse<Capabilities> = endpointResponse.data as? HTTP.BatchSubResponse<Capabilities>,
let responseData: HTTP.BatchSubResponse<Capabilities> = data as? HTTP.BatchSubResponse<Capabilities>,
let responseBody: Capabilities = responseData.body
else { return false }
@ -391,7 +391,7 @@ extension OpenGroupAPI {
case .roomPollInfo(let roomToken, _):
guard
let responseData: HTTP.BatchSubResponse<RoomPollInfo> = endpointResponse.data as? HTTP.BatchSubResponse<RoomPollInfo>,
let responseData: HTTP.BatchSubResponse<RoomPollInfo> = data as? HTTP.BatchSubResponse<RoomPollInfo>,
let responseBody: RoomPollInfo = responseData.body
else { return false }
guard let existingOpenGroup: OpenGroup = currentInfo?.groups.first(where: { $0.roomToken == roomToken }) else {
@ -424,11 +424,11 @@ extension OpenGroupAPI {
.updateAll(db, OpenGroup.Columns.pollFailureCount.set(to: 0))
}
try changedResponses.forEach { endpoint, endpointResponse in
try changedResponses.forEach { endpoint, data in
switch endpoint {
case .capabilities:
guard
let responseData: HTTP.BatchSubResponse<Capabilities> = endpointResponse.data as? HTTP.BatchSubResponse<Capabilities>,
let responseData: HTTP.BatchSubResponse<Capabilities> = data as? HTTP.BatchSubResponse<Capabilities>,
let responseBody: Capabilities = responseData.body
else { return }
@ -440,7 +440,7 @@ extension OpenGroupAPI {
case .roomPollInfo(let roomToken, _):
guard
let responseData: HTTP.BatchSubResponse<RoomPollInfo> = endpointResponse.data as? HTTP.BatchSubResponse<RoomPollInfo>,
let responseData: HTTP.BatchSubResponse<RoomPollInfo> = data as? HTTP.BatchSubResponse<RoomPollInfo>,
let responseBody: RoomPollInfo = responseData.body
else { return }
@ -455,7 +455,7 @@ extension OpenGroupAPI {
case .roomMessagesRecent(let roomToken), .roomMessagesBefore(let roomToken, _), .roomMessagesSince(let roomToken, _):
guard
let responseData: HTTP.BatchSubResponse<[Failable<Message>]> = endpointResponse.data as? HTTP.BatchSubResponse<[Failable<Message>]>,
let responseData: HTTP.BatchSubResponse<[Failable<Message>]> = data as? HTTP.BatchSubResponse<[Failable<Message>]>,
let responseBody: [Failable<Message>] = responseData.body
else { return }
@ -469,7 +469,7 @@ extension OpenGroupAPI {
case .inbox, .inboxSince, .outbox, .outboxSince:
guard
let responseData: HTTP.BatchSubResponse<[DirectMessage]?> = endpointResponse.data as? HTTP.BatchSubResponse<[DirectMessage]?>,
let responseData: HTTP.BatchSubResponse<[DirectMessage]?> = data as? HTTP.BatchSubResponse<[DirectMessage]?>,
!responseData.failedToParseBody
else { return }

View file

@ -66,13 +66,17 @@ public class Poller {
// MARK: - Private API
internal func startIfNeeded(for publicKey: String) {
guard isPolling.wrappedValue[publicKey] != true else { return }
// There might be a race condition where setUpPolling finishes before the timer is created
// if we only mark the group as polling after setUpPolling, in which case the poller
// wouldn't run and would miss messages
isPolling.mutate { $0[publicKey] = true }
setUpPolling(for: publicKey)
// Run on the 'pollerQueue' to ensure any 'Atomic' access doesn't block the main thread
// on startup
Threading.pollerQueue.async { [weak self] in
guard self?.isPolling.wrappedValue[publicKey] != true else { return }
// There might be a race condition where setUpPolling finishes before the timer is created
// if we only mark the group as polling after setUpPolling, in which case the poller
// wouldn't run and would miss messages
self?.isPolling.mutate { $0[publicKey] = true }
self?.setUpPolling(for: publicKey)
}
}
/// We want to initially trigger a poll against the target service node and then run the recursive polling,
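To make the deadlock fix above concrete, here is an illustrative shape for the `Atomic` wrapper (the real implementation lives in SessionUtilitiesKit and may differ): both `wrappedValue` and `mutate` take a lock, so a burst of pollers starting on the main thread can pile up behind it; hopping onto `Threading.pollerQueue` first keeps that contention off the main thread.
import Foundation

// Sketch only — not the project's actual Atomic implementation.
public final class Atomic<Value> {
    private let lock: NSLock = NSLock()
    private var value: Value

    public init(_ value: Value) { self.value = value }

    // Reading also acquires the lock, which is why startIfNeeded now runs off the main thread.
    public var wrappedValue: Value {
        lock.lock(); defer { lock.unlock() }
        return value
    }

    public func mutate(_ block: (inout Value) -> Void) {
        lock.lock(); defer { lock.unlock() }
        block(&value)
    }
}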

View file

@ -146,8 +146,8 @@ public struct MessageViewModel: FetchableRecordWithRowId, Decodable, Equatable,
// MARK: - Mutation
public func with(
attachments: Updatable<[Attachment]> = .existing,
reactionInfo: Updatable<[ReactionInfo]> = .existing
attachments: [Attachment]? = nil,
reactionInfo: [ReactionInfo]? = nil
) -> MessageViewModel {
return MessageViewModel(
threadId: self.threadId,
@ -845,11 +845,9 @@ public extension MessageViewModel.AttachmentInteractionInfo {
updatedPagedDataCache = updatedPagedDataCache.upserting(
dataToUpdate.with(
attachments: .update(
attachments
.sorted()
.map { $0.attachment }
)
attachments: attachments
.sorted()
.map { $0.attachment }
)
)
}
@ -927,7 +925,7 @@ public extension MessageViewModel.ReactionInfo {
else { return }
updatedPagedDataCache = updatedPagedDataCache.upserting(
dataToUpdate.with(reactionInfo: .update(reactionInfo.sorted()))
dataToUpdate.with(reactionInfo: reactionInfo.sorted())
)
pagedRowIdsWithNoReactions.remove(interactionRowId)
}
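A small usage sketch of the simplified mutation API above; the values are placeholders and the assumption is that a `nil` argument keeps the existing value, matching the `= nil` defaults.
// Placeholder values: `cellViewModel` is an existing MessageViewModel and
// `downloadedAttachments` a freshly loaded [Attachment].
let updatedViewModel: MessageViewModel = cellViewModel.with(
    attachments: downloadedAttachments,
    reactionInfo: nil   // assumed to keep the existing reaction info
)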

View file

@ -8,6 +8,28 @@ import SignalCoreKit
import SessionUtilitiesKit
public struct ProfileManager {
public enum AvatarUpdate {
case none
case remove
case uploadImage(UIImage)
case uploadFilePath(String)
case updateTo(url: String, key: Data, fileName: String?)
var image: UIImage? {
switch self {
case .uploadImage(let image): return image
default: return nil
}
}
var filePath: String? {
switch self {
case .uploadFilePath(let filePath): return filePath
default: return nil
}
}
}
// The max bytes for a user's profile name, encoded in UTF8.
// Before encrypting and submitting we NULL pad the name data to this length.
private static let nameDataLength: UInt = 64
@ -263,77 +285,85 @@ public struct ProfileManager {
public static func updateLocal(
queue: DispatchQueue,
profileName: String,
image: UIImage?,
imageFilePath: String?,
success: ((Database, Profile) throws -> ())? = nil,
avatarUpdate: AvatarUpdate = .none,
success: ((Database) throws -> ())? = nil,
failure: ((ProfileManagerError) -> ())? = nil
) {
prepareAndUploadAvatarImage(
queue: queue,
image: image,
imageFilePath: imageFilePath,
success: { fileInfo, newProfileKey in
// If we have no download url then we are removing the profile image
guard let (downloadUrl, fileName): (String, String) = fileInfo else {
Storage.shared.writeAsync { db in
let existingProfile: Profile = Profile.fetchOrCreateCurrentUser(db)
let userPublicKey: String = getUserHexEncodedPublicKey()
let isRemovingAvatar: Bool = {
switch avatarUpdate {
case .remove: return true
default: return false
}
}()
switch avatarUpdate {
case .none, .remove, .updateTo:
Storage.shared.writeAsync { db in
if isRemovingAvatar {
let existingProfileUrl: String? = try Profile
.filter(id: userPublicKey)
.select(.profilePictureUrl)
.asRequest(of: String.self)
.fetchOne(db)
let existingProfileFileName: String? = try Profile
.filter(id: userPublicKey)
.select(.profilePictureFileName)
.asRequest(of: String.self)
.fetchOne(db)
OWSLogger.verbose(existingProfile.profilePictureUrl != nil ?
// Remove any cached avatar image value
if let fileName: String = existingProfileFileName {
profileAvatarCache.mutate { $0[fileName] = nil }
}
OWSLogger.verbose(existingProfileUrl != nil ?
"Updating local profile on service with cleared avatar." :
"Updating local profile on service with no avatar."
)
let updatedProfile: Profile = try existingProfile
.with(
name: profileName,
profilePictureUrl: nil,
profilePictureFileName: nil,
profileEncryptionKey: (existingProfile.profilePictureUrl != nil ?
.update(newProfileKey) :
.existing
)
)
.saved(db)
try SessionUtil.update(
profile: updatedProfile,
in: .global(variant: .userProfile)
)
SNLog("Successfully updated service with profile.")
try success?(db, updatedProfile)
}
return
}
// Update user defaults
UserDefaults.standard[.lastProfilePictureUpload] = Date()
// Update the profile
Storage.shared.writeAsync { db in
let profile: Profile = try Profile
.fetchOrCreateCurrentUser(db)
.with(
name: profileName,
profilePictureUrl: .update(downloadUrl),
profilePictureFileName: .update(fileName),
profileEncryptionKey: .update(newProfileKey)
)
.saved(db)
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: userPublicKey,
name: profileName,
avatarUpdate: avatarUpdate,
sentTimestamp: Date().timeIntervalSince1970
)
SNLog("Successfully updated service with profile.")
try success?(db, profile)
try success?(db)
}
},
failure: failure
)
case .uploadFilePath, .uploadImage:
prepareAndUploadAvatarImage(
queue: queue,
image: avatarUpdate.image,
imageFilePath: avatarUpdate.filePath,
success: { downloadUrl, fileName, newProfileKey in
Storage.shared.writeAsync { db in
try ProfileManager.updateProfileIfNeeded(
db,
publicKey: userPublicKey,
name: profileName,
avatarUpdate: .updateTo(url: downloadUrl, key: newProfileKey, fileName: fileName),
sentTimestamp: Date().timeIntervalSince1970
)
SNLog("Successfully updated service with profile.")
try success?(db)
}
},
failure: failure
)
}
}
public static func prepareAndUploadAvatarImage(
private static func prepareAndUploadAvatarImage(
queue: DispatchQueue,
image: UIImage?,
imageFilePath: String?,
success: @escaping ((downloadUrl: String, fileName: String)?, Data) -> (),
success: @escaping ((downloadUrl: String, fileName: String, profileKey: Data)) -> (),
failure: ((ProfileManagerError) -> ())? = nil
) {
queue.async {
@ -348,7 +378,9 @@ public struct ProfileManager {
avatarImageData = try {
guard var image: UIImage = image else {
guard let imageFilePath: String = imageFilePath else { return nil }
guard let imageFilePath: String = imageFilePath else {
throw ProfileManagerError.invalidCall
}
let data: Data = try Data(contentsOf: URL(fileURLWithPath: imageFilePath))
@ -397,20 +429,8 @@ public struct ProfileManager {
// If we have no image then we should succeed (database changes happen in the callback)
guard let data: Data = avatarImageData else {
// Remove any cached avatar image value
let maybeExistingFileName: String? = Storage.shared
.read { db in
try Profile
.select(.profilePictureFileName)
.asRequest(of: String.self)
.fetchOne(db)
}
if let fileName: String = maybeExistingFileName {
profileAvatarCache.mutate { $0[fileName] = nil }
}
return success(nil, newProfileKey)
failure?(ProfileManagerError.invalidCall)
return
}
// If we have a new avatar image, we must first:
@ -469,9 +489,124 @@ public struct ProfileManager {
profileAvatarCache.mutate { $0[fileName] = data }
SNLog("Successfully uploaded avatar image.")
success((downloadUrl, fileName), newProfileKey)
success((downloadUrl, fileName, newProfileKey))
}
)
}
}
public static func updateProfileIfNeeded(
_ db: Database,
publicKey: String,
name: String?,
avatarUpdate: AvatarUpdate,
sentTimestamp: TimeInterval,
calledFromConfigHandling: Bool = false,
dependencies: Dependencies = Dependencies()
) throws {
let isCurrentUser = (publicKey == getUserHexEncodedPublicKey(db, dependencies: dependencies))
let profile: Profile = Profile.fetchOrCreate(id: publicKey)
var profileChanges: [ColumnAssignment] = []
// Name
if let name: String = name, !name.isEmpty, name != profile.name {
let shouldUpdate: Bool
if isCurrentUser {
shouldUpdate = given(UserDefaults.standard[.lastDisplayNameUpdate]) {
sentTimestamp > $0.timeIntervalSince1970
}
.defaulting(to: true)
}
else {
shouldUpdate = true
}
if shouldUpdate {
if isCurrentUser {
UserDefaults.standard[.lastDisplayNameUpdate] = Date(timeIntervalSince1970: sentTimestamp)
}
profileChanges.append(Profile.Columns.name.set(to: name))
}
}
// Profile picture & profile key
var avatarNeedsDownload: Bool = false
let shouldUpdateAvatar: Bool = {
guard isCurrentUser else { return true }
return given(UserDefaults.standard[.lastProfilePictureUpdate]) {
sentTimestamp > $0.timeIntervalSince1970
}
.defaulting(to: true)
}()
if shouldUpdateAvatar {
switch avatarUpdate {
case .none: break
case .uploadImage, .uploadFilePath: preconditionFailure("Invalid options for this function")
case .remove:
if isCurrentUser {
UserDefaults.standard[.lastProfilePictureUpdate] = Date(timeIntervalSince1970: sentTimestamp)
}
profileChanges.append(Profile.Columns.profilePictureUrl.set(to: nil))
profileChanges.append(Profile.Columns.profileEncryptionKey.set(to: nil))
// Profile filename (this isn't synchronized between devices so can be immediately saved)
_ = try? Profile
.filter(id: publicKey)
.updateAll(db, Profile.Columns.profilePictureFileName.set(to: nil))
case .updateTo(let url, let key, let fileName):
if
(
url != profile.profilePictureUrl ||
key != profile.profileEncryptionKey
) &&
key.count == ProfileManager.avatarAES256KeyByteLength &&
key != profile.profileEncryptionKey
{
profileChanges.append(Profile.Columns.profilePictureUrl.set(to: url))
profileChanges.append(Profile.Columns.profileEncryptionKey.set(to: key))
avatarNeedsDownload = true
}
// Profile filename (this isn't synchronized between devices so can be immediately saved)
if let fileName: String = fileName {
_ = try? Profile
.filter(id: publicKey)
.updateAll(db, Profile.Columns.profilePictureFileName.set(to: fileName))
}
}
}
// Persist any changes
if !profileChanges.isEmpty {
try profile.save(db)
if calledFromConfigHandling {
try Profile
.filter(id: publicKey)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
profileChanges
)
}
else {
try Profile
.filter(id: publicKey)
.updateAllAndConfig(db, profileChanges)
}
}
// Download the profile picture if needed
guard avatarNeedsDownload else { return }
db.afterNextTransaction { db in
// Need to refetch to ensure the db changes have occurred
ProfileManager.downloadAvatar(for: Profile.fetchOrCreate(id: publicKey))
}
}
}
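A hedged usage sketch of the reworked `updateLocal` entry point above; the image and log message are placeholders, and the success closure now receives only the `Database`.
ProfileManager.updateLocal(
    queue: DispatchQueue.global(qos: .default),
    profileName: "New display name",
    avatarUpdate: .uploadImage(pickedImage),   // or .remove / .none / .uploadFilePath(path)
    success: { db in
        // Runs inside the database write once updateProfileIfNeeded has applied the changes
    },
    failure: { error in
        SNLog("Failed to update local profile: \(error)")
    }
)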

View file

@ -8,6 +8,7 @@ public enum ProfileManagerError: LocalizedError {
case avatarEncryptionFailed
case avatarUploadFailed
case avatarUploadMaxFileSizeExceeded
case invalidCall
var localizedDescription: String {
switch self {
@ -16,6 +17,7 @@ public enum ProfileManagerError: LocalizedError {
case .avatarEncryptionFailed: return "Avatar encryption failed."
case .avatarUploadFailed: return "Avatar upload failed."
case .avatarUploadMaxFileSizeExceeded: return "Maximum file size exceeded."
case .invalidCall: return "Attempted to remove avatar using the wrong method."
}
}
}

View file

@ -143,7 +143,11 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
self.handleSuccessForIncomingCall(db, for: callMessage)
case let sharedConfigMessage as SharedConfigMessage:
try SessionUtil.handleConfigMessages(db, messages: [sharedConfigMessage])
try SessionUtil.handleConfigMessages(
db,
messages: [sharedConfigMessage],
publicKey: (processedMessage.threadId ?? "")
)
default: break
}
@ -214,9 +218,7 @@ public final class NotificationServiceExtension: UNNotificationServiceExtension
// If we need a config sync then trigger it now
if needsConfigSync {
Storage.shared.write { db in
try MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
}
ConfigurationSyncJob.enqueue()
}
checkIsAppReady()

View file

@ -92,9 +92,7 @@ final class ShareNavController: UINavigationController, ShareViewDelegate {
// If we need a config sync then trigger it now
if needsConfigSync {
Storage.shared.write { db in
try? MessageSender.syncConfiguration(db, forceSyncNow: true).sinkUntilComplete()
}
ConfigurationSyncJob.enqueue()
}
checkIsAppReady()

View file

@ -18,13 +18,6 @@ public enum GetSnodePoolJob: JobExecutor {
failure: @escaping (Job, Error?, Bool) -> (),
deferred: @escaping (Job) -> ()
) {
// If the user doesn't exist then don't do anything (when the user registers we run this
// job directly)
guard Identity.userExists() else {
deferred(job)
return
}
// If we already have cached Snodes then we still want to trigger the 'SnodeAPI.getSnodePool'
// but we want to succeed this job immediately (since it's marked as blocking), this allows us
// to block if we have no Snode pool and prevent other jobs from failing but avoids having to
@ -35,7 +28,10 @@ public enum GetSnodePoolJob: JobExecutor {
return
}
// If we don't have the snode pool cached then we should also try to build the path (this will
// speed up the onboarding process for new users because it can run before the user is created)
SnodeAPI.getSnodePool()
.flatMap { _ in OnionRequestAPI.getPath(excluding: nil) }
.subscribe(on: queue)
.receive(on: queue)
.sinkUntilComplete(
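A rough sketch of the control flow the comments in this hunk describe; the cache check, the `success`/`failure` call shapes, and the `receiveCompletion:` label are assumptions based on the surrounding diff, not the job's exact body.
// Succeed immediately when a pool is already cached so this blocking job doesn't hold up
// the queue, otherwise fetch the pool and pre-build an onion path before reporting success.
guard !hasCachedSnodePool else {                    // assumed cache check
    SnodeAPI.getSnodePool().sinkUntilComplete()     // still refresh the pool in the background
    success(job, false)
    return
}

SnodeAPI.getSnodePool()
    .flatMap { _ in OnionRequestAPI.getPath(excluding: nil) }
    .subscribe(on: queue)
    .receive(on: queue)
    .sinkUntilComplete(
        receiveCompletion: { result in
            switch result {
                case .finished: success(job, false)
                case .failure(let error): failure(job, error, false)
            }
        }
    )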

View file

@ -6,7 +6,7 @@ extension SnodeAPI {
public class SendMessageRequest: SnodeAuthenticatedRequestBody {
enum CodingKeys: String, CodingKey {
case namespace
case signatureTimestamp = "sig_timestamp"
case signatureTimestamp = "timestamp"//"sig_timestamp" // TODO: Add this back once the snodes are fixed!!
}
let message: SnodeMessage

View file

@ -209,7 +209,7 @@ public enum OnionRequestAPI: OnionRequestAPIType {
}
/// Returns a `Path` to be used for building an onion request. Builds new paths as needed.
private static func getPath(excluding snode: Snode?) -> AnyPublisher<[Snode], Error> {
internal static func getPath(excluding snode: Snode?) -> AnyPublisher<[Snode], Error> {
guard pathSize >= 1 else { preconditionFailure("Can't build path of size zero.") }
let paths: [[Snode]] = OnionRequestAPI.paths

View file

@ -7,6 +7,8 @@ import GRDB
import SessionUtilitiesKit
public final class SnodeAPI {
public typealias TargetedMessage = (message: SnodeMessage, namespace: Namespace)
internal static let sodium: Atomic<Sodium> = Atomic(Sodium())
private static var hasLoadedSnodePool: Atomic<Bool> = Atomic(false)
@ -47,7 +49,6 @@ public final class SnodeAPI {
]
}()
private static let snodeFailureThreshold: Int = 3
private static let targetSwarmSnodeCount: Int = 2
private static let minSnodePoolCount: Int = 12
private static func offsetTimestampMsNow() -> UInt64 {
@ -269,13 +270,6 @@ public final class SnodeAPI {
.eraseToAnyPublisher()
}
public static func getTargetSnodes(for publicKey: String) -> AnyPublisher<[Snode], Error> {
// shuffled() uses the system's default random generator, which is cryptographically secure
return getSwarm(for: publicKey)
.map { Array($0.shuffled().prefix(targetSwarmSnodeCount)) }
.eraseToAnyPublisher()
}
public static func getSwarm(
for publicKey: String,
using dependencies: SSKDependencies = SSKDependencies()
@ -422,19 +416,21 @@ public final class SnodeAPI {
)
.decoded(as: responseTypes, using: dependencies)
.map { batchResponse -> [SnodeAPI.Namespace: (info: ResponseInfoType, data: (messages: [SnodeReceivedMessage], lastHash: String?)?)] in
zip(namespaces, batchResponse)
zip(namespaces, batchResponse.responses)
.reduce(into: [:]) { result, next in
guard let messageResponse: GetMessagesResponse = (next.1.1 as? HTTP.BatchSubResponse<GetMessagesResponse>)?.body else {
guard
let subResponse: HTTP.BatchSubResponse<GetMessagesResponse> = (next.1 as? HTTP.BatchSubResponse<GetMessagesResponse>),
let messageResponse: GetMessagesResponse = subResponse.body
else {
return
}
let namespace: SnodeAPI.Namespace = next.0
let requestInfo: ResponseInfoType = next.1.0
result[namespace] = (
requestInfo,
(
messageResponse.messages
info: subResponse.responseInfo,
data: (
messages: messageResponse.messages
.compactMap { rawMessage -> SnodeReceivedMessage? in
SnodeReceivedMessage(
snode: snode,
@ -443,7 +439,7 @@ public final class SnodeAPI {
rawMessage: rawMessage
)
},
namespaceLastHash[namespace]
lastHash: namespaceLastHash[namespace]
)
)
}
@ -453,13 +449,13 @@ public final class SnodeAPI {
.eraseToAnyPublisher()
}
// MARK: Store
// MARK: - Store
public static func sendMessage(
_ message: SnodeMessage,
in namespace: Namespace,
using dependencies: SSKDependencies = SSKDependencies()
) -> AnyPublisher<(Result<(ResponseInfoType, SendMessagesResponse), Error>, Int), Error> {
) -> AnyPublisher<(ResponseInfoType, SendMessagesResponse), Error> {
let publicKey: String = (Features.useTestnet ?
message.recipient.removingIdPrefixIfNeeded() :
message.recipient
@ -511,27 +507,125 @@ public final class SnodeAPI {
.eraseToAnyPublisher()
}
return getTargetSnodes(for: publicKey)
return getSwarm(for: publicKey)
.subscribe(on: Threading.workQueue)
.flatMap { targetSnodes -> AnyPublisher<(Result<(ResponseInfoType, SendMessagesResponse), Error>, Int), Error> in
Publishers
.MergeMany(
targetSnodes
.map { targetSnode -> AnyPublisher<Result<(ResponseInfoType, SendMessagesResponse), Error>, Never> in
return sendMessage(to: targetSnode)
.retry(maxRetryCount)
.eraseToAnyPublisher()
.asResult()
}
)
.map { result in (result, targetSnodes.count) }
.setFailureType(to: Error.self)
.flatMap { swarm -> AnyPublisher<(ResponseInfoType, SendMessagesResponse), Error> in
guard let snode: Snode = swarm.randomElement() else {
return Fail(error: SnodeAPIError.generic)
.eraseToAnyPublisher()
}
return sendMessage(to: snode)
.retry(maxRetryCount)
.eraseToAnyPublisher()
}
.retry(maxRetryCount)
.eraseToAnyPublisher()
}
// MARK: Edit
public static func sendConfigMessages(
_ targetedMessages: [TargetedMessage],
oldHashes: [String],
using dependencies: SSKDependencies = SSKDependencies()
) -> AnyPublisher<HTTP.BatchResponse, Error> {
guard
!targetedMessages.isEmpty,
let recipient: String = targetedMessages.first?.message.recipient
else {
return Fail(error: SnodeAPIError.generic)
.eraseToAnyPublisher()
}
// TODO: Need to get either the closed group subKey or the userEd25519 key for auth
guard let userED25519KeyPair = Identity.fetchUserEd25519KeyPair() else {
return Fail(error: SnodeAPIError.noKeyPair)
.eraseToAnyPublisher()
}
let userX25519PublicKey: String = getUserHexEncodedPublicKey()
let publicKey: String = (Features.useTestnet ?
recipient.removingIdPrefixIfNeeded() :
recipient
)
var requests: [SnodeAPI.BatchRequest.Info] = targetedMessages
.map { message, namespace in
// Check if this namespace requires authentication
guard namespace.requiresReadAuthentication else {
return BatchRequest.Info(
request: SnodeRequest(
endpoint: .sendMessage,
body: LegacySendMessagesRequest(
message: message,
namespace: namespace
)
),
responseType: SendMessagesResponse.self
)
}
return BatchRequest.Info(
request: SnodeRequest(
endpoint: .sendMessage,
body: SendMessageRequest(
message: message,
namespace: namespace,
subkey: nil, // TODO: Need to get this
timestampMs: SnodeAPI.offsetTimestampMsNow(),
ed25519PublicKey: userED25519KeyPair.publicKey,
ed25519SecretKey: userED25519KeyPair.secretKey
)
),
responseType: SendMessagesResponse.self
)
}
// If we had any previous config messages then we should delete them
if !oldHashes.isEmpty {
requests.append(
BatchRequest.Info(
request: SnodeRequest(
endpoint: .deleteMessages,
body: DeleteMessagesRequest(
messageHashes: oldHashes,
requireSuccessfulDeletion: false,
pubkey: userX25519PublicKey,
ed25519PublicKey: userED25519KeyPair.publicKey,
ed25519SecretKey: userED25519KeyPair.secretKey
)
),
responseType: DeleteMessagesResponse.self
)
)
}
let responseTypes = requests.map { $0.responseType }
return getSwarm(for: publicKey)
.subscribe(on: Threading.workQueue)
.flatMap { swarm -> AnyPublisher<HTTP.BatchResponse, Error> in
guard let snode: Snode = swarm.randomElement() else {
return Fail(error: SnodeAPIError.generic)
.eraseToAnyPublisher()
}
return SnodeAPI
.send(
request: SnodeRequest(
endpoint: .sequence,
body: BatchRequest(requests: requests)
),
to: snode,
associatedWith: publicKey,
using: dependencies
)
.eraseToAnyPublisher()
.decoded(as: responseTypes, using: dependencies)
.eraseToAnyPublisher()
}
.retry(maxRetryCount)
.eraseToAnyPublisher()
}
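A hedged usage sketch of the new batched config send above; the message values and hash list are placeholders.
// Push the changed config messages for each namespace in a single `sequence` batch and
// delete the hashes of the configs they replace.
SnodeAPI
    .sendConfigMessages(
        [
            (message: userProfileConfigMessage, namespace: .configUserProfile),
            (message: contactsConfigMessage, namespace: .configContacts)
        ],
        oldHashes: obsoleteConfigHashes
    )
    .subscribe(on: DispatchQueue.global(qos: .userInitiated))
    .sinkUntilComplete()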
// MARK: - Edit
public static func updateExpiry(
publicKey: String,

View file

@ -3,10 +3,12 @@
import Foundation
public extension SnodeAPI {
enum Namespace: Int, Codable {
enum Namespace: Int, Codable, Hashable {
case `default` = 0
case userProfileConfig = 2
case configUserProfile = 2
case configContacts = 3
case configClosedGroupInfo = 11
case legacyClosedGroup = -10

View file

@ -65,14 +65,12 @@ public extension Identity {
)
}
static func store(seed: Data, ed25519KeyPair: KeyPair, x25519KeyPair: KeyPair) {
Storage.shared.write { db in
try Identity(variant: .seed, data: seed).save(db)
try Identity(variant: .ed25519SecretKey, data: Data(ed25519KeyPair.secretKey)).save(db)
try Identity(variant: .ed25519PublicKey, data: Data(ed25519KeyPair.publicKey)).save(db)
try Identity(variant: .x25519PrivateKey, data: Data(x25519KeyPair.secretKey)).save(db)
try Identity(variant: .x25519PublicKey, data: Data(x25519KeyPair.publicKey)).save(db)
}
static func store(_ db: Database, seed: Data, ed25519KeyPair: KeyPair, x25519KeyPair: KeyPair) throws {
try Identity(variant: .seed, data: seed).save(db)
try Identity(variant: .ed25519SecretKey, data: Data(ed25519KeyPair.secretKey)).save(db)
try Identity(variant: .ed25519PublicKey, data: Data(ed25519KeyPair.publicKey)).save(db)
try Identity(variant: .x25519PrivateKey, data: Data(x25519KeyPair.secretKey)).save(db)
try Identity(variant: .x25519PublicKey, data: Data(x25519KeyPair.publicKey)).save(db)
}
static func userExists(_ db: Database? = nil) -> Bool {
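A short sketch of a call site adapted to the new throwing `store(_:seed:ed25519KeyPair:x25519KeyPair:)` signature above; the key material values are placeholders and the caller now owns the write transaction.
Storage.shared.write { db in
    try Identity.store(
        db,
        seed: generatedSeed,
        ed25519KeyPair: ed25519KeyPair,
        x25519KeyPair: x25519KeyPair
    )
}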

View file

@ -102,6 +102,10 @@ public struct Job: Codable, Equatable, Identifiable, FetchableRecord, MutablePer
/// This is a job that runs once whenever an attachment is downloaded to attempt to decode and properly
/// download the attachment
case attachmentDownload
/// This is a job that runs once whenever the user config or a closed group config changes; it retrieves the
/// state of all config objects and syncs any that are flagged as needing to be synced
case configurationSync
}
public enum Behaviour: Int, Codable, DatabaseValueConvertible, CaseIterable {
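For reference, the call sites elsewhere in this commit trigger the new variant like so (a sketch mirroring the NotificationServiceExtension and ShareNavController hunks above, not additional behaviour):
// Replaces the old `MessageSender.syncConfiguration(db, forceSyncNow: true)` write block.
if needsConfigSync {
    ConfigurationSyncJob.enqueue()
}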

View file

@ -1,6 +1,10 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
@objc(SNFeatures)
public final class Features : NSObject {
public static let useOnionRequests = true
public static let useTestnet = false
import Foundation
public final class Features {
public static let useOnionRequests: Bool = true
public static let useTestnet: Bool = false
public static let useSharedUtilForUserConfig: Bool = true
}

View file

@ -65,7 +65,8 @@ public final class JobRunner {
jobVariants.remove(.attachmentUpload),
jobVariants.remove(.messageSend),
jobVariants.remove(.notifyPushServer),
jobVariants.remove(.sendReadReceipts)
jobVariants.remove(.sendReadReceipts),
jobVariants.remove(.configurationSync)
].compactMap { $0 }
)
let messageReceiveQueue: JobQueue = JobQueue(

View file

@ -1,121 +0,0 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
public enum Updatable<Wrapped>: ExpressibleByNilLiteral {
/// A cleared value.
///
/// In code, the clearing of a value is typically written using the `nil`
/// literal rather than the explicit `.remove` enumeration case.
case remove
/// The existing value, this will leave whatever value is currently available.
case existing
/// An updated value, stored as `Wrapped`.
case update(Wrapped)
// MARK: - ExpressibleByNilLiteral
public init(nilLiteral: ()) {
self = .remove
}
public static func updateIf(_ maybeValue: Wrapped?) -> Updatable<Wrapped> {
switch maybeValue {
case .some(let value): return .update(value)
default: return .existing
}
}
public static func updateTo(_ maybeValue: Wrapped?) -> Updatable<Wrapped> {
switch maybeValue {
case .some(let value): return .update(value)
default: return .remove
}
}
// MARK: - Functions
public func value(existing: Wrapped) -> Wrapped? {
switch self {
case .remove: return nil
case .existing: return existing
case .update(let newValue): return newValue
}
}
public func value(existing: Wrapped) -> Wrapped {
switch self {
case .remove: fatalError("Attempted to assign a 'removed' value to a non-null")
case .existing: return existing
case .update(let newValue): return newValue
}
}
}
// MARK: - Coalesing-nil operator
public func ?? <T>(updatable: Updatable<T>, existingValue: @autoclosure () throws -> T) rethrows -> T {
switch updatable {
case .remove: fatalError("Attempted to assign a 'removed' value to a non-null")
case .existing: return try existingValue()
case .update(let newValue): return newValue
}
}
public func ?? <T>(updatable: Updatable<T>, existingValue: @autoclosure () throws -> T?) rethrows -> T? {
switch updatable {
case .remove: return nil
case .existing: return try existingValue()
case .update(let newValue): return newValue
}
}
public func ?? <T>(updatable: Updatable<Optional<T>>, existingValue: @autoclosure () throws -> T?) rethrows -> T? {
switch updatable {
case .remove: return nil
case .existing: return try existingValue()
case .update(let newValue): return newValue
}
}
// MARK: - ExpressibleBy Conformance
extension Updatable {
public init(_ value: Wrapped) {
self = .update(value)
}
}
extension Updatable: ExpressibleByUnicodeScalarLiteral, ExpressibleByExtendedGraphemeClusterLiteral, ExpressibleByStringLiteral where Wrapped == String {
public init(stringLiteral value: Wrapped) {
self = .update(value)
}
public init(extendedGraphemeClusterLiteral value: Wrapped) {
self = .update(value)
}
public init(unicodeScalarLiteral value: Wrapped) {
self = .update(value)
}
}
extension Updatable: ExpressibleByIntegerLiteral where Wrapped == Int {
public init(integerLiteral value: Int) {
self = .update(value)
}
}
extension Updatable: ExpressibleByFloatLiteral where Wrapped == Double {
public init(floatLiteral value: Double) {
self = .update(value)
}
}
extension Updatable: ExpressibleByBooleanLiteral where Wrapped == Bool {
public init(booleanLiteral value: Bool) {
self = .update(value)
}
}

View file

@ -4,16 +4,20 @@ import Foundation
import Combine
public extension HTTP {
// MARK: - Convenience Aliases
typealias BatchResponse = [(ResponseInfoType, Codable?)]
typealias BatchResponseTypes = [Codable.Type]
// MARK: - BatchResponse
struct BatchResponse {
public let info: ResponseInfoType
public let responses: [Codable]
}
// MARK: - BatchSubResponse<T>
struct BatchSubResponse<T: Codable>: Codable {
struct BatchSubResponse<T: Codable>: BatchSubResponseType {
/// The numeric http response code (e.g. 200 for success)
public let code: Int32
public let code: Int
/// Any headers returned by the request
public let headers: [String: String]
@ -25,7 +29,7 @@ public extension HTTP {
public let failedToParseBody: Bool
public init(
code: Int32,
code: Int,
headers: [String: String] = [:],
body: T? = nil,
failedToParseBody: Bool = false
@ -38,13 +42,23 @@ public extension HTTP {
}
}
public protocol BatchSubResponseType: Codable {
var code: Int { get }
var headers: [String: String] { get }
var failedToParseBody: Bool { get }
}
extension BatchSubResponseType {
public var responseInfo: ResponseInfoType { HTTP.ResponseInfo(code: code, headers: headers) }
}
public extension HTTP.BatchSubResponse {
init(from decoder: Decoder) throws {
let container: KeyedDecodingContainer<CodingKeys> = try decoder.container(keyedBy: CodingKeys.self)
let body: T? = try? container.decode(T.self, forKey: .body)
self = HTTP.BatchSubResponse(
code: try container.decode(Int32.self, forKey: .code),
code: try container.decode(Int.self, forKey: .code),
headers: ((try? container.decode([String: String].self, forKey: .headers)) ?? [:]),
body: body,
failedToParseBody: (
@ -111,13 +125,15 @@ public extension AnyPublisher where Output == (ResponseInfoType, Data?), Failure
do {
// TODO: Remove the 'Swift.'
let result: HTTP.BatchResponse = try Swift.zip(dataArray, types)
.map { data, type in try type.decoded(from: data, using: dependencies) }
.map { data in (responseInfo, data) }
return Just(result)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
return Just(
HTTP.BatchResponse(
info: responseInfo,
responses: try Swift.zip(dataArray, types)
.map { data, type in try type.decoded(from: data, using: dependencies) }
)
)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
catch {
return Fail(error: HTTPError.parsingFailed)
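An illustrative consumer of the reshaped `HTTP.BatchResponse` above; it assumes each decoded element conforms to `BatchSubResponseType`, as `HTTP.BatchSubResponse<T>` now does.
func logBatch(_ batch: HTTP.BatchResponse) {
    // One outer `info` for the whole request, plus one decoded sub-response per sub-request.
    print("Received \(batch.responses.count) sub-responses")

    batch.responses
        .compactMap { $0 as? BatchSubResponseType }
        .forEach { subResponse in
            print("  code: \(subResponse.code), parsedBody: \(!subResponse.failedToParseBody)")
        }
}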

View file

@ -77,9 +77,12 @@ public enum AppSetup {
// After the migrations have run but before the migration completion we load the
// SessionUtil state and update the 'needsConfigSync' flag based on whether the
// configs also need to be sync'ed
SessionUtil.loadState(
ed25519SecretKey: Identity.fetchUserEd25519KeyPair()?.secretKey
)
if Identity.userExists() {
SessionUtil.loadState(
userPublicKey: getUserHexEncodedPublicKey(),
ed25519SecretKey: Identity.fetchUserEd25519KeyPair()?.secretKey
)
}
DispatchQueue.main.async {
migrationsCompletion(result, (needsConfigSync || SessionUtil.needsSync))
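Finally, a hedged sketch of what a migrations-completion handler can do with the combined flag passed above; the `Result` generic parameters are assumptions, and the closure shape simply mirrors the call in this hunk.
let migrationsCompletion: (Result<Void, Error>, Bool) -> Void = { result, needsConfigSync in
    guard case .success = result else { return }

    // Replaces the old forced MessageSender.syncConfiguration call on launch.
    if needsConfigSync {
        ConfigurationSyncJob.enqueue()
    }
}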