Updated to the latest libSession, fixed remaining items
Updated to the latest libSession version. Updated the 'hidden' logic to be based on a negative 'priority' value. Added an index on the Quote table to speed up the conversation query. Fixed an odd behaviour with GRDB and Combine (and simplified the interface as well). Fixed an issue where migrations could fail.
This commit is contained in:
parent
fa39b5f61c
commit
8c8453d922
|
@ -216,6 +216,7 @@ public final class SessionCall: CurrentCallProtocol, WebRTCSessionDelegate {
|
|||
let thread: SessionThread = try? SessionThread.fetchOne(db, id: sessionId)
|
||||
else { return }
|
||||
|
||||
let webRTCSession: WebRTCSession = self.webRTCSession
|
||||
let timestampMs: Int64 = SnodeAPI.currentOffsetTimestampMs()
|
||||
let message: CallMessage = CallMessage(
|
||||
uuid: self.uuid,
|
||||
|
@ -235,26 +236,21 @@ public final class SessionCall: CurrentCallProtocol, WebRTCSessionDelegate {
|
|||
|
||||
self.callInteractionId = interaction?.id
|
||||
|
||||
try? self.webRTCSession
|
||||
try? webRTCSession
|
||||
.sendPreOffer(
|
||||
db,
|
||||
message: message,
|
||||
interactionId: interaction?.id,
|
||||
in: thread
|
||||
)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { [weak self] result in
|
||||
switch result {
|
||||
case .failure: break
|
||||
case .finished:
|
||||
Storage.shared.writeAsync { db in
|
||||
self?.webRTCSession.sendOffer(db, to: sessionId)
|
||||
}
|
||||
|
||||
self?.setupTimeoutTimer()
|
||||
}
|
||||
// Start the timeout timer for the call
|
||||
.handleEvents(receiveOutput: { [weak self] _ in self?.setupTimeoutTimer() })
|
||||
.flatMap { _ in
|
||||
Storage.shared.writePublisherFlatMap { db -> AnyPublisher<Void, Error> in
|
||||
webRTCSession.sendOffer(db, to: sessionId)
|
||||
}
|
||||
)
|
||||
}
|
||||
.sinkUntilComplete()
|
||||
}
|
||||
|
||||
func answerSessionCall() {
|
||||
|
@ -435,9 +431,10 @@ public final class SessionCall: CurrentCallProtocol, WebRTCSessionDelegate {
|
|||
let webRTCSession: WebRTCSession = self.webRTCSession
|
||||
|
||||
Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
webRTCSession.sendOffer(db, to: sessionId, isRestartingICEConnection: true)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.sinkUntilComplete()
|
||||
}
|
||||
|
||||
|
|
|
@ -465,7 +465,7 @@ final class EditClosedGroupVC: BaseVC, UITableViewDataSource, UITableViewDelegat
|
|||
|
||||
ModalActivityIndicatorViewController.present(fromViewController: navigationController) { _ in
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { db -> AnyPublisher<Void, Error> in
|
||||
.writePublisherFlatMap { db -> AnyPublisher<Void, Error> in
|
||||
if !updatedMemberIds.contains(userPublicKey) {
|
||||
try MessageSender.leave(
|
||||
db,
|
||||
|
@ -485,6 +485,7 @@ final class EditClosedGroupVC: BaseVC, UITableViewDataSource, UITableViewDelegat
|
|||
name: updatedName
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { [weak self] result in
|
||||
|
|
|
@ -333,9 +333,10 @@ final class NewClosedGroupVC: BaseVC, UITableViewDataSource, UITableViewDelegate
|
|||
let message: String? = (selectedContacts.count > 20 ? "GROUP_CREATION_PLEASE_WAIT".localized() : nil)
|
||||
ModalActivityIndicatorViewController.present(fromViewController: navigationController!, message: message) { [weak self] _ in
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisherFlatMap { db in
|
||||
try MessageSender.createClosedGroup(db, name: name, members: selectedContacts)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
|
|
|
@ -448,7 +448,7 @@ extension ConversationVC:
|
|||
|
||||
// Send the message
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { [weak self] db in
|
||||
.writePublisher { [weak self] db in
|
||||
// Let the viewModel know we are about to send a message
|
||||
self?.viewModel.sentMessageBeforeUpdate = true
|
||||
|
||||
|
@ -517,6 +517,7 @@ extension ConversationVC:
|
|||
threadVariant: threadVariant
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { [weak self] _ in
|
||||
self?.handleMessageSent()
|
||||
|
@ -579,7 +580,7 @@ extension ConversationVC:
|
|||
|
||||
// Send the message
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { [weak self] db in
|
||||
.writePublisher { [weak self] db in
|
||||
// Let the viewModel know we are about to send a message
|
||||
self?.viewModel.sentMessageBeforeUpdate = true
|
||||
|
||||
|
@ -625,6 +626,7 @@ extension ConversationVC:
|
|||
threadVariant: threadVariant
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { [weak self] _ in
|
||||
self?.handleMessageSent()
|
||||
|
@ -1222,7 +1224,7 @@ extension ConversationVC:
|
|||
guard cellViewModel.threadVariant == .community else { return }
|
||||
|
||||
Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db -> AnyPublisher<(OpenGroupAPI.ReactionRemoveAllResponse, OpenGroupAPI.PendingChange), Error> in
|
||||
.readPublisherFlatMap { db -> AnyPublisher<(OpenGroupAPI.ReactionRemoveAllResponse, OpenGroupAPI.PendingChange), Error> in
|
||||
guard
|
||||
let openGroup: OpenGroup = try? OpenGroup
|
||||
.fetchOne(db, id: cellViewModel.threadId),
|
||||
|
@ -1253,6 +1255,7 @@ extension ConversationVC:
|
|||
.map { _, response in (response, pendingChange) }
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.handleEvents(
|
||||
receiveOutput: { response, pendingChange in
|
||||
OpenGroupManager
|
||||
|
@ -1310,7 +1313,7 @@ extension ConversationVC:
|
|||
|
||||
// Perform the sending logic
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { [weak self] db -> AnyPublisher<MessageSender.PreparedSendData?, Error> in
|
||||
.writePublisherFlatMap { [weak self] db -> AnyPublisher<MessageSender.PreparedSendData?, Error> in
|
||||
// Update the thread to be visible (if it isn't already)
|
||||
if self?.viewModel.threadData.threadShouldBeVisible == false {
|
||||
_ = try SessionThread
|
||||
|
@ -1467,6 +1470,7 @@ extension ConversationVC:
|
|||
.map { _ in nil }
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.flatMap { maybeSendData -> AnyPublisher<Void, Error> in
|
||||
guard let sendData: MessageSender.PreparedSendData = maybeSendData else {
|
||||
return Just(())
|
||||
|
@ -1598,7 +1602,7 @@ extension ConversationVC:
|
|||
}
|
||||
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.writePublisherFlatMap { db in
|
||||
OpenGroupManager.shared.add(
|
||||
db,
|
||||
roomToken: room,
|
||||
|
@ -1607,6 +1611,7 @@ extension ConversationVC:
|
|||
calledFromConfigHandling: false
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
|
@ -1787,6 +1792,7 @@ extension ConversationVC:
|
|||
}
|
||||
}
|
||||
.flatMap { _ in request }
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { [weak self] result in
|
||||
|
@ -1893,7 +1899,7 @@ extension ConversationVC:
|
|||
// Delete the message from the open group
|
||||
deleteRemotely(
|
||||
from: self,
|
||||
request: Storage.shared.readPublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
request: Storage.shared.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.messageDelete(
|
||||
db,
|
||||
id: openGroupServerMessageId,
|
||||
|
@ -2091,7 +2097,7 @@ extension ConversationVC:
|
|||
cancelStyle: .alert_text,
|
||||
onConfirm: { [weak self] _ in
|
||||
Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db -> AnyPublisher<Void, Error> in
|
||||
.readPublisherFlatMap { db -> AnyPublisher<Void, Error> in
|
||||
guard let openGroup: OpenGroup = try OpenGroup.fetchOne(db, id: threadId) else {
|
||||
throw StorageError.objectNotFound
|
||||
}
|
||||
|
@ -2106,6 +2112,7 @@ extension ConversationVC:
|
|||
.map { _ in () }
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
|
@ -2147,7 +2154,7 @@ extension ConversationVC:
|
|||
cancelStyle: .alert_text,
|
||||
onConfirm: { [weak self] _ in
|
||||
Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db -> AnyPublisher<Void, Error> in
|
||||
.readPublisherFlatMap { db -> AnyPublisher<Void, Error> in
|
||||
guard let openGroup: OpenGroup = try OpenGroup.fetchOne(db, id: threadId) else {
|
||||
throw StorageError.objectNotFound
|
||||
}
|
||||
|
@ -2162,6 +2169,7 @@ extension ConversationVC:
|
|||
.map { _ in () }
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
|
@ -2407,7 +2415,7 @@ extension ConversationVC {
|
|||
else { return }
|
||||
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisher { db in
|
||||
// If we aren't creating a new thread (ie. sending a message request) then send a
|
||||
// messageRequestResponse back to the sender (this allows the sender to know that
|
||||
// they have been approved and can now use this contact in closed groups)
|
||||
|
@ -2435,6 +2443,8 @@ extension ConversationVC {
|
|||
.set(to: contact.didApproveMe || !isNewThread)
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { _ in
|
||||
// Update the UI
|
||||
|
|
|
@ -549,7 +549,7 @@ final class ConversationVC: BaseVC, SessionUtilRespondingViewController, Convers
|
|||
if
|
||||
viewModel.threadData.threadIsNoteToSelf == false &&
|
||||
viewModel.threadData.threadShouldBeVisible == false &&
|
||||
!SessionUtil.conversationExistsInConfig(
|
||||
!SessionUtil.conversationVisibleInConfig(
|
||||
threadId: threadId,
|
||||
threadVariant: viewModel.threadData.threadVariant
|
||||
)
|
||||
|
@ -1399,6 +1399,7 @@ final class ConversationVC: BaseVC, SessionUtilRespondingViewController, Convers
|
|||
// after the app goes into background and goes back in foreground.
|
||||
DispatchQueue.main.async {
|
||||
self.snInputView.text = self.snInputView.text
|
||||
completion?()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -121,8 +121,7 @@ public class ConversationViewModel: OWSAudioPlayerDelegate {
|
|||
.defaulting(to: false)
|
||||
}
|
||||
),
|
||||
currentUserIsClosedGroupMember: ((self.initialThreadVariant != .legacyGroup && self.initialThreadVariant != .group) ?
|
||||
nil :
|
||||
currentUserIsClosedGroupMember: (![.legacyGroup, .group].contains(self.initialThreadVariant) ? nil :
|
||||
Storage.shared.read { db in
|
||||
GroupMember
|
||||
.filter(GroupMember.Columns.groupId == self.threadId)
|
||||
|
|
|
@ -102,8 +102,8 @@ public class HomeViewModel {
|
|||
return SQL("""
|
||||
JOIN \(Profile.self) ON (
|
||||
( -- Contact profile change
|
||||
\(SQL("\(thread[.variant]) = \(SessionThread.Variant.contact)")) AND
|
||||
\(profile[.id]) = \(thread[.id])
|
||||
\(profile[.id]) = \(thread[.id]) AND
|
||||
\(SQL("\(thread[.variant]) = \(SessionThread.Variant.contact)"))
|
||||
) OR ( -- Closed group profile change
|
||||
\(SQL("\(thread[.variant]) IN \(threadVariants)")) AND (
|
||||
profile.id = ( -- Front profile
|
||||
|
@ -111,8 +111,8 @@ public class HomeViewModel {
|
|||
FROM \(GroupMember.self)
|
||||
JOIN \(Profile.self) ON \(profile[.id]) = \(groupMember[.profileId])
|
||||
WHERE (
|
||||
\(SQL("\(groupMember[.role]) = \(targetRole)")) AND
|
||||
\(groupMember[.groupId]) = \(thread[.id]) AND
|
||||
\(SQL("\(groupMember[.role]) = \(targetRole)")) AND
|
||||
\(groupMember[.profileId]) != \(userPublicKey)
|
||||
)
|
||||
) OR
|
||||
|
@ -121,8 +121,8 @@ public class HomeViewModel {
|
|||
FROM \(GroupMember.self)
|
||||
JOIN \(Profile.self) ON \(profile[.id]) = \(groupMember[.profileId])
|
||||
WHERE (
|
||||
\(SQL("\(groupMember[.role]) = \(targetRole)")) AND
|
||||
\(groupMember[.groupId]) = \(thread[.id]) AND
|
||||
\(SQL("\(groupMember[.role]) = \(targetRole)")) AND
|
||||
\(groupMember[.profileId]) != \(userPublicKey)
|
||||
)
|
||||
) OR ( -- Fallback profile
|
||||
|
@ -132,8 +132,8 @@ public class HomeViewModel {
|
|||
FROM \(GroupMember.self)
|
||||
JOIN \(Profile.self) ON \(profile[.id]) = \(groupMember[.profileId])
|
||||
WHERE (
|
||||
\(SQL("\(groupMember[.role]) = \(targetRole)")) AND
|
||||
\(groupMember[.groupId]) = \(thread[.id]) AND
|
||||
\(SQL("\(groupMember[.role]) = \(targetRole)")) AND
|
||||
\(groupMember[.profileId]) != \(userPublicKey)
|
||||
)
|
||||
) = 1
|
||||
|
|
|
@ -170,6 +170,7 @@ extension MediaInfoVC {
|
|||
}
|
||||
|
||||
// MARK: - Interaction
|
||||
|
||||
public func update(attachment: Attachment?) {
|
||||
guard let attachment: Attachment = attachment else { return }
|
||||
|
||||
|
@ -177,7 +178,7 @@ extension MediaInfoVC {
|
|||
|
||||
fileIdLabel.text = attachment.serverId
|
||||
fileTypeLabel.text = attachment.contentType
|
||||
fileSizeLabel.text = OWSFormat.formatFileSize(attachment.byteCount)
|
||||
fileSizeLabel.text = Format.fileSize(attachment.byteCount)
|
||||
resolutionLabel.text = {
|
||||
guard let width = attachment.width, let height = attachment.height else { return "N/A" }
|
||||
return "\(width)×\(height)"
|
||||
|
|
|
@ -539,7 +539,7 @@ class NotificationActionHandler {
|
|||
}
|
||||
|
||||
return Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.main) { db in
|
||||
.writePublisher { db in
|
||||
let interaction: Interaction = try Interaction(
|
||||
threadId: threadId,
|
||||
authorId: getUserHexEncodedPublicKey(db),
|
||||
|
@ -575,7 +575,9 @@ class NotificationActionHandler {
|
|||
threadVariant: thread.variant
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.flatMap { MessageSender.sendImmediate(preparedSendData: $0) }
|
||||
.receive(on: DispatchQueue.main)
|
||||
.handleEvents(
|
||||
receiveCompletion: { result in
|
||||
switch result {
|
||||
|
@ -612,7 +614,7 @@ class NotificationActionHandler {
|
|||
|
||||
private func markAsRead(threadId: String) -> AnyPublisher<Void, Error> {
|
||||
return Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisher { db in
|
||||
guard
|
||||
let threadVariant: SessionThread.Variant = try SessionThread
|
||||
.filter(id: threadId)
|
||||
|
|
|
@ -94,7 +94,7 @@ enum Onboarding {
|
|||
}
|
||||
}
|
||||
.flatMap { _ -> AnyPublisher<String?, Error> in
|
||||
Storage.shared.readPublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
Storage.shared.readPublisher { db in
|
||||
try Profile
|
||||
.filter(id: userPublicKey)
|
||||
.select(.name)
|
||||
|
|
|
@ -169,7 +169,7 @@ final class JoinOpenGroupVC: BaseVC, UIPageViewControllerDataSource, UIPageViewC
|
|||
|
||||
ModalActivityIndicatorViewController.present(fromViewController: navigationController, canCancel: false) { [weak self] _ in
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisherFlatMap { db in
|
||||
OpenGroupManager.shared.add(
|
||||
db,
|
||||
roomToken: roomToken,
|
||||
|
@ -178,6 +178,7 @@ final class JoinOpenGroupVC: BaseVC, UIPageViewControllerDataSource, UIPageViewC
|
|||
calledFromConfigHandling: false
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receive(on: DispatchQueue.main)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
|
|
|
@ -322,7 +322,7 @@ extension OpenGroupSuggestionGrid {
|
|||
Publishers
|
||||
.MergeMany(
|
||||
Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupManager
|
||||
.roomImage(db, fileId: imageId, for: room.token, on: OpenGroupAPI.defaultServer)
|
||||
}
|
||||
|
@ -335,6 +335,7 @@ extension OpenGroupSuggestionGrid {
|
|||
.delay(for: .milliseconds(10), scheduler: DispatchQueue.main)
|
||||
.eraseToAnyPublisher()
|
||||
)
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.receiveOnMain(immediately: true)
|
||||
.sinkUntilComplete(
|
||||
receiveValue: { [weak self] imageData, hasData in
|
||||
|
|
|
@ -430,6 +430,13 @@ public final class FullConversationCell: UITableViewCell, SwipeActionOptimisticC
|
|||
typingIndicatorView.startAnimation()
|
||||
}
|
||||
else {
|
||||
displayNameLabel.themeTextColor = {
|
||||
guard cellViewModel.interactionVariant != .infoClosedGroupCurrentUserLeaving else {
|
||||
return .textSecondary
|
||||
}
|
||||
|
||||
return .textPrimary
|
||||
}()
|
||||
typingIndicatorView.isHidden = true
|
||||
typingIndicatorView.stopAnimation()
|
||||
|
||||
|
@ -437,8 +444,6 @@ public final class FullConversationCell: UITableViewCell, SwipeActionOptimisticC
|
|||
if cellViewModel.interactionVariant == .infoClosedGroupCurrentUserLeaving {
|
||||
guard let textColor: UIColor = theme.color(for: .textSecondary) else { return }
|
||||
|
||||
self?.displayNameLabel.themeTextColor = .textSecondary
|
||||
|
||||
snippetLabel?.attributedText = self?.getSnippet(
|
||||
cellViewModel: cellViewModel,
|
||||
textColor: textColor
|
||||
|
@ -446,8 +451,6 @@ public final class FullConversationCell: UITableViewCell, SwipeActionOptimisticC
|
|||
} else if cellViewModel.interactionVariant == .infoClosedGroupCurrentUserErrorLeaving {
|
||||
guard let textColor: UIColor = theme.color(for: .danger) else { return }
|
||||
|
||||
self?.displayNameLabel.themeTextColor = .textPrimary
|
||||
|
||||
snippetLabel?.attributedText = self?.getSnippet(
|
||||
cellViewModel: cellViewModel,
|
||||
textColor: textColor
|
||||
|
@ -455,8 +458,6 @@ public final class FullConversationCell: UITableViewCell, SwipeActionOptimisticC
|
|||
} else {
|
||||
guard let textColor: UIColor = theme.color(for: .textPrimary) else { return }
|
||||
|
||||
self?.displayNameLabel.themeTextColor = .textPrimary
|
||||
|
||||
snippetLabel?.attributedText = self?.getSnippet(
|
||||
cellViewModel: cellViewModel,
|
||||
textColor: textColor
|
||||
|
|
|
@ -286,12 +286,12 @@ public extension UIContextualAction {
|
|||
let threadIsMessageRequest: Bool = (threadViewModel.threadIsMessageRequest == true)
|
||||
let contactChanges: [ConfigColumnAssignment] = [
|
||||
Contact.Columns.isBlocked.set(to: !threadIsBlocked),
|
||||
(!threadIsMessageRequest ? nil : Contact.Columns.isApproved.set(to: false)),
|
||||
|
||||
// Note: We set this to true so the current user will be able to send a
|
||||
// message to the person who originally sent them the message request in
|
||||
// the future if they unblock them
|
||||
(!threadIsMessageRequest ? nil : Contact.Columns.didApproveMe.set(to: true))
|
||||
/// **Note:** We set `didApproveMe` to `true` so the current user will be able to send a
|
||||
/// message to the person who originally sent them the message request in the future if they
|
||||
/// unblock them
|
||||
(!threadIsMessageRequest ? nil : Contact.Columns.didApproveMe.set(to: true)),
|
||||
(!threadIsMessageRequest ? nil : Contact.Columns.isApproved.set(to: false))
|
||||
].compactMap { $0 }
|
||||
|
||||
let performBlock: (UIViewController?) -> () = { viewController in
|
||||
|
@ -305,7 +305,7 @@ public extension UIContextualAction {
|
|||
// Delay the change to give the cell "unswipe" animation some time to complete
|
||||
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + unswipeAnimationDelay) {
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisher { db in
|
||||
// Create the contact if it doesn't exist
|
||||
try Contact
|
||||
.fetchOrCreate(db, id: threadViewModel.threadId)
|
||||
|
@ -325,6 +325,7 @@ public extension UIContextualAction {
|
|||
)
|
||||
}
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.sinkUntilComplete()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -141,14 +141,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
|
|||
interactionId: interactionId
|
||||
)
|
||||
)
|
||||
.handleEvents(
|
||||
receiveCompletion: { result in
|
||||
switch result {
|
||||
case .failure: break
|
||||
case .finished: SNLog("[Calls] Pre-offer message has been sent.")
|
||||
}
|
||||
}
|
||||
)
|
||||
.handleEvents(receiveOutput: { _ in SNLog("[Calls] Pre-offer message has been sent.") })
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
|
@ -186,7 +179,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
|
|||
}
|
||||
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisher { db in
|
||||
try MessageSender
|
||||
.preparedSendData(
|
||||
db,
|
||||
|
@ -225,14 +218,12 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
|
|||
let mediaConstraints: RTCMediaConstraints = mediaConstraints(false)
|
||||
|
||||
return Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db -> AnyPublisher<SessionThread, Error> in
|
||||
.readPublisher { db -> SessionThread in
|
||||
guard let thread: SessionThread = try? SessionThread.fetchOne(db, id: sessionId) else {
|
||||
throw WebRTCSessionError.noThread
|
||||
}
|
||||
|
||||
return Just(thread)
|
||||
.setFailureType(to: Error.self)
|
||||
.eraseToAnyPublisher()
|
||||
return thread
|
||||
}
|
||||
.flatMap { [weak self] thread in
|
||||
Future<Void, Error> { resolver in
|
||||
|
@ -254,7 +245,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
|
|||
}
|
||||
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisher { db in
|
||||
try MessageSender
|
||||
.preparedSendData(
|
||||
db,
|
||||
|
@ -305,36 +296,33 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
|
|||
self.queuedICECandidates.removeAll()
|
||||
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisher { db in
|
||||
guard let thread: SessionThread = try SessionThread.fetchOne(db, id: contactSessionId) else {
|
||||
throw WebRTCSessionError.noThread
|
||||
}
|
||||
|
||||
SNLog("[Calls] Batch sending \(candidates.count) ICE candidates.")
|
||||
|
||||
return Just(
|
||||
try MessageSender
|
||||
.preparedSendData(
|
||||
db,
|
||||
message: CallMessage(
|
||||
uuid: uuid,
|
||||
kind: .iceCandidates(
|
||||
sdpMLineIndexes: candidates.map { UInt32($0.sdpMLineIndex) },
|
||||
sdpMids: candidates.map { $0.sdpMid! }
|
||||
),
|
||||
sdps: candidates.map { $0.sdp }
|
||||
return try MessageSender
|
||||
.preparedSendData(
|
||||
db,
|
||||
message: CallMessage(
|
||||
uuid: uuid,
|
||||
kind: .iceCandidates(
|
||||
sdpMLineIndexes: candidates.map { UInt32($0.sdpMLineIndex) },
|
||||
sdpMids: candidates.map { $0.sdpMid! }
|
||||
),
|
||||
to: try Message.Destination
|
||||
.from(db, threadId: thread.id, threadVariant: thread.variant),
|
||||
namespace: try Message.Destination
|
||||
.from(db, threadId: thread.id, threadVariant: thread.variant)
|
||||
.defaultNamespace,
|
||||
interactionId: nil
|
||||
)
|
||||
sdps: candidates.map { $0.sdp }
|
||||
),
|
||||
to: try Message.Destination
|
||||
.from(db, threadId: thread.id, threadVariant: thread.variant),
|
||||
namespace: try Message.Destination
|
||||
.from(db, threadId: thread.id, threadVariant: thread.variant)
|
||||
.defaultNamespace,
|
||||
interactionId: nil
|
||||
)
|
||||
.setFailureType(to: Error.self)
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.flatMap { MessageSender.sendImmediate(preparedSendData: $0) }
|
||||
.sinkUntilComplete()
|
||||
}
|
||||
|
|
|
@ -11,7 +11,7 @@ enum _013_SessionUtilChanges: Migration {
|
|||
static let target: TargetMigrations.Identifier = .messagingKit
|
||||
static let identifier: String = "SessionUtilChanges"
|
||||
static let needsConfigSync: Bool = true
|
||||
static let minExpectedRunDuration: TimeInterval = 0.1
|
||||
static let minExpectedRunDuration: TimeInterval = 0.4
|
||||
|
||||
static func migrate(_ db: Database) throws {
|
||||
// Add `markedAsUnread` to the thread table
|
||||
|
@ -139,6 +139,12 @@ enum _013_SessionUtilChanges: Migration {
|
|||
columns: [.threadId, .threadKeyPairHash]
|
||||
)
|
||||
|
||||
// Add an index for the 'Quote' table to speed up queries
|
||||
try db.createIndex(
|
||||
on: Quote.self,
|
||||
columns: [.timestampMs]
|
||||
)
|
||||
|
||||
// New table for storing the latest config dump for each type
|
||||
try db.create(table: ConfigDump.self) { t in
|
||||
t.column(.variant, .text)
|
||||
|
@ -163,13 +169,46 @@ enum _013_SessionUtilChanges: Migration {
|
|||
// If we don't have an ed25519 key then no need to create cached dump data
|
||||
let userPublicKey: String = getUserHexEncodedPublicKey(db)
|
||||
|
||||
// Remove any hidden threads to avoid syncing them (they are basically shadow threads created
|
||||
// by starting a conversation but not sending a message so can just be cleared out)
|
||||
try SessionThread
|
||||
/// Remove any hidden threads to avoid syncing them (they are basically shadow threads created by starting a conversation
|
||||
/// but not sending a message so can just be cleared out)
|
||||
///
|
||||
/// **Note:** Our settings defer foreign key checks to the end of the migration, unfortunately the `PRAGMA foreign_keys`
|
||||
/// setting is also a no-op during transactions so we can't enable it for the delete action, as a result we need to manually clean
|
||||
/// up any data associated with the threads we want to delete, at the time of this migration the following tables should cascade
|
||||
/// delete when a thread is deleted:
|
||||
/// - DisappearingMessagesConfiguration
|
||||
/// - ClosedGroup
|
||||
/// - GroupMember
|
||||
/// - Interaction
|
||||
/// - ThreadTypingIndicator
|
||||
/// - PendingReadReceipt
|
||||
let threadIdsToDelete: [String] = try SessionThread
|
||||
.filter(
|
||||
SessionThread.Columns.shouldBeVisible == false &&
|
||||
SessionThread.Columns.id != userPublicKey
|
||||
)
|
||||
.select(.id)
|
||||
.asRequest(of: String.self)
|
||||
.fetchAll(db)
|
||||
try SessionThread
|
||||
.deleteAll(db, ids: threadIdsToDelete)
|
||||
try DisappearingMessagesConfiguration
|
||||
.filter(threadIdsToDelete.contains(DisappearingMessagesConfiguration.Columns.threadId))
|
||||
.deleteAll(db)
|
||||
try ClosedGroup
|
||||
.filter(threadIdsToDelete.contains(ClosedGroup.Columns.threadId))
|
||||
.deleteAll(db)
|
||||
try GroupMember
|
||||
.filter(threadIdsToDelete.contains(GroupMember.Columns.groupId))
|
||||
.deleteAll(db)
|
||||
try Interaction
|
||||
.filter(threadIdsToDelete.contains(Interaction.Columns.threadId))
|
||||
.deleteAll(db)
|
||||
try ThreadTypingIndicator
|
||||
.filter(threadIdsToDelete.contains(ThreadTypingIndicator.Columns.threadId))
|
||||
.deleteAll(db)
|
||||
try PendingReadReceipt
|
||||
.filter(threadIdsToDelete.contains(PendingReadReceipt.Columns.threadId))
|
||||
.deleteAll(db)
|
||||
|
||||
/// There was previously a bug which allowed users to fully delete the 'Note to Self' conversation but we don't want that, so
|
||||
|
|
|
@ -12,7 +12,7 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
|
|||
static let target: TargetMigrations.Identifier = .messagingKit
|
||||
static let identifier: String = "GenerateInitialUserConfigDumps"
|
||||
static let needsConfigSync: Bool = true
|
||||
static let minExpectedRunDuration: TimeInterval = 0.1 // TODO: Need to test this
|
||||
static let minExpectedRunDuration: TimeInterval = 4.0
|
||||
|
||||
static func migrate(_ db: Database) throws {
|
||||
// If we have no ed25519 key then there is no need to create cached dump data
|
||||
|
@ -40,8 +40,11 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
|
|||
)
|
||||
|
||||
try SessionUtil.updateNoteToSelf(
|
||||
hidden: (allThreads[userPublicKey]?.shouldBeVisible == true),
|
||||
priority: Int32(allThreads[userPublicKey]?.pinnedPriority ?? 0),
|
||||
priority: {
|
||||
guard allThreads[userPublicKey]?.shouldBeVisible == true else { return SessionUtil.hiddenPriority }
|
||||
|
||||
return Int32(allThreads[userPublicKey]?.pinnedPriority ?? 0)
|
||||
}(),
|
||||
in: conf
|
||||
)
|
||||
|
||||
|
@ -78,16 +81,32 @@ enum _014_GenerateInitialUserConfigDumps: Migration {
|
|||
.including(optional: Contact.profile)
|
||||
.asRequest(of: ContactInfo.self)
|
||||
.fetchAll(db)
|
||||
let threadIdsNeedingContacts: [String] = validContactIds
|
||||
.filter { contactId in !contactsData.contains(where: { $0.contact.id == contactId }) }
|
||||
|
||||
try SessionUtil.upsert(
|
||||
contactData: contactsData
|
||||
.appending(
|
||||
contentsOf: threadIdsNeedingContacts
|
||||
.map { contactId in
|
||||
ContactInfo(
|
||||
contact: Contact.fetchOrCreate(db, id: contactId),
|
||||
profile: nil
|
||||
)
|
||||
}
|
||||
)
|
||||
.map { data in
|
||||
SessionUtil.SyncedContactInfo(
|
||||
id: data.contact.id,
|
||||
contact: data.contact,
|
||||
profile: data.profile,
|
||||
hidden: (allThreads[data.contact.id]?.shouldBeVisible == true),
|
||||
priority: Int32(allThreads[data.contact.id]?.pinnedPriority ?? 0)
|
||||
priority: {
|
||||
guard allThreads[data.contact.id]?.shouldBeVisible == true else {
|
||||
return SessionUtil.hiddenPriority
|
||||
}
|
||||
|
||||
return Int32(allThreads[data.contact.id]?.pinnedPriority ?? 0)
|
||||
}()
|
||||
)
|
||||
},
|
||||
in: conf
|
||||
|
|
|
@ -1022,10 +1022,7 @@ extension Attachment {
|
|||
}
|
||||
}
|
||||
|
||||
internal func upload(
|
||||
to destination: Attachment.Destination,
|
||||
queue: DispatchQueue
|
||||
) -> AnyPublisher<String?, Error> {
|
||||
internal func upload(to destination: Attachment.Destination) -> AnyPublisher<String?, Error> {
|
||||
// This can occur if an AttachmentUploadJob was explicitly created for a message
|
||||
dependent on the attachment being uploaded (in this case the attachment has
|
||||
// already been uploaded so just succeed)
|
||||
|
@ -1045,7 +1042,7 @@ extension Attachment {
|
|||
let attachmentId: String = self.id
|
||||
|
||||
return Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: queue) { db -> AnyPublisher<(String?, Data?, Data?), Error> in
|
||||
.writePublisherFlatMap { db -> AnyPublisher<(String?, Data?, Data?), Error> in
|
||||
// If the attachment is a downloaded attachment, check if it came from
|
||||
// the server and if so just succeed immediately (no use re-uploading
|
||||
// an attachment that is already present on the server) - or if we want
|
||||
|
@ -1136,14 +1133,13 @@ extension Attachment {
|
|||
.eraseToAnyPublisher()
|
||||
}
|
||||
}
|
||||
.receive(on: queue)
|
||||
.flatMap { fileId, encryptionKey, digest -> AnyPublisher<String?, Error> in
|
||||
/// Save the final upload info
|
||||
///
|
||||
/// **Note:** We **MUST** use the `.with` function here to ensure the `isValid` flag is
|
||||
/// updated correctly
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: queue) { db in
|
||||
.writePublisher { db in
|
||||
try self
|
||||
.with(
|
||||
serverId: fileId,
|
||||
|
|
|
@ -59,7 +59,7 @@ public enum FileServerAPI {
|
|||
]
|
||||
)
|
||||
|
||||
return send(request, serverPublicKey: serverPublicKey, timeout: HTTP.timeout)
|
||||
return send(request, serverPublicKey: serverPublicKey, timeout: HTTP.defaultTimeout)
|
||||
.decoded(as: VersionResponse.self)
|
||||
.map { response in response.version }
|
||||
.eraseToAnyPublisher()
|
||||
|
|
|
@ -94,7 +94,7 @@ public enum AttachmentDownloadJob: JobExecutor {
|
|||
else { throw AttachmentDownloadError.invalidUrl }
|
||||
|
||||
return Storage.shared
|
||||
.readPublisher(receiveOn: queue) { db in try OpenGroup.fetchOne(db, id: threadId) }
|
||||
.readPublisher { db in try OpenGroup.fetchOne(db, id: threadId) }
|
||||
.flatMap { maybeOpenGroup -> AnyPublisher<Data, Error> in
|
||||
guard let openGroup: OpenGroup = maybeOpenGroup else {
|
||||
return FileServerAPI
|
||||
|
@ -106,7 +106,7 @@ public enum AttachmentDownloadJob: JobExecutor {
|
|||
}
|
||||
|
||||
return Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: queue) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.downloadFile(
|
||||
db,
|
||||
|
@ -120,6 +120,7 @@ public enum AttachmentDownloadJob: JobExecutor {
|
|||
}
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: queue)
|
||||
.receive(on: queue)
|
||||
.tryMap { data -> Void in
|
||||
// Store the encrypted data temporarily
|
||||
|
|
|
@ -70,40 +70,40 @@ public enum AttachmentUploadJob: JobExecutor {
|
|||
// Note: In the AttachmentUploadJob we intentionally don't provide our own db instance to prevent
|
||||
// reentrancy issues when the success/failure closures get called before the upload as the JobRunner
|
||||
// will attempt to update the state of the job immediately
|
||||
attachment.upload(
|
||||
to: (openGroup.map { .openGroup($0) } ?? .fileServer),
|
||||
queue: queue
|
||||
)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
switch result {
|
||||
case .failure(let error):
|
||||
// If this upload is related to sending a message then trigger the
|
||||
// 'handleFailedMessageSend' logic as we want to ensure the message
|
||||
// has the correct delivery status
|
||||
Storage.shared.read { db in
|
||||
guard
|
||||
let sendJob: Job = try Job.fetchOne(db, id: details.messageSendJobId),
|
||||
let sendJobDetails: Data = sendJob.details,
|
||||
let details: MessageSendJob.Details = try? JSONDecoder()
|
||||
.decode(MessageSendJob.Details.self, from: sendJobDetails)
|
||||
else { return }
|
||||
attachment
|
||||
.upload(to: (openGroup.map { .openGroup($0) } ?? .fileServer))
|
||||
.subscribe(on: queue)
|
||||
.receive(on: queue)
|
||||
.sinkUntilComplete(
|
||||
receiveCompletion: { result in
|
||||
switch result {
|
||||
case .failure(let error):
|
||||
// If this upload is related to sending a message then trigger the
|
||||
// 'handleFailedMessageSend' logic as we want to ensure the message
|
||||
// has the correct delivery status
|
||||
Storage.shared.read { db in
|
||||
guard
|
||||
let sendJob: Job = try Job.fetchOne(db, id: details.messageSendJobId),
|
||||
let sendJobDetails: Data = sendJob.details,
|
||||
let details: MessageSendJob.Details = try? JSONDecoder()
|
||||
.decode(MessageSendJob.Details.self, from: sendJobDetails)
|
||||
else { return }
|
||||
|
||||
MessageSender.handleFailedMessageSend(
|
||||
db,
|
||||
message: details.message,
|
||||
with: .other(error),
|
||||
interactionId: interactionId,
|
||||
isSyncMessage: details.isSyncMessage
|
||||
)
|
||||
}
|
||||
|
||||
MessageSender.handleFailedMessageSend(
|
||||
db,
|
||||
message: details.message,
|
||||
with: .other(error),
|
||||
interactionId: interactionId,
|
||||
isSyncMessage: details.isSyncMessage
|
||||
)
|
||||
}
|
||||
failure(job, error, false)
|
||||
|
||||
failure(job, error, false)
|
||||
|
||||
case .finished: success(job, false)
|
||||
case .finished: success(job, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ public enum ConfigurationSyncJob: JobExecutor {
|
|||
.asSet()
|
||||
|
||||
Storage.shared
|
||||
.readPublisher(receiveOn: queue) { db in
|
||||
.readPublisher { db in
|
||||
try pendingConfigChanges.map { change -> MessageSender.PreparedSendData in
|
||||
try MessageSender.preparedSendData(
|
||||
db,
|
||||
|
@ -89,6 +89,7 @@ public enum ConfigurationSyncJob: JobExecutor {
|
|||
allObsoleteHashes: Array(allObsoleteHashes)
|
||||
)
|
||||
}
|
||||
.subscribe(on: queue)
|
||||
.receive(on: queue)
|
||||
.map { (response: HTTP.BatchResponse) -> [ConfigDump] in
|
||||
/// The number of responses returned might not match the number of changes sent but they will be returned
|
||||
|
@ -233,7 +234,7 @@ public extension ConfigurationSyncJob {
|
|||
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
|
||||
guard Features.useSharedUtilForUserConfig else {
|
||||
return Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db -> MessageSender.PreparedSendData in
|
||||
.writePublisher { db -> MessageSender.PreparedSendData in
|
||||
// If we don't have a userKeyPair yet then there is no need to sync the configuration
|
||||
// as the user doesn't exist yet (this will get triggered on the first launch of a
|
||||
// fresh install due to the migrations getting run)
|
||||
|
|
|
@ -32,7 +32,7 @@ public enum GroupLeavingJob: JobExecutor {
|
|||
let destination: Message.Destination = .closedGroup(groupPublicKey: threadId)
|
||||
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: queue) { db in
|
||||
.writePublisher { db in
|
||||
guard (try? SessionThread.exists(db, id: threadId)) == true else {
|
||||
SNLog("Can't update nonexistent closed group.")
|
||||
throw MessageSenderError.noThread
|
||||
|
|
|
@ -163,7 +163,7 @@ public enum MessageSendJob: JobExecutor {
|
|||
/// **Note:** No need to upload attachments as part of this process as the above logic splits that out into it's own job
|
||||
/// so we shouldn't get here until attachments have already been uploaded
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: queue) { db in
|
||||
.writePublisher { db in
|
||||
try MessageSender.preparedSendData(
|
||||
db,
|
||||
message: details.message,
|
||||
|
|
|
@ -36,7 +36,7 @@ public enum SendReadReceiptsJob: JobExecutor {
|
|||
}
|
||||
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: queue) { db in
|
||||
.writePublisher { db in
|
||||
try MessageSender.preparedSendData(
|
||||
db,
|
||||
message: ReadReceipt(
|
||||
|
|
|
@ -37,8 +37,8 @@ internal extension SessionUtil {
|
|||
String: (
|
||||
contact: Contact,
|
||||
profile: Profile,
|
||||
shouldBeVisible: Bool,
|
||||
priority: Int32
|
||||
priority: Int32,
|
||||
created: TimeInterval
|
||||
)
|
||||
]
|
||||
|
||||
|
@ -77,8 +77,8 @@ internal extension SessionUtil {
|
|||
contactData[contactId] = (
|
||||
contactResult,
|
||||
profileResult,
|
||||
(contact.hidden == false),
|
||||
contact.priority
|
||||
contact.priority,
|
||||
TimeInterval(contact.created)
|
||||
)
|
||||
contacts_iterator_advance(contactIterator)
|
||||
}
|
||||
|
@ -165,8 +165,9 @@ internal extension SessionUtil {
|
|||
.asRequest(of: PriorityVisibilityInfo.self)
|
||||
.fetchOne(db)
|
||||
let threadExists: Bool = (threadInfo != nil)
|
||||
|
||||
switch (data.shouldBeVisible, threadExists) {
|
||||
let updatedShouldBeVisible: Bool = SessionUtil.shouldBeVisible(priority: data.priority)
|
||||
|
||||
switch (updatedShouldBeVisible, threadExists) {
|
||||
case (false, true):
|
||||
SessionUtil.kickFromConversationUIIfNeeded(removedThreadIds: [contact.id])
|
||||
|
||||
|
@ -183,14 +184,15 @@ internal extension SessionUtil {
|
|||
try SessionThread(
|
||||
id: contact.id,
|
||||
variant: .contact,
|
||||
creationDateTimestamp: data.created,
|
||||
shouldBeVisible: true,
|
||||
pinnedPriority: data.priority
|
||||
).save(db)
|
||||
|
||||
case (true, true):
|
||||
let changes: [ConfigColumnAssignment] = [
|
||||
(threadInfo?.shouldBeVisible == data.shouldBeVisible ? nil :
|
||||
SessionThread.Columns.shouldBeVisible.set(to: data.shouldBeVisible)
|
||||
(threadInfo?.shouldBeVisible == updatedShouldBeVisible ? nil :
|
||||
SessionThread.Columns.shouldBeVisible.set(to: updatedShouldBeVisible)
|
||||
),
|
||||
(threadInfo?.pinnedPriority == data.priority ? nil :
|
||||
SessionThread.Columns.pinnedPriority.set(to: data.priority)
|
||||
|
@ -208,7 +210,7 @@ internal extension SessionUtil {
|
|||
}
|
||||
}
|
||||
|
||||
// Delete any contact records which have been removed
|
||||
// Delete any contact/thread records which aren't in the config message
|
||||
let syncedContactIds: [String] = targetContactData
|
||||
.map { $0.key }
|
||||
.appending(userPublicKey)
|
||||
|
@ -217,17 +219,25 @@ internal extension SessionUtil {
|
|||
.select(.id)
|
||||
.asRequest(of: String.self)
|
||||
.fetchAll(db)
|
||||
let threadIdsToRemove: [String] = try SessionThread
|
||||
.filter(!syncedContactIds.contains(SessionThread.Columns.id))
|
||||
.filter(SessionThread.Columns.variant == SessionThread.Variant.contact)
|
||||
.filter(!SessionThread.Columns.id.like("\(SessionId.Prefix.blinded.rawValue)%"))
|
||||
.select(.id)
|
||||
.asRequest(of: String.self)
|
||||
.fetchAll(db)
|
||||
let combinedIds: [String] = contactIdsToRemove.appending(contentsOf: threadIdsToRemove)
|
||||
|
||||
if !contactIdsToRemove.isEmpty {
|
||||
SessionUtil.kickFromConversationUIIfNeeded(removedThreadIds: contactIdsToRemove)
|
||||
if !combinedIds.isEmpty {
|
||||
SessionUtil.kickFromConversationUIIfNeeded(removedThreadIds: combinedIds)
|
||||
|
||||
try Contact
|
||||
.filter(ids: contactIdsToRemove)
|
||||
.filter(ids: combinedIds)
|
||||
.deleteAll(db)
|
||||
|
||||
// Also need to remove any 'nickname' values since they are associated to contact data
|
||||
try Profile
|
||||
.filter(ids: contactIdsToRemove)
|
||||
.filter(ids: combinedIds)
|
||||
.updateAll(
|
||||
db,
|
||||
Profile.Columns.nickname.set(to: nil)
|
||||
|
@ -237,13 +247,13 @@ internal extension SessionUtil {
|
|||
try SessionThread
|
||||
.deleteOrLeave(
|
||||
db,
|
||||
threadIds: contactIdsToRemove,
|
||||
threadIds: combinedIds,
|
||||
threadVariant: .contact,
|
||||
groupLeaveType: .forced,
|
||||
calledFromConfigHandling: true
|
||||
)
|
||||
|
||||
try SessionUtil.remove(db, volatileContactIds: contactIdsToRemove)
|
||||
try SessionUtil.remove(db, volatileContactIds: combinedIds)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -268,13 +278,15 @@ internal extension SessionUtil {
|
|||
guard !targetContacts.isEmpty else { return }
|
||||
|
||||
// Update the name
|
||||
targetContacts
|
||||
try targetContacts
|
||||
.forEach { info in
|
||||
var sessionId: [CChar] = info.id.cArray
|
||||
var sessionId: [CChar] = info.id.cArray.nullTerminated()
|
||||
var contact: contacts_contact = contacts_contact()
|
||||
guard contacts_get_or_construct(conf, &contact, &sessionId) else {
|
||||
SNLog("Unable to upsert contact from Config Message")
|
||||
return
|
||||
/// It looks like there are some situations where this object might not get created correctly (and
|
||||
/// will throw due to the implicit unwrapping) as a result we put it in a guard and throw instead
|
||||
SNLog("Unable to upsert contact to SessionUtil: \(SessionUtil.lastError(conf))")
|
||||
throw SessionUtilError.getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
||||
// Assign all properties to match the updated contact (if there is one)
|
||||
|
@ -304,7 +316,10 @@ internal extension SessionUtil {
|
|||
// Download the profile picture if needed (this can be triggered within
|
||||
// database reads/writes so dispatch the download to a separate queue to
|
||||
// prevent blocking)
|
||||
if oldAvatarUrl != updatedProfile.profilePictureUrl || oldAvatarKey != updatedProfile.profileEncryptionKey {
|
||||
if
|
||||
oldAvatarUrl != (updatedProfile.profilePictureUrl ?? "") ||
|
||||
oldAvatarKey != (updatedProfile.profileEncryptionKey ?? Data(repeating: 0, count: ProfileManager.avatarAES256KeyByteLength))
|
||||
{
|
||||
DispatchQueue.global(qos: .background).async {
|
||||
ProfileManager.downloadAvatar(for: updatedProfile)
|
||||
}
|
||||
|
@ -315,7 +330,6 @@ internal extension SessionUtil {
|
|||
}
|
||||
|
||||
// Store the updated contact (can't be sure if we made any changes above)
|
||||
contact.hidden = (info.hidden ?? contact.hidden)
|
||||
contact.priority = (info.priority ?? contact.priority)
|
||||
contacts_set(conf, &contact)
|
||||
}
|
||||
|
@ -350,7 +364,7 @@ internal extension SessionUtil {
|
|||
// contacts are new/invalid, and if so, fetch any profile data we have for them
|
||||
let newContactIds: [String] = targetContacts
|
||||
.compactMap { contactData -> String? in
|
||||
var cContactId: [CChar] = contactData.id.cArray
|
||||
var cContactId: [CChar] = contactData.id.cArray.nullTerminated()
|
||||
var contact: contacts_contact = contacts_contact()
|
||||
|
||||
guard
|
||||
|
@ -454,8 +468,7 @@ public extension SessionUtil {
|
|||
.map {
|
||||
SyncedContactInfo(
|
||||
id: $0,
|
||||
hidden: true,
|
||||
priority: 0
|
||||
priority: SessionUtil.hiddenPriority
|
||||
)
|
||||
},
|
||||
in: conf
|
||||
|
@ -472,7 +485,7 @@ public extension SessionUtil {
|
|||
publicKey: getUserHexEncodedPublicKey(db)
|
||||
) { conf in
|
||||
contactIds.forEach { sessionId in
|
||||
var cSessionId: [CChar] = sessionId.cArray
|
||||
var cSessionId: [CChar] = sessionId.cArray.nullTerminated()
|
||||
|
||||
// Don't care if the contact doesn't exist
|
||||
contacts_erase(conf, &cSessionId)
|
||||
|
@ -488,20 +501,17 @@ extension SessionUtil {
|
|||
let id: String
|
||||
let contact: Contact?
|
||||
let profile: Profile?
|
||||
let hidden: Bool?
|
||||
let priority: Int32?
|
||||
|
||||
init(
|
||||
id: String,
|
||||
contact: Contact? = nil,
|
||||
profile: Profile? = nil,
|
||||
hidden: Bool? = nil,
|
||||
priority: Int32? = nil
|
||||
) {
|
||||
self.id = id
|
||||
self.contact = contact
|
||||
self.profile = profile
|
||||
self.hidden = hidden
|
||||
self.priority = priority
|
||||
}
|
||||
}
|
||||
|
|
|
@ -184,16 +184,18 @@ internal extension SessionUtil {
|
|||
}
|
||||
}
|
||||
|
||||
validChanges.forEach { threadInfo in
|
||||
var cThreadId: [CChar] = threadInfo.threadId.cArray
|
||||
try validChanges.forEach { threadInfo in
|
||||
var cThreadId: [CChar] = threadInfo.threadId.cArray.nullTerminated()
|
||||
|
||||
switch threadInfo.variant {
|
||||
case .contact:
|
||||
var oneToOne: convo_info_volatile_1to1 = convo_info_volatile_1to1()
|
||||
|
||||
guard convo_info_volatile_get_or_construct_1to1(conf, &oneToOne, &cThreadId) else {
|
||||
SNLog("Unable to create contact conversation when updating last read timestamp")
|
||||
return
|
||||
/// It looks like there are some situations where this object might not get created correctly (and
|
||||
/// will throw due to the implicit unwrapping) as a result we put it in a guard and throw instead
|
||||
SNLog("Unable to upsert contact volatile info to SessionUtil: \(SessionUtil.lastError(conf))")
|
||||
throw SessionUtilError.getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
||||
threadInfo.changes.forEach { change in
|
||||
|
@ -211,8 +213,10 @@ internal extension SessionUtil {
|
|||
var legacyGroup: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
|
||||
|
||||
guard convo_info_volatile_get_or_construct_legacy_group(conf, &legacyGroup, &cThreadId) else {
|
||||
SNLog("Unable to create legacy group conversation when updating last read timestamp")
|
||||
return
|
||||
/// It looks like there are some situations where this object might not get created correctly (and
|
||||
/// will throw due to the implicit unwrapping) as a result we put it in a guard and throw instead
|
||||
SNLog("Unable to upsert legacy group volatile info to SessionUtil: \(SessionUtil.lastError(conf))")
|
||||
throw SessionUtilError.getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
||||
threadInfo.changes.forEach { change in
|
||||
|
@ -228,8 +232,8 @@ internal extension SessionUtil {
|
|||
|
||||
case .community:
|
||||
guard
|
||||
var cBaseUrl: [CChar] = threadInfo.openGroupUrlInfo?.server.cArray,
|
||||
var cRoomToken: [CChar] = threadInfo.openGroupUrlInfo?.roomToken.cArray,
|
||||
var cBaseUrl: [CChar] = threadInfo.openGroupUrlInfo?.server.cArray.nullTerminated(),
|
||||
var cRoomToken: [CChar] = threadInfo.openGroupUrlInfo?.roomToken.cArray.nullTerminated(),
|
||||
var cPubkey: [UInt8] = threadInfo.openGroupUrlInfo?.publicKey.bytes
|
||||
else {
|
||||
SNLog("Unable to create community conversation when updating last read timestamp due to missing URL info")
|
||||
|
@ -239,8 +243,10 @@ internal extension SessionUtil {
|
|||
var community: convo_info_volatile_community = convo_info_volatile_community()
|
||||
|
||||
guard convo_info_volatile_get_or_construct_community(conf, &community, &cBaseUrl, &cRoomToken, &cPubkey) else {
|
||||
SNLog("Unable to create legacy group conversation when updating last read timestamp")
|
||||
return
|
||||
/// It looks like there are some situations where this object might not get created correctly (and
|
||||
/// will throw due to the implicit unwrapping) as a result we put it in a guard and throw instead
|
||||
SNLog("Unable to upsert community volatile info to SessionUtil: \(SessionUtil.lastError(conf))")
|
||||
throw SessionUtilError.getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
||||
threadInfo.changes.forEach { change in
|
||||
|
@ -296,7 +302,7 @@ internal extension SessionUtil {
|
|||
publicKey: getUserHexEncodedPublicKey(db)
|
||||
) { conf in
|
||||
volatileContactIds.forEach { contactId in
|
||||
var cSessionId: [CChar] = contactId.cArray
|
||||
var cSessionId: [CChar] = contactId.cArray.nullTerminated()
|
||||
|
||||
// Don't care if the data doesn't exist
|
||||
convo_info_volatile_erase_1to1(conf, &cSessionId)
|
||||
|
@ -311,7 +317,7 @@ internal extension SessionUtil {
|
|||
publicKey: getUserHexEncodedPublicKey(db)
|
||||
) { conf in
|
||||
volatileLegacyGroupIds.forEach { legacyGroupId in
|
||||
var cLegacyGroupId: [CChar] = legacyGroupId.cArray
|
||||
var cLegacyGroupId: [CChar] = legacyGroupId.cArray.nullTerminated()
|
||||
|
||||
// Don't care if the data doesn't exist
|
||||
convo_info_volatile_erase_legacy_group(conf, &cLegacyGroupId)
|
||||
|
@ -326,8 +332,8 @@ internal extension SessionUtil {
|
|||
publicKey: getUserHexEncodedPublicKey(db)
|
||||
) { conf in
|
||||
volatileCommunityInfo.forEach { urlInfo in
|
||||
var cBaseUrl: [CChar] = urlInfo.server.cArray
|
||||
var cRoom: [CChar] = urlInfo.roomToken.cArray
|
||||
var cBaseUrl: [CChar] = urlInfo.server.cArray.nullTerminated()
|
||||
var cRoom: [CChar] = urlInfo.roomToken.cArray.nullTerminated()
|
||||
|
||||
// Don't care if the data doesn't exist
|
||||
convo_info_volatile_erase_community(conf, &cBaseUrl, &cRoom)
|
||||
|
@ -382,7 +388,7 @@ public extension SessionUtil {
|
|||
.map { conf in
|
||||
switch threadVariant {
|
||||
case .contact:
|
||||
var cThreadId: [CChar] = threadId.cArray
|
||||
var cThreadId: [CChar] = threadId.cArray.nullTerminated()
|
||||
var oneToOne: convo_info_volatile_1to1 = convo_info_volatile_1to1()
|
||||
guard convo_info_volatile_get_1to1(conf, &oneToOne, &cThreadId) else {
|
||||
return false
|
||||
|
@ -391,7 +397,7 @@ public extension SessionUtil {
|
|||
return (oneToOne.last_read > timestampMs)
|
||||
|
||||
case .legacyGroup:
|
||||
var cThreadId: [CChar] = threadId.cArray
|
||||
var cThreadId: [CChar] = threadId.cArray.nullTerminated()
|
||||
var legacyGroup: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
|
||||
|
||||
guard convo_info_volatile_get_legacy_group(conf, &legacyGroup, &cThreadId) else {
|
||||
|
@ -403,8 +409,8 @@ public extension SessionUtil {
|
|||
case .community:
|
||||
guard let openGroup: OpenGroup = openGroup else { return false }
|
||||
|
||||
var cBaseUrl: [CChar] = openGroup.server.cArray
|
||||
var cRoomToken: [CChar] = openGroup.roomToken.cArray
|
||||
var cBaseUrl: [CChar] = openGroup.server.cArray.nullTerminated()
|
||||
var cRoomToken: [CChar] = openGroup.roomToken.cArray.nullTerminated()
|
||||
var convoCommunity: convo_info_volatile_community = convo_info_volatile_community()
|
||||
|
||||
guard convo_info_volatile_get_community(conf, &convoCommunity, &cBaseUrl, &cRoomToken) else {
|
||||
|
|
|
@ -28,6 +28,13 @@ internal extension SessionUtil {
|
|||
return !allColumnsThatTriggerConfigUpdate.isDisjoint(with: targetColumns)
|
||||
}
|
||||
|
||||
/// A negative `priority` value indicates hidden
|
||||
static let hiddenPriority: Int32 = -1
|
||||
|
||||
static func shouldBeVisible(priority: Int32) -> Bool {
|
||||
return (priority >= 0)
|
||||
}
|
||||
|
||||
static func performAndPushChange(
|
||||
_ db: Database,
|
||||
for variant: ConfigDump.Variant,
|
||||
|
@ -37,6 +44,10 @@ internal extension SessionUtil {
|
|||
// FIXME: Remove this once `useSharedUtilForUserConfig` is permanent
|
||||
guard Features.useSharedUtilForUserConfig else { return }
|
||||
|
||||
// If we haven't completed the required migrations then do nothing (assume that
|
||||
// this is called from a migration change and we won't miss a change)
|
||||
guard SessionUtil.requiredMigrationsCompleted(db) else { return }
|
||||
|
||||
// Since we are doing direct memory manipulation we are using an `Atomic`
|
||||
// type which has blocking access in it's `mutate` closure
|
||||
let needsPush: Bool
|
||||
|
@ -109,10 +120,13 @@ internal extension SessionUtil {
|
|||
publicKey: userPublicKey
|
||||
) { conf in
|
||||
try SessionUtil.updateNoteToSelf(
|
||||
hidden: !noteToSelf.shouldBeVisible,
|
||||
priority: noteToSelf.pinnedPriority
|
||||
.map { Int32($0 == 0 ? 0 : max($0, 1)) }
|
||||
.defaulting(to: 0),
|
||||
priority: {
|
||||
guard noteToSelf.shouldBeVisible else { return SessionUtil.hiddenPriority }
|
||||
|
||||
return noteToSelf.pinnedPriority
|
||||
.map { Int32($0 == 0 ? 0 : max($0, 1)) }
|
||||
.defaulting(to: 0)
|
||||
}(),
|
||||
in: conf
|
||||
)
|
||||
}
|
||||
|
@ -133,10 +147,13 @@ internal extension SessionUtil {
|
|||
.map { thread in
|
||||
SyncedContactInfo(
|
||||
id: thread.id,
|
||||
hidden: !thread.shouldBeVisible,
|
||||
priority: thread.pinnedPriority
|
||||
.map { Int32($0 == 0 ? 0 : max($0, 1)) }
|
||||
.defaulting(to: 0)
|
||||
priority: {
|
||||
guard thread.shouldBeVisible else { return SessionUtil.hiddenPriority }
|
||||
|
||||
return thread.pinnedPriority
|
||||
.map { Int32($0 == 0 ? 0 : max($0, 1)) }
|
||||
.defaulting(to: 0)
|
||||
}()
|
||||
)
|
||||
},
|
||||
in: conf
|
||||
|
@ -285,7 +302,7 @@ internal extension SessionUtil {
|
|||
// MARK: - External Outgoing Changes
|
||||
|
||||
public extension SessionUtil {
|
||||
static func conversationExistsInConfig(
|
||||
static func conversationVisibleInConfig(
|
||||
threadId: String,
|
||||
threadVariant: SessionThread.Variant
|
||||
) -> Bool {
|
||||
|
@ -303,7 +320,7 @@ public extension SessionUtil {
|
|||
.config(for: configVariant, publicKey: getUserHexEncodedPublicKey())
|
||||
.wrappedValue
|
||||
.map { conf in
|
||||
var cThreadId: [CChar] = threadId.cArray
|
||||
var cThreadId: [CChar] = threadId.cArray.nullTerminated()
|
||||
|
||||
switch threadVariant {
|
||||
case .contact:
|
||||
|
@ -311,7 +328,10 @@ public extension SessionUtil {
|
|||
|
||||
guard contacts_get(conf, &contact, &cThreadId) else { return false }
|
||||
|
||||
return !contact.hidden
|
||||
/// If the user opens a conversation with an existing contact but doesn't send them a message
|
||||
/// then the one-to-one conversation should remain hidden so we want to delete the `SessionThread`
|
||||
/// when leaving the conversation
|
||||
return SessionUtil.shouldBeVisible(priority: contact.priority)
|
||||
|
||||
case .community:
|
||||
let maybeUrlInfo: OpenGroupUrlInfo? = Storage.shared
|
||||
|
@ -320,15 +340,17 @@ public extension SessionUtil {
|
|||
|
||||
guard let urlInfo: OpenGroupUrlInfo = maybeUrlInfo else { return false }
|
||||
|
||||
var cBaseUrl: [CChar] = urlInfo.server.cArray
|
||||
var cRoom: [CChar] = urlInfo.roomToken.cArray
|
||||
var cBaseUrl: [CChar] = urlInfo.server.cArray.nullTerminated()
|
||||
var cRoom: [CChar] = urlInfo.roomToken.cArray.nullTerminated()
|
||||
var community: ugroups_community_info = ugroups_community_info()
|
||||
|
||||
/// Not handling the `hidden` behaviour for communities so just indicate the existence
|
||||
return user_groups_get_community(conf, &community, &cBaseUrl, &cRoom)
|
||||
|
||||
case .legacyGroup:
|
||||
let groupInfo: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf, &cThreadId)
|
||||
|
||||
/// Not handling the `hidden` behaviour for legacy groups so just indicate the existence
|
||||
if groupInfo != nil {
|
||||
ugroups_legacy_group_free(groupInfo)
|
||||
return true
|
||||
|
|
|
@ -106,7 +106,8 @@ internal extension SessionUtil {
|
|||
isHidden: false
|
||||
)
|
||||
},
|
||||
priority: legacyGroup.priority
|
||||
priority: legacyGroup.priority,
|
||||
joinedAt: legacyGroup.joined_at
|
||||
)
|
||||
)
|
||||
}
|
||||
|
@ -201,7 +202,8 @@ internal extension SessionUtil {
|
|||
let name: String = group.name,
|
||||
let lastKeyPair: ClosedGroupKeyPair = group.lastKeyPair,
|
||||
let members: [GroupMember] = group.groupMembers,
|
||||
let updatedAdmins: Set<GroupMember> = group.groupAdmins?.asSet()
|
||||
let updatedAdmins: Set<GroupMember> = group.groupAdmins?.asSet(),
|
||||
let joinedAt: Int64 = group.joinedAt
|
||||
else { return }
|
||||
|
||||
if !existingLegacyGroupIds.contains(group.id) {
|
||||
|
@ -220,18 +222,28 @@ internal extension SessionUtil {
|
|||
.map { $0.profileId },
|
||||
admins: updatedAdmins.map { $0.profileId },
|
||||
expirationTimer: UInt32(group.disappearingConfig?.durationSeconds ?? 0),
|
||||
messageSentTimestamp: UInt64(latestConfigUpdateSentTimestamp * 1000),
|
||||
formationTimestampMs: UInt64((group.joinedAt ?? Int64(latestConfigUpdateSentTimestamp)) * 1000),
|
||||
calledFromConfigHandling: true
|
||||
)
|
||||
}
|
||||
else {
|
||||
// Otherwise update the existing group
|
||||
if existingLegacyGroups[group.id]?.name != name {
|
||||
let groupChanges: [ConfigColumnAssignment] = [
|
||||
(existingLegacyGroups[group.id]?.name == name ? nil :
|
||||
ClosedGroup.Columns.name.set(to: name)
|
||||
),
|
||||
(existingLegacyGroups[group.id]?.formationTimestamp == TimeInterval(joinedAt) ? nil :
|
||||
ClosedGroup.Columns.formationTimestamp.set(to: TimeInterval(joinedAt))
|
||||
)
|
||||
].compactMap { $0 }
|
||||
|
||||
// Apply any group changes
|
||||
if !groupChanges.isEmpty {
|
||||
_ = try? ClosedGroup
|
||||
.filter(id: group.id)
|
||||
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
|
||||
db,
|
||||
ClosedGroup.Columns.name.set(to: name)
|
||||
groupChanges
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -371,10 +383,15 @@ internal extension SessionUtil {
|
|||
guard conf != nil else { throw SessionUtilError.nilConfigObject }
|
||||
guard !legacyGroups.isEmpty else { return }
|
||||
|
||||
legacyGroups
|
||||
try legacyGroups
|
||||
.forEach { legacyGroup in
|
||||
var cGroupId: [CChar] = legacyGroup.id.cArray
|
||||
let userGroup: UnsafeMutablePointer<ugroups_legacy_group_info> = user_groups_get_or_construct_legacy_group(conf, &cGroupId)
|
||||
var cGroupId: [CChar] = legacyGroup.id.cArray.nullTerminated()
|
||||
guard let userGroup: UnsafeMutablePointer<ugroups_legacy_group_info> = user_groups_get_or_construct_legacy_group(conf, &cGroupId) else {
|
||||
/// It looks like there are some situations where this object might not get created correctly (and
|
||||
/// will throw due to the implicit unwrapping) as a result we put it in a guard and throw instead
|
||||
SNLog("Unable to upsert legacy group conversation to SessionUtil: \(SessionUtil.lastError(conf))")
|
||||
throw SessionUtilError.getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
||||
// Assign all properties to match the updated group (if there is one)
|
||||
if let updatedName: String = legacyGroup.name {
|
||||
|
@ -424,12 +441,12 @@ internal extension SessionUtil {
|
|||
let membersIdsToRemove: Set<String> = existingMemberIds.subtracting(memberIds)
|
||||
|
||||
membersIdsToAdd.forEach { memberId in
|
||||
var cProfileId: [CChar] = memberId.cArray
|
||||
var cProfileId: [CChar] = memberId.cArray.nullTerminated()
|
||||
ugroups_legacy_member_add(userGroup, &cProfileId, false)
|
||||
}
|
||||
|
||||
membersIdsToRemove.forEach { memberId in
|
||||
var cProfileId: [CChar] = memberId.cArray
|
||||
var cProfileId: [CChar] = memberId.cArray.nullTerminated()
|
||||
ugroups_legacy_member_remove(userGroup, &cProfileId)
|
||||
}
|
||||
}
|
||||
|
@ -444,16 +461,20 @@ internal extension SessionUtil {
|
|||
let adminIdsToRemove: Set<String> = existingAdminIds.subtracting(adminIds)
|
||||
|
||||
adminIdsToAdd.forEach { adminId in
|
||||
var cProfileId: [CChar] = adminId.cArray
|
||||
var cProfileId: [CChar] = adminId.cArray.nullTerminated()
|
||||
ugroups_legacy_member_add(userGroup, &cProfileId, true)
|
||||
}
|
||||
|
||||
adminIdsToRemove.forEach { adminId in
|
||||
var cProfileId: [CChar] = adminId.cArray
|
||||
var cProfileId: [CChar] = adminId.cArray.nullTerminated()
|
||||
ugroups_legacy_member_remove(userGroup, &cProfileId)
|
||||
}
|
||||
}
|
||||
|
||||
if let joinedAt: Int64 = legacyGroup.joinedAt {
|
||||
userGroup.pointee.joined_at = joinedAt
|
||||
}
|
||||
|
||||
// Store the updated group (can't be sure if we made any changes above)
|
||||
userGroup.pointee.priority = (legacyGroup.priority ?? userGroup.pointee.priority)
|
||||
|
||||
|
@ -469,16 +490,18 @@ internal extension SessionUtil {
|
|||
guard conf != nil else { throw SessionUtilError.nilConfigObject }
|
||||
guard !communities.isEmpty else { return }
|
||||
|
||||
communities
|
||||
try communities
|
||||
.forEach { community in
|
||||
var cBaseUrl: [CChar] = community.urlInfo.server.cArray
|
||||
var cRoom: [CChar] = community.urlInfo.roomToken.cArray
|
||||
var cBaseUrl: [CChar] = community.urlInfo.server.cArray.nullTerminated()
|
||||
var cRoom: [CChar] = community.urlInfo.roomToken.cArray.nullTerminated()
|
||||
var cPubkey: [UInt8] = Data(hex: community.urlInfo.publicKey).cArray
|
||||
var userCommunity: ugroups_community_info = ugroups_community_info()
|
||||
|
||||
guard user_groups_get_or_construct_community(conf, &userCommunity, &cBaseUrl, &cRoom, &cPubkey) else {
|
||||
SNLog("Unable to upsert community conversation to Config Message")
|
||||
return
|
||||
/// It looks like there are some situations where this object might not get created correctly (and
|
||||
/// will throw due to the implicit unwrapping) as a result we put it in a guard and throw instead
|
||||
SNLog("Unable to upsert community conversation to SessionUtil: \(SessionUtil.lastError(conf))")
|
||||
throw SessionUtilError.getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
||||
userCommunity.priority = (community.priority ?? userCommunity.priority)
|
||||
|
@ -526,8 +549,8 @@ public extension SessionUtil {
|
|||
for: .userGroups,
|
||||
publicKey: getUserHexEncodedPublicKey(db)
|
||||
) { conf in
|
||||
var cBaseUrl: [CChar] = server.cArray
|
||||
var cRoom: [CChar] = roomToken.cArray
|
||||
var cBaseUrl: [CChar] = server.cArray.nullTerminated()
|
||||
var cRoom: [CChar] = roomToken.cArray.nullTerminated()
|
||||
|
||||
// Don't care if the community doesn't exist
|
||||
user_groups_erase_community(conf, &cBaseUrl, &cRoom)
|
||||
|
@ -567,7 +590,7 @@ public extension SessionUtil {
|
|||
) { conf in
|
||||
guard conf != nil else { throw SessionUtilError.nilConfigObject }
|
||||
|
||||
var cGroupId: [CChar] = groupPublicKey.cArray
|
||||
var cGroupId: [CChar] = groupPublicKey.cArray.nullTerminated()
|
||||
let userGroup: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf, &cGroupId)
|
||||
|
||||
// Need to make sure the group doesn't already exist (otherwise we will end up overriding the
|
||||
|
@ -670,7 +693,7 @@ public extension SessionUtil {
|
|||
publicKey: getUserHexEncodedPublicKey(db)
|
||||
) { conf in
|
||||
legacyGroupIds.forEach { threadId in
|
||||
var cGroupId: [CChar] = threadId.cArray
|
||||
var cGroupId: [CChar] = threadId.cArray.nullTerminated()
|
||||
|
||||
// Don't care if the group doesn't exist
|
||||
user_groups_erase_legacy_group(conf, &cGroupId)
|
||||
|
@ -692,6 +715,13 @@ public extension SessionUtil {
|
|||
|
||||
extension SessionUtil {
|
||||
struct LegacyGroupInfo: Decodable, FetchableRecord, ColumnExpressible {
|
||||
private static let threadIdKey: SQL = SQL(stringLiteral: CodingKeys.threadId.stringValue)
|
||||
private static let nameKey: SQL = SQL(stringLiteral: CodingKeys.name.stringValue)
|
||||
private static let lastKeyPairKey: SQL = SQL(stringLiteral: CodingKeys.lastKeyPair.stringValue)
|
||||
private static let disappearingConfigKey: SQL = SQL(stringLiteral: CodingKeys.disappearingConfig.stringValue)
|
||||
private static let priorityKey: SQL = SQL(stringLiteral: CodingKeys.priority.stringValue)
|
||||
private static let joinedAtKey: SQL = SQL(stringLiteral: CodingKeys.joinedAt.stringValue)
|
||||
|
||||
typealias Columns = CodingKeys
|
||||
enum CodingKeys: String, CodingKey, ColumnExpression, CaseIterable {
|
||||
case threadId
|
||||
|
@ -701,6 +731,7 @@ extension SessionUtil {
|
|||
case groupMembers
|
||||
case groupAdmins
|
||||
case priority
|
||||
case joinedAt = "formationTimestamp"
|
||||
}
|
||||
|
||||
var id: String { threadId }
|
||||
|
@ -712,6 +743,7 @@ extension SessionUtil {
|
|||
let groupMembers: [GroupMember]?
|
||||
let groupAdmins: [GroupMember]?
|
||||
let priority: Int32?
|
||||
let joinedAt: Int64?
|
||||
|
||||
init(
|
||||
id: String,
|
||||
|
@ -720,7 +752,8 @@ extension SessionUtil {
|
|||
disappearingConfig: DisappearingMessagesConfiguration? = nil,
|
||||
groupMembers: [GroupMember]? = nil,
|
||||
groupAdmins: [GroupMember]? = nil,
|
||||
priority: Int32? = nil
|
||||
priority: Int32? = nil,
|
||||
joinedAt: Int64? = nil
|
||||
) {
|
||||
self.threadId = id
|
||||
self.name = name
|
||||
|
@ -729,36 +762,87 @@ extension SessionUtil {
|
|||
self.groupMembers = groupMembers
|
||||
self.groupAdmins = groupAdmins
|
||||
self.priority = priority
|
||||
self.joinedAt = joinedAt
|
||||
}
|
||||
|
||||
static func fetchAll(_ db: Database) throws -> [LegacyGroupInfo] {
|
||||
return try ClosedGroup
|
||||
.filter(ClosedGroup.Columns.threadId.like("\(SessionId.Prefix.standard.rawValue)%"))
|
||||
.including(
|
||||
required: ClosedGroup.keyPairs
|
||||
.order(ClosedGroupKeyPair.Columns.receivedTimestamp.desc)
|
||||
.forKey(Columns.lastKeyPair.name)
|
||||
)
|
||||
.including(
|
||||
all: ClosedGroup.members
|
||||
.filter([GroupMember.Role.standard, GroupMember.Role.zombie]
|
||||
.contains(GroupMember.Columns.role))
|
||||
.forKey(Columns.groupMembers.name)
|
||||
)
|
||||
.including(
|
||||
all: ClosedGroup.members
|
||||
.filter(GroupMember.Columns.role == GroupMember.Role.admin)
|
||||
.forKey(Columns.groupAdmins.name)
|
||||
)
|
||||
.joining(
|
||||
optional: ClosedGroup.thread
|
||||
.including(
|
||||
optional: SessionThread.disappearingMessagesConfiguration
|
||||
.forKey(Columns.disappearingConfig.name)
|
||||
)
|
||||
)
|
||||
.asRequest(of: LegacyGroupInfo.self)
|
||||
let closedGroup: TypedTableAlias<ClosedGroup> = TypedTableAlias()
|
||||
let thread: TypedTableAlias<SessionThread> = TypedTableAlias()
|
||||
let keyPair: TypedTableAlias<ClosedGroupKeyPair> = TypedTableAlias()
|
||||
|
||||
let prefixLiteral: SQL = SQL(stringLiteral: "\(SessionId.Prefix.standard.rawValue)%")
|
||||
let keyPairThreadIdColumnLiteral: SQL = SQL(stringLiteral: ClosedGroupKeyPair.Columns.threadId.name)
|
||||
let receivedTimestampColumnLiteral: SQL = SQL(stringLiteral: ClosedGroupKeyPair.Columns.receivedTimestamp.name)
|
||||
let threadIdColumnLiteral: SQL = SQL(stringLiteral: DisappearingMessagesConfiguration.Columns.threadId.name)
|
||||
|
||||
/// **Note:** The `numColumnsBeforeTypes` value **MUST** match the number of fields before
|
||||
/// the `LegacyGroupInfo.lastKeyPairKey` entry below otherwise the query will fail to
|
||||
/// parse and might throw
|
||||
///
|
||||
/// Explicitly set default values for the fields ignored for search results
|
||||
let numColumnsBeforeTypes: Int = 4
|
||||
|
||||
let request: SQLRequest<LegacyGroupInfo> = """
|
||||
SELECT
|
||||
\(closedGroup[.threadId]) AS \(LegacyGroupInfo.threadIdKey),
|
||||
\(closedGroup[.name]) AS \(LegacyGroupInfo.nameKey),
|
||||
\(closedGroup[.formationTimestamp]) AS \(LegacyGroupInfo.joinedAtKey),
|
||||
\(thread[.pinnedPriority]) AS \(LegacyGroupInfo.priorityKey),
|
||||
\(LegacyGroupInfo.lastKeyPairKey).*,
|
||||
\(LegacyGroupInfo.disappearingConfigKey).*
|
||||
|
||||
FROM \(ClosedGroup.self)
|
||||
JOIN \(SessionThread.self) ON \(thread[.id]) = \(closedGroup[.threadId])
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
\(keyPair[.threadId]),
|
||||
\(keyPair[.publicKey]),
|
||||
\(keyPair[.secretKey]),
|
||||
MAX(\(keyPair[.receivedTimestamp])) AS \(receivedTimestampColumnLiteral),
|
||||
\(keyPair[.threadKeyPairHash])
|
||||
FROM \(ClosedGroupKeyPair.self)
|
||||
GROUP BY \(keyPair[.threadId])
|
||||
) AS \(LegacyGroupInfo.lastKeyPairKey) ON \(LegacyGroupInfo.lastKeyPairKey).\(keyPairThreadIdColumnLiteral) = \(closedGroup[.threadId])
|
||||
LEFT JOIN \(DisappearingMessagesConfiguration.self) AS \(LegacyGroupInfo.disappearingConfigKey) ON \(LegacyGroupInfo.disappearingConfigKey).\(threadIdColumnLiteral) = \(closedGroup[.threadId])
|
||||
|
||||
WHERE \(SQL("\(closedGroup[.threadId]) LIKE '\(prefixLiteral)'"))
|
||||
"""
|
||||
|
||||
let legacyGroupInfoNoMembers: [LegacyGroupInfo] = try request
|
||||
.adapted { db in
|
||||
let adapters = try splittingRowAdapters(columnCounts: [
|
||||
numColumnsBeforeTypes,
|
||||
ClosedGroupKeyPair.numberOfSelectedColumns(db),
|
||||
DisappearingMessagesConfiguration.numberOfSelectedColumns(db)
|
||||
])
|
||||
|
||||
return ScopeAdapter([
|
||||
CodingKeys.lastKeyPair.stringValue: adapters[1],
|
||||
CodingKeys.disappearingConfig.stringValue: adapters[2]
|
||||
])
|
||||
}
|
||||
.fetchAll(db)
|
||||
let legacyGroupIds: [String] = legacyGroupInfoNoMembers.map { $0.threadId }
|
||||
let allLegacyGroupMembers: [String: [GroupMember]] = try GroupMember
|
||||
.filter(legacyGroupIds.contains(GroupMember.Columns.groupId))
|
||||
.fetchAll(db)
|
||||
.grouped(by: \.groupId)
|
||||
|
||||
return legacyGroupInfoNoMembers
|
||||
.map { nonMemberGroup in
|
||||
LegacyGroupInfo(
|
||||
id: nonMemberGroup.id,
|
||||
name: nonMemberGroup.name,
|
||||
lastKeyPair: nonMemberGroup.lastKeyPair,
|
||||
disappearingConfig: nonMemberGroup.disappearingConfig,
|
||||
groupMembers: allLegacyGroupMembers[nonMemberGroup.id]?
|
||||
.filter { $0.role == .standard || $0.role == .zombie },
|
||||
groupAdmins: allLegacyGroupMembers[nonMemberGroup.id]?
|
||||
.filter { $0.role == .admin },
|
||||
priority: nonMemberGroup.priority,
|
||||
joinedAt: nonMemberGroup.joinedAt
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -61,13 +61,12 @@ internal extension SessionUtil {
|
|||
.asRequest(of: PriorityVisibilityInfo.self)
|
||||
.fetchOne(db)
|
||||
let targetPriority: Int32 = user_profile_get_nts_priority(conf)
|
||||
let targetHiddenState: Bool = user_profile_get_nts_hidden(conf)
|
||||
|
||||
// Create the 'Note to Self' thread if it doesn't exist
|
||||
if let threadInfo: PriorityVisibilityInfo = threadInfo {
|
||||
let threadChanges: [ConfigColumnAssignment] = [
|
||||
(threadInfo.shouldBeVisible == (targetHiddenState == false) ? nil :
|
||||
SessionThread.Columns.shouldBeVisible.set(to: (targetHiddenState == false))
|
||||
((threadInfo.shouldBeVisible == SessionUtil.shouldBeVisible(priority: targetPriority)) ? nil :
|
||||
SessionThread.Columns.shouldBeVisible.set(to: SessionUtil.shouldBeVisible(priority: targetPriority))
|
||||
),
|
||||
(threadInfo.pinnedPriority == targetPriority ? nil :
|
||||
SessionThread.Columns.pinnedPriority.set(to: targetPriority)
|
||||
|
@ -89,7 +88,7 @@ internal extension SessionUtil {
|
|||
db,
|
||||
id: userPublicKey,
|
||||
variant: .contact,
|
||||
shouldBeVisible: (targetHiddenState == false)
|
||||
shouldBeVisible: SessionUtil.shouldBeVisible(priority: targetPriority)
|
||||
)
|
||||
|
||||
try SessionThread
|
||||
|
@ -102,7 +101,7 @@ internal extension SessionUtil {
|
|||
// If the 'Note to Self' conversation is hidden then we should trigger the proper
|
||||
// `deleteOrLeave` behaviour (for 'Note to Self' this will leave the conversation
|
||||
// but remove the associated interactions)
|
||||
if targetHiddenState {
|
||||
if !SessionUtil.shouldBeVisible(priority: targetPriority) {
|
||||
try SessionThread
|
||||
.deleteOrLeave(
|
||||
db,
|
||||
|
@ -140,7 +139,7 @@ internal extension SessionUtil {
|
|||
guard conf != nil else { throw SessionUtilError.nilConfigObject }
|
||||
|
||||
// Update the name
|
||||
var updatedName: [CChar] = profile.name.cArray
|
||||
var updatedName: [CChar] = profile.name.cArray.nullTerminated()
|
||||
user_profile_set_name(conf, &updatedName)
|
||||
|
||||
// Either assign the updated profile pic, or sent a blank profile pic (to remove the current one)
|
||||
|
@ -151,18 +150,11 @@ internal extension SessionUtil {
|
|||
}
|
||||
|
||||
static func updateNoteToSelf(
|
||||
hidden: Bool? = nil,
|
||||
priority: Int32? = nil,
|
||||
priority: Int32,
|
||||
in conf: UnsafeMutablePointer<config_object>?
|
||||
) throws {
|
||||
guard conf != nil else { throw SessionUtilError.nilConfigObject }
|
||||
|
||||
if let hidden: Bool = hidden {
|
||||
user_profile_set_nts_hidden(conf, hidden)
|
||||
}
|
||||
|
||||
if let priority: Int32 = priority {
|
||||
user_profile_set_nts_priority(conf, priority)
|
||||
}
|
||||
user_profile_set_nts_priority(conf, priority)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -63,6 +63,22 @@ public enum SessionUtil {
|
|||
|
||||
public static var libSessionVersion: String { String(cString: LIBSESSION_UTIL_VERSION_STR) }
|
||||
|
||||
private static var hasCompletedRequiredMigrations: Bool = false
|
||||
|
||||
internal static func requiredMigrationsCompleted(_ db: Database) -> Bool {
|
||||
guard !hasCompletedRequiredMigrations else { return true }
|
||||
|
||||
return Storage.appliedMigrationIdentifiers(db)
|
||||
.isSuperset(of: [
|
||||
_013_SessionUtilChanges.identifier,
|
||||
_014_GenerateInitialUserConfigDumps.identifier
|
||||
])
|
||||
}
|
||||
|
||||
internal static func lastError(_ conf: UnsafeMutablePointer<config_object>?) -> String {
|
||||
return (conf?.pointee.last_error.map { String(cString: $0) } ?? "Unknown")
|
||||
}
|
||||
|
||||
// MARK: - Loading
|
||||
|
||||
public static func loadState(
|
||||
|
@ -268,7 +284,7 @@ public enum SessionUtil {
|
|||
guard conf != nil else { return nil }
|
||||
|
||||
// Mark the config as pushed
|
||||
var cHash: [CChar] = serverHash.cArray
|
||||
var cHash: [CChar] = serverHash.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, message.seqNo, &cHash)
|
||||
|
||||
// Update the result to indicate whether the config needs to be dumped
|
||||
|
@ -349,10 +365,7 @@ public enum SessionUtil {
|
|||
.mutate { conf in
|
||||
// Merge the messages
|
||||
var mergeHashes: [UnsafePointer<CChar>?] = next.value
|
||||
.map { message in
|
||||
(message.serverHash ?? "").cArray
|
||||
.nullTerminated()
|
||||
}
|
||||
.map { message in (message.serverHash ?? "").cArray.nullTerminated() }
|
||||
.unsafeCopy()
|
||||
var mergeData: [UnsafePointer<UInt8>?] = next.value
|
||||
.map { message -> [UInt8] in message.data.bytes }
|
||||
|
@ -441,7 +454,7 @@ fileprivate extension SessionUtil {
|
|||
|
||||
public extension SessionUtil {
|
||||
static func parseCommunity(url: String) -> (room: String, server: String, publicKey: String)? {
|
||||
var cFullUrl: [CChar] = url.cArray
|
||||
var cFullUrl: [CChar] = url.cArray.nullTerminated()
|
||||
var cBaseUrl: [CChar] = [CChar](repeating: 0, count: COMMUNITY_BASE_URL_MAX_LENGTH)
|
||||
var cRoom: [CChar] = [CChar](repeating: 0, count: COMMUNITY_ROOM_MAX_LENGTH)
|
||||
var cPubkey: [UInt8] = [UInt8](repeating: 0, count: OpenGroup.pubkeyByteLength)
|
||||
|
@ -464,8 +477,8 @@ public extension SessionUtil {
|
|||
}
|
||||
|
||||
static func communityUrlFor(server: String, roomToken: String, publicKey: String) -> String {
|
||||
var cBaseUrl: [CChar] = server.cArray
|
||||
var cRoom: [CChar] = roomToken.cArray
|
||||
var cBaseUrl: [CChar] = server.cArray.nullTerminated()
|
||||
var cRoom: [CChar] = roomToken.cArray.nullTerminated()
|
||||
var cPubkey: [UInt8] = Data(hex: publicKey).cArray
|
||||
var cFullUrl: [CChar] = [CChar](repeating: 0, count: COMMUNITY_FULL_URL_MAX_LENGTH)
|
||||
community_make_full_url(&cBaseUrl, &cRoom, &cPubkey, &cFullUrl)
|
||||
|
|
|
@ -6,4 +6,5 @@ public enum SessionUtilError: Error {
|
|||
case unableToCreateConfigObject
|
||||
case nilConfigObject
|
||||
case userDoesNotExist
|
||||
case getOrConstructFailedUnexpectedly
|
||||
}
|
||||
|
|
Binary file not shown.
Binary file not shown.
|
@ -8,6 +8,7 @@ module SessionUtil {
|
|||
header "session/config/expiring.h"
|
||||
header "session/config/user_groups.h"
|
||||
header "session/config/convo_info_volatile.h"
|
||||
header "session/config/notify.h"
|
||||
header "session/config/user_profile.h"
|
||||
header "session/config/util.h"
|
||||
header "session/config/contacts.h"
|
||||
|
|
|
@ -16,8 +16,11 @@ typedef struct config_object {
|
|||
// Internal opaque object pointer; calling code should leave this alone.
|
||||
void* internals;
|
||||
// When an error occurs in the C API this string will be set to the specific error message. May
|
||||
// be NULL.
|
||||
// be empty.
|
||||
const char* last_error;
|
||||
|
||||
// Sometimes used as the backing buffer for `last_error`. Should not be touched externally.
|
||||
char _error_buf[256];
|
||||
} config_object;
|
||||
|
||||
// Common functions callable on any config instance:
|
||||
|
|
|
@ -6,6 +6,7 @@ extern "C" {
|
|||
|
||||
#include "base.h"
|
||||
#include "expiring.h"
|
||||
#include "notify.h"
|
||||
#include "profile_pic.h"
|
||||
#include "util.h"
|
||||
|
||||
|
@ -23,13 +24,16 @@ typedef struct contacts_contact {
|
|||
bool approved;
|
||||
bool approved_me;
|
||||
bool blocked;
|
||||
bool hidden;
|
||||
|
||||
int priority;
|
||||
CONVO_NOTIFY_MODE notifications;
|
||||
int64_t mute_until;
|
||||
|
||||
CONVO_EXPIRATION_MODE exp_mode;
|
||||
int exp_seconds;
|
||||
|
||||
int64_t created; // unix timestamp (seconds)
|
||||
|
||||
} contacts_contact;
|
||||
|
||||
/// Constructs a contacts config object and sets a pointer to it in `conf`.
|
||||
|
@ -62,7 +66,7 @@ int contacts_init(
|
|||
/// Fills `contact` with the contact info given a session ID (specified as a null-terminated hex
|
||||
/// string), if the contact exists, and returns true. If the contact does not exist then `contact`
|
||||
/// is left unchanged and false is returned.
|
||||
bool contacts_get(const config_object* conf, contacts_contact* contact, const char* session_id)
|
||||
bool contacts_get(config_object* conf, contacts_contact* contact, const char* session_id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// Same as the above except that when the contact does not exist, this sets all the contact fields
|
||||
|
@ -74,7 +78,7 @@ bool contacts_get(const config_object* conf, contacts_contact* contact, const ch
|
|||
/// This is the method that should usually be used to create or update a contact, followed by
|
||||
/// setting fields in the contact, and then giving it to contacts_set().
|
||||
bool contacts_get_or_construct(
|
||||
const config_object* conf, contacts_contact* contact, const char* session_id)
|
||||
config_object* conf, contacts_contact* contact, const char* session_id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// Adds or updates a contact from the given contact info struct.
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include "base.hpp"
|
||||
#include "expiring.hpp"
|
||||
#include "namespaces.hpp"
|
||||
#include "notify.hpp"
|
||||
#include "profile_pic.hpp"
|
||||
|
||||
extern "C" struct contacts_contact;
|
||||
|
@ -31,14 +32,18 @@ namespace session::config {
|
|||
/// a - 1 if approved, omitted otherwise (int)
|
||||
/// A - 1 if remote has approved me, omitted otherwise (int)
|
||||
/// b - 1 if contact is blocked, omitted otherwise
|
||||
/// h - 1 if the conversation with this contact is hidden, omitted if visible.
|
||||
/// + - the conversation priority, for pinned messages. Omitted means not pinned; otherwise an
|
||||
/// @ - notification setting (int). Omitted = use default setting; 1 = all; 2 = disabled.
|
||||
/// ! - mute timestamp: if this is set then notifications are to be muted until the given unix
|
||||
/// timestamp (seconds, not milliseconds).
|
||||
/// + - the conversation priority; -1 means hidden; omitted means not pinned; otherwise an
|
||||
/// integer value >0, where a higher priority means the conversation is meant to appear
|
||||
/// earlier in the pinned conversation list.
|
||||
/// e - Disappearing messages expiration type. Omitted if disappearing messages are not enabled
|
||||
/// for the conversation with this contact; 1 for delete-after-send, and 2 for
|
||||
/// delete-after-read.
|
||||
/// E - Disappearing message timer, in seconds. Omitted when `e` is omitted.
|
||||
/// j - Unix timestamp (seconds) when the contact was created ("j" to match user_groups
|
||||
/// equivalent "j"oined field). Omitted if 0.
|
||||
|
||||
/// Struct containing contact info.
|
||||
struct contact_info {
|
||||
|
@ -51,12 +56,17 @@ struct contact_info {
|
|||
bool approved = false;
|
||||
bool approved_me = false;
|
||||
bool blocked = false;
|
||||
bool hidden = false; // True if the conversation with this contact is not visible in the convo
|
||||
// list (typically because it has been deleted).
|
||||
int priority = 0; // If >0 then this message is pinned; higher values mean higher priority
|
||||
// (i.e. pinned earlier in the pinned list).
|
||||
int priority = 0; // If >0 then this message is pinned; higher values mean higher priority
|
||||
// (i.e. pinned earlier in the pinned list). If negative then this
|
||||
// conversation is hidden. Otherwise (0) this is a regular, unpinned
|
||||
// conversation.
|
||||
notify_mode notifications = notify_mode::defaulted;
|
||||
int64_t mute_until = 0; // If non-zero, disable notifications until the given unix timestamp
|
||||
// (overriding whatever the current `notifications` value is until the
|
||||
// timestamp expires).
|
||||
expiration_mode exp_mode = expiration_mode::none; // The expiry time; none if not expiring.
|
||||
std::chrono::seconds exp_timer{0}; // The expiration timer (in seconds)
|
||||
int64_t created = 0; // Unix timestamp when this contact was added
|
||||
|
||||
explicit contact_info(std::string sid);
|
||||
|
||||
|
@ -127,12 +137,13 @@ class Contacts : public ConfigBase {
|
|||
void set_approved(std::string_view session_id, bool approved);
|
||||
void set_approved_me(std::string_view session_id, bool approved_me);
|
||||
void set_blocked(std::string_view session_id, bool blocked);
|
||||
void set_hidden(std::string_view session_id, bool hidden);
|
||||
void set_priority(std::string_view session_id, int priority);
|
||||
void set_notifications(std::string_view session_id, notify_mode notifications);
|
||||
void set_expiry(
|
||||
std::string_view session_id,
|
||||
expiration_mode exp_mode,
|
||||
std::chrono::seconds expiration_timer = 0min);
|
||||
void set_created(std::string_view session_id, int64_t timestamp);
|
||||
|
||||
/// Removes a contact, if present. Returns true if it was found and removed, false otherwise.
|
||||
/// Note that this removes all fields related to a contact, even fields we do not know about.
|
||||
|
|
|
@ -61,34 +61,39 @@ int convo_info_volatile_init(
|
|||
|
||||
/// Fills `convo` with the conversation info given a session ID (specified as a null-terminated hex
|
||||
/// string), if the conversation exists, and returns true. If the conversation does not exist then
|
||||
/// `convo` is left unchanged and false is returned.
|
||||
/// `convo` is left unchanged and false is returned. If an error occurs, false is returned and
|
||||
/// `conf->last_error` will be set to non-NULL containing the error string (if no error occurs, such
|
||||
/// as in the case where the conversation merely doesn't exist, `last_error` will be set to NULL).
|
||||
bool convo_info_volatile_get_1to1(
|
||||
const config_object* conf, convo_info_volatile_1to1* convo, const char* session_id)
|
||||
config_object* conf, convo_info_volatile_1to1* convo, const char* session_id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// Same as the above except that when the conversation does not exist, this sets all the convo
|
||||
/// fields to defaults and loads it with the given session_id.
|
||||
///
|
||||
/// Returns true as long as it is given a valid session_id. A false return is considered an error,
|
||||
/// and means the session_id was not a valid session_id.
|
||||
/// and means the session_id was not a valid session_id. In such a case `conf->last_error` will be
|
||||
/// set to an error string.
|
||||
///
|
||||
/// This is the method that should usually be used to create or update a conversation, followed by
|
||||
/// setting fields in the convo, and then giving it to convo_info_volatile_set().
|
||||
bool convo_info_volatile_get_or_construct_1to1(
|
||||
const config_object* conf, convo_info_volatile_1to1* convo, const char* session_id)
|
||||
config_object* conf, convo_info_volatile_1to1* convo, const char* session_id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// community versions of the 1-to-1 functions:
|
||||
///
|
||||
/// Gets a community convo info. `base_url` and `room` are null-terminated c strings; pubkey is
|
||||
/// 32 bytes. base_url and room will always be lower-cased (if not already).
|
||||
///
|
||||
/// Error handling works the same as the 1-to-1 version.
|
||||
bool convo_info_volatile_get_community(
|
||||
const config_object* conf,
|
||||
config_object* conf,
|
||||
convo_info_volatile_community* comm,
|
||||
const char* base_url,
|
||||
const char* room) __attribute__((warn_unused_result));
|
||||
bool convo_info_volatile_get_or_construct_community(
|
||||
const config_object* conf,
|
||||
config_object* conf,
|
||||
convo_info_volatile_community* convo,
|
||||
const char* base_url,
|
||||
const char* room,
|
||||
|
@ -96,21 +101,23 @@ bool convo_info_volatile_get_or_construct_community(
|
|||
|
||||
/// Fills `convo` with the conversation info given a legacy group ID (specified as a null-terminated
|
||||
/// hex string), if the conversation exists, and returns true. If the conversation does not exist
|
||||
/// then `convo` is left unchanged and false is returned.
|
||||
/// then `convo` is left unchanged and false is returned. On error, false is returned and the error
|
||||
/// is set in conf->last_error (on non-error, last_error is cleared).
|
||||
bool convo_info_volatile_get_legacy_group(
|
||||
const config_object* conf, convo_info_volatile_legacy_group* convo, const char* id)
|
||||
config_object* conf, convo_info_volatile_legacy_group* convo, const char* id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// Same as the above except that when the conversation does not exist, this sets all the convo
|
||||
/// fields to defaults and loads it with the given id.
|
||||
///
|
||||
/// Returns true as long as it is given a valid legacy group id (i.e. same format as a session id).
|
||||
/// A false return is considered an error, and means the id was not a valid session id.
|
||||
/// A false return is considered an error, and means the id was not a valid session id; an error
|
||||
/// string will be set in `conf->last_error`.
|
||||
///
|
||||
/// This is the method that should usually be used to create or update a conversation, followed by
|
||||
/// setting fields in the convo, and then giving it to convo_info_volatile_set().
|
||||
bool convo_info_volatile_get_or_construct_legacy_group(
|
||||
const config_object* conf, convo_info_volatile_legacy_group* convo, const char* id)
|
||||
config_object* conf, convo_info_volatile_legacy_group* convo, const char* id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// Adds or updates a conversation from the given convo info
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
#pragma once
|
||||
|
||||
typedef enum CONVO_NOTIFY_MODE {
|
||||
CONVO_NOTIFY_DEFAULT = 0,
|
||||
CONVO_NOTIFY_ALL = 1,
|
||||
CONVO_NOTIFY_DISABLED = 2,
|
||||
CONVO_NOTIFY_MENTIONS_ONLY = 3,
|
||||
} CONVO_NOTIFY_MODE;
|
|
@ -0,0 +1,12 @@
|
|||
#pragma once
|
||||
|
||||
namespace session::config {
|
||||
|
||||
enum class notify_mode {
|
||||
defaulted = 0,
|
||||
all = 1,
|
||||
disabled = 2,
|
||||
mentions_only = 3, // Only for groups; for DMs this becomes `all`
|
||||
};
|
||||
|
||||
}
|
|
@ -5,6 +5,7 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#include "base.h"
|
||||
#include "notify.h"
|
||||
#include "util.h"
|
||||
|
||||
// Maximum length of a group name, in bytes
|
||||
|
@ -26,9 +27,12 @@ typedef struct ugroups_legacy_group_info {
|
|||
// terminator).
|
||||
|
||||
int64_t disappearing_timer; // Minutes. 0 == disabled.
|
||||
bool hidden; // true if hidden from the convo list
|
||||
int priority; // pinned message priority; 0 = unpinned, larger means pinned higher (i.e. higher
|
||||
// priority conversations come first).
|
||||
int priority; // pinned message priority; 0 = unpinned, negative = hidden, positive = pinned
|
||||
// (with higher meaning pinned higher).
|
||||
int64_t joined_at; // unix timestamp when joined (or re-joined)
|
||||
CONVO_NOTIFY_MODE notifications; // When the user wants notifications
|
||||
int64_t mute_until; // Mute notifications until this timestamp (overrides `notifications`
|
||||
// setting until the timestamp)
|
||||
|
||||
// For members use the ugroups_legacy_group_members and associated calls.
|
||||
|
||||
|
@ -43,8 +47,12 @@ typedef struct ugroups_community_info {
|
|||
// info (that one is always forced lower-cased).
|
||||
unsigned char pubkey[32]; // 32 bytes (not terminated, can contain nulls)
|
||||
|
||||
int priority; // pinned message priority; 0 = unpinned, larger means pinned higher (i.e. higher
|
||||
// priority conversations come first).
|
||||
int priority; // pinned message priority; 0 = unpinned, negative = hidden, positive = pinned
|
||||
// (with higher meaning pinned higher).
|
||||
int64_t joined_at; // unix timestamp when joined (or re-joined)
|
||||
CONVO_NOTIFY_MODE notifications; // When the user wants notifications
|
||||
int64_t mute_until; // Mute notifications until this timestamp (overrides `notifications`
|
||||
// setting until the timestamp)
|
||||
} ugroups_community_info;
|
||||
|
||||
int user_groups_init(
|
||||
|
@ -59,9 +67,11 @@ int user_groups_init(
|
|||
/// normalized/lower-cased; room is case-insensitive for the lookup: note that this may well return
|
||||
/// a community info with a different room capitalization than the one provided to the call.
|
||||
///
|
||||
/// Returns true if the community was found and `comm` populated; false otherwise.
|
||||
/// Returns true if the community was found and `comm` populated; false otherwise. A false return
|
||||
/// can either be because it didn't exist (`conf->last_error` will be NULL) or because of some error
|
||||
/// (`last_error` will be set to an error string).
|
||||
bool user_groups_get_community(
|
||||
const config_object* conf,
|
||||
config_object* conf,
|
||||
ugroups_community_info* comm,
|
||||
const char* base_url,
|
||||
const char* room) __attribute__((warn_unused_result));
|
||||
|
@ -76,8 +86,10 @@ bool user_groups_get_community(
|
|||
///
|
||||
/// Note that this is all different from convo_info_volatile, which always forces the room token to
|
||||
/// lower-case (because it does not preserve the case).
|
||||
///
|
||||
/// Returns false (and sets `conf->last_error`) on error.
|
||||
bool user_groups_get_or_construct_community(
|
||||
const config_object* conf,
|
||||
config_object* conf,
|
||||
ugroups_community_info* comm,
|
||||
const char* base_url,
|
||||
const char* room,
|
||||
|
@ -85,11 +97,11 @@ bool user_groups_get_or_construct_community(
|
|||
|
||||
/// Returns a ugroups_legacy_group_info pointer containing the conversation info for a given legacy
|
||||
/// group ID (specified as a null-terminated hex string), if the conversation exists. If the
|
||||
/// conversation does not exist, returns NULL.
|
||||
/// conversation does not exist, returns NULL. Sets conf->last_error on error.
|
||||
///
|
||||
/// The returned pointer *must* be freed either by calling `ugroups_legacy_group_free()` when done
|
||||
/// with it, or by passing it to `user_groups_set_free_legacy_group()`.
|
||||
ugroups_legacy_group_info* user_groups_get_legacy_group(const config_object* conf, const char* id)
|
||||
ugroups_legacy_group_info* user_groups_get_legacy_group(config_object* conf, const char* id)
|
||||
__attribute__((warn_unused_result));
|
||||
|
||||
/// Same as the above except that when the conversation does not exist, this sets all the group
|
||||
|
@ -104,8 +116,10 @@ ugroups_legacy_group_info* user_groups_get_legacy_group(const config_object* con
|
|||
///
|
||||
/// This is the method that should usually be used to create or update a conversation, followed by
|
||||
/// setting fields in the group, and then giving it to user_groups_set().
|
||||
///
|
||||
/// On error, this returns NULL and sets `conf->last_error`.
|
||||
ugroups_legacy_group_info* user_groups_get_or_construct_legacy_group(
|
||||
const config_object* conf, const char* id) __attribute__((warn_unused_result));
|
||||
config_object* conf, const char* id) __attribute__((warn_unused_result));
|
||||
|
||||
/// Properly frees memory associated with a ugroups_legacy_group_info pointer (as returned by
|
||||
/// get_legacy_group/get_or_construct_legacy_group).
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include "base.hpp"
|
||||
#include "community.hpp"
|
||||
#include "namespaces.hpp"
|
||||
#include "notify.hpp"
|
||||
|
||||
extern "C" {
|
||||
struct ugroups_legacy_group_info;
|
||||
|
@ -29,11 +30,16 @@ namespace session::config {
|
|||
/// a - set of admin session ids (each 33 bytes).
|
||||
/// E - disappearing messages duration, in seconds, > 0. Omitted if disappearing messages is
|
||||
/// disabled. (Note that legacy groups only support expire after-read)
|
||||
/// h - hidden: 1 if the conversation has been removed from the conversation list, omitted if
|
||||
/// visible.
|
||||
/// + - the conversation priority, for pinned messages. Omitted means not pinned; otherwise an
|
||||
/// integer value >0, where a higher priority means the conversation is meant to appear
|
||||
/// earlier in the pinned conversation list.
|
||||
/// @ - notification setting (int). Omitted = use default setting; 1 = all, 2 = disabled, 3 =
|
||||
/// mentions-only.
|
||||
/// ! - mute timestamp: if set then don't show notifications for this contact's messages until
|
||||
/// this unix timestamp (i.e. overriding the current notification setting until the given
|
||||
/// time).
|
||||
/// + - the conversation priority, for pinned/hidden messages. Integer. Omitted means not
|
||||
/// pinned; -1 means hidden, and a positive value is a pinned message for which higher
|
||||
/// priority values means the conversation is meant to appear earlier in the pinned
|
||||
/// conversation list.
|
||||
/// j - joined at unix timestamp. Omitted if 0.
|
||||
///
|
||||
/// o - dict of communities (AKA open groups); within this dict (which deliberately has the same
|
||||
/// layout as convo_info_volatile) each key is the SOGS base URL (in canonical form), and value
|
||||
|
@ -45,14 +51,29 @@ namespace session::config {
|
|||
/// appropriate). For instance, a room name SudokuSolvers would be "sudokusolvers" in
|
||||
/// the outer key, with the capitalization variation in use ("SudokuSolvers") in this
|
||||
/// key. This key is *always* present (to keep the room dict non-empty).
|
||||
/// + - the conversation priority, for pinned messages. Omitted means not pinned; otherwise
|
||||
/// an integer value >0, where a higher priority means the conversation is meant to
|
||||
/// appear earlier in the pinned conversation list.
|
||||
/// @ - notification setting (see above).
|
||||
/// ! - mute timestamp (see above).
|
||||
/// + - the conversation priority, for pinned messages. Omitted means not pinned; -1 means
|
||||
/// hidden; otherwise an integer value >0, where a higher priority means the
|
||||
/// conversation is meant to appear earlier in the pinned conversation list.
|
||||
/// j - joined at unix timestamp. Omitted if 0.
|
||||
///
|
||||
/// c - reserved for future storage of new-style group info.
|
||||
|
||||
/// Common base type with fields shared by all the groups
|
||||
struct base_group_info {
|
||||
int priority = 0; // The priority; 0 means unpinned, -1 means hidden, positive means
|
||||
// pinned higher (i.e. higher priority conversations come first).
|
||||
int64_t joined_at = 0; // unix timestamp (seconds) when the group was joined (or re-joined)
|
||||
notify_mode notifications = notify_mode::defaulted; // When the user wants notifications
|
||||
int64_t mute_until = 0; // unix timestamp (seconds) until which notifications are disabled
|
||||
|
||||
protected:
|
||||
void load(const dict& info_dict);
|
||||
};
|
||||
|
||||
/// Struct containing legacy group info (aka "closed groups").
|
||||
struct legacy_group_info {
|
||||
struct legacy_group_info : base_group_info {
|
||||
static constexpr size_t NAME_MAX_LENGTH = 100; // in bytes; name will be truncated if exceeded
|
||||
|
||||
std::string session_id; // The legacy group "session id" (33 bytes).
|
||||
|
@ -61,9 +82,6 @@ struct legacy_group_info {
|
|||
ustring enc_pubkey; // bytes (32 or empty)
|
||||
ustring enc_seckey; // bytes (32 or empty)
|
||||
std::chrono::seconds disappearing_timer{0}; // 0 == disabled.
|
||||
bool hidden = false; // true if the conversation is hidden from the convo list
|
||||
int priority = 0; // The priority; 0 means unpinned, larger means pinned higher (i.e.
|
||||
// higher priority conversations come first).
|
||||
|
||||
/// Constructs a new legacy group info from an id (which must look like a session_id). Throws
|
||||
/// if id is invalid.
|
||||
|
@ -108,7 +126,7 @@ struct legacy_group_info {
|
|||
};
|
||||
|
||||
/// Community (aka open group) info
|
||||
struct community_info : community {
|
||||
struct community_info : base_group_info, community {
|
||||
// Note that *changing* url/room/pubkey and then doing a set inserts a new room under the given
|
||||
// url/room/pubkey, it does *not* update an existing room.
|
||||
|
||||
|
@ -119,9 +137,6 @@ struct community_info : community {
|
|||
community_info(const struct ugroups_community_info& c); // From c struct
|
||||
void into(ugroups_community_info& c) const; // Into c struct
|
||||
|
||||
int priority = 0; // The priority; 0 means unpinned, larger means pinned higher (i.e.
|
||||
// higher priority conversations come first).
|
||||
|
||||
private:
|
||||
void load(const dict& info_dict);
|
||||
|
||||
|
@ -212,6 +227,8 @@ class UserGroups : public ConfigBase {
|
|||
DictFieldProxy community_field(
|
||||
const community_info& og, ustring_view* get_pubkey = nullptr) const;
|
||||
|
||||
void set_base(const base_group_info& bg, DictFieldProxy& info) const;
|
||||
|
||||
public:
|
||||
/// Removes a community group. Returns true if found and removed, false if not present.
|
||||
/// Arguments are the same as `get_community`.
|
||||
|
@ -253,7 +270,7 @@ class UserGroups : public ConfigBase {
|
|||
/// if (auto* comm = std::get_if<community_info>(&group)) {
|
||||
/// // use comm->name, comm->priority, etc.
|
||||
/// } else if (auto* lg = std::get_if<legacy_group_info>(&convo)) {
|
||||
/// // use lg->session_id, lg->hidden, etc.
|
||||
/// // use lg->session_id, lg->priority, etc.
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
|
|
|
@ -50,18 +50,14 @@ user_profile_pic user_profile_get_pic(const config_object* conf);
|
|||
// Sets a user profile
|
||||
int user_profile_set_pic(config_object* conf, user_profile_pic pic);
|
||||
|
||||
// Gets the current note-to-self priority level. Will always be >= 0.
|
||||
// Gets the current note-to-self priority level. Will be negative for hidden, 0 for unpinned, and >
|
||||
// 0 for pinned (with higher value = higher priority).
|
||||
int user_profile_get_nts_priority(const config_object* conf);
|
||||
|
||||
// Sets the current note-to-self priority level. Should be >= 0 (negatives will be set to 0).
|
||||
// Sets the current note-to-self priority level. Set to -1 for hidden; 0 for unpinned, and > 0 for
|
||||
// higher priority in the conversation list.
|
||||
void user_profile_set_nts_priority(config_object* conf, int priority);
|
||||
|
||||
// Gets the current note-to-self priority level. Will always be >= 0.
|
||||
bool user_profile_get_nts_hidden(const config_object* conf);
|
||||
|
||||
// Sets the current note-to-self priority level. Should be >= 0 (negatives will be set to 0).
|
||||
void user_profile_set_nts_hidden(config_object* conf, bool hidden);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
|
|
@ -15,9 +15,7 @@ namespace session::config {
|
|||
/// p - user profile url
|
||||
/// q - user profile decryption key (binary)
|
||||
/// + - the priority value for the "Note to Self" pseudo-conversation (higher = higher in the
|
||||
/// conversation list). Omitted when 0.
|
||||
/// h - the "hidden" value for the "Note to Self" pseudo-conversation (true = hide). Omitted when
|
||||
/// false.
|
||||
/// conversation list). Omitted when 0. -1 means hidden.
|
||||
|
||||
class UserProfile final : public ConfigBase {
|
||||
|
||||
|
@ -57,18 +55,13 @@ class UserProfile final : public ConfigBase {
|
|||
void set_profile_pic(std::string_view url, ustring_view key);
|
||||
void set_profile_pic(profile_pic pic);
|
||||
|
||||
/// Gets the Note-to-self conversation priority. Will always be >= 0.
|
||||
/// Gets the Note-to-self conversation priority. Negative means hidden; 0 means unpinned;
|
||||
/// higher means higher priority (i.e. hidden in the convo list).
|
||||
int get_nts_priority() const;
|
||||
|
||||
/// Sets the Note-to-self conversation priority. Should be >= 0 (negatives will be set to 0).
|
||||
/// Sets the Note-to-self conversation priority. -1 for hidden, 0 for unpinned, higher for
|
||||
/// pinned higher.
|
||||
void set_nts_priority(int priority);
|
||||
|
||||
/// Gets the Note-to-self hidden flag; true means the Note-to-self "conversation" should be
|
||||
/// hidden from the conversation list.
|
||||
bool get_nts_hidden() const;
|
||||
|
||||
/// Sets or clears the `hidden` flag that hides the Note-to-self from the conversation list.
|
||||
void set_nts_hidden(bool hidden);
|
||||
};
|
||||
|
||||
} // namespace session::config
|
||||
|
|
|
@ -282,7 +282,7 @@ public final class OpenGroupManager {
|
|||
}
|
||||
.flatMap { _ in
|
||||
dependencies.storage
|
||||
.readPublisherFlatMap(receiveOn: OpenGroupAPI.workQueue) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
// Note: The initial request for room info and it's capabilities should NOT be
|
||||
// authenticated (this is because if the server requires blinding and the auth
|
||||
// headers aren't blinded it will error - these endpoints do support unauthenticated
|
||||
|
@ -296,6 +296,7 @@ public final class OpenGroupManager {
|
|||
)
|
||||
}
|
||||
}
|
||||
.subscribe(on: OpenGroupAPI.workQueue)
|
||||
.receive(on: OpenGroupAPI.workQueue)
|
||||
.flatMap { response -> Future<Void, Error> in
|
||||
Future<Void, Error> { resolver in
|
||||
|
@ -1000,13 +1001,14 @@ public final class OpenGroupManager {
|
|||
|
||||
// Try to retrieve the default rooms 8 times
|
||||
let publisher: AnyPublisher<[OpenGroupAPI.Room], Error> = dependencies.storage
|
||||
.readPublisherFlatMap(receiveOn: OpenGroupAPI.workQueue) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.capabilitiesAndRooms(
|
||||
db,
|
||||
on: OpenGroupAPI.defaultServer,
|
||||
using: dependencies
|
||||
)
|
||||
}
|
||||
.subscribe(on: OpenGroupAPI.workQueue)
|
||||
.receive(on: OpenGroupAPI.workQueue)
|
||||
.retry(8)
|
||||
.map { response in
|
||||
|
|
|
@ -79,7 +79,7 @@ extension MessageReceiver {
|
|||
members: membersAsData.map { $0.toHexString() },
|
||||
admins: adminsAsData.map { $0.toHexString() },
|
||||
expirationTimer: expirationTimer,
|
||||
messageSentTimestamp: sentTimestamp,
|
||||
formationTimestampMs: sentTimestamp,
|
||||
calledFromConfigHandling: false
|
||||
)
|
||||
}
|
||||
|
@ -92,7 +92,7 @@ extension MessageReceiver {
|
|||
members: [String],
|
||||
admins: [String],
|
||||
expirationTimer: UInt32,
|
||||
messageSentTimestamp: UInt64,
|
||||
formationTimestampMs: UInt64,
|
||||
calledFromConfigHandling: Bool
|
||||
) throws {
|
||||
// With new closed groups we only want to create them if the admin creating the closed group is an
|
||||
|
@ -118,7 +118,7 @@ extension MessageReceiver {
|
|||
let closedGroup: ClosedGroup = try ClosedGroup(
|
||||
threadId: groupPublicKey,
|
||||
name: name,
|
||||
formationTimestamp: (TimeInterval(messageSentTimestamp) / 1000)
|
||||
formationTimestamp: (TimeInterval(formationTimestampMs) / 1000)
|
||||
).saved(db)
|
||||
|
||||
// Clear the zombie list if the group wasn't active (ie. had no keys)
|
||||
|
|
|
@ -190,7 +190,7 @@ extension MessageReceiver {
|
|||
members: [String](closedGroup.members),
|
||||
admins: [String](closedGroup.admins),
|
||||
expirationTimer: closedGroup.expirationTimer,
|
||||
messageSentTimestamp: message.sentTimestamp!,
|
||||
formationTimestampMs: message.sentTimestamp!,
|
||||
calledFromConfigHandling: false // Legacy config isn't an issue
|
||||
)
|
||||
}
|
||||
|
|
|
@ -549,7 +549,7 @@ extension MessageSender {
|
|||
)
|
||||
.flatMap { _ -> AnyPublisher<Void, Error> in
|
||||
Storage.shared
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db in
|
||||
.writePublisherFlatMap { db in
|
||||
generateAndSendNewEncryptionKeyPair(
|
||||
db,
|
||||
targetMembers: members,
|
||||
|
|
|
@ -131,29 +131,23 @@ extension MessageSender {
|
|||
}()
|
||||
|
||||
return Storage.shared
|
||||
.readPublisherFlatMap(receiveOn: queue) { db -> AnyPublisher<(attachments: [Attachment], openGroup: OpenGroup?), Error> in
|
||||
.readPublisher { db -> (attachments: [Attachment], openGroup: OpenGroup?) in
|
||||
let attachmentStateInfo: [Attachment.StateInfo] = (try? Attachment
|
||||
.stateInfo(interactionId: interactionId, state: .uploading)
|
||||
.fetchAll(db))
|
||||
.defaulting(to: [])
|
||||
|
||||
// If there is no attachment data then just return early
|
||||
guard !attachmentStateInfo.isEmpty else {
|
||||
return Just(([], nil))
|
||||
.setFailureType(to: Error.self)
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
guard !attachmentStateInfo.isEmpty else { return ([], nil) }
|
||||
|
||||
// Otherwise fetch the open group (if there is one)
|
||||
return Just((
|
||||
return (
|
||||
(try? Attachment
|
||||
.filter(ids: attachmentStateInfo.map { $0.attachmentId })
|
||||
.fetchAll(db))
|
||||
.defaulting(to: []),
|
||||
try? OpenGroup.fetchOne(db, id: threadId)
|
||||
))
|
||||
.setFailureType(to: Error.self)
|
||||
.eraseToAnyPublisher()
|
||||
)
|
||||
}
|
||||
.flatMap { attachments, openGroup -> AnyPublisher<[String?], Error> in
|
||||
guard !attachments.isEmpty else {
|
||||
|
@ -171,8 +165,7 @@ extension MessageSender {
|
|||
to: (
|
||||
openGroup.map { Attachment.Destination.openGroup($0) } ??
|
||||
.fileServer
|
||||
),
|
||||
queue: DispatchQueue.global(qos: .userInitiated)
|
||||
)
|
||||
)
|
||||
}
|
||||
)
|
||||
|
|
|
@ -644,7 +644,7 @@ public final class MessageSender {
|
|||
in: namespace
|
||||
)
|
||||
.subscribe(on: DispatchQueue.global(qos: .default))
|
||||
.flatMap { response -> AnyPublisher<Bool, Error> in
|
||||
.flatMap { response -> AnyPublisher<Void, Error> in
|
||||
let updatedMessage: Message = message
|
||||
updatedMessage.serverHash = response.1.hash
|
||||
|
||||
|
@ -669,7 +669,7 @@ public final class MessageSender {
|
|||
}()
|
||||
|
||||
return dependencies.storage
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .default)) { db -> Void in
|
||||
.writePublisher { db -> Void in
|
||||
try MessageSender.handleSuccessfulMessageSend(
|
||||
db,
|
||||
message: updatedMessage,
|
||||
|
@ -684,34 +684,34 @@ public final class MessageSender {
|
|||
JobRunner.add(db, job: job)
|
||||
return ()
|
||||
}
|
||||
.flatMap { _ -> AnyPublisher<Bool, Error> in
|
||||
.flatMap { _ -> AnyPublisher<Void, Error> in
|
||||
let isMainAppActive: Bool = (UserDefaults.sharedLokiProject?[.isMainAppActive])
|
||||
.defaulting(to: false)
|
||||
|
||||
guard shouldNotify && !isMainAppActive else {
|
||||
return Just(true)
|
||||
return Just(())
|
||||
.setFailureType(to: Error.self)
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
guard let job: Job = job else {
|
||||
return Just(true)
|
||||
return Just(())
|
||||
.setFailureType(to: Error.self)
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
return Deferred {
|
||||
Future<Bool, Error> { resolver in
|
||||
Future<Void, Error> { resolver in
|
||||
NotifyPushServerJob.run(
|
||||
job,
|
||||
queue: DispatchQueue.global(qos: .default),
|
||||
success: { _, _ in resolver(Result.success(true)) },
|
||||
success: { _, _ in resolver(Result.success(())) },
|
||||
failure: { _, _, _ in
|
||||
// Always fulfill because the notify PN server job isn't critical.
|
||||
resolver(Result.success(true))
|
||||
resolver(Result.success(()))
|
||||
},
|
||||
deferred: { _ in
|
||||
// Always fulfill because the notify PN server job isn't critical.
|
||||
resolver(Result.success(true))
|
||||
resolver(Result.success(()))
|
||||
}
|
||||
)
|
||||
}
|
||||
|
@ -720,7 +720,6 @@ public final class MessageSender {
|
|||
}
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.filter { $0 }
|
||||
.handleEvents(
|
||||
receiveCompletion: { result in
|
||||
switch result {
|
||||
|
@ -761,7 +760,7 @@ public final class MessageSender {
|
|||
|
||||
// Send the result
|
||||
return dependencies.storage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.global(qos: .default)) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -780,7 +779,7 @@ public final class MessageSender {
|
|||
let updatedMessage: Message = message
|
||||
updatedMessage.openGroupServerMessageId = UInt64(responseData.id)
|
||||
|
||||
return dependencies.storage.writePublisher(receiveOn: DispatchQueue.global(qos: .default)) { db in
|
||||
return dependencies.storage.writePublisher { db in
|
||||
// The `posted` value is in seconds but we sent it in ms so need that for de-duping
|
||||
try MessageSender.handleSuccessfulMessageSend(
|
||||
db,
|
||||
|
@ -829,7 +828,7 @@ public final class MessageSender {
|
|||
|
||||
// Send the result
|
||||
return dependencies.storage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.global(qos: .default)) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
return OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -844,7 +843,7 @@ public final class MessageSender {
|
|||
let updatedMessage: Message = message
|
||||
updatedMessage.openGroupServerMessageId = UInt64(responseData.id)
|
||||
|
||||
return dependencies.storage.writePublisher(receiveOn: DispatchQueue.global(qos: .default)) { db in
|
||||
return dependencies.storage.writePublisher { db in
|
||||
// The `posted` value is in seconds but we sent it in ms so need that for de-duping
|
||||
try MessageSender.handleSuccessfulMessageSend(
|
||||
db,
|
||||
|
|
|
@ -59,7 +59,7 @@ public enum PushNotificationAPI {
|
|||
// Unsubscribe from all closed groups (including ones the user is no longer a member of,
|
||||
// just in case)
|
||||
Storage.shared
|
||||
.readPublisher(receiveOn: DispatchQueue.global(qos: .background)) { db -> (String, Set<String>) in
|
||||
.readPublisher { db -> (String, Set<String>) in
|
||||
(
|
||||
getUserHexEncodedPublicKey(db),
|
||||
try ClosedGroup
|
||||
|
@ -84,6 +84,7 @@ public enum PushNotificationAPI {
|
|||
.collect()
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.sinkUntilComplete()
|
||||
|
||||
// Unregister for normal push notifications
|
||||
|
|
|
@ -91,7 +91,7 @@ extension OpenGroupAPI {
|
|||
let server: String = self.server
|
||||
|
||||
return dependencies.storage
|
||||
.readPublisherFlatMap(receiveOn: Threading.pollerQueue) { db -> AnyPublisher<(Int64, PollResponse), Error> in
|
||||
.readPublisherFlatMap { db -> AnyPublisher<(Int64, PollResponse), Error> in
|
||||
let failureCount: Int64 = (try? OpenGroup
|
||||
.filter(OpenGroup.Columns.server == server)
|
||||
.select(max(OpenGroup.Columns.pollFailureCount))
|
||||
|
@ -114,6 +114,7 @@ extension OpenGroupAPI {
|
|||
.eraseToAnyPublisher()
|
||||
}
|
||||
.subscribe(on: Threading.pollerQueue)
|
||||
.receive(on: Threading.pollerQueue)
|
||||
.handleEvents(
|
||||
receiveOutput: { [weak self] failureCount, response in
|
||||
guard !calledFromBackgroundPoller || isBackgroundPollerValid() else {
|
||||
|
@ -282,7 +283,7 @@ extension OpenGroupAPI {
|
|||
}
|
||||
|
||||
return dependencies.storage
|
||||
.readPublisherFlatMap(receiveOn: OpenGroupAPI.workQueue) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.capabilities(
|
||||
db,
|
||||
server: server,
|
||||
|
@ -291,6 +292,7 @@ extension OpenGroupAPI {
|
|||
)
|
||||
}
|
||||
.subscribe(on: OpenGroupAPI.workQueue)
|
||||
.receive(on: OpenGroupAPI.workQueue)
|
||||
.flatMap { [weak self] _, responseBody -> AnyPublisher<Void, Error> in
|
||||
guard let strongSelf = self, isBackgroundPollerValid() else {
|
||||
return Just(())
|
||||
|
|
|
@ -712,7 +712,7 @@ public extension MessageViewModel {
|
|||
WHERE (
|
||||
\(groupMember[.groupId]) = \(interaction[.threadId]) AND
|
||||
\(groupMember[.profileId]) = \(interaction[.authorId]) AND
|
||||
\(SQL("\(thread[.variant]) = \(SessionThread.Variant.openGroup)")) AND
|
||||
\(SQL("\(thread[.variant]) = \(SessionThread.Variant.community)")) AND
|
||||
\(SQL("\(groupMember[.role]) IN \([GroupMember.Role.moderator, GroupMember.Role.admin])"))
|
||||
)
|
||||
) AS \(ViewModel.isSenderOpenGroupModeratorKey),
|
||||
|
|
|
@ -792,10 +792,6 @@ public extension SessionThreadViewModel {
|
|||
\(SQL("\(thread[.variant]) != \(SessionThread.Variant.contact)")) OR
|
||||
\(SQL("\(thread[.id]) = \(userPublicKey)")) OR
|
||||
\(contact[.isApproved]) = true
|
||||
) AND (
|
||||
-- Only show the 'Note to Self' thread if it has an interaction
|
||||
\(SQL("\(thread[.id]) != \(userPublicKey)")) OR
|
||||
\(interaction[.timestampMs]) IS NOT NULL
|
||||
)
|
||||
"""
|
||||
}
|
||||
|
@ -826,7 +822,7 @@ public extension SessionThreadViewModel {
|
|||
|
||||
return SQL("""
|
||||
(IFNULL(\(thread[.pinnedPriority]), 0) > 0) DESC,
|
||||
IFNULL(\(interaction[.timestampMs]), (\(thread[.creationDateTimestamp]) * 1000)) END DESC
|
||||
IFNULL(\(interaction[.timestampMs]), (\(thread[.creationDateTimestamp]) * 1000)) DESC
|
||||
""")
|
||||
}()
|
||||
|
||||
|
|
|
@ -14,6 +14,8 @@ class ConfigContactsSpec {
|
|||
|
||||
static func spec() {
|
||||
it("generates Contact configs correctly") {
|
||||
let createdTs: Int64 = 1680064059
|
||||
let nowTs: Int64 = Int64(Date().timeIntervalSince1970)
|
||||
let seed: Data = Data(hex: "0123456789abcdef0123456789abcdef")
|
||||
|
||||
// FIXME: Would be good to move these into the libSession-util instead of using Sodium separately
|
||||
|
@ -33,7 +35,7 @@ class ConfigContactsSpec {
|
|||
|
||||
// Empty contacts shouldn't have an existing contact
|
||||
let definitelyRealId: String = "050000000000000000000000000000000000000000000000000000000000000000"
|
||||
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray
|
||||
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray.nullTerminated()
|
||||
let contactPtr: UnsafeMutablePointer<contacts_contact>? = nil
|
||||
expect(contacts_get(conf, contactPtr, &cDefinitelyRealId)).to(beFalse())
|
||||
|
||||
|
@ -48,6 +50,9 @@ class ConfigContactsSpec {
|
|||
expect(contact2.blocked).to(beFalse())
|
||||
expect(contact2.profile_pic).toNot(beNil()) // Creates an empty instance apparently
|
||||
expect(String(libSessionVal: contact2.profile_pic.url)).to(beEmpty())
|
||||
expect(contact2.created).to(equal(0))
|
||||
expect(contact2.notifications).to(equal(CONVO_NOTIFY_DEFAULT))
|
||||
expect(contact2.mute_until).to(equal(0))
|
||||
|
||||
expect(config_needs_push(conf)).to(beFalse())
|
||||
expect(config_needs_dump(conf)).to(beFalse())
|
||||
|
@ -61,6 +66,9 @@ class ConfigContactsSpec {
|
|||
contact2.nickname = "Joey".toLibSession()
|
||||
contact2.approved = true
|
||||
contact2.approved_me = true
|
||||
contact2.created = createdTs
|
||||
contact2.notifications = CONVO_NOTIFY_ALL
|
||||
contact2.mute_until = nowTs + 1800
|
||||
|
||||
// Update the contact
|
||||
contacts_set(conf, &contact2)
|
||||
|
@ -76,6 +84,10 @@ class ConfigContactsSpec {
|
|||
expect(String(libSessionVal: contact3.profile_pic.url)).to(beEmpty())
|
||||
expect(contact3.blocked).to(beFalse())
|
||||
expect(String(libSessionVal: contact3.session_id)).to(equal(definitelyRealId))
|
||||
expect(contact3.created).to(equal(createdTs))
|
||||
expect(contact2.notifications).to(equal(CONVO_NOTIFY_ALL))
|
||||
expect(contact2.mute_until).to(equal(nowTs + 1800))
|
||||
|
||||
|
||||
// Since we've made changes, we should need to push new config to the swarm, *and* should need
|
||||
// to dump the updated state:
|
||||
|
@ -92,7 +104,7 @@ class ConfigContactsSpec {
|
|||
|
||||
// Pretend we uploaded it
|
||||
let fakeHash1: String = "fakehash1"
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash1)
|
||||
expect(config_needs_push(conf)).to(beFalse())
|
||||
expect(config_needs_dump(conf)).to(beTrue())
|
||||
|
@ -130,9 +142,10 @@ class ConfigContactsSpec {
|
|||
expect(contact4.profile_pic).toNot(beNil()) // Creates an empty instance apparently
|
||||
expect(String(libSessionVal: contact4.profile_pic.url)).to(beEmpty())
|
||||
expect(contact4.blocked).to(beFalse())
|
||||
expect(contact4.created).to(equal(createdTs))
|
||||
|
||||
let anotherId: String = "051111111111111111111111111111111111111111111111111111111111111111"
|
||||
var cAnotherId: [CChar] = anotherId.cArray
|
||||
var cAnotherId: [CChar] = anotherId.cArray.nullTerminated()
|
||||
var contact5: contacts_contact = contacts_contact()
|
||||
expect(contacts_get_or_construct(conf2, &contact5, &cAnotherId)).to(beTrue())
|
||||
expect(String(libSessionVal: contact5.name)).to(beEmpty())
|
||||
|
@ -152,7 +165,7 @@ class ConfigContactsSpec {
|
|||
|
||||
// Check the merging
|
||||
let fakeHash2: String = "fakehash2"
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray.nullTerminated()
|
||||
var mergeHashes: [UnsafePointer<CChar>?] = [cFakeHash2].unsafeCopy()
|
||||
var mergeData: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData4.pointee.config)]
|
||||
var mergeSize: [Int] = [pushData4.pointee.config_len]
|
||||
|
@ -195,7 +208,7 @@ class ConfigContactsSpec {
|
|||
|
||||
// Client 2 adds a new friend:
|
||||
let thirdId: String = "052222222222222222222222222222222222222222222222222222222222222222"
|
||||
var cThirdId: [CChar] = thirdId.cArray
|
||||
var cThirdId: [CChar] = thirdId.cArray.nullTerminated()
|
||||
var contact7: contacts_contact = contacts_contact()
|
||||
expect(contacts_get_or_construct(conf2, &contact7, &cThirdId)).to(beTrue())
|
||||
contact7.nickname = "Nickname 3".toLibSession()
|
||||
|
@ -223,9 +236,9 @@ class ConfigContactsSpec {
|
|||
.to(equal([fakeHash2]))
|
||||
|
||||
let fakeHash3a: String = "fakehash3a"
|
||||
var cFakeHash3a: [CChar] = fakeHash3a.cArray
|
||||
var cFakeHash3a: [CChar] = fakeHash3a.cArray.nullTerminated()
|
||||
let fakeHash3b: String = "fakehash3b"
|
||||
var cFakeHash3b: [CChar] = fakeHash3b.cArray
|
||||
var cFakeHash3b: [CChar] = fakeHash3b.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData6.pointee.seqno, &cFakeHash3a)
|
||||
config_confirm_pushed(conf2, pushData7.pointee.seqno, &cFakeHash3b)
|
||||
|
||||
|
@ -260,7 +273,7 @@ class ConfigContactsSpec {
|
|||
.to(equal([fakeHash3a, fakeHash3b]))
|
||||
|
||||
let fakeHash4: String = "fakeHash4"
|
||||
var cFakeHash4: [CChar] = fakeHash4.cArray
|
||||
var cFakeHash4: [CChar] = fakeHash4.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData8.pointee.seqno, &cFakeHash4)
|
||||
config_confirm_pushed(conf2, pushData9.pointee.seqno, &cFakeHash4)
|
||||
pushData8.deallocate()
|
||||
|
|
|
@ -33,7 +33,7 @@ class ConfigConvoInfoVolatileSpec {
|
|||
|
||||
// Empty contacts shouldn't have an existing contact
|
||||
let definitelyRealId: String = "055000000000000000000000000000000000000000000000000000000000000000"
|
||||
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray
|
||||
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray.nullTerminated()
|
||||
var oneToOne1: convo_info_volatile_1to1 = convo_info_volatile_1to1()
|
||||
expect(convo_info_volatile_get_1to1(conf, &oneToOne1, &cDefinitelyRealId)).to(beFalse())
|
||||
expect(convo_info_volatile_size(conf)).to(equal(0))
|
||||
|
@ -67,7 +67,7 @@ class ConfigConvoInfoVolatileSpec {
|
|||
expect(config_needs_dump(conf)).to(beTrue())
|
||||
|
||||
let openGroupBaseUrl: String = "http://Example.ORG:5678"
|
||||
var cOpenGroupBaseUrl: [CChar] = openGroupBaseUrl.cArray
|
||||
var cOpenGroupBaseUrl: [CChar] = openGroupBaseUrl.cArray.nullTerminated()
|
||||
let openGroupBaseUrlResult: String = openGroupBaseUrl.lowercased()
|
||||
// ("http://Example.ORG:5678"
|
||||
// .lowercased()
|
||||
|
@ -75,7 +75,7 @@ class ConfigConvoInfoVolatileSpec {
|
|||
// [CChar](repeating: 0, count: (268 - openGroupBaseUrl.count))
|
||||
// )
|
||||
let openGroupRoom: String = "SudokuRoom"
|
||||
var cOpenGroupRoom: [CChar] = openGroupRoom.cArray
|
||||
var cOpenGroupRoom: [CChar] = openGroupRoom.cArray.nullTerminated()
|
||||
let openGroupRoomResult: String = openGroupRoom.lowercased()
|
||||
// ("SudokuRoom"
|
||||
// .lowercased()
|
||||
|
@ -97,12 +97,12 @@ class ConfigConvoInfoVolatileSpec {
|
|||
|
||||
// We don't need to push since we haven't changed anything, so this call is mainly just for
|
||||
// testing:
|
||||
var pushData1: UnsafeMutablePointer<config_push_data> = config_push(conf)
|
||||
let pushData1: UnsafeMutablePointer<config_push_data> = config_push(conf)
|
||||
expect(pushData1.pointee.seqno).to(equal(1))
|
||||
|
||||
// Pretend we uploaded it
|
||||
let fakeHash1: String = "fakehash1"
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData1.pointee.seqno, &cFakeHash1)
|
||||
expect(config_needs_dump(conf)).to(beTrue())
|
||||
expect(config_needs_push(conf)).to(beFalse())
|
||||
|
@ -136,26 +136,26 @@ class ConfigConvoInfoVolatileSpec {
|
|||
community2.unread = true
|
||||
|
||||
let anotherId: String = "051111111111111111111111111111111111111111111111111111111111111111"
|
||||
var cAnotherId: [CChar] = anotherId.cArray
|
||||
var cAnotherId: [CChar] = anotherId.cArray.nullTerminated()
|
||||
var oneToOne5: convo_info_volatile_1to1 = convo_info_volatile_1to1()
|
||||
expect(convo_info_volatile_get_or_construct_1to1(conf2, &oneToOne5, &cAnotherId)).to(beTrue())
|
||||
oneToOne5.unread = true
|
||||
convo_info_volatile_set_1to1(conf2, &oneToOne5)
|
||||
|
||||
let thirdId: String = "05cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
|
||||
var cThirdId: [CChar] = thirdId.cArray
|
||||
var cThirdId: [CChar] = thirdId.cArray.nullTerminated()
|
||||
var legacyGroup2: convo_info_volatile_legacy_group = convo_info_volatile_legacy_group()
|
||||
expect(convo_info_volatile_get_or_construct_legacy_group(conf2, &legacyGroup2, &cThirdId)).to(beTrue())
|
||||
legacyGroup2.last_read = (nowTimestampMs - 50)
|
||||
convo_info_volatile_set_legacy_group(conf2, &legacyGroup2)
|
||||
expect(config_needs_push(conf2)).to(beTrue())
|
||||
|
||||
var pushData2: UnsafeMutablePointer<config_push_data> = config_push(conf2)
|
||||
let pushData2: UnsafeMutablePointer<config_push_data> = config_push(conf2)
|
||||
expect(pushData2.pointee.seqno).to(equal(2))
|
||||
|
||||
// Check the merging
|
||||
let fakeHash2: String = "fakehash2"
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray.nullTerminated()
|
||||
var mergeHashes: [UnsafePointer<CChar>?] = [cFakeHash2].unsafeCopy()
|
||||
var mergeData: [UnsafePointer<UInt8>?] = [UnsafePointer(pushData2.pointee.config)]
|
||||
var mergeSize: [Int] = [pushData2.pointee.config_len]
|
||||
|
@ -203,7 +203,7 @@ class ConfigConvoInfoVolatileSpec {
|
|||
}
|
||||
|
||||
let fourthId: String = "052000000000000000000000000000000000000000000000000000000000000000"
|
||||
var cFourthId: [CChar] = fourthId.cArray
|
||||
var cFourthId: [CChar] = fourthId.cArray.nullTerminated()
|
||||
expect(config_needs_push(conf)).to(beFalse())
|
||||
convo_info_volatile_erase_1to1(conf, &cFourthId)
|
||||
expect(config_needs_push(conf)).to(beFalse())
|
||||
|
|
|
@ -84,6 +84,8 @@ class ConfigUserGroupsSpec {
|
|||
}
|
||||
|
||||
it("generates UserGroup configs correctly") {
|
||||
let createdTs: Int64 = 1680064059
|
||||
let nowTs: Int64 = Int64(Date().timeIntervalSince1970)
|
||||
let seed: Data = Data(hex: "0123456789abcdef0123456789abcdef")
|
||||
|
||||
// FIXME: Would be good to move these into the libSession-util instead of using Sodium separately
|
||||
|
@ -103,7 +105,7 @@ class ConfigUserGroupsSpec {
|
|||
|
||||
// Empty contacts shouldn't have an existing contact
|
||||
let definitelyRealId: String = "055000000000000000000000000000000000000000000000000000000000000000"
|
||||
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray
|
||||
var cDefinitelyRealId: [CChar] = definitelyRealId.cArray.nullTerminated()
|
||||
let legacyGroup1: UnsafeMutablePointer<ugroups_legacy_group_info>? = user_groups_get_legacy_group(conf, &cDefinitelyRealId)
|
||||
expect(legacyGroup1?.pointee).to(beNil())
|
||||
expect(user_groups_size(conf)).to(equal(0))
|
||||
|
@ -112,12 +114,14 @@ class ConfigUserGroupsSpec {
|
|||
expect(legacyGroup2.pointee).toNot(beNil())
|
||||
expect(String(libSessionVal: legacyGroup2.pointee.session_id))
|
||||
.to(equal(definitelyRealId))
|
||||
expect(legacyGroup2.pointee.hidden).to(beFalse())
|
||||
expect(legacyGroup2.pointee.disappearing_timer).to(equal(0))
|
||||
expect(String(libSessionVal: legacyGroup2.pointee.enc_pubkey, fixedLength: 32)).to(equal(""))
|
||||
expect(String(libSessionVal: legacyGroup2.pointee.enc_seckey, fixedLength: 32)).to(equal(""))
|
||||
expect(legacyGroup2.pointee.priority).to(equal(0))
|
||||
expect(String(libSessionVal: legacyGroup2.pointee.name)).to(equal(""))
|
||||
expect(legacyGroup2.pointee.joined_at).to(equal(0))
|
||||
expect(legacyGroup2.pointee.notifications).to(equal(CONVO_NOTIFY_DEFAULT))
|
||||
expect(legacyGroup2.pointee.mute_until).to(equal(0))
|
||||
|
||||
// Iterate through and make sure we got everything we expected
|
||||
var membersSeen1: [String: Bool] = [:]
|
||||
|
@ -155,9 +159,12 @@ class ConfigUserGroupsSpec {
|
|||
"055555555555555555555555555555555555555555555555555555555555555555",
|
||||
"056666666666666666666666666666666666666666666666666666666666666666"
|
||||
]
|
||||
var cUsers: [[CChar]] = users.map { $0.cArray }
|
||||
var cUsers: [[CChar]] = users.map { $0.cArray.nullTerminated() }
|
||||
legacyGroup2.pointee.name = "Englishmen".toLibSession()
|
||||
legacyGroup2.pointee.disappearing_timer = 60
|
||||
legacyGroup2.pointee.joined_at = createdTs
|
||||
legacyGroup2.pointee.notifications = CONVO_NOTIFY_ALL
|
||||
legacyGroup2.pointee.mute_until = (nowTs + 3600)
|
||||
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[0], false)).to(beTrue())
|
||||
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[1], true)).to(beTrue())
|
||||
expect(ugroups_legacy_member_add(legacyGroup2, &cUsers[2], false)).to(beTrue())
|
||||
|
@ -216,8 +223,8 @@ class ConfigUserGroupsSpec {
|
|||
|
||||
let communityPubkey: String = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||
var cCommunityPubkey: [UInt8] = Data(hex: communityPubkey).cArray
|
||||
var cCommunityBaseUrl: [CChar] = "http://Example.ORG:5678".cArray
|
||||
var cCommunityRoom: [CChar] = "SudokuRoom".cArray
|
||||
var cCommunityBaseUrl: [CChar] = "http://Example.ORG:5678".cArray.nullTerminated()
|
||||
var cCommunityRoom: [CChar] = "SudokuRoom".cArray.nullTerminated()
|
||||
var community1: ugroups_community_info = ugroups_community_info()
|
||||
expect(user_groups_get_or_construct_community(conf, &community1, &cCommunityBaseUrl, &cCommunityRoom, &cCommunityPubkey))
|
||||
.to(beTrue())
|
||||
|
@ -240,7 +247,7 @@ class ConfigUserGroupsSpec {
|
|||
|
||||
// Pretend we uploaded it
|
||||
let fakeHash1: String = "fakehash1"
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash1)
|
||||
expect(config_needs_dump(conf)).to(beTrue())
|
||||
expect(config_needs_push(conf)).to(beFalse())
|
||||
|
@ -294,9 +301,11 @@ class ConfigUserGroupsSpec {
|
|||
expect(String(libSessionVal: legacyGroup4?.pointee.enc_seckey, fixedLength: 32)).to(equal(""))
|
||||
expect(legacyGroup4?.pointee.disappearing_timer).to(equal(60))
|
||||
expect(String(libSessionVal: legacyGroup4?.pointee.session_id)).to(equal(definitelyRealId))
|
||||
expect(legacyGroup4?.pointee.hidden).to(beFalse())
|
||||
expect(legacyGroup4?.pointee.priority).to(equal(3))
|
||||
expect(String(libSessionVal: legacyGroup4?.pointee.name)).to(equal("Englishmen"))
|
||||
expect(legacyGroup4?.pointee.joined_at).to(equal(createdTs))
|
||||
expect(legacyGroup2.pointee.notifications).to(equal(CONVO_NOTIFY_ALL))
|
||||
expect(legacyGroup2.pointee.mute_until).to(equal(nowTs + 3600))
|
||||
|
||||
var membersSeen3: [String: Bool] = [:]
|
||||
var memberSessionId3: UnsafePointer<CChar>? = nil
|
||||
|
@ -357,8 +366,8 @@ class ConfigUserGroupsSpec {
|
|||
]))
|
||||
}
|
||||
|
||||
var cCommunity2BaseUrl: [CChar] = "http://example.org:5678".cArray
|
||||
var cCommunity2Room: [CChar] = "sudokuRoom".cArray
|
||||
var cCommunity2BaseUrl: [CChar] = "http://example.org:5678".cArray.nullTerminated()
|
||||
var cCommunity2Room: [CChar] = "sudokuRoom".cArray.nullTerminated()
|
||||
var community2: ugroups_community_info = ugroups_community_info()
|
||||
expect(user_groups_get_community(conf2, &community2, &cCommunity2BaseUrl, &cCommunity2Room))
|
||||
.to(beTrue())
|
||||
|
@ -383,7 +392,7 @@ class ConfigUserGroupsSpec {
|
|||
expect(config_needs_dump(conf2)).to(beTrue())
|
||||
|
||||
let fakeHash2: String = "fakehash2"
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray.nullTerminated()
|
||||
let pushData7: UnsafeMutablePointer<config_push_data> = config_push(conf2)
|
||||
expect(pushData7.pointee.seqno).to(equal(2))
|
||||
config_confirm_pushed(conf2, pushData7.pointee.seqno, &cFakeHash2)
|
||||
|
@ -413,8 +422,8 @@ class ConfigUserGroupsSpec {
|
|||
expect(config_merge(conf, &mergeHashes1, &mergeData1, &mergeSize1, 1)).to(equal(1))
|
||||
pushData8.deallocate()
|
||||
|
||||
var cCommunity3BaseUrl: [CChar] = "http://example.org:5678".cArray
|
||||
var cCommunity3Room: [CChar] = "SudokuRoom".cArray
|
||||
var cCommunity3BaseUrl: [CChar] = "http://example.org:5678".cArray.nullTerminated()
|
||||
var cCommunity3Room: [CChar] = "SudokuRoom".cArray.nullTerminated()
|
||||
var community3: ugroups_community_info = ugroups_community_info()
|
||||
expect(user_groups_get_community(conf, &community3, &cCommunity3BaseUrl, &cCommunity3Room))
|
||||
.to(beTrue())
|
||||
|
@ -442,12 +451,12 @@ class ConfigUserGroupsSpec {
|
|||
expect(config_needs_push(conf2)).to(beTrue())
|
||||
expect(config_needs_dump(conf2)).to(beTrue())
|
||||
|
||||
var cCommunity4BaseUrl: [CChar] = "http://exAMple.ORG:5678".cArray
|
||||
var cCommunity4Room: [CChar] = "sudokuROOM".cArray
|
||||
var cCommunity4BaseUrl: [CChar] = "http://exAMple.ORG:5678".cArray.nullTerminated()
|
||||
var cCommunity4Room: [CChar] = "sudokuROOM".cArray.nullTerminated()
|
||||
user_groups_erase_community(conf2, &cCommunity4BaseUrl, &cCommunity4Room)
|
||||
|
||||
let fakeHash3: String = "fakehash3"
|
||||
var cFakeHash3: [CChar] = fakeHash3.cArray
|
||||
var cFakeHash3: [CChar] = fakeHash3.cArray.nullTerminated()
|
||||
let pushData10: UnsafeMutablePointer<config_push_data> = config_push(conf2)
|
||||
config_confirm_pushed(conf2, pushData10.pointee.seqno, &cFakeHash3)
|
||||
|
||||
|
@ -470,13 +479,13 @@ class ConfigUserGroupsSpec {
|
|||
expect(user_groups_size_legacy_groups(conf)).to(equal(1))
|
||||
|
||||
var prio: Int32 = 0
|
||||
var cBeanstalkBaseUrl: [CChar] = "http://jacksbeanstalk.org".cArray
|
||||
var cBeanstalkBaseUrl: [CChar] = "http://jacksbeanstalk.org".cArray.nullTerminated()
|
||||
var cBeanstalkPubkey: [UInt8] = Data(
|
||||
hex: "0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff"
|
||||
).cArray
|
||||
|
||||
["fee", "fi", "fo", "fum"].forEach { room in
|
||||
var cRoom: [CChar] = room.cArray
|
||||
var cRoom: [CChar] = room.cArray.nullTerminated()
|
||||
prio += 1
|
||||
|
||||
var community4: ugroups_community_info = ugroups_community_info()
|
||||
|
@ -491,7 +500,7 @@ class ConfigUserGroupsSpec {
|
|||
expect(user_groups_size_legacy_groups(conf)).to(equal(1))
|
||||
|
||||
let fakeHash4: String = "fakehash4"
|
||||
var cFakeHash4: [CChar] = fakeHash4.cArray
|
||||
var cFakeHash4: [CChar] = fakeHash4.cArray.nullTerminated()
|
||||
let pushData11: UnsafeMutablePointer<config_push_data> = config_push(conf)
|
||||
config_confirm_pushed(conf, pushData11.pointee.seqno, &cFakeHash4)
|
||||
expect(pushData11.pointee.seqno).to(equal(4))
|
||||
|
@ -500,11 +509,11 @@ class ConfigUserGroupsSpec {
|
|||
|
||||
// Load some obsolete ones in just to check that they get immediately obsoleted
|
||||
let fakeHash10: String = "fakehash10"
|
||||
let cFakeHash10: [CChar] = fakeHash10.cArray
|
||||
let cFakeHash10: [CChar] = fakeHash10.cArray.nullTerminated()
|
||||
let fakeHash11: String = "fakehash11"
|
||||
let cFakeHash11: [CChar] = fakeHash11.cArray
|
||||
let cFakeHash11: [CChar] = fakeHash11.cArray.nullTerminated()
|
||||
let fakeHash12: String = "fakehash12"
|
||||
let cFakeHash12: [CChar] = fakeHash12.cArray
|
||||
let cFakeHash12: [CChar] = fakeHash12.cArray.nullTerminated()
|
||||
var mergeHashes3: [UnsafePointer<CChar>?] = [cFakeHash10, cFakeHash11, cFakeHash12, cFakeHash4].unsafeCopy()
|
||||
var mergeData3: [UnsafePointer<UInt8>?] = [
|
||||
UnsafePointer(pushData10.pointee.config),
|
||||
|
|
|
@ -204,7 +204,7 @@ class ConfigUserProfileSpec {
|
|||
|
||||
// So now imagine we got back confirmation from the swarm that the push has been stored:
|
||||
let fakeHash1: String = "fakehash1"
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray
|
||||
var cFakeHash1: [CChar] = fakeHash1.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData2.pointee.seqno, &cFakeHash1)
|
||||
pushData2.deallocate()
|
||||
|
||||
|
@ -289,13 +289,13 @@ class ConfigUserProfileSpec {
|
|||
expect(config_needs_push(conf2)).to(beTrue())
|
||||
|
||||
let fakeHash2: String = "fakehash2"
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray
|
||||
var cFakeHash2: [CChar] = fakeHash2.cArray.nullTerminated()
|
||||
let pushData3: UnsafeMutablePointer<config_push_data> = config_push(conf)
|
||||
expect(pushData3.pointee.seqno).to(equal(2)) // incremented, since we made a field change
|
||||
config_confirm_pushed(conf, pushData3.pointee.seqno, &cFakeHash2)
|
||||
|
||||
let fakeHash3: String = "fakehash3"
|
||||
var cFakeHash3: [CChar] = fakeHash3.cArray
|
||||
var cFakeHash3: [CChar] = fakeHash3.cArray.nullTerminated()
|
||||
let pushData4: UnsafeMutablePointer<config_push_data> = config_push(conf2)
|
||||
expect(pushData4.pointee.seqno).to(equal(2)) // incremented, since we made a field change
|
||||
config_confirm_pushed(conf, pushData4.pointee.seqno, &cFakeHash3)
|
||||
|
@ -365,9 +365,9 @@ class ConfigUserProfileSpec {
|
|||
expect(user_profile_get_nts_priority(conf2)).to(equal(9))
|
||||
|
||||
let fakeHash4: String = "fakehash4"
|
||||
var cFakeHash4: [CChar] = fakeHash4.cArray
|
||||
var cFakeHash4: [CChar] = fakeHash4.cArray.nullTerminated()
|
||||
let fakeHash5: String = "fakehash5"
|
||||
var cFakeHash5: [CChar] = fakeHash5.cArray
|
||||
var cFakeHash5: [CChar] = fakeHash5.cArray.nullTerminated()
|
||||
config_confirm_pushed(conf, pushData5.pointee.seqno, &cFakeHash4)
|
||||
config_confirm_pushed(conf2, pushData6.pointee.seqno, &cFakeHash5)
|
||||
pushData5.deallocate()
|
||||
|
|
|
@ -49,10 +49,10 @@ class LibSessionTypeConversionUtilitiesSpec: QuickSpec {
|
|||
it("returns a string when valid") {
|
||||
let test: [CChar] = [84, 101, 115, 116]
|
||||
let result = test.withUnsafeBufferPointer { ptr in
|
||||
String(pointer: UnsafeRawPointer(ptr.baseAddress), length: 5)
|
||||
String(pointer: UnsafeRawPointer(ptr.baseAddress), length: 4)
|
||||
}
|
||||
|
||||
expect(result).to(equal("Test\0"))
|
||||
expect(result).to(equal("Test"))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -186,7 +186,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("generates the correct request") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -221,7 +221,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("retrieves recent messages if there was no last message") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -250,7 +250,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -279,7 +279,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -308,7 +308,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -340,7 +340,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does not call the inbox and outbox endpoints") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -439,7 +439,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("includes the inbox and outbox endpoints") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -466,7 +466,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("retrieves recent inbox messages if there was no last message") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -495,7 +495,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -519,7 +519,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("retrieves recent outbox messages if there was no last message") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -548,7 +548,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -609,7 +609,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -639,7 +639,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("errors when no data is returned") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -668,7 +668,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -697,7 +697,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -726,7 +726,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -787,7 +787,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.poll(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -825,7 +825,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Capabilities)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.capabilities(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -895,7 +895,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: [OpenGroupAPI.Room])?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.rooms(
|
||||
db,
|
||||
server: "testserver",
|
||||
|
@ -986,7 +986,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: OpenGroupAPI.CapabilitiesAndRoomResponse?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.capabilitiesAndRoom(
|
||||
db,
|
||||
for: "testRoom",
|
||||
|
@ -1041,7 +1041,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: OpenGroupAPI.CapabilitiesAndRoomResponse?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.capabilitiesAndRoom(
|
||||
db,
|
||||
|
@ -1113,7 +1113,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: OpenGroupAPI.CapabilitiesAndRoomResponse?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.capabilitiesAndRoom(
|
||||
db,
|
||||
|
@ -1202,7 +1202,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: OpenGroupAPI.CapabilitiesAndRoomResponse?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI.capabilitiesAndRoom(
|
||||
db,
|
||||
for: "testRoom",
|
||||
|
@ -1261,7 +1261,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1306,7 +1306,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1346,7 +1346,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1381,7 +1381,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1414,7 +1414,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1454,7 +1454,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1494,7 +1494,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1529,7 +1529,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1570,7 +1570,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -1623,7 +1623,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.Message)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.message(
|
||||
db,
|
||||
|
@ -1675,7 +1675,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1716,7 +1716,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1755,7 +1755,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1789,7 +1789,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1821,7 +1821,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1860,7 +1860,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1899,7 +1899,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1933,7 +1933,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -1973,7 +1973,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageUpdate(
|
||||
db,
|
||||
|
@ -2010,7 +2010,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: Data?)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messageDelete(
|
||||
db,
|
||||
|
@ -2054,7 +2054,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("generates the request and handles the response correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.messagesDeleteAll(
|
||||
db,
|
||||
|
@ -2094,7 +2094,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: ResponseInfoType?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.pinMessage(
|
||||
db,
|
||||
|
@ -2132,7 +2132,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: ResponseInfoType?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.unpinMessage(
|
||||
db,
|
||||
|
@ -2170,7 +2170,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: ResponseInfoType?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.unpinAll(
|
||||
db,
|
||||
|
@ -2209,7 +2209,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.uploadFile(
|
||||
db,
|
||||
|
@ -2245,7 +2245,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.uploadFile(
|
||||
db,
|
||||
|
@ -2281,7 +2281,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.uploadFile(
|
||||
db,
|
||||
|
@ -2319,7 +2319,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestApi.self)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.downloadFile(
|
||||
db,
|
||||
|
@ -2376,7 +2376,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
var response: (info: ResponseInfoType, data: OpenGroupAPI.SendDirectMessageResponse)?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.send(
|
||||
db,
|
||||
|
@ -2425,7 +2425,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("generates the request and handles the response correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userBan(
|
||||
db,
|
||||
|
@ -2455,7 +2455,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does a global ban if no room tokens are provided") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userBan(
|
||||
db,
|
||||
|
@ -2487,7 +2487,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does room specific bans if room tokens are provided") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userBan(
|
||||
db,
|
||||
|
@ -2534,7 +2534,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("generates the request and handles the response correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userUnban(
|
||||
db,
|
||||
|
@ -2563,7 +2563,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does a global ban if no room tokens are provided") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userUnban(
|
||||
db,
|
||||
|
@ -2594,7 +2594,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does room specific bans if room tokens are provided") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userUnban(
|
||||
db,
|
||||
|
@ -2640,7 +2640,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("generates the request and handles the response correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userModeratorUpdate(
|
||||
db,
|
||||
|
@ -2672,7 +2672,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does a global update if no room tokens are provided") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userModeratorUpdate(
|
||||
db,
|
||||
|
@ -2706,7 +2706,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("does room specific updates if room tokens are provided") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userModeratorUpdate(
|
||||
db,
|
||||
|
@ -2740,7 +2740,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("fails if neither moderator or admin are set") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userModeratorUpdate(
|
||||
db,
|
||||
|
@ -2804,7 +2804,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("generates the request and handles the response correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userBanAndDeleteAllMessages(
|
||||
db,
|
||||
|
@ -2833,7 +2833,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("bans the user from the specified room rather than globally") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.userBanAndDeleteAllMessages(
|
||||
db,
|
||||
|
@ -2890,7 +2890,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -2917,7 +2917,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -2944,7 +2944,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
}
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -2975,7 +2975,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("signs correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -3011,7 +3011,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
mockSign.when { $0.signature(message: anyArray(), secretKey: anyArray()) }.thenReturn(nil)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -3044,7 +3044,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
|
||||
it("signs correctly") {
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -3081,7 +3081,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
.thenReturn(nil)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
@ -3108,7 +3108,7 @@ class OpenGroupAPISpec: QuickSpec {
|
|||
.thenReturn(nil)
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { db in
|
||||
.readPublisherFlatMap { db in
|
||||
OpenGroupAPI
|
||||
.rooms(
|
||||
db,
|
||||
|
|
|
@ -815,7 +815,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var didComplete: Bool = false // Prevent multi-threading test bugs
|
||||
|
||||
mockStorage
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
.writePublisherFlatMap { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
openGroupManager
|
||||
.add(
|
||||
db,
|
||||
|
@ -846,7 +846,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var didComplete: Bool = false // Prevent multi-threading test bugs
|
||||
|
||||
mockStorage
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
.writePublisherFlatMap { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
openGroupManager
|
||||
.add(
|
||||
db,
|
||||
|
@ -883,7 +883,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var didComplete: Bool = false // Prevent multi-threading test bugs
|
||||
|
||||
mockStorage
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
.writePublisherFlatMap { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
openGroupManager
|
||||
.add(
|
||||
db,
|
||||
|
@ -939,7 +939,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var error: Error?
|
||||
|
||||
mockStorage
|
||||
.writePublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
.writePublisherFlatMap { (db: Database) -> AnyPublisher<Void, Error> in
|
||||
openGroupManager
|
||||
.add(
|
||||
db,
|
||||
|
@ -3598,7 +3598,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
|
||||
var result: Data?
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3617,7 +3617,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
it("does not save the fetched image to storage") {
|
||||
var didComplete: Bool = false
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3648,7 +3648,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
it("does not update the image update timestamp") {
|
||||
var didComplete: Bool = false
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3690,7 +3690,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
dependencies = dependencies.with(onionApi: TestNeverReturningApi.self)
|
||||
|
||||
let publisher = mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3716,7 +3716,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var result: Data?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3736,7 +3736,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var didComplete: Bool = false
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3768,7 +3768,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var didComplete: Bool = false
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3816,7 +3816,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var result: Data?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
@ -3846,7 +3846,7 @@ class OpenGroupManagerSpec: QuickSpec {
|
|||
var result: Data?
|
||||
|
||||
mockStorage
|
||||
.readPublisherFlatMap(receiveOn: DispatchQueue.main) { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
.readPublisherFlatMap { (db: Database) -> AnyPublisher<Data, Error> in
|
||||
OpenGroupManager
|
||||
.roomImage(
|
||||
db,
|
||||
|
|
|
@ -189,7 +189,7 @@ final class ThreadPickerVC: UIViewController, UITableViewDataSource, UITableView
|
|||
NotificationCenter.default.post(name: Database.resumeNotification, object: self)
|
||||
|
||||
Storage.shared
|
||||
.writePublisher(receiveOn: DispatchQueue.global(qos: .userInitiated)) { db -> MessageSender.PreparedSendData in
|
||||
.writePublisher { db -> MessageSender.PreparedSendData in
|
||||
guard
|
||||
let threadVariant: SessionThread.Variant = try SessionThread
|
||||
.filter(id: threadId)
|
||||
|
@ -253,6 +253,7 @@ final class ThreadPickerVC: UIViewController, UITableViewDataSource, UITableView
|
|||
threadVariant: threadVariant
|
||||
)
|
||||
}
|
||||
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
|
||||
.flatMap {
|
||||
MessageSender.performUploadsIfNeeded(
|
||||
queue: DispatchQueue.global(qos: .userInitiated),
|
||||
|
|
|
@ -23,7 +23,10 @@ public enum GetSnodePoolJob: JobExecutor {
|
|||
// to block if we have no Snode pool and prevent other jobs from failing but avoids having to
|
||||
// wait if we already have a potentially valid snode pool
|
||||
guard !SnodeAPI.hasCachedSnodesInclusingExpired() else {
|
||||
SnodeAPI.getSnodePool().sinkUntilComplete()
|
||||
SnodeAPI
|
||||
.getSnodePool()
|
||||
.subscribe(on: DispatchQueue.global(qos: .default))
|
||||
.sinkUntilComplete()
|
||||
success(job, false)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -169,15 +169,16 @@ public final class SnodeAPI {
|
|||
.catch { _ in getSnodePoolFromSeedNode() }
|
||||
.eraseToAnyPublisher()
|
||||
}()
|
||||
|
||||
getSnodePoolPublisher.mutate { $0 = publisher }
|
||||
|
||||
/// Actually assign the atomic value
|
||||
result = publisher
|
||||
|
||||
return publisher
|
||||
.tryFlatMap { snodePool -> AnyPublisher<Set<Snode>, Error> in
|
||||
guard !snodePool.isEmpty else { throw SnodeAPIError.snodePoolUpdatingFailed }
|
||||
|
||||
return Storage.shared
|
||||
.writePublisher(receiveOn: Threading.workQueue) { db in
|
||||
.writePublisher { db in
|
||||
db[.lastSnodePoolRefreshDate] = now
|
||||
setSnodePool(db, to: snodePool)
|
||||
|
||||
|
|
|
@ -98,6 +98,13 @@ open class Storage {
|
|||
|
||||
// MARK: - Migrations
|
||||
|
||||
public static func appliedMigrationIdentifiers(_ db: Database) -> Set<String> {
|
||||
let migrator: DatabaseMigrator = DatabaseMigrator()
|
||||
|
||||
return (try? migrator.appliedIdentifiers(db))
|
||||
.defaulting(to: [])
|
||||
}
|
||||
|
||||
public func perform(
|
||||
migrations: [TargetMigrations],
|
||||
async: Bool = true,
|
||||
|
@ -336,30 +343,50 @@ open class Storage {
|
|||
)
|
||||
}
|
||||
|
||||
open func writePublisher<S, T>(
|
||||
receiveOn scheduler: S,
|
||||
open func writePublisher<T>(
|
||||
updates: @escaping (Database) throws -> T
|
||||
) -> AnyPublisher<T, Error> where S: Scheduler {
|
||||
) -> AnyPublisher<T, Error> {
|
||||
guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
|
||||
return Fail<T, Error>(error: StorageError.databaseInvalid)
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
return dbWriter.writePublisher(receiveOn: scheduler, updates: updates)
|
||||
.eraseToAnyPublisher()
|
||||
/// **Note:** GRDB does have a `writePublisher` method but it appears to asynchronously trigger
|
||||
/// both the `output` and `complete` closures at the same time which causes a lot of unexpected
|
||||
/// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code
|
||||
/// for more information see https://github.com/groue/GRDB.swift/issues/1334)
|
||||
///
|
||||
/// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduled
|
||||
/// which behaves in a much more expected way than the GRDB `writePublisher` does
|
||||
return Deferred {
|
||||
Future { resolver in
|
||||
do { resolver(Result.success(try dbWriter.write(updates))) }
|
||||
catch { resolver(Result.failure(error)) }
|
||||
}
|
||||
}.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
open func readPublisher<S, T>(
|
||||
receiveOn scheduler: S,
|
||||
open func readPublisher<T>(
|
||||
value: @escaping (Database) throws -> T
|
||||
) -> AnyPublisher<T, Error> where S: Scheduler {
|
||||
) -> AnyPublisher<T, Error> {
|
||||
guard isValid, let dbWriter: DatabaseWriter = dbWriter else {
|
||||
return Fail<T, Error>(error: StorageError.databaseInvalid)
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
return dbWriter.readPublisher(receiveOn: scheduler, value: value)
|
||||
.eraseToAnyPublisher()
|
||||
/// **Note:** GRDB does have a `readPublisher` method but it appears to asynchronously trigger
|
||||
/// both the `output` and `complete` closures at the same time which causes a lot of unexpected
|
||||
/// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code
|
||||
/// for more information see https://github.com/groue/GRDB.swift/issues/1334)
|
||||
///
|
||||
/// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduled
|
||||
/// which behaves in a much more expected way than the GRDB `readPublisher` does
|
||||
return Deferred {
|
||||
Future { resolver in
|
||||
do { resolver(Result.success(try dbWriter.read(value))) }
|
||||
catch { resolver(Result.failure(error)) }
|
||||
}
|
||||
}.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
@discardableResult public final func read<T>(_ value: (Database) throws -> T?) -> T? {
|
||||
|
@ -423,20 +450,18 @@ open class Storage {
|
|||
// MARK: - Combine Extensions
|
||||
|
||||
public extension Storage {
|
||||
func readPublisherFlatMap<S, T>(
|
||||
receiveOn scheduler: S,
|
||||
func readPublisherFlatMap<T>(
|
||||
value: @escaping (Database) throws -> AnyPublisher<T, Error>
|
||||
) -> AnyPublisher<T, Error> where S: Scheduler {
|
||||
return readPublisher(receiveOn: scheduler, value: value)
|
||||
) -> AnyPublisher<T, Error> {
|
||||
return readPublisher(value: value)
|
||||
.flatMap { resultPublisher -> AnyPublisher<T, Error> in resultPublisher }
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
func writePublisherFlatMap<S, T>(
|
||||
receiveOn scheduler: S,
|
||||
func writePublisherFlatMap<T>(
|
||||
updates: @escaping (Database) throws -> AnyPublisher<T, Error>
|
||||
) -> AnyPublisher<T, Error> where S: Scheduler {
|
||||
return writePublisher(receiveOn: scheduler, updates: updates)
|
||||
) -> AnyPublisher<T, Error> {
|
||||
return writePublisher(updates: updates)
|
||||
.flatMap { resultPublisher -> AnyPublisher<T, Error> in resultPublisher }
|
||||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
|
|
@ -4,8 +4,9 @@
|
|||
|
||||
#import "OWSViewController.h"
|
||||
#import "UIView+OWS.h"
|
||||
#import <SessionUIKit/SessionUIKit.h>
|
||||
#import "AppContext.h"
|
||||
#import <PureLayout/PureLayout.h>
|
||||
#import <SessionUIKit/SessionUIKit.h>
|
||||
#import <SignalCoreKit/OWSAsserts.h>
|
||||
|
||||
NS_ASSUME_NONNULL_BEGIN
|
||||
|
|
|
@ -5,10 +5,9 @@ import GRDB
|
|||
import SessionUtilitiesKit
|
||||
|
||||
class SynchronousStorage: Storage {
|
||||
override func readPublisher<S, T>(
|
||||
receiveOn scheduler: S,
|
||||
override func readPublisher<T>(
|
||||
value: @escaping (Database) throws -> T
|
||||
) -> AnyPublisher<T, Error> where S: Scheduler {
|
||||
) -> AnyPublisher<T, Error> {
|
||||
guard let result: T = super.read(value) else {
|
||||
return Fail(error: StorageError.generic)
|
||||
.eraseToAnyPublisher()
|
||||
|
@ -19,10 +18,9 @@ class SynchronousStorage: Storage {
|
|||
.eraseToAnyPublisher()
|
||||
}
|
||||
|
||||
override func writePublisher<S, T>(
|
||||
receiveOn scheduler: S,
|
||||
override func writePublisher<T>(
|
||||
updates: @escaping (Database) throws -> T
|
||||
) -> AnyPublisher<T, Error> where S: Scheduler {
|
||||
) -> AnyPublisher<T, Error> {
|
||||
guard let result: T = super.write(updates: updates) else {
|
||||
return Fail(error: StorageError.generic)
|
||||
.eraseToAnyPublisher()
|
||||
|
|
Loading…
Reference in New Issue