Reworked SnodeAPI auth and prepared requests

Started refactoring some logic to make it easier to unit test
Started updating SnodeAPI calls with prepared requests
Fixed some issues with the PreparedRequest 'map' and 'handleEvents' functions
Genericised common pre-request behaviours for SnodeAPI calls
Morgan Pretty 2023-09-07 08:53:37 +10:00
parent 5d9a2335ba
commit b31afa89e1
99 changed files with 3178 additions and 2644 deletions
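
To make the refactor easier to follow, here is a minimal, hypothetical sketch of the prepared-request pattern this commit moves the SnodeAPI/OpenGroupAPI call sites onto: a request is prepared first (typically inside a database read or write), then 'map' and 'handleEvents' are chained onto it before a single send call. All types below are simplified stand-ins and not the real HTTP.PreparedRequest API, which also carries the URLRequest, response type, retry count and timeout, and whose send(using:) returns a Combine publisher.

// Minimal, hypothetical stand-in for the prepared-request pattern used throughout this commit.
struct PreparedRequest<Response> {
    let perform: () throws -> Response

    // Transform the response type without sending anything yet.
    func map<T>(_ transform: @escaping (Response) -> T) -> PreparedRequest<T> {
        let previous = perform
        return PreparedRequest<T>(perform: { transform(try previous()) })
    }

    // Attach side effects that run once the request eventually produces output.
    func handleEvents(receiveOutput: @escaping (Response) -> Void) -> PreparedRequest<Response> {
        let previous = perform
        return PreparedRequest(perform: {
            let response = try previous()
            receiveOutput(response)
            return response
        })
    }

    // "Sending" is synchronous here purely for illustration.
    func send() throws -> Response { try perform() }
}

// Usage loosely mirroring the reaction-sending change in ConversationVC: prepare the
// request, chain a side effect on the returned sequence number, then erase to Void.
struct SeqNoResponse { let seqNo: Int64 }

let prepared: PreparedRequest<Void> = PreparedRequest(perform: { SeqNoResponse(seqNo: 42) })
    .handleEvents(receiveOutput: { response in
        print("Pending change confirmed with seqNo \(response.seqNo)")
    })
    .map { _ in () }

do { try prepared.send() } catch { print("send failed: \(error)") }
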

@ -1 +1 @@
Subproject commit 194f972d161a57dae07430f92eac44e95c208c84
Subproject commit bb7a2cfa1bbbfe94db7a69ffc4875cdf48330432

View File

@ -626,6 +626,15 @@
FD432437299DEA38008A0213 /* TypeConversion+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD432436299DEA38008A0213 /* TypeConversion+Utilities.swift */; };
FD43EE9D297A5190009C87C5 /* SessionUtil+UserGroups.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD43EE9C297A5190009C87C5 /* SessionUtil+UserGroups.swift */; };
FD43EE9F297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD43EE9E297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift */; };
FD47E0AB2AA68EEA00A55E41 /* Authentication.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0AA2AA68EEA00A55E41 /* Authentication.swift */; };
FD47E0AD2AA6918B00A55E41 /* Crypto+SessionSnodeKit.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0AC2AA6918B00A55E41 /* Crypto+SessionSnodeKit.swift */; };
FD47E0AF2AA692F400A55E41 /* JSONDecoder+Utilities.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0AE2AA692F400A55E41 /* JSONDecoder+Utilities.swift */; };
FD47E0B12AA6A05800A55E41 /* Authentication+SessionMessagingKit.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0B02AA6A05800A55E41 /* Authentication+SessionMessagingKit.swift */; };
FD47E0B32AA6D5A300A55E41 /* RequestTarget.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0B22AA6D5A300A55E41 /* RequestTarget.swift */; };
FD47E0B52AA6D7AA00A55E41 /* Request+SnodeAPI.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0B42AA6D7AA00A55E41 /* Request+SnodeAPI.swift */; };
FD47E0B82AA6E62600A55E41 /* Request+OpenGroupAPI.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0B62AA6E5FF00A55E41 /* Request+OpenGroupAPI.swift */; };
FD47E0BA2AA6EBA200A55E41 /* Request+PushNotificationAPI.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0B92AA6EBA200A55E41 /* Request+PushNotificationAPI.swift */; };
FD47E0C02AA83D7300A55E41 /* SwarmDrainBehaviour.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD47E0BF2AA83D7300A55E41 /* SwarmDrainBehaviour.swift */; };
FD4B200E283492210034334B /* InsetLockableTableView.swift in Sources */ = {isa = PBXBuildFile; fileRef = FD4B200D283492210034334B /* InsetLockableTableView.swift */; };
FD52090028AF6153006098F6 /* OWSBackgroundTask.m in Sources */ = {isa = PBXBuildFile; fileRef = C33FDC1B255A581F00E217F9 /* OWSBackgroundTask.m */; };
FD52090128AF61BA006098F6 /* OWSBackgroundTask.h in Headers */ = {isa = PBXBuildFile; fileRef = C33FDB38255A580B00E217F9 /* OWSBackgroundTask.h */; settings = {ATTRIBUTES = (Public, ); }; };
@ -781,7 +790,6 @@
FDB5DAEC2A95E2C2002C8721 /* PreparedRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB5DAEB2A95E2C2002C8721 /* PreparedRequest.swift */; };
FDB5DAEE2A95E462002C8721 /* BatchRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB5DAED2A95E462002C8721 /* BatchRequest.swift */; };
FDB5DAF32A96DD4F002C8721 /* PreparedRequest+OnionRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB5DAF22A96DD4F002C8721 /* PreparedRequest+OnionRequest.swift */; };
FDB5DAF52A9721A5002C8721 /* PreparedRequest+OpenGroup.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB5DAF42A9721A5002C8721 /* PreparedRequest+OpenGroup.swift */; };
FDB5DAFE2A981C43002C8721 /* SessionSnodeKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = C3C2A59F255385C100C340D1 /* SessionSnodeKit.framework */; platformFilter = ios; };
FDB5DB062A981C67002C8721 /* PreparedRequestOnionRequestsSpec.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDB5DB052A981C67002C8721 /* PreparedRequestOnionRequestsSpec.swift */; };
FDB5DB072A981F88002C8721 /* Mock.swift in Sources */ = {isa = PBXBuildFile; fileRef = FDC290A527D860CE005DAE71 /* Mock.swift */; };
@ -1821,6 +1829,15 @@
FD432436299DEA38008A0213 /* TypeConversion+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "TypeConversion+Utilities.swift"; sourceTree = "<group>"; };
FD43EE9C297A5190009C87C5 /* SessionUtil+UserGroups.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+UserGroups.swift"; sourceTree = "<group>"; };
FD43EE9E297E2EE0009C87C5 /* SessionUtil+ConvoInfoVolatile.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "SessionUtil+ConvoInfoVolatile.swift"; sourceTree = "<group>"; };
FD47E0AA2AA68EEA00A55E41 /* Authentication.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Authentication.swift; sourceTree = "<group>"; };
FD47E0AC2AA6918B00A55E41 /* Crypto+SessionSnodeKit.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Crypto+SessionSnodeKit.swift"; sourceTree = "<group>"; };
FD47E0AE2AA692F400A55E41 /* JSONDecoder+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "JSONDecoder+Utilities.swift"; sourceTree = "<group>"; };
FD47E0B02AA6A05800A55E41 /* Authentication+SessionMessagingKit.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Authentication+SessionMessagingKit.swift"; sourceTree = "<group>"; };
FD47E0B22AA6D5A300A55E41 /* RequestTarget.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RequestTarget.swift; sourceTree = "<group>"; };
FD47E0B42AA6D7AA00A55E41 /* Request+SnodeAPI.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Request+SnodeAPI.swift"; sourceTree = "<group>"; };
FD47E0B62AA6E5FF00A55E41 /* Request+OpenGroupAPI.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Request+OpenGroupAPI.swift"; sourceTree = "<group>"; };
FD47E0B92AA6EBA200A55E41 /* Request+PushNotificationAPI.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Request+PushNotificationAPI.swift"; sourceTree = "<group>"; };
FD47E0BF2AA83D7300A55E41 /* SwarmDrainBehaviour.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SwarmDrainBehaviour.swift; sourceTree = "<group>"; };
FD4B200D283492210034334B /* InsetLockableTableView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = InsetLockableTableView.swift; sourceTree = "<group>"; };
FD52090228B4680F006098F6 /* RadioButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RadioButton.swift; sourceTree = "<group>"; };
FD52090428B4915F006098F6 /* PrivacySettingsViewModel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PrivacySettingsViewModel.swift; sourceTree = "<group>"; };
@ -1963,7 +1980,6 @@
FDB5DAEB2A95E2C2002C8721 /* PreparedRequest.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PreparedRequest.swift; sourceTree = "<group>"; };
FDB5DAED2A95E462002C8721 /* BatchRequest.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BatchRequest.swift; sourceTree = "<group>"; };
FDB5DAF22A96DD4F002C8721 /* PreparedRequest+OnionRequest.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "PreparedRequest+OnionRequest.swift"; sourceTree = "<group>"; };
FDB5DAF42A9721A5002C8721 /* PreparedRequest+OpenGroup.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "PreparedRequest+OpenGroup.swift"; sourceTree = "<group>"; };
FDB5DAFA2A981C42002C8721 /* SessionSnodeKitTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = SessionSnodeKitTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
FDB5DB052A981C67002C8721 /* PreparedRequestOnionRequestsSpec.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PreparedRequestOnionRequestsSpec.swift; sourceTree = "<group>"; };
FDB7400A28EB99A70094D718 /* TimeInterval+Utilities.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "TimeInterval+Utilities.swift"; sourceTree = "<group>"; };
@ -2698,6 +2714,7 @@
C3C2A5D92553860B00C340D1 /* JSON.swift */,
FDF8488529405A60007DCAE5 /* Request.swift */,
FDC438B027BB159600C60D73 /* RequestInfo.swift */,
FD47E0B22AA6D5A300A55E41 /* RequestTarget.swift */,
FDB5DAEB2A95E2C2002C8721 /* PreparedRequest.swift */,
FDB5DAED2A95E462002C8721 /* BatchRequest.swift */,
FDF8488229405A12007DCAE5 /* BatchResponse.swift */,
@ -3355,6 +3372,7 @@
children = (
C33FDB01255A580700E217F9 /* AppReadiness.h */,
C33FDB75255A581000E217F9 /* AppReadiness.m */,
FD47E0B02AA6A05800A55E41 /* Authentication+SessionMessagingKit.swift */,
FD23CE232A675C440000B97C /* Crypto+SessionMessagingKit.swift */,
FD859EF127BF6BA200510D0C /* Data+Utilities.swift */,
C38EF309255B6DBE007E1867 /* DeviceSleepManager.swift */,
@ -3407,6 +3425,7 @@
C3C2A5CD255385F300C340D1 /* Utilities */ = {
isa = PBXGroup;
children = (
FD47E0AC2AA6918B00A55E41 /* Crypto+SessionSnodeKit.swift */,
C3C2A5D82553860B00C340D1 /* Data+Utilities.swift */,
C3C2A5D22553860900C340D1 /* String+Trimming.swift */,
C3C2A5D42553860A00C340D1 /* Threading.swift */,
@ -3694,6 +3713,7 @@
FD23CE1E2A65269C0000B97C /* Crypto.swift */,
FD559DF42A7368CB00C7C62A /* DispatchQueue+Utilities.swift */,
FD09796A27F6C67500936362 /* Failable.swift */,
FD47E0AE2AA692F400A55E41 /* JSONDecoder+Utilities.swift */,
FDFF9FDE2A787F57005E0628 /* JSONEncoder+Utilities.swift */,
FD09797127FAA2F500936362 /* Optional+Utilities.swift */,
FD09797C27FBDB2000936362 /* Notification+Utilities.swift */,
@ -4334,6 +4354,7 @@
FDC13D482A16EC20007267C7 /* Service.swift */,
FDD82C3E2A205D0A00425F05 /* ProcessResult.swift */,
FDC13D4F2A16EE50007267C7 /* PushNotificationAPIEndpoint.swift */,
FD47E0B92AA6EBA200A55E41 /* Request+PushNotificationAPI.swift */,
);
path = Types;
sourceTree = "<group>";
@ -4354,7 +4375,7 @@
children = (
FDF8487D29405993007DCAE5 /* HTTPHeader+OpenGroup.swift */,
FDF8487E29405994007DCAE5 /* HTTPQueryParam+OpenGroup.swift */,
FDB5DAF42A9721A5002C8721 /* PreparedRequest+OpenGroup.swift */,
FD47E0B62AA6E5FF00A55E41 /* Request+OpenGroupAPI.swift */,
FDC4381F27B36ADC00C60D73 /* SOGSEndpoint.swift */,
FDC4380827B31D4E00C60D73 /* OpenGroupAPIError.swift */,
FDC4381627B32EC700C60D73 /* Personalization.swift */,
@ -4551,12 +4572,13 @@
FDF8488F29405C13007DCAE5 /* Types */ = {
isa = PBXGroup;
children = (
FDF848DF29405D6E007DCAE5 /* SnodeAPIEndpoint.swift */,
FDF848E029405D6E007DCAE5 /* SnodeAPIError.swift */,
FDF8489029405C13007DCAE5 /* SnodeAPINamespace.swift */,
FDF848DE29405D6E007DCAE5 /* OnionRequestAPIVersion.swift */,
FDF848E229405D6E007DCAE5 /* OnionRequestAPIError.swift */,
FDF848E129405D6E007DCAE5 /* OnionRequestAPIDestination.swift */,
FDF848DF29405D6E007DCAE5 /* SnodeAPIEndpoint.swift */,
FDF848E029405D6E007DCAE5 /* SnodeAPIError.swift */,
FDF8489029405C13007DCAE5 /* SnodeAPINamespace.swift */,
FD47E0BF2AA83D7300A55E41 /* SwarmDrainBehaviour.swift */,
FD43242F2999F0BC008A0213 /* ValidatableResponse.swift */,
);
path = Types;
@ -4565,10 +4587,12 @@
FDF8489229405C1B007DCAE5 /* Networking */ = {
isa = PBXGroup;
children = (
FD47E0AA2AA68EEA00A55E41 /* Authentication.swift */,
FDF8489329405C1B007DCAE5 /* SnodeAPI.swift */,
FDF848EA29405E4E007DCAE5 /* Notification+OnionRequestAPI.swift */,
FD47E0B42AA6D7AA00A55E41 /* Request+SnodeAPI.swift */,
FDF848E829405E4E007DCAE5 /* OnionRequestAPI.swift */,
FDF848E929405E4E007DCAE5 /* OnionRequestAPI+Encryption.swift */,
FDF848EA29405E4E007DCAE5 /* Notification+OnionRequestAPI.swift */,
FDB5DAF22A96DD4F002C8721 /* PreparedRequest+OnionRequest.swift */,
);
path = Networking;
@ -5865,12 +5889,14 @@
FDB5DAF32A96DD4F002C8721 /* PreparedRequest+OnionRequest.swift in Sources */,
FDF848C629405C5B007DCAE5 /* DeleteAllMessagesRequest.swift in Sources */,
FDF848D429405C5B007DCAE5 /* DeleteAllBeforeResponse.swift in Sources */,
FD47E0B52AA6D7AA00A55E41 /* Request+SnodeAPI.swift in Sources */,
FDF848D629405C5B007DCAE5 /* SnodeMessage.swift in Sources */,
FDF848D129405C5B007DCAE5 /* SnodeSwarmItem.swift in Sources */,
FDF848DD29405C5B007DCAE5 /* LegacySendMessageRequest.swift in Sources */,
FDF848BD29405C5A007DCAE5 /* GetMessagesRequest.swift in Sources */,
FDF848DB29405C5B007DCAE5 /* DeleteMessagesResponse.swift in Sources */,
FDF848E629405D6E007DCAE5 /* OnionRequestAPIDestination.swift in Sources */,
FD47E0AB2AA68EEA00A55E41 /* Authentication.swift in Sources */,
FDF848CC29405C5B007DCAE5 /* SnodeReceivedMessage.swift in Sources */,
FDF848C129405C5A007DCAE5 /* UpdateExpiryRequest.swift in Sources */,
FDF848C729405C5B007DCAE5 /* SendMessageResponse.swift in Sources */,
@ -5879,6 +5905,7 @@
FDF848D229405C5B007DCAE5 /* LegacyGetMessagesRequest.swift in Sources */,
FDF848CB29405C5B007DCAE5 /* SnodePoolResponse.swift in Sources */,
FDF848C429405C5A007DCAE5 /* RevokeSubkeyResponse.swift in Sources */,
FD47E0AD2AA6918B00A55E41 /* Crypto+SessionSnodeKit.swift in Sources */,
FDF848E529405D6E007DCAE5 /* SnodeAPIError.swift in Sources */,
FDF848D529405C5B007DCAE5 /* DeleteAllMessagesResponse.swift in Sources */,
FDF848E329405D6E007DCAE5 /* OnionRequestAPIVersion.swift in Sources */,
@ -5890,6 +5917,7 @@
FDF848CD29405C5B007DCAE5 /* GetNetworkTimestampResponse.swift in Sources */,
FDF848DA29405C5B007DCAE5 /* GetMessagesResponse.swift in Sources */,
FD39353628F7C3390084DADA /* _004_FlagMessageHashAsDeletedOrInvalid.swift in Sources */,
FD47E0C02AA83D7300A55E41 /* SwarmDrainBehaviour.swift in Sources */,
FDF8489429405C1B007DCAE5 /* SnodeAPI.swift in Sources */,
FDF848C829405C5B007DCAE5 /* ONSResolveRequest.swift in Sources */,
C3C2A5C2255385EE00C340D1 /* Configuration.swift in Sources */,
@ -6002,9 +6030,11 @@
C32C5E0C256DDAFA003C73A2 /* NSRegularExpression+SSK.swift in Sources */,
FD29598D2A43BC0B00888A17 /* Version.swift in Sources */,
FDF8487C29405906007DCAE5 /* HTTPMethod.swift in Sources */,
FD47E0AF2AA692F400A55E41 /* JSONDecoder+Utilities.swift in Sources */,
FDF8488429405A2B007DCAE5 /* RequestInfo.swift in Sources */,
C3BBE0A92554D4DE0050F1E3 /* HTTP.swift in Sources */,
FD71160028C8253500B47552 /* UIView+Combine.swift in Sources */,
FD47E0B32AA6D5A300A55E41 /* RequestTarget.swift in Sources */,
B8856D23256F116B001CE70E /* Weak.swift in Sources */,
FD17D7CD27F546FF00122BE0 /* Setting.swift in Sources */,
FD7115FC28C8155800B47552 /* Publisher+Utilities.swift in Sources */,
@ -6057,6 +6087,7 @@
7B4C75CB26B37E0F0000AC89 /* UnsendRequest.swift in Sources */,
C300A5F22554B09800555489 /* MessageSender.swift in Sources */,
FDB5DAC52A944757002C8721 /* SessionUtil+GroupInfo.swift in Sources */,
FD47E0B12AA6A05800A55E41 /* Authentication+SessionMessagingKit.swift in Sources */,
FDB5DACB2A944E72002C8721 /* SessionUtil+GroupKeys.swift in Sources */,
FDF848F729414477007DCAE5 /* CurrentUserPoller.swift in Sources */,
B8B558FF26C4E05E00693325 /* WebRTCSession+MessageHandling.swift in Sources */,
@ -6082,7 +6113,6 @@
7B7AD41F2A5512CA00469FB1 /* GetExpirationJob.swift in Sources */,
FDA8EAFE280E8B78002B68E5 /* FailedMessageSendsJob.swift in Sources */,
FD245C6A2850666F00B966DD /* FileServerAPI.swift in Sources */,
FDB5DAF52A9721A5002C8721 /* PreparedRequest+OpenGroup.swift in Sources */,
FDFBB74D2A1F3C4E00CA7350 /* NotificationMetadata.swift in Sources */,
FDC4386927B4E6B800C60D73 /* String+Utlities.swift in Sources */,
FD716E6628502EE200C96BF4 /* CurrentCallProtocol.swift in Sources */,
@ -6098,6 +6128,7 @@
FDC4386527B4DE7600C60D73 /* RoomPollInfo.swift in Sources */,
FD245C6B2850667400B966DD /* VisibleMessage+Profile.swift in Sources */,
FD37EA0F28AB3330003AE748 /* _006_FixHiddenModAdminSupport.swift in Sources */,
FD47E0BA2AA6EBA200A55E41 /* Request+PushNotificationAPI.swift in Sources */,
FD2B4AFD294688D000AB4848 /* SessionUtil+Contacts.swift in Sources */,
7B81682328A4C1210069F315 /* UpdateTypes.swift in Sources */,
FDC13D472A16E4CA007267C7 /* SubscribeRequest.swift in Sources */,
@ -6164,6 +6195,7 @@
FD83B9CC27D179BC005E1583 /* FSEndpoint.swift in Sources */,
FDC13D4B2A16ECBA007267C7 /* SubscribeResponse.swift in Sources */,
FD7115F228C6CB3900B47552 /* _010_AddThreadIdToFTS.swift in Sources */,
FD47E0B82AA6E62600A55E41 /* Request+OpenGroupAPI.swift in Sources */,
FD716E6428502DDD00C96BF4 /* CallManagerProtocol.swift in Sources */,
FDC438C727BB6DF000C60D73 /* DirectMessage.swift in Sources */,
FDC13D502A16EE50007267C7 /* PushNotificationAPIEndpoint.swift in Sources */,

View File

@ -300,12 +300,13 @@ public final class SessionCall: CurrentCallProtocol, WebRTCSessionDelegate {
guard
let infoMessageData: Data = (interaction.body ?? "").data(using: .utf8),
let messageInfo: CallMessage.MessageInfo = try? JSONDecoder().decode(
let messageInfo: CallMessage.MessageInfo = try? JSONDecoder(using: dependencies).decode(
CallMessage.MessageInfo.self,
from: infoMessageData
),
messageInfo.state == .incoming,
let missedCallInfoData: Data = try? JSONEncoder().encode(missedCallInfo)
let missedCallInfoData: Data = try? JSONEncoder(using: dependencies)
.encode(missedCallInfo)
else { return }
_ = try interaction

View File

@ -579,7 +579,7 @@ extension ConversationVC:
// Process any attachments
try Attachment.process(
db,
data: optimisticData.attachmentData,
attachments: optimisticData.attachmentData,
for: insertedInteraction.id
)
@ -1340,8 +1340,8 @@ extension ConversationVC:
}
}
.subscribe(on: DispatchQueue.global(qos: .userInitiated), using: dependencies)
.flatMap { pendingChange -> AnyPublisher<(MessageSender.PreparedSendData?, OpenGroupInfo?), Error> in
dependencies[singleton: .storage].writePublisher { [weak self] db -> (MessageSender.PreparedSendData?, OpenGroupInfo?) in
.flatMap { pendingChange -> AnyPublisher<HTTP.PreparedRequest<Void>, Error> in
dependencies[singleton: .storage].writePublisher { [weak self] db -> HTTP.PreparedRequest<Void> in
// Update the thread to be visible (if it isn't already)
if self?.viewModel.threadData.threadShouldBeVisible == false {
_ = try SessionThread
@ -1424,10 +1424,25 @@ extension ConversationVC:
.map { _, response in response.seqNo }
}()
return (nil, (pendingReaction, pendingChange, preparedRequest))
return preparedRequest
.handleEvents(
receiveOutput: { _, seqNo in
OpenGroupManager.updatePendingChange(pendingChange, seqNo: seqNo)
},
receiveCompletion: { [weak self] result in
switch result {
case .finished: break
case .failure:
OpenGroupManager.removePendingChange(pendingChange)
self?.handleReactionSentFailure(pendingReaction, remove: remove)
}
}
)
.map { _, _ in () }
default:
let sendData: MessageSender.PreparedSendData = try MessageSender.preparedSendData(
return try MessageSender.preparedSend(
db,
message: VisibleMessage(
sentTimestamp: UInt64(sentTimestamp),
@ -1451,47 +1466,13 @@ extension ConversationVC:
.from(db, threadId: cellViewModel.threadId, threadVariant: cellViewModel.threadVariant)
.defaultNamespace,
interactionId: cellViewModel.id,
fileIds: [],
using: dependencies
)
return (sendData, nil)
}
}
}
.tryFlatMap { messageSendData, openGroupInfo -> AnyPublisher<Void, Error> in
switch (messageSendData, openGroupInfo) {
case (.some(let sendData), _):
return MessageSender.sendImmediate(data: sendData, using: dependencies)
case (_, .some(let info)):
return info.preparedRequest.send(using: dependencies)
.handleEvents(
receiveOutput: { _, seqNo in
OpenGroupManager
.updatePendingChange(
info.pendingChange,
seqNo: seqNo
)
},
receiveCompletion: { [weak self] result in
switch result {
case .finished: break
case .failure:
OpenGroupManager.removePendingChange(info.pendingChange)
self?.handleReactionSentFailure(
info.pendingReaction,
remove: remove
)
}
}
)
.map { _ in () }
.eraseToAnyPublisher()
default: throw MessageSenderError.invalidMessage
}
}
.flatMap { $0.send(using: dependencies) }
.sinkUntilComplete()
}
@ -2085,11 +2066,17 @@ extension ConversationVC:
deleteRemotely(
from: self,
request: SnodeAPI
.deleteMessages(
publicKey: targetPublicKey,
serverHashes: [serverHash]
)
request: dependencies[singleton: .storage]
.readPublisher(using: dependencies) { db in
try SnodeAPI.AuthenticationInfo(db, threadId: targetPublicKey, using: dependencies)
}
.flatMap { authInfo in
SnodeAPI
.deleteMessages(
serverHashes: [serverHash],
authInfo: authInfo
)
}
.map { _ in () }
.eraseToAnyPublisher()
) { completeServerDeletion() }
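
This hunk shows the other recurring shape in the commit: instead of each SnodeAPI function deriving its own keys, the caller reads an AuthenticationInfo inside a storage read publisher and passes it to the request. A rough, hypothetical Combine sketch of that shape follows; the stub types stand in for the real storage and SnodeAPI signatures.

import Combine
import Foundation

// Hypothetical stand-ins: the real AuthenticationInfo is built from the database and the
// real deleteMessages call goes over onion requests to the user's swarm.
struct AuthenticationInfo { let publicKey: String }

enum StubSnodeAPI {
    static func deleteMessages(
        serverHashes: [String],
        authInfo: AuthenticationInfo
    ) -> AnyPublisher<[String: Bool], Error> {
        Just(Dictionary(uniqueKeysWithValues: serverHashes.map { ($0, true) }))
            .setFailureType(to: Error.self)
            .eraseToAnyPublisher()
    }
}

struct StubStorage {
    // Mimics storage.readPublisher { db in ... }: defer the (possibly throwing) read.
    func readPublisher<T>(_ read: @escaping () throws -> T) -> AnyPublisher<T, Error> {
        Deferred { Future<T, Error> { promise in promise(Result { try read() }) } }
            .eraseToAnyPublisher()
    }
}

let cancellable = StubStorage()
    .readPublisher { AuthenticationInfo(publicKey: "05stubPublicKey") }  // db read stubbed out
    .flatMap { authInfo in
        StubSnodeAPI.deleteMessages(serverHashes: ["someServerHash"], authInfo: authInfo)
    }
    .map { _ in () }  // callers only care about success
    .sink(
        receiveCompletion: { _ in },
        receiveValue: { print("messages deleted") }
    )
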

View File

@ -472,7 +472,7 @@ public class ConversationViewModel: OWSAudioPlayerDelegate {
id: UUID,
messageViewModel: MessageViewModel,
interaction: Interaction,
attachmentData: Attachment.PreparedData?,
attachmentData: [Attachment]?,
linkPreviewDraft: LinkPreviewDraft?,
linkPreviewAttachment: Attachment?,
quoteModel: QuotedReplyModel?
@ -507,7 +507,7 @@ public class ConversationViewModel: OWSAudioPlayerDelegate {
),
linkPreviewUrl: linkPreviewDraft?.urlString
)
let optimisticAttachments: Attachment.PreparedData? = attachments
let optimisticAttachments: [Attachment]? = attachments
.map { Attachment.prepare(attachments: $0) }
let linkPreviewAttachment: Attachment? = linkPreviewDraft.map { draft in
try? LinkPreview.generateAttachmentIfPossible(
@ -557,7 +557,7 @@ public class ConversationViewModel: OWSAudioPlayerDelegate {
)
},
linkPreviewAttachment: linkPreviewAttachment,
attachments: optimisticAttachments?.attachments
attachments: optimisticAttachments
)
let optimisticData: OptimisticMessageData = (
optimisticMessageId,

View File

@ -569,7 +569,7 @@ class NotificationActionHandler {
}
return dependencies[singleton: .storage]
.writePublisher { db in
.writePublisher { db -> HTTP.PreparedRequest<Void> in
let interaction: Interaction = try Interaction(
threadId: threadId,
authorId: getUserHexEncodedPublicKey(db),
@ -593,15 +593,17 @@ class NotificationActionHandler {
using: dependencies
)
return try MessageSender.preparedSendData(
return try MessageSender.preparedSend(
db,
interaction: interaction,
fileIds: [],
threadId: threadId,
threadVariant: thread.variant,
using: dependencies
)
}
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.map { _ in () }
.handleEvents(
receiveCompletion: { result in
switch result {

View File

@ -299,7 +299,7 @@ public enum PushRegistrationError: Error {
)
let messageInfoString: String? = {
if let messageInfoData: Data = try? JSONEncoder().encode(messageInfo) {
if let messageInfoData: Data = try? JSONEncoder(using: dependencies).encode(messageInfo) {
return String(data: messageInfoData, encoding: .utf8)
} else {
return "Incoming call." // TODO: We can do better here.

View File

@ -32,19 +32,17 @@ enum Onboarding {
) -> AnyPublisher<String?, Error> {
let userPublicKey: String = getUserHexEncodedPublicKey(using: dependencies)
return SnodeAPI.getSwarm(for: userPublicKey)
.tryFlatMapWithRandomSnode { snode -> AnyPublisher<[Message], Error> in
CurrentUserPoller
.poll(
namespaces: [.configUserProfile],
from: snode,
for: userPublicKey,
// Note: These values mean the received messages will be
// processed immediately rather than async as part of a Job
calledFromBackgroundPoller: true,
isBackgroundPollValid: { true }
)
}
return CurrentUserPoller
.poll(
namespaces: [.configUserProfile],
for: userPublicKey,
// Note: These values mean the received messages will be
// processed immediately rather than async as part of a Job
calledFromBackgroundPoller: true,
isBackgroundPollValid: { true },
drainBehaviour: .alwaysRandom,
using: dependencies
)
.map { _ -> String? in
guard requestId == profileNameRetrievalIdentifier.wrappedValue else { return nil }

View File

@ -193,8 +193,15 @@ final class NukeDataModal: Modal {
.collect()
.subscribe(on: DispatchQueue.global(qos: .userInitiated), using: dependencies)
.flatMap { results in
SnodeAPI
.deleteAllMessages(namespace: .all)
dependencies[singleton: .storage]
.readPublisher(using: dependencies) { db in
try SnodeAPI.AuthenticationInfo(
db,
threadId: getUserHexEncodedPublicKey(db, using: dependencies),
using: dependencies
)
}
.flatMap { SnodeAPI.deleteAllMessages(namespace: .all, authInfo: $0) }
.map { results.reduce($0) { result, next in result.updated(with: next) } }
.eraseToAnyPublisher()
}

View File

@ -73,19 +73,15 @@ public final class BackgroundPoller {
private static func pollForMessages(
using dependencies: Dependencies
) -> AnyPublisher<Void, Error> {
let userPublicKey: String = getUserHexEncodedPublicKey(using: dependencies)
return SnodeAPI.getSwarm(for: userPublicKey)
.tryFlatMapWithRandomSnode { snode -> AnyPublisher<[Message], Error> in
CurrentUserPoller.poll(
namespaces: CurrentUserPoller.namespaces,
from: snode,
for: userPublicKey,
calledFromBackgroundPoller: true,
isBackgroundPollValid: { BackgroundPoller.isValid },
using: dependencies
)
}
return CurrentUserPoller
.poll(
namespaces: CurrentUserPoller.namespaces,
for: getUserHexEncodedPublicKey(using: dependencies),
calledFromBackgroundPoller: true,
isBackgroundPollValid: { BackgroundPoller.isValid },
drainBehaviour: .alwaysRandom,
using: dependencies
)
.map { _ in () }
.eraseToAnyPublisher()
}
@ -107,24 +103,17 @@ public final class BackgroundPoller {
.fetchAll(db)
}
.defaulting(to: [])
.map { groupPublicKey in
SnodeAPI.getSwarm(for: groupPublicKey)
.tryFlatMap { swarm -> AnyPublisher<[Message], Error> in
guard let snode: Snode = swarm.randomElement() else {
throw OnionRequestAPIError.insufficientSnodes
}
return ClosedGroupPoller.poll(
namespaces: ClosedGroupPoller.namespaces,
from: snode,
for: groupPublicKey,
calledFromBackgroundPoller: true,
isBackgroundPollValid: { BackgroundPoller.isValid },
using: dependencies
)
}
.map { _ in () }
.eraseToAnyPublisher()
.map { groupId in
ClosedGroupPoller.poll(
namespaces: ClosedGroupPoller.namespaces,
for: groupId,
calledFromBackgroundPoller: true,
isBackgroundPollValid: { BackgroundPoller.isValid },
drainBehaviour: .alwaysRandom,
using: dependencies
)
.map { _ in () }
.eraseToAnyPublisher()
}
}
}

View File

@ -129,21 +129,20 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
) throws -> AnyPublisher<Void, Error> {
SNLog("[Calls] Sending pre-offer message.")
return MessageSender
.sendImmediate(
data: try MessageSender
.preparedSendData(
db,
message: message,
to: try Message.Destination.from(db, threadId: thread.id, threadVariant: thread.variant),
namespace: try Message.Destination
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: interactionId,
using: dependencies
),
return try MessageSender
.preparedSend(
db,
message: message,
to: try Message.Destination.from(db, threadId: thread.id, threadVariant: thread.variant),
namespace: try Message.Destination
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: interactionId,
fileIds: [],
using: dependencies
)
.send(using: dependencies)
.map { _ in () }
.handleEvents(receiveOutput: { _ in SNLog("[Calls] Pre-offer message has been sent.") })
.eraseToAnyPublisher()
}
@ -160,10 +159,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
return Deferred {
Future<Void, Error> { [weak self] resolver in
self?.peerConnection?.offer(for: mediaConstraints) { sdp, error in
if let error = error {
return
}
guard error == nil else { return }
guard let sdp: RTCSessionDescription = self?.correctSessionDescription(sdp: sdp) else {
preconditionFailure()
}
@ -177,9 +173,9 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
}
dependencies[singleton: .storage]
.writePublisher { db in
.writePublisher { db -> HTTP.PreparedRequest<Void> in
try MessageSender
.preparedSendData(
.preparedSend(
db,
message: CallMessage(
uuid: uuid,
@ -193,10 +189,11 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: nil,
fileIds: [],
using: dependencies
)
}
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.sinkUntilComplete(
receiveCompletion: { result in
@ -248,9 +245,9 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
}
dependencies[singleton: .storage]
.writePublisher { db in
.writePublisher { db -> HTTP.PreparedRequest<Void> in
try MessageSender
.preparedSendData(
.preparedSend(
db,
message: CallMessage(
uuid: uuid,
@ -263,10 +260,11 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: nil,
fileIds: [],
using: dependencies
)
}
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.sinkUntilComplete(
receiveCompletion: { result in
@ -301,7 +299,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
self.queuedICECandidates.removeAll()
dependencies[singleton: .storage]
.writePublisher { db in
.writePublisher { db -> HTTP.PreparedRequest<Void> in
guard let thread: SessionThread = try SessionThread.fetchOne(db, id: contactSessionId) else {
throw WebRTCSessionError.noThread
}
@ -309,7 +307,7 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
SNLog("[Calls] Batch sending \(candidates.count) ICE candidates.")
return try MessageSender
.preparedSendData(
.preparedSend(
db,
message: CallMessage(
uuid: uuid,
@ -325,11 +323,12 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: nil,
fileIds: [],
using: dependencies
)
}
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.sinkUntilComplete()
}
@ -342,8 +341,8 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
SNLog("[Calls] Sending end call message.")
let preparedSendData: MessageSender.PreparedSendData = try MessageSender
.preparedSendData(
try MessageSender
.preparedSend(
db,
message: CallMessage(
uuid: self.uuid,
@ -355,12 +354,11 @@ public final class WebRTCSession : NSObject, RTCPeerConnectionDelegate {
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: nil,
fileIds: [],
using: dependencies
)
MessageSender
.sendImmediate(data: preparedSendData, using: dependencies)
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.send(using: dependencies)
.subscribe(on: DispatchQueue.global(qos: .userInitiated), using: dependencies)
.sinkUntilComplete()
}

View File

@ -986,50 +986,44 @@ extension Attachment {
// MARK: - Upload
extension Attachment {
public enum Destination {
private enum Destination {
case fileServer
case openGroup(OpenGroup)
case community(OpenGroup)
var shouldEncrypt: Bool {
switch self {
case .fileServer: return true
case .openGroup: return false
case .community: return false
}
}
}
public struct PreparedData {
public let attachments: [Attachment]
}
public static func prepare(attachments: [SignalAttachment]) -> PreparedData {
return PreparedData(
attachments: attachments.compactMap { signalAttachment in
Attachment(
variant: (signalAttachment.isVoiceMessage ?
.voiceMessage :
.standard
),
contentType: signalAttachment.mimeType,
dataSource: signalAttachment.dataSource,
sourceFilename: signalAttachment.sourceFilename,
caption: signalAttachment.captionText
)
}
)
public static func prepare(attachments: [SignalAttachment]) -> [Attachment] {
return attachments.compactMap { signalAttachment in
Attachment(
variant: (signalAttachment.isVoiceMessage ?
.voiceMessage :
.standard
),
contentType: signalAttachment.mimeType,
dataSource: signalAttachment.dataSource,
sourceFilename: signalAttachment.sourceFilename,
caption: signalAttachment.captionText
)
}
}
public static func process(
_ db: Database,
data: PreparedData?,
attachments: [Attachment]?,
for interactionId: Int64?
) throws {
guard
let data: PreparedData = data,
let attachments: [Attachment] = attachments,
let interactionId: Int64 = interactionId
else { return }
try data.attachments
try attachments
.enumerated()
.forEach { index, attachment in
let interactionAttachment: InteractionAttachment = InteractionAttachment(
@ -1043,160 +1037,187 @@ extension Attachment {
}
}
internal func upload(
to destination: Attachment.Destination,
public func preparedUpload(
_ db: Database,
threadId: String,
using dependencies: Dependencies
) -> AnyPublisher<String, Error> {
// This can occur if an AttachmnetUploadJob was explicitly created for a message
// dependant on the attachment being uploaded (in this case the attachment has
// already been uploaded so just succeed)
guard state != .uploaded else {
guard let fileId: String = Attachment.fileId(for: self.downloadUrl) else {
SNLog("Previously uploaded attachment had invalid fileId.")
return Fail(error: AttachmentError.invalidData)
.eraseToAnyPublisher()
) throws -> HTTP.PreparedRequest<String> {
typealias UploadInfo = (
attachment: Attachment,
preparedRequest: HTTP.PreparedRequest<FileUploadResponse>,
encryptionKey: Data?,
digest: Data?
)
// Retrieve the correct destination for the given thread
let destination: Destination = (try? OpenGroup.fetchOne(db, id: threadId))
.map { .community($0) }
.defaulting(to: .fileServer)
let uploadInfo: UploadInfo = try {
let finalData: Data
let finalEncryptionKey: Data?
let finalDigest: Data?
// This can occur if an AttachmentUploadJob was explicitly created for a message
// dependant on the attachment being uploaded (in this case the attachment has
// already been uploaded so just succeed)
if state == .uploaded, let fileId: String = Attachment.fileId(for: downloadUrl) {
return (
self,
HTTP.PreparedRequest.cached(
FileUploadResponse(id: fileId),
endpoint: FileServerAPI.Endpoint.file
),
self.encryptionKey,
self.digest
)
}
return Just(fileId)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
// Get the attachment
guard var data = try? readDataFromFile() else {
SNLog("Couldn't read attachment from disk.")
return Fail(error: AttachmentError.noAttachment)
.eraseToAnyPublisher()
}
let attachmentId: String = self.id
return dependencies[singleton: .storage]
.writePublisher { db -> (HTTP.PreparedRequest<FileUploadResponse>?, String?, Data?, Data?) in
// If the attachment is a downloaded attachment, check if it came from
// the server and if so just succeed immediately (no use re-uploading
// an attachment that is already present on the server) - or if we want
// it to be encrypted and it's not then encrypt it
//
// Note: The most common cases for this will be for LinkPreviews or Quotes
guard
state != .downloaded ||
serverId == nil ||
downloadUrl == nil ||
!destination.shouldEncrypt ||
encryptionKey == nil ||
digest == nil
else {
// Save the final upload info
_ = try? Attachment
.filter(id: attachmentId)
.updateAll(db, Attachment.Columns.state.set(to: Attachment.State.uploaded))
// If the attachment is a downloaded attachment, check if it came from
// the server and if so just succeed immediately (no use re-uploading
// an attachment that is already present on the server) - or if we want
// it to be encrypted and it's not then encrypt it
//
// Note: The most common cases for this will be for LinkPreviews or Quotes
if
state == .downloaded,
serverId != nil,
let fileId: String = Attachment.fileId(for: downloadUrl),
(
!destination.shouldEncrypt || (
encryptionKey != nil &&
digest != nil
)
)
{
return (
self,
HTTP.PreparedRequest.cached(
FileUploadResponse(id: fileId),
endpoint: FileServerAPI.Endpoint.file
),
self.encryptionKey,
self.digest
)
}
// Get the raw attachment data
guard let rawData: Data = try? readDataFromFile() else {
SNLog("Couldn't read attachment from disk.")
throw AttachmentError.noAttachment
}
// Perform encryption if needed
switch destination.shouldEncrypt {
case false:
finalEncryptionKey = nil
finalDigest = nil
finalData = rawData
return (nil, Attachment.fileId(for: self.downloadUrl), nil, nil)
}
var encryptionKey: NSData = NSData()
var digest: NSData = NSData()
// Encrypt the attachment if needed
if destination.shouldEncrypt {
guard let ciphertext = Cryptography.encryptAttachmentData(data, shouldPad: true, outKey: &encryptionKey, outDigest: &digest) else {
case true:
var encryptionKey: NSData = NSData()
var digest: NSData = NSData()
guard let ciphertext = Cryptography.encryptAttachmentData(rawData, shouldPad: true, outKey: &encryptionKey, outDigest: &digest) else {
SNLog("Couldn't encrypt attachment.")
throw AttachmentError.encryptionFailed
}
data = ciphertext
}
// Check the file size
SNLog("File size: \(data.count) bytes.")
if data.count > FileServerAPI.maxFileSize { throw HTTPError.maxFileSizeExceeded }
// Update the attachment to the 'uploading' state
_ = try? Attachment
.filter(id: attachmentId)
.updateAll(db, Attachment.Columns.state.set(to: Attachment.State.uploading))
// We need database access for OpenGroup uploads so generate prepared data
let preparedRequest: HTTP.PreparedRequest<FileUploadResponse>? = try {
switch destination {
case .openGroup(let openGroup):
return try OpenGroupAPI
.preparedUploadFile(
db,
bytes: data.bytes,
to: openGroup.roomToken,
on: openGroup.server
)
default: return nil
}
}()
return (
preparedRequest,
nil,
(destination.shouldEncrypt ? encryptionKey as Data : nil),
(destination.shouldEncrypt ? digest as Data : nil)
)
finalEncryptionKey = encryptionKey as Data
finalDigest = digest as Data
finalData = ciphertext
}
.flatMap { preparedRequest, existingFileId, encryptionKey, digest -> AnyPublisher<(String, Data?, Data?), Error> in
// No need to upload if the file was already uploaded
if let fileId: String = existingFileId {
return Just((fileId, encryptionKey, digest))
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
switch destination {
case .openGroup:
return preparedRequest.send(using: dependencies)
.map { _, response -> (String, Data?, Data?) in (response.id, encryptionKey, digest) }
.eraseToAnyPublisher()
case .fileServer:
return FileServerAPI.upload(data)
.map { response -> (String, Data?, Data?) in (response.id, encryptionKey, digest) }
.eraseToAnyPublisher()
}
}
.flatMap { fileId, encryptionKey, digest -> AnyPublisher<String, Error> in
/// Save the final upload info
///
/// **Note:** We **MUST** use the `.with` function here to ensure the `isValid` flag is
/// updated correctly
dependencies[singleton: .storage]
.writePublisher { db in
try self
.with(
serverId: fileId,
state: .uploaded,
creationTimestamp: (
self.creationTimestamp ??
(TimeInterval(SnodeAPI.currentOffsetTimestampMs()) / 1000)
),
downloadUrl: Attachment.downloadUrl(for: fileId),
encryptionKey: encryptionKey,
digest: digest
)
.saved(db)
}
.map { _ in fileId }
.eraseToAnyPublisher()
// Ensure the file size is smaller than our upload limit
SNLog("File size: \(finalData.count) bytes.")
guard finalData.count <= FileServerAPI.maxFileSize else { throw HTTPError.maxFileSizeExceeded }
// Generate the request
switch destination {
case .fileServer:
return (
self,
try FileServerAPI
.preparedUpload(finalData, using: dependencies),
finalEncryptionKey,
finalDigest
)
case .community(let openGroup):
return (
self,
try OpenGroupAPI.preparedUploadFile(
db,
bytes: Array(finalData),
to: openGroup.roomToken,
on: openGroup.server,
using: dependencies
),
finalEncryptionKey,
finalDigest
)
}
}()
return uploadInfo.preparedRequest
.handleEvents(
receiveSubscription: {
// If we have a `cachedResponse` (ie. already uploaded) then don't change
// the attachment state to uploading as it's already been done
guard uploadInfo.preparedRequest.cachedResponse == nil else { return }
// Update the attachment to the 'uploading' state
dependencies[singleton: .storage].write(using: dependencies) { db in
_ = try? Attachment
.filter(id: uploadInfo.attachment.id)
.updateAll(db, Attachment.Columns.state.set(to: Attachment.State.uploading))
}
},
receiveOutput: { _, response in
/// Save the final upload info
///
/// **Note:** We **MUST** use the `.with` function here to ensure the `isValid` flag is
/// updated correctly
let updatedAttachment: Attachment = uploadInfo.attachment
.with(
serverId: response.id,
state: .uploaded,
creationTimestamp: (
uploadInfo.attachment.creationTimestamp ??
(TimeInterval(SnodeAPI.currentOffsetTimestampMs(using: dependencies)) / 1000)
),
downloadUrl: Attachment.downloadUrl(for: response.id),
encryptionKey: uploadInfo.encryptionKey,
digest: uploadInfo.digest
)
// Ensure there were changes before triggering a db write to avoid unneeded
// write queue use and UI updates
guard updatedAttachment != uploadInfo.attachment else { return }
dependencies[singleton: .storage].write(using: dependencies) { db in
try updatedAttachment.saved(db)
}
},
receiveCompletion: { result in
switch result {
case .finished: break
case .failure:
dependencies[singleton: .storage].write { db in
try Attachment
.filter(id: attachmentId)
.filter(id: uploadInfo.attachment.id)
.updateAll(db, Attachment.Columns.state.set(to: Attachment.State.failedUpload))
}
}
},
receiveCancel: {
dependencies[singleton: .storage].write { db in
try Attachment
.filter(id: uploadInfo.attachment.id)
.updateAll(db, Attachment.Columns.state.set(to: Attachment.State.failedUpload))
}
}
)
.eraseToAnyPublisher()
.map { _, response in response.id }
}
}

View File

@ -111,4 +111,14 @@ public extension ConfigDump.Variant {
case .convoInfoVolatile: return 2
}
}
/// This value defines the order that the config messages should be sent in, we need to send the `groupKeys`
/// config _before_ the `groupInfo` and `groupMembers` configs as they both get encrypted with the latest key
/// and we want to avoid weird edge-cases
var sendOrder: Int {
switch self {
case .groupKeys: return 0
default: return 1
}
}
}
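
A small, hypothetical example of how a sendOrder value like this can be applied when batching config pushes: sort the pending changes before building the batch/sequence request so the groupKeys payload always precedes groupInfo and groupMembers. The enum below is a stand-in, not the real ConfigDump.Variant.

// Hypothetical stand-in for ConfigDump.Variant, keeping only the ordering behaviour.
enum ConfigVariant {
    case userProfile, groupInfo, groupMembers, groupKeys

    // groupKeys goes first because the other group configs are encrypted with the latest key.
    var sendOrder: Int {
        switch self {
        case .groupKeys: return 0
        default: return 1
        }
    }
}

struct PendingConfigChange {
    let variant: ConfigVariant
    let payload: [UInt8]
}

let pending: [PendingConfigChange] = [
    PendingConfigChange(variant: .groupInfo, payload: []),
    PendingConfigChange(variant: .groupKeys, payload: []),
    PendingConfigChange(variant: .groupMembers, payload: [])
]

// Sorting by sendOrder before building the request ensures the key config is processed
// first on the receiving side.
let ordered = pending.sorted { $0.variant.sendOrder < $1.variant.sendOrder }
print(ordered.map { $0.variant })  // groupKeys first
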

View File

@ -24,7 +24,10 @@ public enum FileServerAPI {
// MARK: - File Storage
public static func upload(_ file: Data) -> AnyPublisher<FileUploadResponse, Error> {
public static func upload(
_ file: Data,
using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<FileUploadResponse, Error> {
let request = Request(
method: .post,
server: server,
@ -33,33 +36,65 @@ public enum FileServerAPI {
.contentDisposition: "attachment",
.contentType: "application/octet-stream"
],
x25519PublicKey: serverPublicKey,
body: Array(file)
)
return send(request, serverPublicKey: serverPublicKey, timeout: FileServerAPI.fileUploadTimeout)
return send(request, serverPublicKey: serverPublicKey, timeout: FileServerAPI.fileUploadTimeout, using: dependencies)
.decoded(as: FileUploadResponse.self)
}
public static func download(_ fileId: String, useOldServer: Bool) -> AnyPublisher<Data, Error> {
public static func preparedUpload(
_ file: Data,
using dependencies: Dependencies = Dependencies()
) throws -> HTTP.PreparedRequest<FileUploadResponse> {
return try prepareRequest(
request: Request(
method: .post,
server: server,
endpoint: Endpoint.file,
headers: [
.contentDisposition: "attachment",
.contentType: "application/octet-stream"
],
x25519PublicKey: serverPublicKey,
body: Array(file)
),
responseType: FileUploadResponse.self,
timeout: FileServerAPI.fileUploadTimeout,
using: dependencies
)
}
public static func download(
_ fileId: String,
useOldServer: Bool,
using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<Data, Error> {
let serverPublicKey: String = (useOldServer ? oldServerPublicKey : serverPublicKey)
let request = Request<NoBody, Endpoint>(
server: (useOldServer ? oldServer : server),
endpoint: .fileIndividual(fileId: fileId)
endpoint: .fileIndividual(fileId: fileId),
x25519PublicKey: serverPublicKey
)
return send(request, serverPublicKey: serverPublicKey, timeout: FileServerAPI.fileDownloadTimeout)
return send(request, serverPublicKey: serverPublicKey, timeout: FileServerAPI.fileDownloadTimeout, using: dependencies)
}
public static func getVersion(_ platform: String) -> AnyPublisher<String, Error> {
public static func getVersion(
_ platform: String,
using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<String, Error> {
let request = Request<NoBody, Endpoint>(
server: server,
endpoint: .sessionVersion,
queryParameters: [
.platform: platform
]
],
x25519PublicKey: serverPublicKey
)
return send(request, serverPublicKey: serverPublicKey, timeout: HTTP.defaultTimeout)
return send(request, serverPublicKey: serverPublicKey, timeout: HTTP.defaultTimeout, using: dependencies)
.decoded(as: VersionResponse.self)
.map { response in response.version }
.eraseToAnyPublisher()
@ -70,25 +105,25 @@ public enum FileServerAPI {
private static func send<T: Encodable>(
_ request: Request<T, Endpoint>,
serverPublicKey: String,
timeout: TimeInterval
timeout: TimeInterval,
using dependencies: Dependencies
) -> AnyPublisher<Data, Error> {
let urlRequest: URLRequest
let preparedRequest: HTTP.PreparedRequest<Data?>
do {
urlRequest = try request.generateUrlRequest()
preparedRequest = try prepareRequest(
request: request,
responseType: Data?.self,
timeout: timeout,
using: dependencies
)
}
catch {
return Fail(error: error)
.eraseToAnyPublisher()
}
return OnionRequestAPI
.sendOnionRequest(
urlRequest,
to: request.server,
with: serverPublicKey,
timeout: timeout
)
return preparedRequest.send(using: dependencies)
.tryMap { _, response -> Data in
guard let response: Data = response else { throw HTTPError.parsingFailed }
@ -96,4 +131,20 @@ public enum FileServerAPI {
}
.eraseToAnyPublisher()
}
private static func prepareRequest<T: Encodable, R: Decodable>(
request: Request<T, Endpoint>,
responseType: R.Type,
retryCount: Int = 0,
timeout: TimeInterval,
using dependencies: Dependencies
) throws -> HTTP.PreparedRequest<R> {
return HTTP.PreparedRequest<R>(
request: request,
urlRequest: try request.generateUrlRequest(using: dependencies),
responseType: responseType,
retryCount: retryCount,
timeout: timeout
)
}
}

View File

@ -9,6 +9,8 @@ extension FileServerAPI {
case fileIndividual(fileId: String)
case sessionVersion
public static var name: String { "FileServerAPI.Endpoint" }
public var path: String {
switch self {
case .file: return "file"

View File

@ -22,7 +22,7 @@ public enum AttachmentDownloadJob: JobExecutor {
guard
let threadId: String = job.threadId,
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData),
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData),
let attachment: Attachment = dependencies[singleton: .storage]
.read({ db in try Attachment.fetchOne(db, id: details.attachmentId) })
else {
@ -49,7 +49,7 @@ public enum AttachmentDownloadJob: JobExecutor {
.compactMap { info -> String? in
guard let data: Data = info.detailsData else { return nil }
return (try? JSONDecoder().decode(Details.self, from: data))?
return (try? JSONDecoder(using: dependencies).decode(Details.self, from: data))?
.attachmentId
}
.asSet()

View File

@ -23,14 +23,9 @@ public enum AttachmentUploadJob: JobExecutor {
let threadId: String = job.threadId,
let interactionId: Int64 = job.interactionId,
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData),
let (attachment, openGroup): (Attachment, OpenGroup?) = dependencies[singleton: .storage].read({ db in
guard let attachment: Attachment = try Attachment.fetchOne(db, id: details.attachmentId) else {
return nil
}
return (attachment, try OpenGroup.fetchOne(db, id: threadId))
})
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData),
let attachment: Attachment = dependencies[singleton: .storage]
.read({ db in try Attachment.fetchOne(db, id: details.attachmentId) })
else {
SNLog("[AttachmentUploadJob] Failed due to missing details")
return failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
@ -52,11 +47,11 @@ public enum AttachmentUploadJob: JobExecutor {
// If this upload is related to sending a message then trigger the 'handleMessageWillSend' logic
// as if this is a retry the logic wouldn't run until after the upload has completed resulting in
// a potentially incorrect delivery status
dependencies[singleton: .storage].write { db in
dependencies[singleton: .storage].write(using: dependencies) { db in
guard
let sendJob: Job = try Job.fetchOne(db, id: details.messageSendJobId),
let sendJobDetails: Data = sendJob.details,
let details: MessageSendJob.Details = try? JSONDecoder()
let details: MessageSendJob.Details = try? JSONDecoder(using: dependencies)
.decode(MessageSendJob.Details.self, from: sendJobDetails)
else { return }
@ -71,10 +66,13 @@ public enum AttachmentUploadJob: JobExecutor {
// Note: In the AttachmentUploadJob we intentionally don't provide our own db instance to prevent
// reentrancy issues when the success/failure closures get called before the upload as the JobRunner
// will attempt to update the state of the job immediately
attachment
.upload(to: (openGroup.map { .openGroup($0) } ?? .fileServer), using: dependencies)
.subscribe(on: queue)
.receive(on: queue)
dependencies[singleton: .storage]
.writePublisher(using: dependencies) { db -> HTTP.PreparedRequest<String> in
try attachment.preparedUpload(db, threadId: threadId, using: dependencies)
}
.flatMap { $0.send(using: dependencies) }
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)
.sinkUntilComplete(
receiveCompletion: { result in
switch result {
@ -82,25 +80,29 @@ public enum AttachmentUploadJob: JobExecutor {
// If this upload is related to sending a message then trigger the
// 'handleFailedMessageSend' logic as we want to ensure the message
// has the correct delivery status
dependencies[singleton: .storage].read { db in
var didLogError: Bool = false
dependencies[singleton: .storage].read(using: dependencies) { db in
guard
let sendJob: Job = try Job.fetchOne(db, id: details.messageSendJobId),
let sendJobDetails: Data = sendJob.details,
let details: MessageSendJob.Details = try? JSONDecoder()
let details: MessageSendJob.Details = try? JSONDecoder(using: dependencies)
.decode(MessageSendJob.Details.self, from: sendJobDetails)
else { return }
MessageSender.handleFailedMessageSend(
db,
message: details.message,
with: .other(error),
with: .other("[AttachmentUploadJob] Failed", error),
interactionId: interactionId,
isSyncMessage: details.isSyncMessage,
using: dependencies
)
didLogError = true
}
SNLog("[AttachmentUploadJob] Failed due to error: \(error)")
// If we didn't log an error above then log it now
if !didLogError { SNLog("[AttachmentUploadJob] Failed due to error: \(error)") }
failure(job, error, false, dependencies)
case .finished: success(job, false, dependencies)

View File

@ -38,7 +38,7 @@ public enum ConfigMessageReceiveJob: JobExecutor {
guard
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
removeDependencyOnMessageReceiveJobs()
return failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)

View File

@ -72,76 +72,72 @@ public enum ConfigurationSyncJob: JobExecutor {
}
// Identify the destination and merge all obsolete hashes into a single set
let destination: Message.Destination = (publicKey == getUserHexEncodedPublicKey(using: dependencies) ?
let currentUserPublicKey: String = getUserHexEncodedPublicKey(using: dependencies)
let destination: Message.Destination = (publicKey == currentUserPublicKey ?
Message.Destination.contact(publicKey: publicKey) :
Message.Destination.closedGroup(groupPublicKey: publicKey)
)
let allObsoleteHashes: Set<String> = pendingConfigChanges
let allObsoleteHashes: Set<String>? = pendingConfigChanges
.map { $0.obsoleteHashes }
.reduce([], +)
.nullIfEmpty()?
.asSet()
let jobStartTimestamp: TimeInterval = dependencies.dateNow.timeIntervalSince1970
let messageSendTimestamp: Int64 = SnodeAPI.currentOffsetTimestampMs(using: dependencies)
SNLog("[ConfigurationSyncJob] For \(publicKey) started with \(pendingConfigChanges.count) change\(pendingConfigChanges.count == 1 ? "" : "s")")
dependencies[singleton: .storage]
.readPublisher { db -> (keyPair: KeyPair, changes: [MessageSender.PreparedSendData]) in
let changes: [MessageSender.PreparedSendData] = try pendingConfigChanges.map { change -> MessageSender.PreparedSendData in
try MessageSender.preparedSendData(
db,
message: change.message,
to: destination,
namespace: change.namespace,
interactionId: nil
)
}
switch destination {
case .contact:
return (
(
try Identity.fetchUserEd25519KeyPair(db, using: dependencies) ??
{ throw SnodeAPIError.noKeyPair }()
),
changes
)
case .closedGroup(let groupPublicKey):
// Only admins can update the group config messages
let keyPair: KeyPair = try {
guard
let group: ClosedGroup = try ClosedGroup.fetchOne(db, id: groupPublicKey),
let adminKey: Data = group.groupIdentityPrivateKey
else {
throw MessageSenderError.invalidClosedGroupUpdate
.readPublisher { db -> HTTP.PreparedRequest<HTTP.BatchResponse> in
try SnodeAPI.preparedSequence(
db,
requests: try pendingConfigChanges
.map { change -> ErasedPreparedRequest in
do {
return try MessageSender.preparedSendToSnodeDestination(
db,
message: change.message,
to: destination,
namespace: change.namespace,
interactionId: nil,
fileIds: [],
userPublicKey: currentUserPublicKey,
messageSendTimestamp: messageSendTimestamp,
using: dependencies
)
}
return KeyPair(
publicKey: Array(Data(hex: groupPublicKey).removingIdPrefixIfNeeded()),
secretKey: Array(adminKey)
)
}()
return (keyPair, changes)
default: throw HTTPError.invalidPreparedRequest
}
}
.flatMap { (keyPair: KeyPair, changes: [MessageSender.PreparedSendData]) -> AnyPublisher<(ResponseInfoType, HTTP.BatchResponse), Error> in
SnodeAPI
.sendConfigMessages(
changes.compactMap { change in
guard
let namespace: SnodeAPI.Namespace = change.namespace,
let snodeMessage: SnodeMessage = change.snodeMessage
else { return nil }
return (snodeMessage, namespace)
},
signedWith: keyPair,
allObsoleteHashes: Array(allObsoleteHashes),
using: dependencies
)
catch let error as MessageSenderError {
throw MessageSender.handleFailedMessageSend(
db,
message: change.message,
with: error,
interactionId: nil,
isSyncMessage: false,
using: dependencies
)
}
}
.appending(
try allObsoleteHashes.map { serverHashes -> ErasedPreparedRequest in
// TODO: Seems like older hashes aren't getting exposed via this method? (ie. I keep getting old ones when polling but not sure if they are included and not getting deleted, or just not included...)
// TODO: Need to test this in updated groups
try SnodeAPI.preparedDeleteMessages(
serverHashes: Array(serverHashes),
requireSuccessfulDeletion: false,
authInfo: try SnodeAPI.AuthenticationInfo(
db,
threadId: publicKey,
using: dependencies
),
using: dependencies
)
}
),
requireAllBatchResponses: false,
associatedWith: publicKey,
using: dependencies
)
}
.flatMap { $0.send(using: dependencies) }
.subscribe(on: queue)
.receive(on: queue)
.map { (_: ResponseInfoType, response: HTTP.BatchResponse) -> [ConfigDump] in
@ -149,7 +145,7 @@ public enum ConfigurationSyncJob: JobExecutor {
/// in the same order, this means we can just `zip` the two arrays as it will take the smaller of the two and
/// correctly align the response to the change
zip(response, pendingConfigChanges)
.compactMap { (subResponse: Decodable, change: SessionUtil.OutgoingConfResult) in
.compactMap { (subResponse: Any, change: SessionUtil.OutgoingConfResult) in
/// If the request wasn't successful then just ignore it (the next time we sync this config we will try
/// to send the changes again)
guard

View File

@ -21,7 +21,7 @@ public enum ExpirationUpdateJob: JobExecutor {
) {
guard
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
SNLog("[ExpirationUpdateJob] Failing due to missing details")
failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
@ -29,14 +29,20 @@ public enum ExpirationUpdateJob: JobExecutor {
}
let userPublicKey: String = getUserHexEncodedPublicKey(using: dependencies)
SnodeAPI
.updateExpiry(
publicKey: userPublicKey,
serverHashes: details.serverHashes,
updatedExpiryMs: details.expirationTimestampMs,
shortenOnly: true,
using: dependencies
)
dependencies[singleton: .storage]
.readPublisher(using: dependencies) { db in
try SnodeAPI.AuthenticationInfo(db, threadId: userPublicKey, using: dependencies)
}
.flatMap { authInfo in
SnodeAPI
.updateExpiry(
serverHashes: details.serverHashes,
updatedExpiryMs: details.expirationTimestampMs,
shortenOnly: true,
authInfo: authInfo,
using: dependencies
)
}
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)
.map { response -> [UInt64: [String]] in

View File

@ -31,7 +31,7 @@ public enum GarbageCollectionJob: JobExecutor {
/// **Note:** The reason we default to handling all cases (instead of just doing nothing in that case) is so the initial registration
/// of the garbageCollection job never needs to be updated as we continue to add more types going forward
let typesToCollect: [Types] = (job.details
.map { try? JSONDecoder().decode(Details.self, from: $0) }?
.map { try? JSONDecoder(using: dependencies).decode(Details.self, from: $0) }?
.typesToCollect)
.defaulting(to: Types.allCases)
let timestampNow: TimeInterval = dependencies.dateNow.timeIntervalSince1970

View File

@ -22,7 +22,7 @@ public enum GetExpirationJob: JobExecutor {
) {
guard
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
SNLog("[GetExpirationJob] Failing due to missing details")
failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
@ -47,13 +47,19 @@ public enum GetExpirationJob: JobExecutor {
.getSwarm(for: userPublicKey, using: dependencies)
.tryFlatMap { swarm -> AnyPublisher<(ResponseInfoType, GetExpiriesResponse), Error> in
guard let snode = swarm.randomElement() else { throw SnodeAPIError.generic }
return SnodeAPI.getExpiries(
from: snode,
associatedWith: userPublicKey,
of: expirationInfo.map { $0.key },
using: dependencies
)
return dependencies[singleton: .storage]
.readPublisher(using: dependencies) { db in
try SnodeAPI.AuthenticationInfo(db, threadId: userPublicKey, using: dependencies)
}
.flatMap { authInfo in
SnodeAPI.getExpiries(
from: snode,
of: expirationInfo.map { $0.key },
authInfo: authInfo,
using: dependencies
)
}
.eraseToAnyPublisher()
}
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)

View File

@ -33,7 +33,7 @@ public enum GroupInviteMemberJob: JobExecutor {
return (groupName, Profile.fetchOrCreateCurrentUser(db, using: dependencies))
}),
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
SNLog("[InviteGroupMemberJob] Failing due to missing details")
failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)

View File

@ -22,7 +22,7 @@ public enum GroupLeavingJob: JobExecutor {
) {
guard
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData),
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData),
let threadId: String = job.threadId,
let interactionId: Int64 = job.interactionId
else {
@ -33,7 +33,7 @@ public enum GroupLeavingJob: JobExecutor {
let destination: Message.Destination = .closedGroup(groupPublicKey: threadId)
dependencies[singleton: .storage]
.writePublisher { db in
.writePublisher { db -> HTTP.PreparedRequest<Void> in
guard (try? SessionThread.exists(db, id: threadId)) == true else {
SNLog("[GroupLeavingJob] Failed due to non-existent group conversation")
throw MessageSenderError.noThread
@ -43,7 +43,7 @@ public enum GroupLeavingJob: JobExecutor {
throw MessageSenderError.invalidClosedGroupUpdate
}
return try MessageSender.preparedSendData(
return try MessageSender.preparedSend(
db,
message: ClosedGroupControlMessage(
kind: .memberLeft
@ -51,13 +51,14 @@ public enum GroupLeavingJob: JobExecutor {
to: destination,
namespace: destination.defaultNamespace,
interactionId: job.interactionId,
fileIds: [],
isSyncMessage: false,
using: dependencies
)
}
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.subscribe(on: queue)
.receive(on: queue)
.flatMap { $0.send(using: dependencies) }
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)
.sinkUntilComplete(
receiveCompletion: { result in
let failureChanges: [ConfigColumnAssignment] = [

View File

@ -20,7 +20,7 @@ public enum MessageReceiveJob: JobExecutor {
guard
let threadId: String = job.threadId,
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
return failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
}

View File

@ -22,7 +22,7 @@ public enum MessageSendJob: JobExecutor {
) {
guard
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
SNLog("[MessageSendJob] Failing due to missing details")
return failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
@ -168,19 +168,19 @@ public enum MessageSendJob: JobExecutor {
/// **Note:** No need to upload attachments as part of this process as the above logic splits that out into its own job
/// so we shouldn't get here until attachments have already been uploaded
dependencies[singleton: .storage]
.writePublisher { db in
try MessageSender.preparedSendData(
.writePublisher { db -> HTTP.PreparedRequest<Void> in
try MessageSender.preparedSend(
db,
message: details.message,
to: details.destination,
namespace: details.destination.defaultNamespace,
interactionId: job.interactionId,
fileIds: messageFileIds,
isSyncMessage: details.isSyncMessage,
using: dependencies
)
}
.map { sendData in sendData.with(fileIds: messageFileIds) }
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)
.sinkUntilComplete(

View File

@ -21,7 +21,7 @@ public enum NotifyPushServerJob: JobExecutor {
) {
guard
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
SNLog("[NotifyPushServerJob] Failing due to missing details")
return failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
@ -37,8 +37,8 @@ public enum NotifyPushServerJob: JobExecutor {
)
}
.flatMap { $0.send(using: dependencies) }
.subscribe(on: queue)
.receive(on: queue)
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)
.sinkUntilComplete(
receiveCompletion: { result in
switch result {

View File

@ -22,7 +22,7 @@ public enum SendReadReceiptsJob: JobExecutor {
guard
let threadId: String = job.threadId,
let detailsData: Data = job.details,
let details: Details = try? JSONDecoder().decode(Details.self, from: detailsData)
let details: Details = try? JSONDecoder(using: dependencies).decode(Details.self, from: detailsData)
else {
return failure(job, JobRunnerError.missingRequiredDetails, true, dependencies)
}
@ -35,8 +35,8 @@ public enum SendReadReceiptsJob: JobExecutor {
}
dependencies[singleton: .storage]
.writePublisher { db in
try MessageSender.preparedSendData(
.writePublisher { db -> HTTP.PreparedRequest<Void> in
try MessageSender.preparedSend(
db,
message: ReadReceipt(
timestamps: details.timestampMsValues.map { UInt64($0) }
@ -44,12 +44,13 @@ public enum SendReadReceiptsJob: JobExecutor {
to: details.destination,
namespace: details.destination.defaultNamespace,
interactionId: nil,
fileIds: [],
isSyncMessage: false
)
}
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.subscribe(on: queue)
.receive(on: queue)
.flatMap { $0.send(using: dependencies) }
.subscribe(on: queue, using: dependencies)
.receive(on: queue, using: dependencies)
.sinkUntilComplete(
receiveCompletion: { result in
switch result {
@ -139,7 +140,8 @@ public extension SendReadReceiptsJob {
.fetchOne(db),
!dependencies[singleton: .jobRunner].isCurrentlyRunning(existingJob),
let existingDetailsData: Data = existingJob.details,
let existingDetails: Details = try? JSONDecoder().decode(Details.self, from: existingDetailsData)
let existingDetails: Details = try? JSONDecoder(using: dependencies)
.decode(Details.self, from: existingDetailsData)
{
let maybeUpdatedJob: Job? = existingJob
.with(

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionSnodeKit
import SessionUtilitiesKit
extension OpenGroupAPI {
@ -69,9 +70,6 @@ extension OpenGroupAPI.Message {
guard let sender: String = maybeSender, let data = Data(base64Encoded: base64EncodedData), let signature = Data(base64Encoded: base64EncodedSignature) else {
throw HTTPError.parsingFailed
}
guard let dependencies: Dependencies = decoder.userInfo[Dependencies.userInfoKey] as? Dependencies else {
throw HTTPError.parsingFailed
}
// Verify the signature based on the SessionId.Prefix type
let publicKey: Data = Data(hex: sender.removingIdPrefixIfNeeded())
@ -79,7 +77,7 @@ extension OpenGroupAPI.Message {
switch SessionId.Prefix(from: sender) {
case .blinded15, .blinded25:
guard
dependencies[singleton: .crypto].verify(
decoder.dependencies[singleton: .crypto].verify(
.signature(message: data.bytes, publicKey: publicKey.bytes, signature: signature.bytes)
)
else {
@ -89,7 +87,7 @@ extension OpenGroupAPI.Message {
case .standard, .unblinded:
guard
dependencies[singleton: .crypto].verify(
decoder.dependencies[singleton: .crypto].verify(
.signatureEd25519(signature, publicKey: publicKey, data: data)
)
else {

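The `decoder.dependencies` accessor used in the replacement lines above isn't shown in this diff; presumably the `JSONDecoder(using:)` initialiser stashes the injected container somewhere the decoder can read it back (eg. `userInfo`). A hedged sketch of that pattern, using placeholder names rather than the real types, might be:

import Foundation

// Placeholder container and key, purely for illustration
final class DependenciesSketch {
    static let userInfoKey: CodingUserInfoKey = CodingUserInfoKey(rawValue: "io.example.dependencies")!
}

extension JSONDecoder {
    // Hypothetical convenience mirroring the `JSONDecoder(using: dependencies)` calls in the diff
    convenience init(sketchUsing dependencies: DependenciesSketch) {
        self.init()
        userInfo[DependenciesSketch.userInfoKey] = dependencies
    }
}

extension Decoder {
    // Hypothetical mirror of the `decoder.dependencies` accessor
    var sketchDependencies: DependenciesSketch? {
        userInfo[DependenciesSketch.userInfoKey] as? DependenciesSketch
    }
}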
View File

@ -151,8 +151,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<HTTP.BatchResponseMap<Endpoint>> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: .batch,
@ -182,8 +182,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<HTTP.BatchResponseMap<Endpoint>> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: .sequence,
@ -212,13 +212,13 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<Capabilities> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .capabilities
endpoint: .capabilities,
forceBlinded: forceBlinded
),
responseType: Capabilities.self,
forceBlinded: forceBlinded,
using: dependencies
)
.signed(db, with: OpenGroupAPI.signRequest, using: dependencies)
@ -236,8 +236,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[Room]> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .rooms
),
@ -256,8 +256,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<Room> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .room(roomToken)
),
@ -280,8 +280,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<RoomPollInfo> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .roomPollInfo(roomToken, lastUpdated)
),
@ -319,7 +319,7 @@ public enum OpenGroupAPI {
.signed(db, with: OpenGroupAPI.signRequest, using: dependencies)
.map { (info: ResponseInfoType, response: HTTP.BatchResponseMap<Endpoint>) -> CapabilitiesAndRoomResponse in
let maybeCapabilities: HTTP.BatchSubResponse<Capabilities>? = (response[.capabilities] as? HTTP.BatchSubResponse<Capabilities>)
let maybeRoomResponse: Decodable? = response.data
let maybeRoomResponse: Any? = response.data
.first(where: { key, _ in
switch key {
case .room: return true
@ -416,8 +416,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: Endpoint.roomMessage(roomToken),
@ -445,8 +445,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<Message> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .roomMessageIndividual(roomToken, id: id)
),
@ -478,8 +478,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .put,
server: server,
endpoint: Endpoint.roomMessageIndividual(roomToken, id: id),
@ -505,8 +505,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .delete,
server: server,
endpoint: .roomMessageIndividual(roomToken, id: id)
@ -530,8 +530,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[Failable<Message>]> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .roomMessagesRecent(roomToken),
queryParameters: [
@ -560,8 +560,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[Failable<Message>]> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .roomMessagesBefore(roomToken, id: messageId),
queryParameters: [
@ -590,8 +590,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[Failable<Message>]> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .roomMessagesSince(roomToken, seqNo: seqNo),
queryParameters: [
@ -627,8 +627,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .delete,
server: server,
endpoint: Endpoint.roomDeleteMessages(roomToken, sessionId: sessionId)
@ -658,8 +658,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .get,
server: server,
endpoint: .reactors(roomToken, id: id, emoji: encodedEmoji)
@ -690,8 +690,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .put,
server: server,
endpoint: .reaction(roomToken, id: id, emoji: encodedEmoji)
@ -720,8 +720,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .delete,
server: server,
endpoint: .reaction(roomToken, id: id, emoji: encodedEmoji)
@ -751,8 +751,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .delete,
server: server,
endpoint: .reactionDelete(roomToken, id: id, emoji: encodedEmoji)
@ -784,8 +784,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .post,
server: server,
endpoint: .roomPinMessage(roomToken, id: id)
@ -808,8 +808,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .post,
server: server,
endpoint: .roomUnpinMessage(roomToken, id: id)
@ -831,8 +831,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .post,
server: server,
endpoint: .roomUnpinAll(roomToken)
@ -861,8 +861,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<FileUploadResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: Endpoint.roomFile(roomToken),
@ -894,8 +894,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<Data> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .roomFileIndividual(roomToken, fileId)
),
@ -918,8 +918,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[DirectMessage]?> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .inbox
),
@ -940,8 +940,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[DirectMessage]?> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .inboxSince(id: id)
),
@ -959,8 +959,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<DeleteInboxResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
method: .delete,
server: server,
endpoint: .inbox
@ -983,8 +983,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<SendDirectMessageResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: Endpoint.inboxFor(sessionId: blindedSessionId),
@ -1008,8 +1008,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[DirectMessage]?> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .outbox
),
@ -1030,8 +1030,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<[DirectMessage]?> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request<NoBody, Endpoint>(
db,
server: server,
endpoint: .outboxSince(id: id)
),
@ -1084,8 +1084,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: Endpoint.userBan(sessionId),
@ -1134,8 +1134,8 @@ public enum OpenGroupAPI {
) throws -> HTTP.PreparedRequest<NoResponse> {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: Endpoint.userUnban(sessionId),
@ -1217,8 +1217,8 @@ public enum OpenGroupAPI {
return try OpenGroupAPI
.prepareRequest(
db,
request: Request(
db,
method: .post,
server: server,
endpoint: Endpoint.userModerator(sessionId),
@ -1282,7 +1282,7 @@ public enum OpenGroupAPI {
using dependencies: Dependencies
) throws -> (publicKey: String, signature: Bytes) {
guard
let userEdKeyPair: KeyPair = Identity.fetchUserEd25519KeyPair(db),
let userEdKeyPair: KeyPair = Identity.fetchUserEd25519KeyPair(db, using: dependencies),
let serverPublicKey: String = try? OpenGroup
.select(.publicKey)
.filter(OpenGroup.Columns.server == serverName.lowercased())
@ -1331,7 +1331,7 @@ public enum OpenGroupAPI {
// Default to using the 'standard' key
default:
guard
let userKeyPair: KeyPair = Identity.fetchUserKeyPair(db),
let userKeyPair: KeyPair = Identity.fetchUserKeyPair(db, using: dependencies),
let signatureResult: Bytes = try? dependencies[singleton: .crypto].perform(
.signEd25519(data: messageBytes, keyPair: userKeyPair)
)
@ -1352,11 +1352,7 @@ public enum OpenGroupAPI {
) throws -> URLRequest {
guard
let url: URL = preparedRequest.request.url,
let serverPublicKey: String = try? OpenGroup
.select(.publicKey)
.filter(OpenGroup.Columns.server == preparedRequest.server.lowercased())
.asRequest(of: String.self)
.fetchOne(db)
let target: HTTP.OpenGroupAPITarget = preparedRequest.target as? HTTP.OpenGroupAPITarget
else { throw OpenGroupAPIError.signingFailed }
var updatedRequest: URLRequest = preparedRequest.request
@ -1364,7 +1360,7 @@ public enum OpenGroupAPI {
.appending(url.query.map { value in "?\(value)" })
let method: String = preparedRequest.method.rawValue
let timestamp: Int = Int(floor(dependencies.dateNow.timeIntervalSince1970))
let serverPublicKeyData: Data = Data(hex: serverPublicKey)
let serverPublicKeyData: Data = Data(hex: target.serverPublicKey)
guard
!serverPublicKeyData.isEmpty,
@ -1398,9 +1394,9 @@ public enum OpenGroupAPI {
let signResult: (publicKey: String, signature: Bytes) = try sign(
db,
messageBytes: messageBytes,
for: preparedRequest.server,
for: target.server,
fallbackSigningType: .unblinded,
forceBlinded: ((preparedRequest.metadata[.forceBlinded] as? Bool) == true),
forceBlinded: target.forceBlinded,
using: dependencies
)
@ -1421,27 +1417,15 @@ public enum OpenGroupAPI {
/// method is mainly here so we can separate the preparation of a request, which requires access to the database for signing, from the
/// actual sending of the request to ensure we don't run into any unexpected blocking of the database write thread
private static func prepareRequest<T: Encodable, R: Decodable>(
_ db: Database,
request: Request<T, Endpoint>,
responseType: R.Type,
forceBlinded: Bool = false,
timeout: TimeInterval = HTTP.defaultTimeout,
using dependencies: Dependencies = Dependencies()
) throws -> HTTP.PreparedRequest<R> {
let maybePublicKey: String? = try? OpenGroup
.select(.publicKey)
.filter(OpenGroup.Columns.server == request.server.lowercased())
.asRequest(of: String.self)
.fetchOne(db)
guard let publicKey: String = maybePublicKey else { throw OpenGroupAPIError.noPublicKey }
return HTTP.PreparedRequest(
request: request,
urlRequest: try request.generateUrlRequest(),
publicKey: publicKey,
urlRequest: try request.generateUrlRequest(using: dependencies),
responseType: responseType,
metadata: [.forceBlinded: forceBlinded],
timeout: timeout
)
}
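Since the prepare/send split described above is the core of the `PreparedRequest` pattern, a minimal self-contained sketch of the idea follows; the `PreparedThing` type, URL and function names are hypothetical stand-ins, not the real SessionMessagingKit API:

import Combine
import Foundation

// Hypothetical stand-ins used purely to illustrate the pattern
struct PreparedThing {
    let urlRequest: URLRequest

    // The network call only happens when `send` is invoked, never during preparation
    func send() -> AnyPublisher<Data, Error> {
        URLSession.shared.dataTaskPublisher(for: urlRequest)
            .map { $0.data }
            .mapError { $0 as Error }
            .eraseToAnyPublisher()
    }
}

// Preparation is synchronous and is where any database access (eg. reading signing keys) would occur
func prepare() throws -> PreparedThing {
    PreparedThing(urlRequest: URLRequest(url: URL(string: "https://example.com")!))
}

// Build (and sign) the request first, then perform the slow network call afterwards so
// the database write thread is never blocked waiting on I/O
func prepareThenSend() -> AnyPublisher<Data, Error> {
    Result { try prepare() }
        .publisher
        .flatMap { prepared in prepared.send() }
        .eraseToAnyPublisher()
}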

View File

@ -1,8 +0,0 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
public extension HTTPRequestMetadata {
static let forceBlinded: HTTPRequestMetadata = "forceBlinded"
}

View File

@ -0,0 +1,58 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionUtilitiesKit
// MARK: - OpenGroupAPITarget
internal extension HTTP {
struct OpenGroupAPITarget: ServerRequestTarget {
public let server: String
let path: String
let queryParameters: [HTTPQueryParam: String]
public let serverPublicKey: String
public let forceBlinded: Bool
public var url: URL? { URL(string: "\(server)\(urlPathAndParamsString)") }
public var urlPathAndParamsString: String { pathFor(path: path, queryParams: queryParameters) }
public var x25519PublicKey: String { serverPublicKey }
}
}
// MARK: Request - OpenGroupAPITarget
public extension Request {
init(
_ db: Database,
method: HTTPMethod = .get,
server: String,
endpoint: Endpoint,
queryParameters: [HTTPQueryParam: String] = [:],
headers: [HTTPHeader: String] = [:],
body: T? = nil,
forceBlinded: Bool = false
) throws {
let maybePublicKey: String? = try? OpenGroup
.select(.publicKey)
.filter(OpenGroup.Columns.server == server.lowercased())
.asRequest(of: String.self)
.fetchOne(db)
guard let publicKey: String = maybePublicKey else { throw OpenGroupAPIError.noPublicKey }
self = Request(
method: method,
endpoint: endpoint,
target: HTTP.OpenGroupAPITarget(
server: server,
path: endpoint.path,
queryParameters: queryParameters,
serverPublicKey: publicKey,
forceBlinded: forceBlinded
),
headers: headers,
body: body
)
}
}

View File

@ -59,6 +59,7 @@ extension OpenGroupAPI {
case userUnban(String)
case userModerator(String)
public static var name: String { "OpenGroupAPI.Endpoint" }
public static var batchRequestVariant: HTTP.BatchRequest.Child.Variant = .sogs
public static var excludedSubRequestHeaders: [HTTPHeader] = [
.sogsPubKey, .sogsTimestamp, .sogsNonce, .sogsSignature

View File

@ -18,7 +18,7 @@ public enum MessageSenderError: LocalizedError, Equatable {
case noKeyPair
case invalidClosedGroupUpdate
case other(Error)
case other(String, Error)
internal var isRetryable: Bool {
switch self {
@ -46,7 +46,7 @@ public enum MessageSenderError: LocalizedError, Equatable {
case .noThread: return "Couldn't find a thread associated with the given group public key."
case .noKeyPair: return "Couldn't find a private key associated with the given group public key."
case .invalidClosedGroupUpdate: return "Invalid group update."
case .other(let error): return error.localizedDescription
case .other(_, let error): return error.localizedDescription
}
}
@ -65,9 +65,12 @@ public enum MessageSenderError: LocalizedError, Equatable {
case (.invalidClosedGroupUpdate, .invalidClosedGroupUpdate): return true
case (.blindingFailed, .blindingFailed): return true
case (.other(let lhsError), .other(let rhsError)):
case (.other(let lhsDescription, let lhsError), .other(let rhsDescription, let rhsError)):
// Not ideal but the best we can do
return (lhsError.localizedDescription == rhsError.localizedDescription)
return (
lhsDescription == rhsDescription &&
lhsError.localizedDescription == rhsError.localizedDescription
)
default: return false
}

View File

@ -213,7 +213,7 @@ extension MessageReceiver {
guard
let caller: String = message.sender,
let messageInfoData: Data = try? JSONEncoder().encode(messageInfo),
let messageInfoData: Data = try? JSONEncoder(using: dependencies).encode(messageInfo),
let thread: SessionThread = try SessionThread.fetchOne(db, id: caller),
!thread.isMessageRequest(db)
else { return }
@ -243,27 +243,26 @@ extension MessageReceiver {
)
.inserted(db)
MessageSender.sendImmediate(
data: try MessageSender
.preparedSendData(
db,
message: CallMessage(
uuid: message.uuid,
kind: .endCall,
sdps: [],
sentTimestampMs: nil // Explicitly nil as it's a separate message from above
),
to: try Message.Destination.from(db, threadId: thread.id, threadVariant: thread.variant),
namespace: try Message.Destination
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: nil, // Explicitly nil as it's a separate message from above
using: dependencies
try MessageSender
.preparedSend(
db,
message: CallMessage(
uuid: message.uuid,
kind: .endCall,
sdps: [],
sentTimestampMs: nil // Explicitly nil as it's a separate message from above
),
using: dependencies
)
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.sinkUntilComplete()
to: try Message.Destination.from(db, threadId: thread.id, threadVariant: thread.variant),
namespace: try Message.Destination
.from(db, threadId: thread.id, threadVariant: thread.variant)
.defaultNamespace,
interactionId: nil, // Explicitly nil as it's a separate message from above
fileIds: [],
using: dependencies
)
.send(using: dependencies)
.subscribe(on: DispatchQueue.global(qos: .userInitiated))
.sinkUntilComplete()
}
@discardableResult public static func insertCallInfoMessage(
@ -294,10 +293,12 @@ extension MessageReceiver {
)
let timestampMs: Int64 = (
message.sentTimestamp.map { Int64($0) } ??
SnodeAPI.currentOffsetTimestampMs()
SnodeAPI.currentOffsetTimestampMs(using: dependencies)
)
guard let messageInfoData: Data = try? JSONEncoder().encode(messageInfo) else { return nil }
guard let messageInfoData: Data = try? JSONEncoder(using: dependencies).encode(messageInfo) else {
return nil
}
return try Interaction(
serverHash: message.serverHash,

View File

@ -51,6 +51,7 @@ extension MessageReceiver {
name: name,
authData: authData,
joinedAt: created,
approved: approved,
using: dependencies
)
}

View File

@ -45,12 +45,18 @@ extension MessageReceiver {
}
if author == message.sender, let serverHash: String = interaction.serverHash {
SnodeAPI
.deleteMessages(
publicKey: author,
serverHashes: [serverHash],
using: dependencies
)
dependencies[singleton: .storage]
.readPublisher(using: dependencies) { db in
try SnodeAPI.AuthenticationInfo(db, threadId: author, using: dependencies)
}
.flatMap { authInfo in
SnodeAPI
.deleteMessages(
serverHashes: [serverHash],
authInfo: authInfo,
using: dependencies
)
}
.subscribe(on: DispatchQueue.global(qos: .background), using: dependencies)
.sinkUntilComplete()
}

View File

@ -16,7 +16,7 @@ extension MessageSender {
using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<SessionThread, Error> {
dependencies[singleton: .storage]
.writePublisher { db -> (String, SessionThread, [MessageSender.PreparedSendData], Set<String>) in
.writePublisher { db -> (String, SessionThread, [HTTP.PreparedRequest<Void>], Set<String>) in
// Generate the group's two keys
guard
let groupKeyPair: KeyPair = dependencies[singleton: .crypto].generate(.x25519KeyPair()),
@ -88,9 +88,9 @@ extension MessageSender {
using: dependencies
)
let memberSendData: [MessageSender.PreparedSendData] = try members
.map { memberId -> MessageSender.PreparedSendData in
try MessageSender.preparedSendData(
let memberSendData: [HTTP.PreparedRequest<Void>] = try members
.map { memberId -> HTTP.PreparedRequest<Void> in
try MessageSender.preparedSend(
db,
message: ClosedGroupControlMessage(
kind: .new(
@ -108,6 +108,7 @@ extension MessageSender {
to: .contact(publicKey: memberId),
namespace: Message.Destination.contact(publicKey: memberId).defaultNamespace,
interactionId: nil,
fileIds: [],
using: dependencies
)
}
@ -129,7 +130,6 @@ extension MessageSender {
.MergeMany(
// Send a closed group update message to all members individually
memberSendData
.map { MessageSender.sendImmediate(data: $0, using: dependencies) }
.appending(
// Resubscribe to all legacy groups
try? PushNotificationAPI
@ -137,10 +137,9 @@ extension MessageSender {
currentUserPublicKey: userPublicKey,
legacyGroupIds: allActiveLegacyGroupIds
)?
.send(using: dependencies)
.map { _ in () }
.eraseToAnyPublisher()
.map { _, _ in () }
)
.map { $0.send(using: dependencies) }
)
.collect()
.map { _ in thread }
@ -173,7 +172,7 @@ extension MessageSender {
}
return dependencies[singleton: .storage]
.readPublisher { db -> (ClosedGroupKeyPair, MessageSender.PreparedSendData) in
.readPublisher { db -> (ClosedGroupKeyPair, HTTP.PreparedRequest<Void>) in
// Generate the new encryption key pair
guard let legacyNewKeyPair: KeyPair = dependencies[singleton: .crypto].generate(.x25519KeyPair()) else {
throw MessageSenderError.noKeyPair
@ -198,8 +197,8 @@ extension MessageSender {
.appending(newKeyPair)
}
let sendData: MessageSender.PreparedSendData = try MessageSender
.preparedSendData(
let preparedRequest: HTTP.PreparedRequest<Void> = try MessageSender
.preparedSend(
db,
message: ClosedGroupControlMessage(
kind: .encryptionKeyPair(
@ -223,13 +222,14 @@ extension MessageSender {
.from(db, threadId: closedGroup.threadId, threadVariant: .legacyGroup)
.defaultNamespace,
interactionId: nil,
fileIds: [],
using: dependencies
)
return (newKeyPair, sendData)
return (newKeyPair, preparedRequest)
}
.flatMap { newKeyPair, sendData -> AnyPublisher<ClosedGroupKeyPair, Error> in
MessageSender.sendImmediate(data: sendData, using: dependencies)
.flatMap { newKeyPair, preparedRequest -> AnyPublisher<ClosedGroupKeyPair, Error> in
preparedRequest.send(using: dependencies)
.map { _ in newKeyPair }
.eraseToAnyPublisher()
}
@ -553,7 +553,7 @@ extension MessageSender {
// Send the update to the group and generate + distribute a new encryption key pair
return try MessageSender
.preparedSendData(
.preparedSend(
db,
message: ClosedGroupControlMessage(
kind: .membersRemoved(
@ -566,10 +566,11 @@ extension MessageSender {
.from(db, threadId: closedGroup.threadId, threadVariant: .legacyGroup)
.defaultNamespace,
interactionId: interactionId,
fileIds: [],
using: dependencies
)
}
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.flatMap { _ -> AnyPublisher<Void, Error> in
MessageSender.generateAndSendNewEncryptionKeyPair(
targetMembers: members,

View File

@ -79,39 +79,52 @@ public enum MessageReceiver {
else {
throw MessageReceiverError.invalidGroupPublicKey
}
guard
let encryptionKeyPairs: [ClosedGroupKeyPair] = try? closedGroup.keyPairs
.order(ClosedGroupKeyPair.Columns.receivedTimestamp.desc)
.fetchAll(db),
!encryptionKeyPairs.isEmpty
else {
throw MessageReceiverError.noGroupKeyPair
}
// Loop through all known group key pairs in reverse order (i.e. try the latest key
// pair first (which'll more than likely be the one we want) but try older ones in
// case that didn't work)
func decrypt(keyPairs: [ClosedGroupKeyPair], lastError: Error? = nil) throws -> (Data, String) {
guard let keyPair: ClosedGroupKeyPair = keyPairs.first else {
throw (lastError ?? MessageReceiverError.decryptionFailed)
}
do {
return try decryptWithSessionProtocol(
switch SessionId.Prefix(from: hexEncodedGroupPublicKey) {
case .group:
groupPublicKey = hexEncodedGroupPublicKey
plaintext = try SessionUtil.decrypt(
ciphertext: ciphertext,
using: KeyPair(
publicKey: keyPair.publicKey.bytes,
secretKey: keyPair.secretKey.bytes
)
groupIdentityPublicKey: hexEncodedGroupPublicKey,
using: dependencies
)
}
catch {
return try decrypt(keyPairs: Array(keyPairs.suffix(from: 1)), lastError: error)
}
sender = getUserHexEncodedPublicKey(db, using: dependencies)
default:
guard
let encryptionKeyPairs: [ClosedGroupKeyPair] = try? closedGroup.keyPairs
.order(ClosedGroupKeyPair.Columns.receivedTimestamp.desc)
.fetchAll(db),
!encryptionKeyPairs.isEmpty
else {
throw MessageReceiverError.noGroupKeyPair
}
// Loop through all known group key pairs in reverse order (i.e. try the latest key
// pair first (which'll more than likely be the one we want) but try older ones in
// case that didn't work)
func decrypt(keyPairs: [ClosedGroupKeyPair], lastError: Error? = nil) throws -> (Data, String) {
guard let keyPair: ClosedGroupKeyPair = keyPairs.first else {
throw (lastError ?? MessageReceiverError.decryptionFailed)
}
do {
return try decryptWithSessionProtocol(
ciphertext: ciphertext,
using: KeyPair(
publicKey: keyPair.publicKey.bytes,
secretKey: keyPair.secretKey.bytes
)
)
}
catch {
return try decrypt(keyPairs: Array(keyPairs.suffix(from: 1)), lastError: error)
}
}
groupPublicKey = hexEncodedGroupPublicKey
(plaintext, sender) = try decrypt(keyPairs: encryptionKeyPairs)
}
groupPublicKey = hexEncodedGroupPublicKey
(plaintext, sender) = try decrypt(keyPairs: encryptionKeyPairs)
default: throw MessageReceiverError.unknownEnvelopeType
}

View File

@ -6,7 +6,6 @@ import GRDB
import SessionUtilitiesKit
extension MessageSender {
// MARK: - Durable
public static func send(
@ -95,18 +94,19 @@ extension MessageSender {
// MARK: - Non-Durable
public static func preparedSendData(
public static func preparedSend(
_ db: Database,
interaction: Interaction,
fileIds: [String],
threadId: String,
threadVariant: SessionThread.Variant,
using dependencies: Dependencies
) throws -> PreparedSendData {
) throws -> HTTP.PreparedRequest<Void> {
// Only 'VisibleMessage' types can be sent via this method
guard interaction.variant == .standardOutgoing else { throw MessageSenderError.invalidMessage }
guard let interactionId: Int64 = interaction.id else { throw StorageError.objectNotSaved }
return try MessageSender.preparedSendData(
return try MessageSender.preparedSend(
db,
message: VisibleMessage.from(db, interaction: interaction),
to: try Message.Destination.from(db, threadId: threadId, threadVariant: threadVariant),
@ -114,83 +114,11 @@ extension MessageSender {
.from(db, threadId: threadId, threadVariant: threadVariant)
.defaultNamespace,
interactionId: interactionId,
fileIds: fileIds,
using: dependencies
)
}
public static func performUploadsIfNeeded(
preparedSendData: PreparedSendData,
using dependencies: Dependencies
) -> AnyPublisher<PreparedSendData, Error> {
// We need an interactionId in order for a message to have uploads
guard let interactionId: Int64 = preparedSendData.interactionId else {
return Just(preparedSendData)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
let threadId: String = {
switch preparedSendData.destination {
case .contact(let publicKey): return publicKey
case .closedGroup(let groupPublicKey): return groupPublicKey
case .openGroup(let roomToken, let server, _, _, _):
return OpenGroup.idFor(roomToken: roomToken, server: server)
case .openGroupInbox(_, _, let blindedPublicKey): return blindedPublicKey
}
}()
return dependencies[singleton: .storage]
.readPublisher { db -> (attachments: [Attachment], openGroup: OpenGroup?) in
let attachmentStateInfo: [Attachment.StateInfo] = (try? Attachment
.stateInfo(interactionId: interactionId, state: .uploading)
.fetchAll(db))
.defaulting(to: [])
// If there is no attachment data then just return early
guard !attachmentStateInfo.isEmpty else { return ([], nil) }
// Otherwise fetch the open group (if there is one)
return (
(try? Attachment
.filter(ids: attachmentStateInfo.map { $0.attachmentId })
.fetchAll(db))
.defaulting(to: []),
try? OpenGroup.fetchOne(db, id: threadId)
)
}
.flatMap { attachments, openGroup -> AnyPublisher<[String], Error> in
guard !attachments.isEmpty else {
return Just<[String]>([])
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
return Publishers
.MergeMany(
attachments
.map { attachment -> AnyPublisher<String, Error> in
attachment
.upload(
to: (
openGroup.map { Attachment.Destination.openGroup($0) } ??
.fileServer
),
using: dependencies
)
}
)
.collect()
.eraseToAnyPublisher()
}
.map { fileIds -> PreparedSendData in
// Once the attachments are processed then update the PreparedSendData with
// the fileIds associated to the message
return preparedSendData.with(fileIds: fileIds)
}
.eraseToAnyPublisher()
}
// MARK: - Convenience
internal static func getSpecifiedTTL(

File diff suppressed because it is too large

View File

@ -196,7 +196,6 @@ public enum PushNotificationAPI {
.prepareRequest(
request: Request(
method: .post,
server: PushNotificationAPI.server,
endpoint: .subscribe,
body: SubscribeRequest(
pubkey: publicKey,
@ -244,7 +243,6 @@ public enum PushNotificationAPI {
.prepareRequest(
request: Request(
method: .post,
server: PushNotificationAPI.server,
endpoint: .unsubscribe,
body: UnsubscribeRequest(
pubkey: publicKey,
@ -288,7 +286,6 @@ public enum PushNotificationAPI {
.prepareRequest(
request: Request(
method: .post,
server: PushNotificationAPI.legacyServer,
endpoint: .legacyNotify,
body: LegacyNotifyRequest(
data: message,
@ -336,7 +333,6 @@ public enum PushNotificationAPI {
.prepareRequest(
request: Request(
method: .post,
server: PushNotificationAPI.legacyServer,
endpoint: .legacyGroupsOnlySubscribe,
body: LegacyGroupOnlyRequest(
token: deviceToken,
@ -373,7 +369,6 @@ public enum PushNotificationAPI {
.prepareRequest(
request: Request(
method: .post,
server: PushNotificationAPI.legacyServer,
endpoint: .legacyGroupUnsubscribe,
body: LegacyGroupRequest(
pubKey: currentUserPublicKey,
@ -519,7 +514,7 @@ public enum PushNotificationAPI {
) -> AnyPublisher<(ResponseInfoType, Data?), Error> {
guard
let url: URL = URL(string: "\(request.endpoint.server)/\(request.endpoint.path)"),
let payload: Data = try? JSONEncoder().encode(request.body)
let payload: Data = try? JSONEncoder(using: dependencies).encode(request.body)
else {
return Fail(error: HTTPError.invalidJSON)
.eraseToAnyPublisher()
@ -563,8 +558,7 @@ public enum PushNotificationAPI {
) throws -> HTTP.PreparedRequest<R> {
return HTTP.PreparedRequest<R>(
request: request,
urlRequest: try request.generateUrlRequest(),
publicKey: request.endpoint.serverPublicKey,
urlRequest: try request.generateUrlRequest(using: dependencies),
responseType: responseType,
retryCount: retryCount,
timeout: timeout

View File

@ -17,6 +17,8 @@ public extension PushNotificationAPI {
case legacyGroupSubscribe
case legacyGroupUnsubscribe
public static var name: String { "PushNotificationAPI.Endpoint" }
public var path: String {
switch self {
case .subscribe: return "subscribe"

View File

@ -0,0 +1,29 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
// MARK: Request - PushNotificationAPI
public extension Request where Endpoint == PushNotificationAPI.Endpoint {
init(
method: HTTPMethod = .get,
endpoint: Endpoint,
queryParameters: [HTTPQueryParam: String] = [:],
headers: [HTTPHeader: String] = [:],
body: T? = nil
) {
self = Request(
method: method,
endpoint: endpoint,
target: HTTP.ServerTarget(
server: endpoint.server,
path: endpoint.path,
queryParameters: queryParameters,
x25519PublicKey: endpoint.serverPublicKey
),
headers: headers,
body: body
)
}
}

View File

@ -30,7 +30,7 @@ public final class ClosedGroupPoller: Poller {
return ClosedGroupPoller.namespaces
}
override var maxNodePollCount: UInt { 0 }
override var pollDrainBehaviour: SwarmDrainBehaviour { .alwaysRandom }
private static let minPollInterval: Double = 3
private static let maxPollInterval: Double = 30

View File

@ -16,11 +16,11 @@ public final class CurrentUserPoller: Poller {
override func namespaces(for publicKey: String) -> [SnodeAPI.Namespace] { CurrentUserPoller.namespaces }
/// After polling a given snode this many times we always switch to a new one.
/// After polling a given snode 6 times we always switch to a new one.
///
/// The reason for doing this is that sometimes a snode will be giving us successful responses while
/// it isn't actually getting messages from other snodes.
override var maxNodePollCount: UInt { 6 }
override var pollDrainBehaviour: SwarmDrainBehaviour { .limitedReuse(count: 6) }
private let pollInterval: TimeInterval = 1.5
private let retryInterval: TimeInterval = 0.25
@ -64,9 +64,12 @@ public final class CurrentUserPoller: Poller {
if UserDefaults.sharedLokiProject?[.isMainAppActive] != true {
// Do nothing when an error gets throws right after returning from the background (happens frequently)
}
else if let targetSnode: Snode = targetSnode.wrappedValue {
else if
let drainBehaviour: Atomic<SwarmDrainBehaviour> = drainBehaviour.wrappedValue[publicKey],
case .limitedReuse(_, .some(let targetSnode), _, _) = drainBehaviour.wrappedValue
{
SNLog("Main Poller polling \(targetSnode) failed; dropping it and switching to next snode.")
self.targetSnode.mutate { $0 = nil }
drainBehaviour.mutate { $0 = $0.clearTargetSnode() }
SnodeAPI.dropSnodeFromSwarmIfNeeded(targetSnode, publicKey: publicKey, using: dependencies)
}
else {

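The full definition of `SwarmDrainBehaviour` isn't part of this hunk, but based on how it is used above (an `.alwaysRandom` case, a `.limitedReuse` case carrying a count plus the current target snode and some tracking state, and a `clearTargetSnode()` helper) a rough, illustrative sketch of the idea might look like this; the case layout and names are guesses, not the real SessionSnodeKit type:

// Illustrative sketch only: captures the "reuse a snode up to N times, then rotate" idea
enum SwarmDrainBehaviourSketch<Snode: Hashable> {
    case alwaysRandom
    case limitedReuse(count: UInt, targetSnode: Snode?, usedSnodes: Set<Snode>, pollCount: UInt)

    // Drop the current target (eg. after it fails) without losing the reuse limit
    func clearTargetSnode() -> SwarmDrainBehaviourSketch {
        switch self {
            case .alwaysRandom: return .alwaysRandom
            case .limitedReuse(let count, _, let used, _):
                return .limitedReuse(count: count, targetSnode: nil, usedSnodes: used, pollCount: 0)
        }
    }

    // Return the snode to poll next along with the updated behaviour state
    func use(from swarm: Set<Snode>) -> (Snode?, SwarmDrainBehaviourSketch) {
        switch self {
            case .alwaysRandom: return (swarm.randomElement(), self)
            case .limitedReuse(let count, let target, var used, let polls):
                // Keep reusing the current snode until it has been polled `count` times
                if let target: Snode = target, polls < count {
                    return (target, .limitedReuse(count: count, targetSnode: target, usedSnodes: used, pollCount: polls + 1))
                }
                // Otherwise pick an unused snode, resetting once the whole swarm has been drained
                if used.isSuperset(of: swarm) { used.removeAll() }
                let next: Snode? = swarm.subtracting(used).randomElement()
                if let next: Snode = next { used.insert(next) }
                return (next, .limitedReuse(count: count, targetSnode: next, usedSnodes: used, pollCount: 1))
        }
    }
}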
View File

@ -166,7 +166,6 @@ extension OpenGroupAPI {
failureCount: failureCount,
using: dependencies
)
dependencies.mutate(cache: .openGroupManager) { cache in
cache.hasPerformedInitialPoll[server] = true
@ -377,7 +376,7 @@ extension OpenGroupAPI {
using dependencies: Dependencies
) {
let server: String = self.server
let validResponses: [OpenGroupAPI.Endpoint: Decodable] = response.data
let validResponses: [OpenGroupAPI.Endpoint: Any] = response.data
.filter { endpoint, data in
switch endpoint {
case .capabilities:
@ -476,7 +475,7 @@ extension OpenGroupAPI {
return (capabilities, groups)
}
let changedResponses: [OpenGroupAPI.Endpoint: Decodable] = validResponses
let changedResponses: [OpenGroupAPI.Endpoint: Any] = validResponses
.filter { endpoint, data in
switch endpoint {
case .capabilities:

View File

@ -10,11 +10,8 @@ import SessionUtilitiesKit
public class Poller {
private var cancellables: Atomic<[String: AnyCancellable]> = Atomic([:])
internal var isPolling: Atomic<[String: Bool]> = Atomic([:])
internal var pollCount: Atomic<[String: Int]> = Atomic([:])
internal var failureCount: Atomic<[String: Int]> = Atomic([:])
internal var targetSnode: Atomic<Snode?> = Atomic(nil)
private var usedSnodes: Atomic<Set<Snode>> = Atomic([])
internal var drainBehaviour: Atomic<[String: Atomic<SwarmDrainBehaviour>]> = Atomic([:])
// MARK: - Settings
@ -23,8 +20,8 @@ public class Poller {
preconditionFailure("abstract class - override in subclass")
}
/// The number of times the poller can poll a single snode before swapping to a new snode
internal var maxNodePollCount: UInt {
/// The behaviour for how the poller should drain its swarm when polling
internal var pollDrainBehaviour: SwarmDrainBehaviour {
preconditionFailure("abstract class - override in subclass")
}
@ -42,6 +39,8 @@ public class Poller {
public func stopPolling(for publicKey: String) {
isPolling.mutate { $0[publicKey] = false }
failureCount.mutate { $0[publicKey] = nil }
drainBehaviour.mutate { $0[publicKey] = nil }
cancellables.mutate { $0[publicKey]?.cancel() }
}
@ -67,6 +66,8 @@ public class Poller {
internal func startIfNeeded(for publicKey: String, using dependencies: Dependencies) {
// Run on the 'pollerQueue' to ensure any 'Atomic' access doesn't block the main thread
// on startup
let drainBehaviour: Atomic<SwarmDrainBehaviour> = Atomic(pollDrainBehaviour)
Threading.pollerQueue.async(using: dependencies) { [weak self] in
guard self?.isPolling.wrappedValue[publicKey] != true else { return }
@ -74,67 +75,14 @@ public class Poller {
// and the timer is not created, if we mark the group as is polling
// after setUpPolling. So the poller may not work, thus misses messages
self?.isPolling.mutate { $0[publicKey] = true }
self?.pollRecursively(for: publicKey, using: dependencies)
self?.drainBehaviour.mutate { $0[publicKey] = drainBehaviour }
self?.pollRecursively(for: publicKey, drainBehaviour: drainBehaviour, using: dependencies)
}
}
internal func getSnodeForPolling(
for publicKey: String,
using dependencies: Dependencies
) -> AnyPublisher<Snode, Error> {
// If we don't want to poll a snode multiple times then just grab a random one from the swarm
guard maxNodePollCount > 0 else {
return SnodeAPI.getSwarm(for: publicKey, using: dependencies)
.tryMap { swarm -> Snode in
try swarm.randomElement() ?? { throw OnionRequestAPIError.insufficientSnodes }()
}
.eraseToAnyPublisher()
}
// If we already have a target snode then use that
if let targetSnode: Snode = self.targetSnode.wrappedValue {
return Just(targetSnode)
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
// Select the next unused snode from the swarm (if we've used them all then clear the used list and
// start cycling through them again)
return SnodeAPI.getSwarm(for: publicKey, using: dependencies)
.tryMap { [usedSnodes = self.usedSnodes, targetSnode = self.targetSnode] swarm -> Snode in
let unusedSnodes: Set<Snode> = swarm.subtracting(usedSnodes.wrappedValue)
// If we've used all of the SNodes then clear out the used list
if unusedSnodes.isEmpty {
usedSnodes.mutate { $0.removeAll() }
}
// Select the next SNode
let nextSnode: Snode = try swarm.randomElement() ?? { throw OnionRequestAPIError.insufficientSnodes }()
targetSnode.mutate { $0 = nextSnode }
usedSnodes.mutate { $0.insert(nextSnode) }
return nextSnode
}
.eraseToAnyPublisher()
}
internal func incrementPollCount(publicKey: String) {
guard maxNodePollCount > 0 else { return }
let pollCount: Int = (self.pollCount.wrappedValue[publicKey] ?? 0)
self.pollCount.mutate { $0[publicKey] = (pollCount + 1) }
// Check if we've polled the service node too many times
guard pollCount > maxNodePollCount else { return }
// If we have polled this service node more than the maximum allowed then clear out
// the 'targetServiceNode' value
self.targetSnode.mutate { $0 = nil }
}
private func pollRecursively(
for publicKey: String,
drainBehaviour: Atomic<SwarmDrainBehaviour>,
using dependencies: Dependencies
) {
guard isPolling.wrappedValue[publicKey] == true else { return }
@ -142,20 +90,17 @@ public class Poller {
let namespaces: [SnodeAPI.Namespace] = self.namespaces(for: publicKey)
let lastPollStart: TimeInterval = dependencies.dateNow.timeIntervalSince1970
let lastPollInterval: TimeInterval = nextPollDelay(for: publicKey, using: dependencies)
let getSnodePublisher: AnyPublisher<Snode, Error> = getSnodeForPolling(for: publicKey, using: dependencies)
// Store the publisher intp the cancellables dictionary
cancellables.mutate { [weak self] cancellables in
cancellables[publicKey] = getSnodePublisher
.flatMap { snode -> AnyPublisher<[Message], Error> in
Poller.poll(
namespaces: namespaces,
from: snode,
for: publicKey,
poller: self,
using: dependencies
)
}
cancellables[publicKey] = Poller
.poll(
namespaces: namespaces,
for: publicKey,
drainBehaviour: drainBehaviour,
poller: self,
using: dependencies
)
.subscribe(on: Threading.pollerQueue, using: dependencies)
.receive(on: Threading.pollerQueue, using: dependencies)
.sink(
@ -170,9 +115,6 @@ public class Poller {
case .finished: break
}
// Increment the poll count
self?.incrementPollCount(publicKey: publicKey)
// Calculate the remaining poll delay
let currentTime: TimeInterval = dependencies.dateNow.timeIntervalSince1970
let nextPollInterval: TimeInterval = (
@ -184,12 +126,12 @@ public class Poller {
// Schedule the next poll
guard remainingInterval > 0 else {
return Threading.pollerQueue.async(using: dependencies) {
self?.pollRecursively(for: publicKey, using: dependencies)
self?.pollRecursively(for: publicKey, drainBehaviour: drainBehaviour, using: dependencies)
}
}
Threading.pollerQueue.asyncAfter(deadline: .now() + .milliseconds(Int(remainingInterval * 1000)), qos: .default, using: dependencies) {
self?.pollRecursively(for: publicKey, using: dependencies)
self?.pollRecursively(for: publicKey, drainBehaviour: drainBehaviour, using: dependencies)
}
},
receiveValue: { _ in }
@ -204,10 +146,10 @@ public class Poller {
/// for cases where we need explicit/custom behaviours to occur (eg. Onboarding)
public static func poll(
namespaces: [SnodeAPI.Namespace],
from snode: Snode,
for publicKey: String,
calledFromBackgroundPoller: Bool = false,
isBackgroundPollValid: @escaping (() -> Bool) = { true },
drainBehaviour: Atomic<SwarmDrainBehaviour>,
poller: Poller? = nil,
using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<[Message], Error> {
@ -227,16 +169,26 @@ public class Poller {
)
let configHashes: [String] = SessionUtil.configHashes(for: publicKey, using: dependencies)
// Fetch the messages
return SnodeAPI
.poll(
namespaces: namespaces,
refreshingConfigHashes: configHashes,
from: snode,
associatedWith: publicKey,
using: dependencies
)
.flatMap { namespacedResults -> AnyPublisher<[Message], Error> in
/// Fetch the messages
///
/// **Note:** We need a `writePublisher` here because we want to prune the `lastMessageHash` value when preparing
/// the request
return SnodeAPI.getSwarm(for: publicKey, using: dependencies)
.tryFlatMapWithRandomSnode(drainBehaviour: drainBehaviour) { snode -> AnyPublisher<HTTP.PreparedRequest<SnodeAPI.PollResponse>, Error> in
dependencies[singleton: .storage]
.writePublisher(using: dependencies) { db -> HTTP.PreparedRequest<SnodeAPI.PollResponse> in
try SnodeAPI.preparedPoll(
db,
namespaces: namespaces,
refreshingConfigHashes: configHashes,
from: snode,
authInfo: try SnodeAPI.AuthenticationInfo(db, threadId: publicKey, using: dependencies),
using: dependencies
)
}
}
.flatMap { $0.send(using: dependencies) }
.flatMap { (_: ResponseInfoType, namespacedResults: SnodeAPI.PollResponse) -> AnyPublisher<[Message], Error> in
guard
(calledFromBackgroundPoller && isBackgroundPollValid()) ||
poller?.isPolling.wrappedValue[publicKey] == true

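The `tryFlatMapWithRandomSnode` operator used above isn't defined in this hunk; presumably it picks a snode from the emitted swarm according to the provided drain behaviour before invoking the closure. A heavily simplified, hypothetical stand-in (ignoring the drain behaviour state entirely and just picking a random element) might read:

import Combine
import Foundation

// Simplified, hypothetical stand-in for the idea behind `tryFlatMapWithRandomSnode`;
// the real operator also consults (and updates) the Atomic<SwarmDrainBehaviour> state
extension Publisher {
    func tryFlatMapWithRandomElement<Element: Hashable, P: Publisher>(
        _ transform: @escaping (Element) throws -> P
    ) -> AnyPublisher<P.Output, Error> where Output == Set<Element>, P.Failure == Error {
        self
            .mapError { $0 as Error }
            .tryMap { swarm -> Element in
                // Fail when the swarm is empty rather than hanging (placeholder error for the sketch)
                guard let picked: Element = swarm.randomElement() else { throw URLError(.cannotFindHost) }
                return picked
            }
            .flatMap { element -> AnyPublisher<P.Output, Error> in
                do { return try transform(element).eraseToAnyPublisher() }
                catch { return Fail(error: error).eraseToAnyPublisher() }
            }
            .eraseToAnyPublisher()
    }
}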
View File

@ -476,13 +476,7 @@ public extension SessionUtil {
)
}
static func fetchAll(_ db: Database? = nil, ids: [String]? = nil) -> [VolatileThreadInfo] {
guard let db: Database = db else {
return Storage.shared
.read { db in fetchAll(db, ids: ids) }
.defaulting(to: [])
}
static func fetchAll(_ db: Database, ids: [String]? = nil) -> [VolatileThreadInfo] {
struct FetchedInfo: FetchableRecord, Codable, Hashable {
let id: String
let variant: SessionThread.Variant

View File

@ -22,6 +22,7 @@ internal extension SessionUtil {
static func handleGroupInfoUpdate(
_ db: Database,
in config: Config?,
groupIdentityPublicKey: String,
latestConfigSentTimestampMs: Int64,
using dependencies: Dependencies
) throws {
@ -29,6 +30,93 @@ internal extension SessionUtil {
guard config.needsDump else { return }
guard case .object(let conf) = config else { throw SessionUtilError.invalidConfigObject }
// If the group is destroyed then remove everything, no other properties matter and this
// can't be reversed
guard !groups_info_is_destroyed(conf) else {
return
}
// A group must have a name so if this is null then it's invalid and can be ignored
guard let groupNamePtr: UnsafePointer<CChar> = groups_info_get_name(conf) else { return }
let groupName: String = String(cString: groupNamePtr)
let formationTimestamp: TimeInterval = TimeInterval(groups_info_get_created(conf))
let displayPic: user_profile_pic = groups_info_get_pic(conf)
let displayPictureUrl: String? = String(libSessionVal: displayPic.url, nullIfEmpty: true)
let displayPictureKey: Data? = Data(
libSessionVal: displayPic.key,
count: ProfileManager.avatarAES256KeyByteLength
)
// Update the group name
let existingGroup: ClosedGroup? = try? ClosedGroup
.filter(id: groupIdentityPublicKey)
.fetchOne(db)
let needsDisplayPictureUpdate: Bool = (
existingGroup?.displayPictureUrl != displayPictureUrl ||
existingGroup?.displayPictureEncryptionKey != displayPictureKey
)
let groupChanges: [ConfigColumnAssignment] = [
((existingGroup?.name == groupName) ? nil :
ClosedGroup.Columns.name.set(to: groupName)
),
((existingGroup?.formationTimestamp != formationTimestamp && formationTimestamp != 0) ? nil :
ClosedGroup.Columns.formationTimestamp.set(to: formationTimestamp)
),
// If we are removing the display picture do so here
(!needsDisplayPictureUpdate || displayPictureUrl != nil ? nil :
ClosedGroup.Columns.displayPictureUrl.set(to: nil)
),
(!needsDisplayPictureUpdate || displayPictureUrl != nil ? nil :
ClosedGroup.Columns.displayPictureFilename.set(to: nil)
),
(!needsDisplayPictureUpdate || displayPictureUrl != nil ? nil :
ClosedGroup.Columns.displayPictureEncryptionKey.set(to: nil)
),
(!needsDisplayPictureUpdate || displayPictureUrl != nil ? nil :
ClosedGroup.Columns.lastDisplayPictureUpdate.set(to: dependencies.dateNow)
)
].compactMap { $0 }
if !groupChanges.isEmpty {
try ClosedGroup
.filter(id: groupIdentityPublicKey)
.updateAll( // Handling a config update so don't use `updateAllAndConfig`
db,
groupChanges
)
}
if needsDisplayPictureUpdate && displayPictureUrl != nil {
}
// Update the disappearing messages configuration
let targetExpiry: Int32 = groups_info_get_expiry_timer(conf)
let targetIsEnable: Bool = (targetExpiry > 0)
let targetConfig: DisappearingMessagesConfiguration = DisappearingMessagesConfiguration(
threadId: groupIdentityPublicKey,
isEnabled: targetIsEnable,
durationSeconds: TimeInterval(targetExpiry),
type: (targetIsEnable ? .disappearAfterSend : .unknown),
lastChangeTimestampMs: latestConfigSentTimestampMs
)
let localConfig: DisappearingMessagesConfiguration = try DisappearingMessagesConfiguration
.fetchOne(db, id: groupIdentityPublicKey)
.defaulting(to: DisappearingMessagesConfiguration.defaultWith(groupIdentityPublicKey))
if
let remoteLastChangeTimestampMs = targetConfig.lastChangeTimestampMs,
let localLastChangeTimestampMs = localConfig.lastChangeTimestampMs,
remoteLastChangeTimestampMs > localLastChangeTimestampMs
{
_ = try localConfig.with(
isEnabled: targetConfig.isEnabled,
durationSeconds: targetConfig.durationSeconds,
type: targetConfig.type,
lastChangeTimestampMs: targetConfig.lastChangeTimestampMs
).save(db)
}
}
}
@ -59,7 +147,10 @@ internal extension SessionUtil {
) { config in
guard case .object(let conf) = config else { throw SessionUtilError.invalidConfigObject }
// Update the name
/// Update the name
///
/// **Note:** We intentionally only update the `GROUP_INFO` and not the `USER_GROUPS` as once the
/// group is synced between devices we want to rely on the proper group config to get display info
var updatedName: [CChar] = group.name.cArray.nullTerminated()
groups_info_set_name(conf, &updatedName)

View File

@ -15,11 +15,12 @@ internal extension SessionUtil {
static func handleGroupKeysUpdate(
_ db: Database,
in config: Config?,
groupIdentityPublicKey: String,
latestConfigSentTimestampMs: Int64,
using dependencies: Dependencies
) throws {
guard config.needsDump else { return }
guard case .groupKeys(let conf) = config else { throw SessionUtilError.invalidConfigObject }
guard case .groupKeys(let conf, _, _) = config else { throw SessionUtilError.invalidConfigObject }
}
}

View File

@ -15,6 +15,7 @@ internal extension SessionUtil {
static func handleGroupMembersUpdate(
_ db: Database,
in config: Config?,
groupIdentityPublicKey: String,
latestConfigSentTimestampMs: Int64,
using dependencies: Dependencies
) throws {

View File

@ -492,7 +492,10 @@ public extension SessionUtil {
return false
case .group:
return false
var group: ugroups_group_info = ugroups_group_info()
/// Not handling the `hidden` behaviour for legacy groups so just indicate whether the group exists
return user_groups_get_group(conf, &group, &cThreadId)
}
}
.defaulting(to: false)

View File

@ -210,23 +210,94 @@ internal extension SessionUtil {
name: group.name,
authData: group.authData,
joinedAt: Int64(floor(group.formationTimestamp)),
approved: group.approved,
using: dependencies
)
}
@discardableResult static func addGroup(
_ db: Database,
groupIdentityPublicKey: [UInt8],
@discardableResult static func createGroupState(
groupId: String,
userED25519KeyPair: KeyPair,
groupIdentityPrivateKey: Data?,
name: String,
tag: Data?,
subkey: Data?,
joinedAt: Int64,
approved: Bool,
authData: Data?,
using dependencies: Dependencies
) throws -> (group: ClosedGroup, members: [GroupMember]) {
// TODO: This!!!
preconditionFailure()
) throws -> [ConfigDump.Variant: Config] {
var secretKey: [UInt8] = userED25519KeyPair.secretKey
var groupIdentityPublicKey: [UInt8] = Array(Data(hex: groupId.removingIdPrefixIfNeeded()))
var groupIdentityPrivateKey: [UInt8] = Array(groupIdentityPrivateKey!)
// Create the new config objects
var groupKeysConf: UnsafeMutablePointer<config_group_keys>? = nil
var groupInfoConf: UnsafeMutablePointer<config_object>? = nil
var groupMembersConf: UnsafeMutablePointer<config_object>? = nil
var error: [CChar] = [CChar](repeating: 0, count: 256)
try groups_info_init(
&groupInfoConf,
&groupIdentityPublicKey,
&groupIdentityPrivateKey,
nil,
0,
&error
).orThrow(error: error)
try groups_members_init(
&groupMembersConf,
&groupIdentityPublicKey,
&groupIdentityPrivateKey,
nil,
0,
&error
).orThrow(error: error)
try groups_keys_init(
&groupKeysConf,
&secretKey,
&groupIdentityPublicKey,
&groupIdentityPrivateKey,
groupInfoConf,
groupMembersConf,
nil,
0,
&error
).orThrow(error: error)
guard
let keysConf: UnsafeMutablePointer<config_group_keys> = groupKeysConf,
let infoConf: UnsafeMutablePointer<config_object> = groupInfoConf,
let membersConf: UnsafeMutablePointer<config_object> = groupMembersConf
else {
SNLog("[SessionUtil Error] Group config objects were null")
throw SessionUtilError.unableToCreateConfigObject
}
// Define the config state map and load it into memory
let groupState: [ConfigDump.Variant: Config] = [
.groupKeys: .groupKeys(keysConf, info: infoConf, members: membersConf),
.groupInfo: .object(infoConf),
.groupMembers: .object(membersConf),
]
dependencies.mutate(cache: .sessionUtil) { cache in
groupState.forEach { variant, config in
cache.setConfig(for: variant, publicKey: groupId, to: config)
}
}
return groupState
}
static func encrypt(
message: Data,
groupIdentityPublicKey: String,
using dependencies: Dependencies
) throws -> Data {
return message
}
static func decrypt(
ciphertext: Data,
groupIdentityPublicKey: String,
using dependencies: Dependencies
) throws -> Data {
return ciphertext
}
}
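Note: the `orThrow(error:)` calls in `createGroupState` convert libsession-util's C-style `Int32` return codes into Swift errors. The helper's implementation isn't part of this diff; a plausible sketch of what it does (names and error type assumed):

import Foundation

// Hypothetical error type - the project's real helper may use a different one
enum ConfigInitError: Error { case failed(String) }

extension Int32 {
    /// Treat a non-zero C return code as a failure, surfacing the C error buffer as the message
    /// (sketch only; the real `orThrow(error:)` may differ)
    func orThrow(error: [CChar]) throws {
        guard self == 0 else { throw ConfigInitError.failed(String(cString: error)) }
    }
}

// Placeholder standing in for a libsession-util init call that writes into `error` and returns a status code
func placeholderInit(_ error: inout [CChar]) -> Int32 { return 0 }

do {
    var error: [CChar] = [CChar](repeating: 0, count: 256)
    try placeholderInit(&error).orThrow(error: error)
}
catch { print("Init failed: \(error)") }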

View File

@ -424,7 +424,6 @@ internal extension SessionUtil {
try groups.forEach { group in
guard
let name: String = group.name,
let joinedAt: Int64 = group.joinedAt
else { return }
@ -434,20 +433,20 @@ internal extension SessionUtil {
db,
groupIdentityPublicKey: group.groupIdentityPublicKey,
groupIdentityPrivateKey: group.groupIdentityPrivateKey,
name: name,
name: group.name,
authData: group.authData,
created: Int64((group.joinedAt ?? (latestConfigSentTimestampMs / 1000))),
approved: true,// TODO: What to do here???? <#T##Bool#>,
approved: (group.approved == true),
calledFromConfigHandling: true,
using: dependencies
)
}
else {
// Otherwise update the existing group
/// Otherwise update the existing group
///
/// **Note:** We ignore the `name` value here because, if it's an existing group, we assume we will get the
/// proper name by polling for the `GROUP_INFO` rather than via syncing the `USER_GROUPS` data
let groupChanges: [ConfigColumnAssignment] = [
(existingGroups[group.groupIdentityPublicKey]?.name == name ? nil :
ClosedGroup.Columns.name.set(to: name)
),
(existingGroups[group.groupIdentityPublicKey]?.formationTimestamp == TimeInterval(joinedAt) ? nil :
ClosedGroup.Columns.formationTimestamp.set(to: TimeInterval(joinedAt))
),
@ -456,6 +455,9 @@ internal extension SessionUtil {
),
(existingGroups[group.groupIdentityPublicKey]?.groupIdentityPrivateKey == group.groupIdentityPrivateKey ? nil :
ClosedGroup.Columns.groupIdentityPrivateKey.set(to: group.groupIdentityPrivateKey)
),
(existingGroups[group.groupIdentityPublicKey]?.approved == group.approved ? nil :
ClosedGroup.Columns.approved.set(to: (group.approved ?? false))
)
].compactMap { $0 }
@ -481,9 +483,9 @@ internal extension SessionUtil {
}
}
// Remove any legacy groups which are no longer in the config
// Remove any groups which are no longer in the config
let groupIdsToRemove: Set<String> = existingGroupIds
.subtracting(legacyGroups.map { $0.id })
.subtracting(groups.map { $0.groupIdentityPublicKey })
if !groupIdsToRemove.isEmpty {
SessionUtil.kickFromConversationUIIfNeeded(removedThreadIds: Array(groupIdsToRemove))
@ -496,6 +498,14 @@ internal extension SessionUtil {
groupLeaveType: .forced,
calledFromConfigHandling: true
)
groupIdsToRemove.forEach { groupId in
SessionUtil.removeGroupStateIfNeeded(
db,
groupIdentityPublicKey: groupId,
using: dependencies
)
}
}
}
@ -946,6 +956,7 @@ public extension SessionUtil {
name: String?,
authData: Data?,
joinedAt: Int64,
approved: Bool,
using dependencies: Dependencies
) throws {
try SessionUtil.performAndPushChange(
@ -961,7 +972,8 @@ public extension SessionUtil {
groupIdentityPrivateKey: groupIdentityPrivateKey,
name: name,
authData: authData,
joinedAt: joinedAt
joinedAt: joinedAt,
approved: approved
)
],
in: config
@ -975,6 +987,7 @@ public extension SessionUtil {
groupIdentityPrivateKey: Data? = nil,
name: String? = nil,
authData: Data? = nil,
approved: Bool? = nil,
using dependencies: Dependencies
) throws {
try SessionUtil.performAndPushChange(
@ -989,7 +1002,8 @@ public extension SessionUtil {
groupIdentityPublicKey: groupIdentityPublicKey,
groupIdentityPrivateKey: groupIdentityPrivateKey,
name: name,
authData: authData
authData: authData,
approved: approved
)
],
in: config
@ -1160,6 +1174,7 @@ extension SessionUtil {
let authData: Data?
let priority: Int32?
let joinedAt: Int64?
let approved: Bool?
init(
groupIdentityPublicKey: String,
@ -1167,7 +1182,8 @@ extension SessionUtil {
name: String? = nil,
authData: Data? = nil,
priority: Int32? = nil,
joinedAt: Int64? = nil
joinedAt: Int64? = nil,
approved: Bool? = nil
) {
self.groupIdentityPublicKey = groupIdentityPublicKey
self.groupIdentityPrivateKey = groupIdentityPrivateKey
@ -1175,6 +1191,7 @@ extension SessionUtil {
self.authData = authData
self.priority = priority
self.joinedAt = joinedAt
self.approved = approved
}
}
}

View File

@ -9,6 +9,8 @@ import SessionUtilitiesKit
// MARK: - SessionUtil
public enum SessionUtil {
internal static let logLevel: config_log_level = LOG_LEVEL_INFO
public struct ConfResult {
let needsPush: Bool
let needsDump: Bool
@ -74,14 +76,16 @@ public enum SessionUtil {
cache.setConfig(
for: dump.variant,
publicKey: dump.publicKey,
to: try? SessionUtil.loadState(
for: dump.variant,
publicKey: dump.publicKey,
userEd25519SecretKey: ed25519SecretKey,
groupEd25519SecretKey: groupsByKey[dump.publicKey].map { Array($0) },
cachedData: dump.data,
cache: cache
)
to: try? SessionUtil
.loadState(
for: dump.variant,
publicKey: dump.publicKey,
userEd25519SecretKey: ed25519SecretKey,
groupEd25519SecretKey: groupsByKey[dump.publicKey].map { Array($0) },
cachedData: dump.data,
cache: cache
)
.addingLogger()
)
}
@ -89,14 +93,16 @@ public enum SessionUtil {
cache.setConfig(
for: variant,
publicKey: currentUserPublicKey,
to: try? SessionUtil.loadState(
for: variant,
publicKey: currentUserPublicKey,
userEd25519SecretKey: ed25519SecretKey,
groupEd25519SecretKey: nil,
cachedData: nil,
cache: cache
)
to: try? SessionUtil
.loadState(
for: variant,
publicKey: currentUserPublicKey,
userEd25519SecretKey: ed25519SecretKey,
groupEd25519SecretKey: nil,
cachedData: nil,
cache: cache
)
.addingLogger()
)
}
}
@ -113,7 +119,7 @@ public enum SessionUtil {
// Setup initial variables (including getting the memory address for any cached data)
var conf: UnsafeMutablePointer<config_object>? = nil
var keysConf: UnsafeMutablePointer<config_group_keys>? = nil
var secretKey: [UInt8]? = userEd25519SecretKey
var secretKey: [UInt8] = userEd25519SecretKey
var error: [CChar] = [CChar](repeating: 0, count: 256)
let cachedDump: (data: UnsafePointer<UInt8>, length: Int)? = cachedData?.withUnsafeBytes { unsafeBytes in
return unsafeBytes.baseAddress.map {
@ -123,132 +129,116 @@ public enum SessionUtil {
)
}
}
let userConfigInitCalls: [ConfigDump.Variant: UserConfigInitialiser] = [
.userProfile: user_profile_init,
.contacts: contacts_init,
.convoInfoVolatile: convo_info_volatile_init,
.userGroups: user_groups_init
]
let groupConfigInitCalls: [ConfigDump.Variant: GroupConfigInitialiser] = [
.groupInfo: groups_info_init,
.groupMembers: groups_members_init
]
// Try to create the object
return try {
switch variant {
case .userProfile:
return try user_profile_init(
&conf,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(conf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
case .contacts:
return try contacts_init(
&conf,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(conf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
case .convoInfoVolatile:
return try convo_info_volatile_init(
&conf,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(conf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
case .userGroups:
return try user_groups_init(
&conf,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(conf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
case .groupInfo:
return try groups_info_init(
&conf,
&secretKey,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(conf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
case .groupMembers:
return try groups_members_init(
&conf,
&secretKey,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(conf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
case .groupKeys:
var identityPublicKey: [UInt8] = Array(Data(hex: publicKey))
var adminSecretKey: [UInt8]? = groupEd25519SecretKey
let infoConfig: Config? = cache
.config(for: .groupInfo, publicKey: publicKey)
.wrappedValue
let membersConfig: Config? = cache
.config(for: .groupMembers, publicKey: publicKey)
.wrappedValue
guard
case .object(let infoConf) = infoConfig,
case .object(let membersConf) = membersConfig
else {
SNLog("[SessionUtil Error] Unable to create \(variant.rawValue) config object: Group info and member config states not loaded")
throw SessionUtilError.unableToCreateConfigObject
}
return try groups_keys_init(
&keysConf,
&secretKey,
&identityPublicKey,
&adminSecretKey,
infoConf,
membersConf,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.returning(
Config.from(keysConf, info: infoConf, members: membersConf),
orThrow: "Unable to create \(variant.rawValue) config object",
error: error
)
}
}()
switch (variant, groupEd25519SecretKey) {
case (.userProfile, _), (.contacts, _), (.convoInfoVolatile, _), (.userGroups, _):
return try (userConfigInitCalls[variant]?(
&conf,
&secretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
))
.toConfig(conf, variant: variant, error: error)
case (.groupInfo, .some(var adminSecretKey)), (.groupMembers, .some(var adminSecretKey)):
var identityPublicKey: [UInt8] = Array(Data(hex: publicKey.removingIdPrefixIfNeeded()))
return try (groupConfigInitCalls[variant]?(
&conf,
&identityPublicKey,
&adminSecretKey,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
))
.toConfig(conf, variant: variant, error: error)
case (.groupKeys, .some(var adminSecretKey)):
var identityPublicKey: [UInt8] = Array(Data(hex: publicKey.removingIdPrefixIfNeeded()))
let infoConfig: Config? = cache.config(for: .groupInfo, publicKey: publicKey)
.wrappedValue
let membersConfig: Config? = cache
.config(for: .groupMembers, publicKey: publicKey)
.wrappedValue
guard
case .object(let infoConf) = infoConfig,
case .object(let membersConf) = membersConfig
else {
SNLog("[SessionUtil Error] Unable to create \(variant.rawValue) config object: Group info and member config states not loaded")
throw SessionUtilError.unableToCreateConfigObject
}
return try groups_keys_init(
&keysConf,
&secretKey,
&identityPublicKey,
&adminSecretKey,
infoConf,
membersConf,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.toConfig(keysConf, info: infoConf, members: membersConf, variant: variant, error: error)
// It looks like C doesn't deal well with passing pointers to null variables so we need
// to explicitly pass 'nil' for the admin key in this case
case (.groupInfo, .none), (.groupMembers, .none):
var identityPublicKey: [UInt8] = Array(Data(hex: publicKey.removingIdPrefixIfNeeded()))
return try (groupConfigInitCalls[variant]?(
&conf,
&identityPublicKey,
nil,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
))
.toConfig(conf, variant: variant, error: error)
// It looks like C doesn't deal well with passing pointers to null variables so we need
// to explicitly pass 'nil' for the admin key in this case
case (.groupKeys, .none):
var identityPublicKey: [UInt8] = Array(Data(hex: publicKey.removingIdPrefixIfNeeded()))
let infoConfig: Config? = cache.config(for: .groupInfo, publicKey: publicKey)
.wrappedValue
let membersConfig: Config? = cache
.config(for: .groupMembers, publicKey: publicKey)
.wrappedValue
guard
case .object(let infoConf) = infoConfig,
case .object(let membersConf) = membersConfig
else {
SNLog("[SessionUtil Error] Unable to create \(variant.rawValue) config object: Group info and member config states not loaded")
throw SessionUtilError.unableToCreateConfigObject
}
return try groups_keys_init(
&keysConf,
&secretKey,
&identityPublicKey,
nil,
infoConf,
membersConf,
cachedDump?.data,
(cachedDump?.length ?? 0),
&error
)
.toConfig(keysConf, info: infoConf, members: membersConf, variant: variant, error: error)
}
}
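Note: the refactor above replaces one near-identical `*_init` call per variant with a lookup table of function references (the `UserConfigInitialiser` / `GroupConfigInitialiser` typealiases). The same pattern in miniature, with placeholder functions standing in for the C initialisers:

import Foundation

// Simplified variant enum and a shared signature matching the shape of the C initialisers
// (the real typealiases also take the conf pointer, keys and an error buffer)
enum Variant: Hashable { case userProfile, contacts, convoInfoVolatile, userGroups }
typealias Initialiser = (_ dump: UnsafePointer<UInt8>?, _ dumpLen: Int) -> Int32

// Placeholders standing in for user_profile_init, contacts_init, etc.
func userProfileInit(_ dump: UnsafePointer<UInt8>?, _ dumpLen: Int) -> Int32 { 0 }
func contactsInit(_ dump: UnsafePointer<UInt8>?, _ dumpLen: Int) -> Int32 { 0 }

// Because every initialiser shares one signature they can live in a lookup table,
// collapsing the per-variant switch cases into a single call site
let initCalls: [Variant: Initialiser] = [
    .userProfile: userProfileInit,
    .contacts: contactsInit
]

func load(_ variant: Variant) -> Int32? {
    return initCalls[variant]?(nil, 0)
}

assert(load(.contacts) == 0)
assert(load(.userGroups) == nil)   // variants missing from the map return nil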
internal static func createDump(
@ -292,6 +282,7 @@ public enum SessionUtil {
// Extract any pending changes from the cached config entry for each variant
return try targetVariants
.sorted { (lhs: ConfigDump.Variant, rhs: ConfigDump.Variant) in lhs.sendOrder < rhs.sendOrder }
.compactMap { variant -> OutgoingConfResult? in
try dependencies[cache: .sessionUtil]
.config(for: variant, publicKey: publicKey)
@ -301,34 +292,7 @@ public enum SessionUtil {
guard config.needsPush else { return nil }
var result: (data: Data, seqNo: Int64, obsoleteHashes: [String])!
let configCountInfo: String = {
var result: String = "Invalid"
try? CExceptionHelper.performSafely {
switch (config, variant) {
case (_, .userProfile): result = "1 profile"
case (.object(let conf), .contacts):
result = "\(contacts_size(conf)) contacts"
case (.object(let conf), .userGroups):
result = "\(user_groups_size(conf)) group conversations"
case (.object(let conf), .convoInfoVolatile):
result = "\(convo_info_volatile_size(conf)) volatile conversations"
case (_, .groupInfo): result = "1 group info"
case (.object(let conf), .groupMembers):
result = "\(groups_members_size(conf)) group members"
case (.groupKeys(let conf, _, _), .groupKeys):
result = "\(groups_keys_size(conf)) group keys"
default: break
}
}
return result
}()
let configCountInfo: String = config.count(for: variant)
do { result = try config.push() }
catch {
@ -464,6 +428,7 @@ public enum SessionUtil {
try SessionUtil.handleGroupInfoUpdate(
db,
in: config,
groupIdentityPublicKey: publicKey,
latestConfigSentTimestampMs: latestConfigSentTimestampMs,
using: dependencies
)
@ -472,6 +437,7 @@ public enum SessionUtil {
try SessionUtil.handleGroupMembersUpdate(
db,
in: config,
groupIdentityPublicKey: publicKey,
latestConfigSentTimestampMs: latestConfigSentTimestampMs,
using: dependencies
)
@ -480,6 +446,7 @@ public enum SessionUtil {
try SessionUtil.handleGroupKeysUpdate(
db,
in: config,
groupIdentityPublicKey: publicKey,
latestConfigSentTimestampMs: latestConfigSentTimestampMs,
using: dependencies
)
@ -569,14 +536,44 @@ public extension SessionUtil {
// MARK: - Convenience
private extension Int32 {
func returning(_ config: SessionUtil.Config?, orThrow description: String, error: [CChar]) throws -> SessionUtil.Config {
guard self == 0, let config: SessionUtil.Config = config else {
SNLog("[SessionUtil Error] \(description): \(String(cString: error))")
private extension Optional where Wrapped == Int32 {
func toConfig(
_ maybeConf: UnsafeMutablePointer<config_object>?,
variant: ConfigDump.Variant,
error: [CChar]
) throws -> SessionUtil.Config {
guard self == 0, let conf: UnsafeMutablePointer<config_object> = maybeConf else {
SNLog("[SessionUtil Error] Unable to create \(variant.rawValue) config object: \(String(cString: error))")
throw SessionUtilError.unableToCreateConfigObject
}
return config
switch variant {
case .userProfile, .contacts, .convoInfoVolatile,
.userGroups, .groupInfo, .groupMembers:
return .object(conf)
case .groupKeys: throw SessionUtilError.unableToCreateConfigObject
}
}
}
private extension Int32 {
func toConfig(
_ maybeConf: UnsafeMutablePointer<config_group_keys>?,
info: UnsafeMutablePointer<config_object>,
members: UnsafeMutablePointer<config_object>,
variant: ConfigDump.Variant,
error: [CChar]
) throws -> SessionUtil.Config {
guard self == 0, let conf: UnsafeMutablePointer<config_group_keys> = maybeConf else {
SNLog("[SessionUtil Error] Unable to create \(variant.rawValue) config object: \(String(cString: error))")
throw SessionUtilError.unableToCreateConfigObject
}
switch variant {
case .groupKeys: return .groupKeys(conf, info: info, members: members)
default: throw SessionUtilError.unableToCreateConfigObject
}
}
}

View File

@ -5,6 +5,23 @@ import SessionUtil
import SessionUtilitiesKit
public extension SessionUtil {
typealias UserConfigInitialiser = (
UnsafeMutablePointer<UnsafeMutablePointer<config_object>?>?, // conf
UnsafePointer<UInt8>?, // ed25519_secretkey
UnsafePointer<UInt8>?, // dump
Int, // dumplen
UnsafeMutablePointer<CChar>? // error
) -> Int32
typealias GroupConfigInitialiser = (
UnsafeMutablePointer<UnsafeMutablePointer<config_object>?>?, // conf
UnsafePointer<UInt8>?, // ed25519_pubkey
UnsafePointer<UInt8>?, // ed25519_secretkey
UnsafePointer<UInt8>?, // dump
Int, // dumplen
UnsafeMutablePointer<CChar>? // error
) -> Int32
typealias ConfigSizeInfo = (UnsafePointer<config_object>?) -> Int
enum Config {
case object(UnsafeMutablePointer<config_object>)
case groupKeys(
@ -29,7 +46,7 @@ public extension SessionUtil {
var needsDump: Bool {
switch self {
case .object(let conf): return config_needs_push(conf)
case .object(let conf): return config_needs_dump(conf)
case .groupKeys(let conf, _, _): return groups_keys_needs_dump(conf)
}
}
@ -43,16 +60,27 @@ public extension SessionUtil {
// MARK: - Functions
static func from(_ conf: UnsafeMutablePointer<config_object>?) -> Config? {
return conf.map { .object($0) }
}
static func from(
_ conf: UnsafeMutablePointer<config_group_keys>?,
info: UnsafeMutablePointer<config_object>,
members: UnsafeMutablePointer<config_object>
) -> Config? {
return conf.map { .groupKeys($0, info: info, members: members) }
func addingLogger() -> Config {
switch self {
case .object(let conf):
config_set_logger(
conf,
{ logLevel, messagePtr, _ in
guard
logLevel.rawValue >= SessionUtil.logLevel.rawValue,
let messagePtr = messagePtr
else { return }
let message: String = String(cString: messagePtr)
print("[SessionUtil] \(message)")
},
nil
)
default: break
}
return self
}
func push() throws -> (data: Data, seqNo: Int64, obsoleteHashes: [String]) {
@ -106,16 +134,11 @@ public extension SessionUtil {
var dumpResult: UnsafeMutablePointer<UInt8>? = nil
var dumpResultLen: Int = 0
switch self {
case .object(let conf):
try CExceptionHelper.performSafely {
config_dump(conf, &dumpResult, &dumpResultLen)
}
case .groupKeys(let conf, _, _):
try CExceptionHelper.performSafely {
groups_keys_dump(conf, &dumpResult, &dumpResultLen)
}
try CExceptionHelper.performSafely {
switch self {
case .object(let conf): config_dump(conf, &dumpResult, &dumpResultLen)
case .groupKeys(let conf, _, _): groups_keys_dump(conf, &dumpResult, &dumpResultLen)
}
}
guard let dumpResult: UnsafeMutablePointer<UInt8> = dumpResult else { return nil }
@ -142,7 +165,19 @@ public extension SessionUtil {
return result
case .groupKeys(var conf): return []
case .groupKeys(let conf, _, _):
guard let hashList: UnsafeMutablePointer<config_string_list> = groups_keys_current_hashes(conf) else {
return []
}
let result: [String] = [String](
pointer: hashList.pointee.value,
count: hashList.pointee.len,
defaultValue: []
)
hashList.deallocate()
return result
}
}
@ -172,9 +207,11 @@ public extension SessionUtil {
return messages
.map { message -> Bool in
var data: [UInt8] = Array(message.data)
var messageHash: [CChar] = (message.serverHash ?? "").cArray.nullTerminated()
return groups_keys_load_message(
conf,
&messageHash,
&data,
data.count,
Int64(message.sentTimestamp ?? 0),
@ -186,6 +223,27 @@ public extension SessionUtil {
.count
}
}
func count(for variant: ConfigDump.Variant) -> String {
var result: String? = nil
let funcMap: [ConfigDump.Variant: (info: String, size: ConfigSizeInfo)] = [
.userProfile: ("profile", { _ in 1 }),
.contacts: ("contacts", contacts_size),
.userGroups: ("group conversations", user_groups_size),
.convoInfoVolatile: ("volatile conversations", convo_info_volatile_size),
.groupInfo: ("group info", { _ in 1 }),
.groupMembers: ("group members", groups_members_size)
]
try? CExceptionHelper.performSafely {
switch self {
case .object(let conf): result = funcMap[variant].map { "\($0.size(conf)) \($0.info)" }
case .groupKeys(let conf, _, _): result = "\(groups_keys_size(conf)) group keys"
}
}
return (result ?? "Invalid")
}
}
}

View File

@ -941,7 +941,7 @@ public extension SessionThreadViewModel {
/// the `disappearingMessagesConfiguration` entry below otherwise the query will fail to parse and might throw
///
/// Explicitly set default values for the fields ignored for search results
let numColumnsBeforeProfiles: Int = 17
let numColumnsBeforeProfiles: Int = 16
let request: SQLRequest<ViewModel> = """
SELECT
\(thread[.rowId]) AS \(ViewModel.Columns.rowId),
@ -972,9 +972,8 @@ public extension SessionThreadViewModel {
\(aggregateInteraction[.threadHasUnreadMessagesOfAnyKind]),
\(disappearingMessagesConfiguration.allColumns),
\(contact[.lastKnownClientVersion]) AS \(ViewModel.Columns.contactLastKnownClientVersion),
\(contactProfile.allColumns),
\(contact[.lastKnownClientVersion]) AS \(ViewModel.Columns.contactLastKnownClientVersion),
\(closedGroup[.name]) AS \(ViewModel.Columns.closedGroupName),
\(closedGroupUserCount[.closedGroupUserCount]),

View File

@ -0,0 +1,45 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionSnodeKit
import SessionUtilitiesKit
public extension SnodeAPI.AuthenticationInfo {
init(
_ db: Database,
threadId: String,
using dependencies: Dependencies
) throws {
switch SessionId.Prefix(from: threadId) {
case .standard:
guard let keyPair: KeyPair = Identity.fetchUserEd25519KeyPair(db, using: dependencies) else {
throw SnodeAPIError.noKeyPair
}
self = .standard(pubkey: threadId, ed25519KeyPair: keyPair)
case .group:
struct GroupAuthData: Codable, FetchableRecord {
let groupIdentityPrivateKey: Data?
let authData: Data?
}
let authData: GroupAuthData? = try? ClosedGroup
.filter(id: threadId)
.select(.authData, .groupIdentityPrivateKey)
.asRequest(of: GroupAuthData.self)
.fetchOne(db)
switch (authData?.groupIdentityPrivateKey, authData?.authData) {
case (.some(let privateKey), _):
self = .groupAdmin(pubkey: threadId, ed25519SecretKey: Array(privateKey))
case (_, .some(let authData)): self = .groupMember(pubkey: threadId, authData: authData)
default: throw SnodeAPIError.invalidAuthentication
}
default: throw SnodeAPIError.invalidAuthentication
}
}
}
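Note: the `switch` above picks the strongest credential available for a group - the admin's identity private key if present, otherwise member `authData`, otherwise authentication fails. A reduced sketch of that precedence rule (types simplified, not the real `AuthenticationInfo`):

import Foundation

enum AuthInfo: Equatable {
    case groupAdmin(privateKey: [UInt8])
    case groupMember(authData: Data)
}

enum AuthError: Error { case invalidAuthentication }

// Prefer the admin private key when both credentials exist; fall back to member auth data
func makeGroupAuth(privateKey: Data?, authData: Data?) throws -> AuthInfo {
    switch (privateKey, authData) {
        case (.some(let key), _): return .groupAdmin(privateKey: Array(key))
        case (_, .some(let data)): return .groupMember(authData: data)
        default: throw AuthError.invalidAuthentication
    }
}

// An admin that also holds member auth data still authenticates as an admin
let info = try? makeGroupAuth(privateKey: Data([1, 2, 3]), authData: Data([9]))
assert(info == .groupAdmin(privateKey: [1, 2, 3]))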

View File

@ -66,20 +66,6 @@ public extension Crypto.Action {
Sodium().sign.toX25519(ed25519SecretKey: ed25519SecretKey)
}
}
static func signature(message: Bytes, secretKey: Bytes) -> Crypto.Action {
return Crypto.Action(id: "signature", args: [message, secretKey]) {
Sodium().sign.signature(message: message, secretKey: secretKey)
}
}
}
public extension Crypto.Verification {
static func signature(message: Bytes, publicKey: Bytes, signature: Bytes) -> Crypto.Verification {
return Crypto.Verification(id: "signature", args: [message, publicKey, signature]) {
Sodium().sign.verify(message: message, publicKey: publicKey, signature: signature)
}
}
}
// MARK: - Box

View File

@ -21,7 +21,7 @@ class SessionUtilSpec: QuickSpec {
var mockCrypto: MockCrypto!
var mockSessionUtilCache: MockSessionUtilCache!
var createGroupOutput: (identityKeyPair: KeyPair, group: ClosedGroup, members: [GroupMember])!
var createGroupOutput: SessionUtil.CreatedGroupInfo!
var userGroupsConfig: SessionUtil.Config!
describe("SessionUtil") {

View File

@ -39,9 +39,7 @@ class SOGSMessageSpec: QuickSpec {
}
"""
messageData = messageJson.data(using: .utf8)!
decoder = JSONDecoder()
decoder.userInfo = [ Dependencies.userInfoKey: dependencies as Any ]
decoder = JSONDecoder(using: dependencies)
}
afterEach {

View File

@ -261,15 +261,11 @@ final class ThreadPickerVC: UIViewController, UITableViewDataSource, UITableView
.map { _ in () }
.eraseToAnyPublisher()
}
.flatMap { _ in
dependencies[singleton: .storage].writePublisher { db -> MessageSender.PreparedSendData in
guard
let threadVariant: SessionThread.Variant = try SessionThread
.filter(id: threadId)
.select(.variant)
.asRequest(of: SessionThread.Variant.self)
.fetchOne(db)
else { throw MessageSenderError.noThread }
.flatMap { _ -> AnyPublisher<(Interaction, [HTTP.PreparedRequest<String>]), Error> in
dependencies[singleton: .storage].writePublisher { db -> (Interaction, [HTTP.PreparedRequest<String>]) in
guard (try? SessionThread.exists(db, id: threadId)) == true else {
throw MessageSenderError.noThread
}
// Create the interaction
let interaction: Interaction = try Interaction(
@ -307,25 +303,53 @@ final class ThreadPickerVC: UIViewController, UITableViewDataSource, UITableView
}
// Prepare any attachments
try Attachment.process(
db,
data: Attachment.prepare(attachments: finalAttachments),
for: interactionId
)
let preparedAttachments: [Attachment] = Attachment.prepare(attachments: finalAttachments)
try Attachment.process(db, attachments: preparedAttachments, for: interactionId)
return (
interaction,
try preparedAttachments.map {
try $0.preparedUpload(db, threadId: threadId, using: dependencies)
}
)
}
}
.flatMap { (interaction: Interaction, preparedUploads: [HTTP.PreparedRequest<String>]) -> AnyPublisher<(Interaction, [String]), Error> in
guard !preparedUploads.isEmpty else {
return Just((interaction, []))
.setFailureType(to: Error.self)
.eraseToAnyPublisher()
}
return Publishers
.MergeMany(preparedUploads.map { $0.send(using: dependencies) })
.collect()
.map { results in (interaction, results.map { _, id in id }) }
.eraseToAnyPublisher()
}
.flatMap { interaction, fileIds in
// Prepare the message send data
dependencies[singleton: .storage].writePublisher { db -> HTTP.PreparedRequest<Void> in
guard
let threadVariant: SessionThread.Variant = try SessionThread
.filter(id: interaction.threadId)
.select(.variant)
.asRequest(of: SessionThread.Variant.self)
.fetchOne(db)
else { throw MessageSenderError.noThread }
// Prepare the message send data
return try MessageSender
.preparedSendData(
.preparedSend(
db,
interaction: interaction,
fileIds: fileIds,
threadId: threadId,
threadVariant: threadVariant,
using: dependencies
)
}
}
.flatMap { MessageSender.performUploadsIfNeeded(preparedSendData: $0, using: dependencies) }
.flatMap { MessageSender.sendImmediate(data: $0, using: dependencies) }
.flatMap { $0.send(using: dependencies) }
.receive(on: DispatchQueue.main)
.sinkUntilComplete(
receiveCompletion: { [weak self] result in
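Note: the new chain runs each attachment upload as its own prepared request, merges them with `Publishers.MergeMany`, waits for all of the file ids with `collect()`, and only then prepares and sends the message. A stripped-down sketch of that upload-then-send shape (placeholder `upload`/`send` publishers, not the app's real API):

import Combine
import Foundation

// Placeholder for a prepared upload: it eventually yields the uploaded file id
func upload(_ attachment: String) -> AnyPublisher<String, Error> {
    Just("file-id-for-\(attachment)").setFailureType(to: Error.self).eraseToAnyPublisher()
}

// Placeholder for preparing and sending the message with the resulting file ids
func send(message: String, fileIds: [String]) -> AnyPublisher<Void, Error> {
    Just(()).setFailureType(to: Error.self).eraseToAnyPublisher()
}

func sendWithAttachments(message: String, attachments: [String]) -> AnyPublisher<Void, Error> {
    // No attachments: skip the upload stage entirely (mirrors the `preparedUploads.isEmpty` guard)
    guard !attachments.isEmpty else { return send(message: message, fileIds: []) }

    return Publishers
        .MergeMany(attachments.map(upload))   // run the uploads concurrently
        .collect()                            // wait for every file id
        .flatMap { fileIds in send(message: message, fileIds: fileIds) }
        .eraseToAnyPublisher()
}

var cancellables: Set<AnyCancellable> = []
sendWithAttachments(message: "hi", attachments: ["a.jpg", "b.jpg"])
    .sink(receiveCompletion: { _ in }, receiveValue: { })
    .store(in: &cancellables)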

View File

@ -76,42 +76,35 @@ public extension SnodeReceivedMessageInfo {
// MARK: - GRDB Interactions
public extension SnodeReceivedMessageInfo {
/// Delete any expired SnodeReceivedMessageInfo values associated to a specific node
static func pruneExpiredMessageHashInfo(
_ db: Database,
for snode: Snode,
namespace: SnodeAPI.Namespace,
associatedWith publicKey: String,
using dependencies: Dependencies
) {
// Delete any expired SnodeReceivedMessageInfo values associated to a specific node (even
// though this runs very quickly we fetch the rowIds we want to delete from a 'read' call
// to avoid blocking the write queue since this method is called very frequently)
let rowIds: [Int64] = dependencies[singleton: .storage]
.read { db in
// Only prune the hashes if new hashes exist for this Snode (if they don't then
// we don't want to clear out the legacy hashes)
let hasNonLegacyHash: Bool = SnodeReceivedMessageInfo
.filter(SnodeReceivedMessageInfo.Columns.key == key(for: snode, publicKey: publicKey, namespace: namespace))
.isNotEmpty(db)
guard hasNonLegacyHash else { return [] }
return try SnodeReceivedMessageInfo
.select(Column.rowID)
.filter(SnodeReceivedMessageInfo.Columns.key == key(for: snode, publicKey: publicKey, namespace: namespace))
.filter(SnodeReceivedMessageInfo.Columns.expirationDateMs <= SnodeAPI.currentOffsetTimestampMs(using: dependencies))
.asRequest(of: Int64.self)
.fetchAll(db)
}
.defaulting(to: [])
) throws {
// Only prune the hashes if new hashes exist for this Snode (if they don't then
// we don't want to clear out the legacy hashes)
let hasNonLegacyHash: Bool = SnodeReceivedMessageInfo
.filter(SnodeReceivedMessageInfo.Columns.key == key(for: snode, publicKey: publicKey, namespace: namespace))
.isNotEmpty(db)
guard hasNonLegacyHash else { return }
let rowIds: [Int64] = try SnodeReceivedMessageInfo
.select(Column.rowID)
.filter(SnodeReceivedMessageInfo.Columns.key == key(for: snode, publicKey: publicKey, namespace: namespace))
.filter(SnodeReceivedMessageInfo.Columns.expirationDateMs <= SnodeAPI.currentOffsetTimestampMs(using: dependencies))
.asRequest(of: Int64.self)
.fetchAll(db)
// If there are no rowIds to delete then do nothing
guard !rowIds.isEmpty else { return }
dependencies[singleton: .storage].write { db in
try SnodeReceivedMessageInfo
.filter(rowIds.contains(Column.rowID))
.deleteAll(db)
}
try SnodeReceivedMessageInfo
.filter(rowIds.contains(Column.rowID))
.deleteAll(db)
}
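Note: the prune now runs entirely inside the caller's transaction and keeps the two-step shape of the old code: fetch the rowIds of expired entries first, then delete by rowId only when something matched. A toy GRDB sketch of that shape (simplified schema, not the real table):

import GRDB

struct HashInfo: Codable, FetchableRecord, PersistableRecord {
    static let databaseTableName: String = "hashInfo"
    var key: String
    var expirationDateMs: Int64
}

func pruneExample() throws {
    let dbQueue: DatabaseQueue = try DatabaseQueue()   // in-memory database

    try dbQueue.write { db in
        try db.create(table: HashInfo.databaseTableName) { t in
            t.column("key", .text)
            t.column("expirationDateMs", .integer)
        }
        try HashInfo(key: "snode.a", expirationDateMs: 1_000).insert(db)
        try HashInfo(key: "snode.a", expirationDateMs: 9_999_999).insert(db)

        // 1. Fetch just the rowIds of the expired entries
        let rowIds: [Int64] = try HashInfo
            .select(Column.rowID)
            .filter(Column("expirationDateMs") <= 2_000)
            .asRequest(of: Int64.self)
            .fetchAll(db)

        // 2. Nothing to do if nothing matched, otherwise delete by rowId
        guard !rowIds.isEmpty else { return }

        try HashInfo
            .filter(rowIds.contains(Column.rowID))
            .deleteAll(db)
    }
}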
/// This method fetches the last non-expired hash from the database for message retrieval
@ -120,35 +113,34 @@ public extension SnodeReceivedMessageInfo {
/// very common for this method to be called after the hash value has been updated but before the various `read` threads
/// have been updated, resulting in a pointless fetch for data the app has already received
static func fetchLastNotExpired(
_ db: Database,
for snode: Snode,
namespace: SnodeAPI.Namespace,
associatedWith publicKey: String,
using dependencies: Dependencies
) -> SnodeReceivedMessageInfo? {
return dependencies[singleton: .storage].read { db in
let nonLegacyHash: SnodeReceivedMessageInfo? = try SnodeReceivedMessageInfo
.filter(
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == nil ||
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == false
)
.filter(SnodeReceivedMessageInfo.Columns.key == key(for: snode, publicKey: publicKey, namespace: namespace))
.filter(SnodeReceivedMessageInfo.Columns.expirationDateMs > SnodeAPI.currentOffsetTimestampMs())
.order(SnodeReceivedMessageInfo.Columns.id.desc)
.fetchOne(db)
// If we have a non-legacy hash then return it immediately (legacy hashes had a different
// 'key' structure)
if nonLegacyHash != nil { return nonLegacyHash }
return try SnodeReceivedMessageInfo
.filter(
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == nil ||
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == false
)
.filter(SnodeReceivedMessageInfo.Columns.key == publicKey)
.order(SnodeReceivedMessageInfo.Columns.id.desc)
.fetchOne(db)
}
) throws -> SnodeReceivedMessageInfo? {
let nonLegacyHash: SnodeReceivedMessageInfo? = try SnodeReceivedMessageInfo
.filter(
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == nil ||
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == false
)
.filter(SnodeReceivedMessageInfo.Columns.key == key(for: snode, publicKey: publicKey, namespace: namespace))
.filter(SnodeReceivedMessageInfo.Columns.expirationDateMs > SnodeAPI.currentOffsetTimestampMs())
.order(SnodeReceivedMessageInfo.Columns.id.desc)
.fetchOne(db)
// If we have a non-legacy hash then return it immediately (legacy hashes had a different
// 'key' structure)
if nonLegacyHash != nil { return nonLegacyHash }
return try SnodeReceivedMessageInfo
.filter(
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == nil ||
SnodeReceivedMessageInfo.Columns.wasDeletedOrInvalid == false
)
.filter(SnodeReceivedMessageInfo.Columns.key == publicKey)
.order(SnodeReceivedMessageInfo.Columns.id.desc)
.fetchOne(db)
}
/// There are some cases where the latest message can be removed from a swarm, if we then try to poll for that message the swarm

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class DeleteAllBeforeRequest: SnodeAuthenticatedRequestBody {
@ -17,18 +18,14 @@ extension SnodeAPI {
public init(
beforeMs: UInt64,
namespace: SnodeAPI.Namespace?,
pubkey: String,
timestampMs: UInt64,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo,
timestampMs: UInt64
) {
self.beforeMs = beforeMs
self.namespace = namespace
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey,
authInfo: authInfo,
timestampMs: timestampMs
)
}
@ -52,12 +49,12 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("delete_before" || namespace || before)`, signed by
/// `pubkey`. Must be base64 encoded (json) or bytes (OMQ). `namespace` is the stringified
/// version of the given non-default namespace parameter (i.e. "-42" or "all"), or the empty
/// string for the default namespace (whether explicitly given or not).
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.deleteAllBefore.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.deleteAllBefore.path.bytes
.appending(
contentsOf: (namespace == nil ?
"all" :
@ -66,16 +63,7 @@ extension SnodeAPI {
)
.appending(contentsOf: "\(beforeMs)".data(using: .ascii)?.bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class DeleteAllMessagesRequest: SnodeAuthenticatedRequestBody {
@ -19,17 +20,13 @@ extension SnodeAPI {
public init(
namespace: SnodeAPI.Namespace,
pubkey: String,
timestampMs: UInt64,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo,
timestampMs: UInt64
) {
self.namespace = namespace
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey,
authInfo: authInfo,
timestampMs: timestampMs
)
}
@ -50,26 +47,17 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `( "delete_all" || namespace || timestamp )`, where
/// `namespace` is the empty string for the default namespace (whether explicitly specified or
/// not), and otherwise the stringified version of the namespace parameter (i.e. "99" or "-42" or "all").
/// The signature must be signed by the ed25519 pubkey in `pubkey` (omitting the leading prefix).
/// Must be base64 encoded for json requests; binary for OMQ requests.
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.deleteAll.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.deleteAll.path.bytes
.appending(contentsOf: namespace.verificationString.bytes)
.appending(contentsOf: timestampMs.map { "\($0)" }?.data(using: .ascii)?.bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class DeleteMessagesRequest: SnodeAuthenticatedRequestBody {
@ -17,18 +18,12 @@ extension SnodeAPI {
public init(
messageHashes: [String],
requireSuccessfulDeletion: Bool,
pubkey: String,
ed25519PublicKey: [UInt8]?,
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo
) {
self.messageHashes = messageHashes
self.requireSuccessfulDeletion = requireSuccessfulDeletion
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey
)
super.init(authInfo: authInfo)
}
// MARK: - Coding
@ -48,23 +43,14 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("delete" || messages...)`; this signs the value constructed
/// by concatenating "delete" and all `messages` values, using `pubkey` to sign. Must be base64
/// encoded for json requests; binary for OMQ requests.
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.deleteMessages.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.deleteMessages.path.bytes
.appending(contentsOf: messageHashes.joined().bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class GetExpiriesRequest: SnodeAuthenticatedRequestBody {
@ -16,19 +17,13 @@ extension SnodeAPI {
public init(
messageHashes: [String],
pubkey: String,
subkey: String?,
timestampMs: UInt64,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo,
timestampMs: UInt64
) {
self.messageHashes = messageHashes
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey,
subkey: subkey,
authInfo: authInfo,
timestampMs: timestampMs
)
}
@ -45,23 +40,14 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("get_expiries" || timestamp || messages[0] || ... || messages[N])`
/// where `timestamp` is expressed as a string (base10). The signature must be base64 encoded (json) or bytes (bt).
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.getExpiries.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.getExpiries.path.bytes
.appending(contentsOf: timestampMs.map { "\($0)" }?.data(using: .ascii)?.bytes)
.appending(contentsOf: messageHashes.joined().bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class GetMessagesRequest: SnodeAuthenticatedRequestBody {
@ -21,11 +22,8 @@ extension SnodeAPI {
public init(
lastHash: String,
namespace: SnodeAPI.Namespace?,
pubkey: String,
subkey: String?,
authInfo: AuthenticationInfo,
timestampMs: UInt64,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8],
maxCount: Int64? = nil,
maxSize: Int64? = nil
) {
@ -35,10 +33,7 @@ extension SnodeAPI {
self.maxSize = maxSize
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey,
subkey: subkey,
authInfo: authInfo,
timestampMs: timestampMs
)
}
@ -58,25 +53,16 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("retrieve" || namespace || timestamp)` (if using a non-0
/// namespace), or `("retrieve" || timestamp)` when fetching from the default namespace. Both
/// namespace and timestamp are the base10 expressions of the relevant values. Must be base64
/// encoded for json requests; binary for OMQ requests.
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.getMessages.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.getMessages.path.bytes
.appending(contentsOf: namespace?.verificationString.bytes)
.appending(contentsOf: timestampMs.map { "\($0)" }?.data(using: .ascii)?.bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -15,7 +15,7 @@ public struct OxenDaemonRPCRequest<T: Encodable>: Encodable {
endpoint: SnodeAPI.Endpoint,
body: T
) {
self.endpoint = endpoint.rawValue
self.endpoint = endpoint.path
self.body = body
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class RevokeSubkeyRequest: SnodeAuthenticatedRequestBody {
@ -14,17 +15,11 @@ extension SnodeAPI {
public init(
subkeyToRevoke: String,
pubkey: String,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo
) {
self.subkeyToRevoke = subkeyToRevoke
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey
)
super.init(authInfo: authInfo)
}
// MARK: - Coding
@ -39,22 +34,13 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("revoke_subkey" || subkey)`; this signs the subkey tag,
/// using `pubkey` to sign. Must be base64 encoded for json requests; binary for OMQ requests.
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.revokeSubkey.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.revokeSubkey.path.bytes
.appending(contentsOf: subkeyToRevoke.bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class SendMessageRequest: SnodeAuthenticatedRequestBody {
@ -16,19 +17,14 @@ extension SnodeAPI {
public init(
message: SnodeMessage,
namespace: SnodeAPI.Namespace,
subkey: String?,
timestampMs: UInt64,
ed25519PublicKey: [UInt8]?,
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo,
timestampMs: UInt64
) {
self.message = message
self.namespace = namespace
super.init(
pubkey: message.recipient,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey,
subkey: subkey,
authInfo: authInfo,
timestampMs: timestampMs
)
}
@ -51,26 +47,17 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("store" || namespace || timestamp)`, where namespace and
/// `timestamp` are the base10 expression of the namespace and `timestamp` values. Must be
/// base64 encoded for json requests; binary for OMQ requests. For non-05 type pubkeys (i.e. non
/// session ids) the signature will be verified using `pubkey`. For 05 pubkeys, see the following
/// option.
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.sendMessage.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.sendMessage.path.bytes
.appending(contentsOf: namespace.verificationString.bytes)
.appending(contentsOf: timestampMs.map { "\($0)" }?.data(using: .ascii)?.bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -12,27 +12,16 @@ public class SnodeAuthenticatedRequestBody: Encodable {
case signatureBase64 = "signature"
}
private let pubkey: String
/// This value should only be provided if the `pubkey` value is an x25519 public key
private let ed25519PublicKey: [UInt8]?
internal let ed25519SecretKey: [UInt8]
private let subkey: String?
internal let authInfo: SnodeAPI.AuthenticationInfo
internal let timestampMs: UInt64?
// MARK: - Initialization
public init(
pubkey: String,
ed25519PublicKey: [UInt8]?,
ed25519SecretKey: [UInt8],
subkey: String? = nil,
authInfo: SnodeAPI.AuthenticationInfo,
timestampMs: UInt64? = nil
) {
self.pubkey = pubkey
self.ed25519PublicKey = ed25519PublicKey
self.ed25519SecretKey = ed25519SecretKey
self.subkey = subkey
self.authInfo = authInfo
self.timestampMs = timestampMs
}
@ -42,17 +31,26 @@ public class SnodeAuthenticatedRequestBody: Encodable {
var container: KeyedEncodingContainer<CodingKeys> = encoder.container(keyedBy: CodingKeys.self)
// Generate the signature for the request for encoding
let signatureBase64: String = try generateSignature().toBase64()
try container.encode(pubkey, forKey: .pubkey)
try container.encodeIfPresent(subkey, forKey: .subkey)
let signatureBase64: String = try generateSignature(using: encoder.dependencies).toBase64()
try container.encodeIfPresent(timestampMs, forKey: .timestampMs)
try container.encodeIfPresent(ed25519PublicKey?.toHexString(), forKey: .ed25519PublicKey)
try container.encode(signatureBase64, forKey: .signatureBase64)
switch authInfo {
case .standard(let pubkey, let ed25519KeyPair):
try container.encode(pubkey, forKey: .pubkey)
try container.encode(ed25519KeyPair.publicKey.toHexString(), forKey: .ed25519PublicKey)
case .groupAdmin(let pubkey, _):
try container.encode(pubkey, forKey: .pubkey)
case .groupMember(let pubkey, let authData):
try container.encode(pubkey, forKey: .pubkey)
}
}
// MARK: - Abstract Functions
func generateSignature() throws -> [UInt8] {
func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
preconditionFailure("abstract class - override in subclass")
}
}
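Note: `generateSignature(using: encoder.dependencies)` relies on the dependency container being carried through the coder itself (the same idea as the `JSONDecoder(using:)` change in the tests). A generic sketch of threading a context object through `userInfo` - the key name and `Dependencies` members here are assumptions, not the project's real implementation:

import Foundation

// Stand-in for the project's dependency container
final class Dependencies {
    static let userInfoKey: CodingUserInfoKey = CodingUserInfoKey(rawValue: "dependencies")!
    let timestampMs: UInt64 = 1_694_000_000_000
}

extension JSONDecoder {
    convenience init(using dependencies: Dependencies) {
        self.init()
        self.userInfo = [Dependencies.userInfoKey: dependencies]
    }
}

struct Message: Decodable {
    let text: String
    let receivedAtMs: UInt64

    init(from decoder: Decoder) throws {
        // Pull the container back out inside the Decodable implementation
        let dependencies = decoder.userInfo[Dependencies.userInfoKey] as? Dependencies
        let container = try decoder.container(keyedBy: CodingKeys.self)
        self.text = try container.decode(String.self, forKey: .text)
        self.receivedAtMs = (dependencies?.timestampMs ?? 0)
    }

    enum CodingKeys: String, CodingKey { case text }
}

let data = Data(#"{"text":"hello"}"#.utf8)
let message = try? JSONDecoder(using: Dependencies()).decode(Message.self, from: data)
assert(message?.text == "hello")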

View File

@ -55,7 +55,7 @@ internal extension SnodeAPI {
public func encode(to encoder: Encoder) throws {
var container: KeyedEncodingContainer<CodingKeys> = encoder.container(keyedBy: CodingKeys.self)
try container.encode(endpoint.rawValue, forKey: .method)
try container.encode(endpoint.path, forKey: .method)
try jsonBodyEncoder?(&container, .params)
}
}

View File

@ -27,7 +27,13 @@ public struct SnodeRequest<T: Encodable>: Encodable {
public func encode(to encoder: Encoder) throws {
var container: KeyedEncodingContainer<CodingKeys> = encoder.container(keyedBy: CodingKeys.self)
try container.encode(endpoint.rawValue, forKey: .method)
try container.encode(endpoint.path, forKey: .method)
try container.encode(body, forKey: .body)
}
}
// MARK: - BatchRequestChildRetrievable
extension SnodeRequest: BatchRequestChildRetrievable where T: BatchRequestChildRetrievable {
public var requests: [HTTP.BatchRequest.Child] { body.requests }
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class UpdateExpiryAllRequest: SnodeAuthenticatedRequestBody {
@ -23,18 +24,12 @@ extension SnodeAPI {
public init(
expiryMs: UInt64,
namespace: SnodeAPI.Namespace?,
pubkey: String,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8]
authInfo: AuthenticationInfo
) {
self.expiryMs = expiryMs
self.namespace = namespace
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey
)
super.init(authInfo: authInfo)
}
// MARK: - Coding
@ -56,12 +51,12 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("expire_all" || namespace || expiry)`, signed by `pubkey`. Must be
/// base64 encoded (json) or bytes (OMQ). namespace should be the stringified namespace for
/// non-default namespace expiries (i.e. "42", "-99", "all"), or an empty string for the default
/// namespace (whether or not explicitly provided).
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.expireAll.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.expireAll.path.bytes
.appending(
contentsOf: (namespace == nil ?
"all" :
@ -70,16 +65,7 @@ extension SnodeAPI {
)
.appending(contentsOf: "\(expiryMs)".data(using: .ascii)?.bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
extension SnodeAPI {
public class UpdateExpiryRequest: SnodeAuthenticatedRequestBody {
@ -38,22 +39,14 @@ extension SnodeAPI {
expiryMs: UInt64,
shorten: Bool? = nil,
extend: Bool? = nil,
pubkey: String,
ed25519PublicKey: [UInt8],
ed25519SecretKey: [UInt8],
subkey: String?
authInfo: AuthenticationInfo
) {
self.messageHashes = messageHashes
self.expiryMs = expiryMs
self.shorten = shorten
self.extend = extend
super.init(
pubkey: pubkey,
ed25519PublicKey: ed25519PublicKey,
ed25519SecretKey: ed25519SecretKey,
subkey: subkey
)
super.init(authInfo: authInfo)
}
// MARK: - Coding
@ -71,27 +64,18 @@ extension SnodeAPI {
// MARK: - Abstract Methods
override func generateSignature() throws -> [UInt8] {
override func generateSignature(using dependencies: Dependencies) throws -> [UInt8] {
/// Ed25519 signature of `("expire" || ShortenOrExtend || expiry || messages[0] || ...`
/// ` || messages[N])` where `expiry` is the expiry timestamp expressed as a string.
/// `ShortenOrExtend` is the string "shorten" if the shorten option is given (and true),
/// "extend" if `extend` is true, and empty otherwise. The signature must be base64 encoded (json) or bytes (bt).
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.expire.rawValue.bytes
let verificationBytes: [UInt8] = SnodeAPI.Endpoint.expire.path.bytes
.appending(contentsOf: (shorten == true ? "shorten".bytes : []))
.appending(contentsOf: (extend == true ? "extend".bytes : []))
.appending(contentsOf: "\(expiryMs)".data(using: .ascii)?.bytes)
.appending(contentsOf: messageHashes.joined().bytes)
guard
let signatureBytes: [UInt8] = sodium.wrappedValue.sign.signature(
message: verificationBytes,
secretKey: ed25519SecretKey
)
else {
throw SnodeAPIError.signingFailed
}
return signatureBytes
return try authInfo.generateSignature(with: verificationBytes, using: dependencies)
}
}
}

View File

@ -0,0 +1,49 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import GRDB
import SessionUtilitiesKit
public extension SnodeAPI {
enum AuthenticationInfo: Equatable {
/// Used when interacting as the current user
case standard(pubkey: String, ed25519KeyPair: KeyPair)
/// Used when interacting as a group admin
case groupAdmin(pubkey: String, ed25519SecretKey: [UInt8])
/// Used when interacting as a group member
case groupMember(pubkey: String, authData: Data)
// MARK: - Variables
var publicKey: String {
switch self {
case .standard(let pubkey, _), .groupAdmin(let pubkey, _), .groupMember(let pubkey, _):
return pubkey
}
}
// MARK: - Functions
func generateSignature(
with verificationBytes: [UInt8],
using dependencies: Dependencies
) throws -> [UInt8] {
switch self {
case .standard(_, let ed25519KeyPair):
return try dependencies[singleton: .crypto].perform(
.signature(message: verificationBytes, secretKey: ed25519KeyPair.secretKey)
)
case .groupAdmin(_, let ed25519SecretKey):
return try dependencies[singleton: .crypto].perform(
.signature(message: verificationBytes, secretKey: ed25519SecretKey)
)
case .groupMember(_, let authData):
preconditionFailure()
}
}
}
}
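Note: each request's `generateSignature(using:)` now just builds the endpoint-specific verification bytes and hands them to `AuthenticationInfo`. As a rough illustration of the `standard`/`groupAdmin` path, here is a `("store" || namespace || timestamp)` composition signed with an Ed25519 key - CryptoKit is used purely as a stand-in for the project's Crypto abstraction and stored keys:

import CryptoKit
import Foundation

// Compose the verification bytes for a "store" request: endpoint path, then the namespace and
// timestamp as base10 strings (shown for a non-default namespace; the default namespace rules differ)
func storeVerificationBytes(namespace: Int, timestampMs: UInt64) -> Data {
    var bytes = Data("store".utf8)
    bytes.append(Data("\(namespace)".utf8))
    bytes.append(Data("\(timestampMs)".utf8))
    return bytes
}

do {
    // CryptoKit's Curve25519.Signing produces Ed25519 signatures
    let key = Curve25519.Signing.PrivateKey()
    let message = storeVerificationBytes(namespace: 42, timestampMs: 1_694_000_000_000)
    let signature = try key.signature(for: message)

    // JSON requests carry the signature base64 encoded
    let signatureBase64 = signature.base64EncodedString()
    assert(key.publicKey.isValidSignature(signature, for: message))
    print(signatureBase64)
}
catch { print("Signing failed: \(error)") }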

View File

@ -7,18 +7,57 @@ import SessionUtilitiesKit
public extension HTTP.PreparedRequest {
/// Send an onion request for the prepared data
func send(using dependencies: Dependencies) -> AnyPublisher<(ResponseInfoType, R), Error> {
return dependencies[singleton: .network]
.send(
.onionRequest(
request,
to: server,
with: publicKey,
timeout: timeout
// If we have a cached response then use that directly
if let cachedResponse: HTTP.PreparedRequest<R>.CachedResponse = self.cachedResponse {
return Just(cachedResponse)
.setFailureType(to: Error.self)
.handleEvents(
receiveSubscription: { _ in self.subscriptionHandler?() },
receiveOutput: self.outputEventHandler,
receiveCompletion: self.completionEventHandler,
receiveCancel: self.cancelEventHandler
)
)
.eraseToAnyPublisher()
}
// Otherwise trigger the request
return Just(())
.setFailureType(to: Error.self)
.tryFlatMap { _ in
switch target {
case let serverTarget as any ServerRequestTarget:
return dependencies[singleton: .network]
.send(
.onionRequest(
request,
to: serverTarget.server,
with: serverTarget.x25519PublicKey,
timeout: timeout
)
)
case let randomSnode as HTTP.RandomSnodeTarget:
guard let payload: Data = request.httpBody else { throw HTTPError.invalidPreparedRequest }
return SnodeAPI.getSwarm(for: randomSnode.publicKey, using: dependencies)
.tryFlatMapWithRandomSnode(retry: SnodeAPI.maxRetryCount) { snode in
dependencies[singleton: .network]
.send(
.onionRequest(
payload,
to: snode,
timeout: timeout
)
)
}
default: throw HTTPError.invalidPreparedRequest
}
}
.decoded(with: self, using: dependencies)
.retry(retryCount, using: dependencies)
.handleEvents(
receiveSubscription: { _ in self.subscriptionHandler?() },
receiveOutput: self.outputEventHandler,
receiveCompletion: self.completionEventHandler,
receiveCancel: self.cancelEventHandler
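Note: the reworked `send(using:)` does two things - short-circuit with `Just(cachedResponse)` when a response was already produced at preparation time, and otherwise route by target type - while attaching the same `handleEvents` hooks in both paths so the prepared request's `map`/`handleEvents` behaviours fire either way. A toy version of that shape (simplified type, not the real `HTTP.PreparedRequest`):

import Combine
import Foundation

struct PreparedThing<R> {
    let cachedResponse: R?
    let work: () -> AnyPublisher<R, Error>
    let onOutput: ((R) -> Void)?

    // Return the cached value if present, otherwise perform the work; hooks fire in both paths
    func send() -> AnyPublisher<R, Error> {
        let base: AnyPublisher<R, Error> = {
            if let cached = cachedResponse {
                return Just(cached).setFailureType(to: Error.self).eraseToAnyPublisher()
            }
            return work()
        }()

        return base
            .handleEvents(receiveOutput: onOutput)
            .eraseToAnyPublisher()
    }
}

var received: [String] = []
let prepared = PreparedThing<String>(
    cachedResponse: "cached",
    work: { Just("network").setFailureType(to: Error.self).eraseToAnyPublisher() },
    onOutput: { received.append($0) }
)
_ = prepared.send().sink(receiveCompletion: { _ in }, receiveValue: { _ in })
assert(received == ["cached"])   // the cached value was returned and the hook still fired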

View File

@ -0,0 +1,39 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
// MARK: - RandomSnodeTarget
internal extension HTTP {
struct RandomSnodeTarget: RequestTarget, Equatable {
let publicKey: String
let requiresLatestNetworkTime: Bool
var url: URL? { URL(string: "snode:\(publicKey)") }
var urlPathAndParamsString: String { return "" }
}
}
// MARK: Request - RandomSnodeTarget
public extension Request {
init(
method: HTTPMethod = .get,
endpoint: Endpoint,
publicKey: String,
headers: [HTTPHeader: String] = [:],
body: T? = nil
) {
self = Request(
method: method,
endpoint: endpoint,
target: HTTP.RandomSnodeTarget(
publicKey: publicKey,
requiresLatestNetworkTime: false // TODO: Sort this out
),
headers: headers,
body: body
)
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,34 +1,69 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
public extension SnodeAPI {
enum Endpoint: String {
case sendMessage = "store"
case getMessages = "retrieve"
case deleteMessages = "delete"
case deleteAll = "delete_all"
case deleteAllBefore = "delete_before"
case revokeSubkey = "revoke_subkey"
case expire = "expire"
case expireAll = "expire_all"
case getExpiries = "get_expiries"
case batch = "batch"
case sequence = "sequence"
enum Endpoint: EndpointType {
case sendMessage
case getMessages
case deleteMessages
case deleteAll
case deleteAllBefore
case revokeSubkey
case expire
case expireAll
case getExpiries
case batch
case sequence
case getInfo = "info"
case getSwarm = "get_snodes_for_pubkey"
case getInfo
case getSwarm
case jsonRPCCall = "json_rpc"
case oxenDaemonRPCCall = "oxend_request"
case jsonRPCCall
case oxenDaemonRPCCall
// jsonRPCCall proxied calls
case jsonGetNServiceNodes = "get_n_service_nodes"
case jsonGetNServiceNodes
// oxenDaemonRPCCall proxied calls
case daemonOnsResolve = "ons_resolve"
case daemonGetServiceNodes = "get_service_nodes"
case daemonOnsResolve
case daemonGetServiceNodes
public static var name: String { "SnodeAPI.Endpoint" }
public static var batchRequestVariant: HTTP.BatchRequest.Child.Variant = .storageServer
public var path: String {
switch self {
case .sendMessage: return "store"
case .getMessages: return "retrieve"
case .deleteMessages: return "delete"
case .deleteAll: return "delete_all"
case .deleteAllBefore: return "delete_before"
case .revokeSubkey: return "revoke_subkey"
case .expire: return "expire"
case .expireAll: return "expire_all"
case .getExpiries: return "get_expiries"
case .batch: return "batch"
case .sequence: return "sequence"
case .getInfo: return "info"
case .getSwarm: return "get_snodes_for_pubkey"
case .jsonRPCCall: return "json_rpc"
case .oxenDaemonRPCCall: return "oxend_request"
// jsonRPCCall proxied calls
case .jsonGetNServiceNodes: return "get_n_service_nodes"
// oxenDaemonRPCCall proxied calls
case .daemonOnsResolve: return "ons_resolve"
case .daemonGetServiceNodes: return "get_service_nodes"
}
}
}
}
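
Since the endpoint cases no longer carry raw values, a quick sketch of the call-site implication: path strings are now read from `path` rather than `rawValue`.

// Sketch: building the URL path component for an endpoint after this change.
let endpoint: SnodeAPI.Endpoint = .getSwarm
let path: String = "/\(endpoint.path)"   // "/get_snodes_for_pubkey"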

View File

@ -10,6 +10,7 @@ public enum SnodeAPIError: LocalizedError {
case noKeyPair
case signingFailed
case signatureVerificationFailed
case invalidAuthentication
case invalidIP
case emptySnodePool
case responseFailedValidation
@ -28,6 +29,7 @@ public enum SnodeAPIError: LocalizedError {
case .noKeyPair: return "Missing user key pair."
case .signingFailed: return "Couldn't sign message."
case .signatureVerificationFailed: return "Failed to verify the signature."
case .invalidAuthentication: return "Invalid authentication data provided."
case .invalidIP: return "Invalid IP."
case .emptySnodePool: return "Service Node pool is empty."
case .responseFailedValidation: return "Response failed validation."

View File

@ -1,6 +1,7 @@
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
public extension SnodeAPI {
enum Namespace: Int, Codable, Hashable {

View File

@ -0,0 +1,80 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import SessionUtilitiesKit
public enum SwarmDrainBehaviour {
case alwaysRandom
case limitedReuse(
count: UInt,
targetSnode: Snode?,
targetUseCount: Int,
usedSnodes: Set<Snode>
)
public static func limitedReuse(count: UInt) -> SwarmDrainBehaviour {
guard count > 1 else { return .alwaysRandom }
return .limitedReuse(count: count, targetSnode: nil, targetUseCount: 0, usedSnodes: [])
}
// MARK: - Convenience
func use(snode: Snode) -> SwarmDrainBehaviour {
switch self {
case .alwaysRandom: return .alwaysRandom
case .limitedReuse(let count, let targetSnode, let targetUseCount, let usedSnodes):
// If we are using a new snode then reset everything
guard targetSnode == snode else {
return .limitedReuse(
count: count,
targetSnode: snode,
targetUseCount: 1,
usedSnodes: usedSnodes.inserting(snode)
)
}
// Increment the use count and clear the target if it's been used too many times
let updatedUseCount: Int = (targetUseCount + 1)
return .limitedReuse(
count: count,
targetSnode: (updatedUseCount < count ? snode : nil),
targetUseCount: updatedUseCount,
usedSnodes: usedSnodes
)
}
}
public func clearTargetSnode() -> SwarmDrainBehaviour {
switch self {
case .alwaysRandom: return .alwaysRandom
case .limitedReuse(let count, _, _, let usedSnodes):
return .limitedReuse(
count: count,
targetSnode: nil,
targetUseCount: 0,
usedSnodes: usedSnodes
)
}
}
public func reset() -> SwarmDrainBehaviour {
switch self {
case .alwaysRandom: return .alwaysRandom
case .limitedReuse(let count, _, _, _):
return .limitedReuse(
count: count,
targetSnode: nil,
targetUseCount: 0,
usedSnodes: []
)
}
}
}
// MARK: - Convenience
public extension Atomic where Value == SwarmDrainBehaviour {
static var alwaysRandom: Atomic<SwarmDrainBehaviour> { Atomic(.alwaysRandom) }
}
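
A brief sketch of the intended reuse semantics, not part of the commit; `snodeA` is a placeholder `Snode` value. With `limitedReuse(count: 3)` the same snode stays targeted for three consecutive uses before the target is cleared.

// Sketch: each call returns a new value, so the result must be reassigned.
var behaviour: SwarmDrainBehaviour = .limitedReuse(count: 3)
behaviour = behaviour.use(snode: snodeA)   // targets snodeA, targetUseCount == 1
behaviour = behaviour.use(snode: snodeA)   // targetUseCount == 2, target kept
behaviour = behaviour.use(snode: snodeA)   // targetUseCount == 3, target cleared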

View File

@ -0,0 +1,23 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import Sodium
import SessionUtilitiesKit
// MARK: - Sign
public extension Crypto.Action {
static func signature(message: Bytes, secretKey: Bytes) -> Crypto.Action {
return Crypto.Action(id: "signature", args: [message, secretKey]) {
Sodium().sign.signature(message: message, secretKey: secretKey)
}
}
}
public extension Crypto.Verification {
static func signature(message: Bytes, publicKey: Bytes, signature: Bytes) -> Crypto.Verification {
return Crypto.Verification(id: "signature", args: [message, publicKey, signature]) {
Sodium().sign.verify(message: message, publicKey: publicKey, signature: signature)
}
}
}
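
A hedged usage sketch, assuming the existing `Crypto` utility exposes `generate`/`verify` entry points for these wrappers; the key and message variables are placeholders.

// Sketch only: signing and verifying with the new wrappers via an injected Crypto instance.
let signature: Bytes? = crypto.generate(.signature(message: messageBytes, secretKey: ed25519SecretKey))
let isValid: Bool = crypto.verify(.signature(message: messageBytes, publicKey: ed25519PublicKey, signature: signature ?? []))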

View File

@ -14,6 +14,7 @@ class PreparedRequestOnionRequestsSpec: QuickSpec {
case endpoint1
case endpoint2
static var name: String { "TestEndpoint" }
static var batchRequestVariant: HTTP.BatchRequest.Child.Variant { .storageServer }
static var excludedSubRequestHeaders: [HTTPHeader] { [] }
@ -52,7 +53,7 @@ class PreparedRequestOnionRequestsSpec: QuickSpec {
)
preparedRequest = HTTP.PreparedRequest(
request: request,
urlRequest: try! request.generateUrlRequest(),
urlRequest: try! request.generateUrlRequest(using: dependencies),
publicKey: TestConstants.publicKey,
responseType: Int.self,
metadata: [:],

View File

@ -2,18 +2,9 @@
import Foundation
public extension Dependencies {
static let userInfoKey: CodingUserInfoKey = CodingUserInfoKey(rawValue: "io.oxen.dependencies.codingOptions")!
}
public extension Data {
func decoded<T: Decodable>(as type: T.Type, using dependencies: Dependencies = Dependencies()) throws -> T {
do {
let decoder: JSONDecoder = JSONDecoder()
decoder.userInfo = [ Dependencies.userInfoKey: dependencies ]
return try decoder.decode(type, from: self)
}
do { return try JSONDecoder(using: dependencies).decode(type, from: self) }
catch { throw HTTPError.parsingFailed }
}

View File

@ -3,7 +3,7 @@
import Foundation
import Sodium
public struct SessionId {
public struct SessionId: Equatable {
public static let byteCount: Int = 33
public enum Prefix: String, CaseIterable {

View File

@ -2,20 +2,39 @@
import Foundation
public protocol BatchRequestChildRetrievable {
var requests: [HTTP.BatchRequest.Child] { get }
}
public extension HTTP {
struct BatchRequest: Encodable {
let requests: [Child]
struct BatchRequest: Encodable, BatchRequestChildRetrievable {
public enum CodingKeys: String, CodingKey {
// Storage Server keys
case requests
}
public init(requests: [any ErasedPreparedRequest]) {
let requestsKey: CodingKeys?
public let requests: [Child]
public init(requestsKey: CodingKeys? = nil, requests: [any ErasedPreparedRequest]) {
self.requestsKey = requestsKey
self.requests = requests.map { Child(request: $0) }
}
// MARK: - Encodable
public func encode(to encoder: Encoder) throws {
var container = encoder.singleValueContainer()
switch requestsKey {
case .requests:
var container: KeyedEncodingContainer<CodingKeys> = encoder.container(keyedBy: CodingKeys.self)
try container.encode(requests)
try container.encode(requests, forKey: .requests)
case .none:
var container: SingleValueEncodingContainer = encoder.singleValueContainer()
try container.encode(requests)
}
}
// MARK: - BatchRequest.Child
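
A hedged sketch of what the new `requestsKey` controls; `subRequests` is a placeholder `[any ErasedPreparedRequest]` value. Passing `.requests` nests the children under a "requests" key (storage server style), while the default `nil` keeps the bare-array encoding used for SOGS.

// Sketch: the two encodings selected by `requestsKey`.
let storageServerBody = HTTP.BatchRequest(requestsKey: .requests, requests: subRequests)   // {"requests": [...]}
let sogsBody = HTTP.BatchRequest(requests: subRequests)                                    // [...]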

View File

@ -5,25 +5,50 @@ import Combine
public extension HTTP {
// MARK: - HTTP.BatchResponse
typealias BatchResponse = [Decodable]
struct BatchResponse: Decodable, Collection {
public let data: [Any]
// MARK: - Collection Conformance
public var startIndex: Int { data.startIndex }
public var endIndex: Int { data.endIndex }
public var count: Int { data.count }
public subscript(index: Int) -> Any { data[index] }
public func index(after i: Int) -> Int { return data.index(after: i) }
// MARK: - Initialization
init(data: [Any]) {
self.data = data
}
public init(from decoder: Decoder) throws {
#if DEBUG
preconditionFailure("The `HTTP.BatchResponse` type cannot be decoded directly, this is simply here to allow for `PreparedSendData<HTTP.BatchResponse>` support")
#else
data = []
#endif
}
}
// MARK: - BatchResponseMap<E>
struct BatchResponseMap<E: EndpointType>: Decodable, ErasedBatchResponseMap {
public let data: [E: Decodable]
public let data: [E: Any]
public subscript(position: E) -> Decodable? {
public subscript(position: E) -> Any? {
get { return data[position] }
}
public var count: Int { data.count }
public var keys: Dictionary<E, Decodable>.Keys { data.keys }
public var values: Dictionary<E, Decodable>.Values { data.values }
public var keys: Dictionary<E, Any>.Keys { data.keys }
public var values: Dictionary<E, Any>.Values { data.values }
// MARK: - Initialization
init(data: [E: Decodable]) {
init(data: [E: Any]) {
self.data = data
}
@ -39,14 +64,14 @@ public extension HTTP {
public static func from(
batchEndpoints: [any EndpointType],
responses: [Decodable]
response: HTTP.BatchResponse
) throws -> Self {
let convertedEndpoints: [E] = batchEndpoints.compactMap { $0 as? E }
guard convertedEndpoints.count == responses.count else { throw HTTPError.parsingFailed }
guard convertedEndpoints.count == response.data.count else { throw HTTPError.parsingFailed }
return BatchResponseMap(
data: zip(convertedEndpoints, responses)
data: zip(convertedEndpoints, response.data)
.reduce(into: [:]) { result, next in
result[next.0] = next.1
}
@ -56,7 +81,7 @@ public extension HTTP {
// MARK: - BatchSubResponse<T>
struct BatchSubResponse<T>: ResponseInfoType {
struct BatchSubResponse<T>: ErasedBatchSubResponse {
public enum CodingKeys: String, CodingKey {
case code
case headers
@ -72,6 +97,8 @@ public extension HTTP {
/// The body of the request; will be plain json if content-type is `application/json`, otherwise it will be base64 encoded data
public let body: T?
var erasedBody: Any? { body }
/// A flag to indicate that there was a body but it failed to parse
public let failedToParseBody: Bool
@ -94,7 +121,7 @@ public extension HTTP {
public protocol ErasedBatchResponseMap {
static func from(
batchEndpoints: [any EndpointType],
responses: [Decodable]
response: HTTP.BatchResponse
) throws -> Self
}
@ -119,6 +146,12 @@ extension HTTP.BatchSubResponse: Decodable {
}
}
// MARK: - ErasedBatchSubResponse
protocol ErasedBatchSubResponse: ResponseInfoType {
var erasedBody: Any? { get }
}
// MARK: - Convenience
internal extension HTTP.BatchResponse {
@ -159,29 +192,9 @@ internal extension HTTP.BatchResponse {
default: throw HTTPError.parsingFailed
}
return try zip(dataArray, types)
.map { data, type in try type.decoded(from: data, using: dependencies) }
}
}
public extension Publisher where Output == (ResponseInfoType, Data?), Failure == Error {
func decoded(
as types: [Decodable.Type],
requireAllResults: Bool = true,
using dependencies: Dependencies = Dependencies()
) -> AnyPublisher<(ResponseInfoType, HTTP.BatchResponse), Error> {
self
.tryMap { responseInfo, maybeData -> (ResponseInfoType, HTTP.BatchResponse) in
(
responseInfo,
try HTTP.BatchResponse.decodingResponses(
from: maybeData,
as: types,
requireAllResults: requireAllResults,
using: dependencies
)
)
}
.eraseToAnyPublisher()
return HTTP.BatchResponse(
data: try zip(dataArray, types)
.map { data, type in try type.decoded(from: data, using: dependencies) }
)
}
}
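
One consequence of storing values as `Any` is that callers now cast when pulling a sub-response back out; a hedged sketch with placeholder names from the open group API (`responseMap` is assumed to be an `HTTP.BatchResponseMap<OpenGroupAPI.Endpoint>`).

// Sketch: the subscript now returns Any?, so a cast recovers the typed sub-response.
let capabilities: HTTP.BatchSubResponse<Capabilities>? =
    responseMap[.capabilities] as? HTTP.BatchSubResponse<Capabilities>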

View File

@ -4,24 +4,26 @@ import Foundation
import Combine
import GRDB
// MARK: - HTTPRequestMetadata
public typealias HTTPRequestMetadata = String
// MARK: - HTTP.PreparedRequest<R>
public extension HTTP {
struct PreparedRequest<R> {
public struct CachedResponse {
fileprivate let info: ResponseInfoType
fileprivate let originalData: Any
fileprivate let convertedData: R
}
public let request: URLRequest
public let server: String
public let publicKey: String
public let target: any RequestTarget
public let originalType: Decodable.Type
public let responseType: R.Type
public let metadata: [HTTPRequestMetadata: Any]
public let retryCount: Int
public let timeout: TimeInterval
public let cachedResponse: CachedResponse?
fileprivate let responseConverter: ((ResponseInfoType, Any) throws -> R)
public let outputEventHandler: (((ResponseInfoType, R)) -> Void)?
public let subscriptionHandler: (() -> Void)?
public let outputEventHandler: (((CachedResponse)) -> Void)?
public let completionEventHandler: ((Subscribers.Completion<Error>) -> Void)?
public let cancelEventHandler: (() -> Void)?
@ -33,23 +35,23 @@ public extension HTTP {
public let batchEndpoints: [any EndpointType]
public let batchRequestVariant: HTTP.BatchRequest.Child.Variant
public let batchResponseTypes: [Decodable.Type]
public let requireAllBatchResponses: Bool
public let excludedSubRequestHeaders: [String]
/// The `jsonBodyEncoder` is used to simplify the encoding for `BatchRequest`
private let jsonBodyEncoder: ((inout KeyedEncodingContainer<HTTP.BatchRequest.Child.CodingKeys>, HTTP.BatchRequest.Child.CodingKeys) throws -> ())?
private let jsonKeyedBodyEncoder: ((inout KeyedEncodingContainer<HTTP.BatchRequest.Child.CodingKeys>, HTTP.BatchRequest.Child.CodingKeys) throws -> ())?
private let jsonBodyEncoder: ((inout SingleValueEncodingContainer) throws -> ())?
private let b64: String?
private let bytes: [UInt8]?
public init<T: Encodable, E: EndpointType>(
request: Request<T, E>,
urlRequest: URLRequest,
publicKey: String,
responseType: R.Type,
metadata: [HTTPRequestMetadata: Any] = [:],
requireAllBatchResponses: Bool = true,
retryCount: Int = 0,
timeout: TimeInterval
) where R: Decodable {
let batchRequests: [HTTP.BatchRequest.Child]? = (request.body as? HTTP.BatchRequest)?.requests
let batchRequests: [HTTP.BatchRequest.Child]? = (request.body as? BatchRequestChildRetrievable)?.requests
let batchEndpoints: [E] = (batchRequests?
.compactMap { $0.request.batchRequestEndpoint(of: E.self) })
.defaulting(to: [])
@ -62,25 +64,79 @@ public extension HTTP {
.flatMap { $0 })
self.request = urlRequest
self.server = request.server
self.publicKey = publicKey
self.originalType = responseType
self.target = request.target
self.originalType = R.self
self.responseType = responseType
self.metadata = metadata
self.retryCount = retryCount
self.timeout = timeout
self.responseConverter = { _, response in
guard let validResponse: R = response as? R else { throw HTTPError.invalidResponse }
return validResponse
}
self.cachedResponse = nil
// When we are making a batch request we also want to call through any sub request event
// handlers (this allows a lot more reusability for individual requests to manage their
// own results or custom handling just when triggered via a batch request)
self.responseConverter = {
guard
let subRequestResponseConverters: [(Int, ((ResponseInfoType, Any) throws -> Any))] = batchRequests?
.enumerated()
.compactMap({ ($0.0, $0.1.request.erasedResponseConverter) }),
!subRequestResponseConverters.isEmpty
else {
return { info, response in
guard let validResponse: R = response as? R else { throw HTTPError.invalidResponse }
return validResponse
}
}
// Results are returned in the same order they were made in so we can use the matching
// indexes to get the correct response
return { info, response in
let convertedResponse: Any? = try? {
switch response {
case let batchResponse as HTTP.BatchResponse:
return HTTP.BatchResponse(
data: try subRequestResponseConverters
.map { index, responseConverter in
guard batchResponse.count > index else {
throw HTTPError.invalidResponse
}
return try responseConverter(info, batchResponse[index])
}
)
case let batchResponseMap as HTTP.BatchResponseMap<E>:
return HTTP.BatchResponseMap(
data: try subRequestResponseConverters
.reduce(into: [E: Any]()) { result, subResponse in
let index: Int = subResponse.0
let responseConverter: ((ResponseInfoType, Any) throws -> Any) = subResponse.1
guard
batchEndpoints.count > index,
let targetResponse: Any = batchResponseMap[batchEndpoints[index]]
else { throw HTTPError.invalidResponse }
let endpoint: E = batchEndpoints[index]
result[endpoint] = try responseConverter(info, targetResponse)
}
)
default: throw HTTPError.invalidResponse
}
}()
guard let validResponse: R = convertedResponse as? R else {
SNLog("[PreparedRequest] Unable to convert responses for missing response")
throw HTTPError.invalidResponse
}
return validResponse
}
}()
self.outputEventHandler = {
guard
let subRequestEventHandlers: [(Int, (((ResponseInfoType, Any)) -> Void))] = batchRequests?
let subRequestEventHandlers: [(Int, ((ResponseInfoType, Any, Any) -> Void))] = batchRequests?
.enumerated()
.compactMap({ index, batchRequest in
batchRequest.request.erasedOutputEventHandler.map { (index, $0) }
@ -91,7 +147,7 @@ public extension HTTP {
// Results are returned in the same order they were made in so we can use the matching
// indexes to get the correct response
return { data in
switch data.1 {
switch data.originalData {
case let batchResponse as HTTP.BatchResponse:
subRequestEventHandlers.forEach { index, eventHandler in
guard batchResponse.count > index else {
@ -99,26 +155,27 @@ public extension HTTP {
return
}
eventHandler((data.0, batchResponse[index]))
eventHandler(data.info, batchResponse[index], batchResponse[index])
}
case let batchResponseMap as HTTP.BatchResponseMap<E>:
subRequestEventHandlers.forEach { index, eventHandler in
guard
batchEndpoints.count > index,
let targetResponse: Decodable = batchResponseMap[batchEndpoints[index]]
let targetResponse: Any = batchResponseMap[batchEndpoints[index]]
else {
SNLog("[PreparedRequest] Unable to handle output events for missing response")
return
}
eventHandler((data.0, targetResponse))
eventHandler(data.info, targetResponse, targetResponse)
}
default: SNLog("[PreparedRequest] Unable to handle output events for unknown batch response type")
}
}
}()
self.subscriptionHandler = nil
self.completionEventHandler = {
guard
let subRequestEventHandlers: [((Subscribers.Completion<Error>) -> Void)] = batchRequests?
@ -143,12 +200,13 @@ public extension HTTP {
// The following data is needed in this type for handling batch requests
self.method = request.method
self.endpoint = request.endpoint
self.endpointName = "\(E.self)"
self.path = request.urlPathAndParamsString
self.endpointName = E.name
self.path = request.target.urlPathAndParamsString
self.batchEndpoints = batchEndpoints
self.batchRequestVariant = E.batchRequestVariant
self.batchResponseTypes = batchResponseTypes.defaulting(to: [HTTP.BatchSubResponse<R>.self])
self.requireAllBatchResponses = requireAllBatchResponses
self.excludedSubRequestHeaders = E.excludedSubRequestHeaders
if batchRequests != nil && self.batchEndpoints.count != self.batchResponseTypes.count {
@ -159,19 +217,24 @@ public extension HTTP {
// they are encoded correctly so the server knows how to handle them
switch request.body {
case let bodyString as String:
self.jsonKeyedBodyEncoder = nil
self.jsonBodyEncoder = nil
self.b64 = bodyString
self.bytes = nil
case let bodyBytes as [UInt8]:
self.jsonKeyedBodyEncoder = nil
self.jsonBodyEncoder = nil
self.b64 = nil
self.bytes = bodyBytes
default:
self.jsonBodyEncoder = { [body = request.body] container, key in
self.jsonKeyedBodyEncoder = { [body = request.body] container, key in
try container.encodeIfPresent(body, forKey: key)
}
self.jsonBodyEncoder = { [body = request.body] container in
try container.encode(body)
}
self.b64 = nil
self.bytes = nil
}
@ -179,15 +242,15 @@ public extension HTTP {
fileprivate init<U: Decodable>(
request: URLRequest,
server: String,
publicKey: String,
target: any RequestTarget,
originalType: U.Type,
responseType: R.Type,
metadata: [HTTPRequestMetadata: Any],
retryCount: Int,
timeout: TimeInterval,
cachedResponse: CachedResponse?,
responseConverter: @escaping (ResponseInfoType, Any) throws -> R,
outputEventHandler: (((ResponseInfoType, R)) -> Void)?,
subscriptionHandler: (() -> Void)?,
outputEventHandler: ((CachedResponse) -> Void)?,
completionEventHandler: ((Subscribers.Completion<Error>) -> Void)?,
cancelEventHandler: (() -> Void)?,
method: HTTPMethod,
@ -197,20 +260,22 @@ public extension HTTP {
batchEndpoints: [any EndpointType],
batchRequestVariant: HTTP.BatchRequest.Child.Variant,
batchResponseTypes: [Decodable.Type],
requireAllBatchResponses: Bool,
excludedSubRequestHeaders: [String],
jsonBodyEncoder: ((inout KeyedEncodingContainer<HTTP.BatchRequest.Child.CodingKeys>, HTTP.BatchRequest.Child.CodingKeys) throws -> ())?,
jsonKeyedBodyEncoder: ((inout KeyedEncodingContainer<HTTP.BatchRequest.Child.CodingKeys>, HTTP.BatchRequest.Child.CodingKeys) throws -> ())?,
jsonBodyEncoder: ((inout SingleValueEncodingContainer) throws -> ())?,
b64: String?,
bytes: [UInt8]?
) {
self.request = request
self.server = server
self.publicKey = publicKey
self.target = target
self.originalType = originalType
self.responseType = responseType
self.metadata = metadata
self.retryCount = retryCount
self.timeout = timeout
self.cachedResponse = cachedResponse
self.responseConverter = responseConverter
self.subscriptionHandler = subscriptionHandler
self.outputEventHandler = outputEventHandler
self.completionEventHandler = completionEventHandler
self.cancelEventHandler = cancelEventHandler
@ -223,7 +288,9 @@ public extension HTTP {
self.batchEndpoints = batchEndpoints
self.batchRequestVariant = batchRequestVariant
self.batchResponseTypes = batchResponseTypes
self.requireAllBatchResponses = requireAllBatchResponses
self.excludedSubRequestHeaders = excludedSubRequestHeaders
self.jsonKeyedBodyEncoder = jsonKeyedBodyEncoder
self.jsonBodyEncoder = jsonBodyEncoder
self.b64 = b64
self.bytes = bytes
@ -239,7 +306,8 @@ public protocol ErasedPreparedRequest {
var batchResponseTypes: [Decodable.Type] { get }
var excludedSubRequestHeaders: [String] { get }
var erasedOutputEventHandler: (((ResponseInfoType, Any)) -> Void)? { get }
var erasedResponseConverter: ((ResponseInfoType, Any) throws -> Any) { get }
var erasedOutputEventHandler: ((ResponseInfoType, Any, Any) -> Void)? { get }
var completionEventHandler: ((Subscribers.Completion<Error>) -> Void)? { get }
var cancelEventHandler: (() -> Void)? { get }
@ -248,21 +316,60 @@ public protocol ErasedPreparedRequest {
}
extension HTTP.PreparedRequest: ErasedPreparedRequest {
public var erasedOutputEventHandler: (((ResponseInfoType, Any)) -> Void)? {
guard let outputEventHandler: (((ResponseInfoType, R)) -> Void) = self.outputEventHandler else {
public var erasedResponseConverter: ((ResponseInfoType, Any) throws -> Any) {
let originalType: Decodable.Type = self.originalType
let responseConverter: ((ResponseInfoType, Any) throws -> R) = self.responseConverter
return { info, data in
switch data {
case let erasedSubResponse as ErasedBatchSubResponse:
return HTTP.BatchSubResponse(
code: erasedSubResponse.code,
headers: erasedSubResponse.headers,
body: try erasedSubResponse.erasedBody
.map { originalType.from($0) }
.map { try responseConverter(info, $0) }
)
default: return try originalType.from(data).map { try responseConverter(info, $0) } as Any
}
}
}
public var erasedOutputEventHandler: ((ResponseInfoType, Any, Any) -> Void)? {
guard let outputEventHandler: ((CachedResponse) -> Void) = self.outputEventHandler else {
return nil
}
return { data in
guard let subResponse: HTTP.BatchSubResponse<R> = data.1 as? HTTP.BatchSubResponse<R> else {
guard let directResponse: R = data.1 as? R else { return }
outputEventHandler((data.0, directResponse))
return
let originalType: Decodable.Type = self.originalType
let originalConverter: ((ResponseInfoType, Any) throws -> R) = self.responseConverter
return { info, _, data in
switch data {
case let erasedSubResponse as ErasedBatchSubResponse:
guard
let erasedBody: Any = erasedSubResponse.erasedBody.map({ originalType.from($0) }),
let validResponse: R = try? originalConverter(info, erasedBody)
else { return }
outputEventHandler(CachedResponse(
info: info,
originalData: erasedSubResponse.erasedBody as Any,
convertedData: validResponse
))
default:
guard
let erasedBody: Any = originalType.from(data),
let validResponse: R = try? originalConverter(info, erasedBody)
else { return }
outputEventHandler(CachedResponse(
info: info,
originalData: erasedBody,
convertedData: validResponse
))
}
guard let value: R = subResponse.body else { return }
outputEventHandler((subResponse, value))
}
}
@ -271,13 +378,13 @@ extension HTTP.PreparedRequest: ErasedPreparedRequest {
}
public func encodeForBatchRequest(to encoder: Encoder) throws {
var container: KeyedEncodingContainer<HTTP.BatchRequest.Child.CodingKeys> = encoder.container(keyedBy: HTTP.BatchRequest.Child.CodingKeys.self)
switch batchRequestVariant {
case .unsupported:
SNLog("Attempted to encode unsupported request type \(endpointName) as a batch subrequest")
case .sogs:
var container: KeyedEncodingContainer<HTTP.BatchRequest.Child.CodingKeys> = encoder.container(keyedBy: HTTP.BatchRequest.Child.CodingKeys.self)
// Exclude request signature headers (not used for sub-requests)
let excludedSubRequestHeaders: [String] = excludedSubRequestHeaders.map { $0.lowercased() }
let batchRequestHeaders: [String: String] = (request.allHTTPHeaderFields ?? [:])
@ -289,13 +396,14 @@ extension HTTP.PreparedRequest: ErasedPreparedRequest {
try container.encode(method, forKey: .method)
try container.encode(path, forKey: .path)
try jsonBodyEncoder?(&container, .json)
try jsonKeyedBodyEncoder?(&container, .json)
try container.encodeIfPresent(b64, forKey: .b64)
try container.encodeIfPresent(bytes, forKey: .bytes)
case .storageServer:
try container.encode(method, forKey: .method)
try jsonBodyEncoder?(&container, .params)
var container: SingleValueEncodingContainer = encoder.singleValueContainer()
try jsonBodyEncoder?(&container)
}
}
}
@ -310,14 +418,14 @@ public extension HTTP.PreparedRequest {
) throws -> HTTP.PreparedRequest<R> {
return HTTP.PreparedRequest(
request: try requestSigner(db, self, dependencies),
server: server,
publicKey: publicKey,
target: target,
originalType: originalType,
responseType: responseType,
metadata: metadata,
retryCount: retryCount,
timeout: timeout,
cachedResponse: cachedResponse,
responseConverter: responseConverter,
subscriptionHandler: subscriptionHandler,
outputEventHandler: outputEventHandler,
completionEventHandler: completionEventHandler,
cancelEventHandler: cancelEventHandler,
@ -328,36 +436,60 @@ public extension HTTP.PreparedRequest {
batchEndpoints: batchEndpoints,
batchRequestVariant: batchRequestVariant,
batchResponseTypes: batchResponseTypes,
requireAllBatchResponses: requireAllBatchResponses,
excludedSubRequestHeaders: excludedSubRequestHeaders,
jsonKeyedBodyEncoder: jsonKeyedBodyEncoder,
jsonBodyEncoder: jsonBodyEncoder,
b64: b64,
bytes: bytes
)
}
/// Due to the way prepared requests work we need to cast between different types and, as a result, can't avoid potentially
/// throwing when mapping; the `map` function therefore just calls through to `tryMap`, but both exist to keep
/// the interface consistent for dev use
func map<O>(transform: @escaping (ResponseInfoType, R) throws -> O) -> HTTP.PreparedRequest<O> {
let originalResponseConverter: ((ResponseInfoType, Any) throws -> R) = self.responseConverter
return tryMap(transform: transform)
}
func tryMap<O>(transform: @escaping (ResponseInfoType, R) throws -> O) -> HTTP.PreparedRequest<O> {
let originalConverter: ((ResponseInfoType, Any) throws -> R) = self.responseConverter
let responseConverter: ((ResponseInfoType, Any) throws -> O) = { info, response in
let validResponse: R = try originalResponseConverter(info, response)
let validResponse: R = try originalConverter(info, response)
return try transform(info, validResponse)
}
return HTTP.PreparedRequest<O>(
request: request,
server: server,
publicKey: publicKey,
target: target,
originalType: originalType,
responseType: O.self,
metadata: metadata,
retryCount: retryCount,
timeout: timeout,
cachedResponse: cachedResponse.map { data in
(try? responseConverter(data.info, data.convertedData))
.map { convertedData in
HTTP.PreparedRequest<O>.CachedResponse(
info: data.info,
originalData: data.originalData,
convertedData: convertedData
)
}
},
responseConverter: responseConverter,
subscriptionHandler: subscriptionHandler,
outputEventHandler: self.outputEventHandler.map { eventHandler in
{ data in
guard let validResponse: R = try? originalResponseConverter(data.0, data.1) else { return }
guard let validResponse: R = try? originalConverter(data.info, data.originalData) else {
return
}
eventHandler((data.0, validResponse))
eventHandler(CachedResponse(
info: data.info,
originalData: data.originalData,
convertedData: validResponse
))
}
},
completionEventHandler: completionEventHandler,
@ -369,7 +501,9 @@ public extension HTTP.PreparedRequest {
batchEndpoints: batchEndpoints,
batchRequestVariant: batchRequestVariant,
batchResponseTypes: batchResponseTypes,
requireAllBatchResponses: requireAllBatchResponses,
excludedSubRequestHeaders: excludedSubRequestHeaders,
jsonKeyedBodyEncoder: jsonKeyedBodyEncoder,
jsonBodyEncoder: jsonBodyEncoder,
b64: b64,
bytes: bytes
@ -377,19 +511,36 @@ public extension HTTP.PreparedRequest {
}
func handleEvents(
receiveSubscription: (() -> Void)? = nil,
receiveOutput: (((ResponseInfoType, R)) -> Void)? = nil,
receiveCompletion: ((Subscribers.Completion<Error>) -> Void)? = nil,
receiveCancel: (() -> Void)? = nil
) -> HTTP.PreparedRequest<R> {
let outputEventHandler: (((ResponseInfoType, R)) -> Void)? = {
switch (self.outputEventHandler, receiveOutput) {
let subscriptionHandler: (() -> Void)? = {
switch (self.subscriptionHandler, receiveSubscription) {
case (.none, .none): return nil
case (.some(let eventHandler), .none): return eventHandler
case (.none, .some(let eventHandler)): return eventHandler
case (.some(let originalEventHandler), .some(let eventHandler)):
return {
originalEventHandler()
eventHandler()
}
}
}()
let outputEventHandler: ((CachedResponse) -> Void)? = {
switch (self.outputEventHandler, receiveOutput) {
case (.none, .none): return nil
case (.some(let eventHandler), .none): return eventHandler
case (.none, .some(let eventHandler)):
return { data in
eventHandler((data.info, data.convertedData))
}
case (.some(let originalEventHandler), .some(let eventHandler)):
return { data in
originalEventHandler(data)
eventHandler(data)
eventHandler((data.info, data.convertedData))
}
}
}()
@ -420,14 +571,14 @@ public extension HTTP.PreparedRequest {
return HTTP.PreparedRequest(
request: request,
server: server,
publicKey: publicKey,
target: target,
originalType: originalType,
responseType: responseType,
metadata: metadata,
retryCount: retryCount,
timeout: timeout,
cachedResponse: cachedResponse,
responseConverter: responseConverter,
subscriptionHandler: subscriptionHandler,
outputEventHandler: outputEventHandler,
completionEventHandler: completionEventHandler,
cancelEventHandler: cancelEventHandler,
@ -438,7 +589,9 @@ public extension HTTP.PreparedRequest {
batchEndpoints: batchEndpoints,
batchRequestVariant: batchRequestVariant,
batchResponseTypes: batchResponseTypes,
requireAllBatchResponses: requireAllBatchResponses,
excludedSubRequestHeaders: excludedSubRequestHeaders,
jsonKeyedBodyEncoder: jsonKeyedBodyEncoder,
jsonBodyEncoder: jsonBodyEncoder,
b64: b64,
bytes: bytes
@ -446,9 +599,64 @@ public extension HTTP.PreparedRequest {
}
}
// MARK: - Response
public extension HTTP.PreparedRequest {
static func cached<E: EndpointType>(
_ cachedResponse: R,
endpoint: E
) -> HTTP.PreparedRequest<R> where R: Decodable {
return HTTP.PreparedRequest(
request: URLRequest(url: URL(fileURLWithPath: "")),
target: HTTP.ServerTarget(server: "", path: "", queryParameters: [:], x25519PublicKey: ""),
originalType: R.self,
responseType: R.self,
retryCount: 0,
timeout: 0,
cachedResponse: HTTP.PreparedRequest<R>.CachedResponse(
info: HTTP.ResponseInfo(code: 0, headers: [:]),
originalData: cachedResponse,
convertedData: cachedResponse
),
responseConverter: { _, _ in cachedResponse },
subscriptionHandler: nil,
outputEventHandler: nil,
completionEventHandler: nil,
cancelEventHandler: nil,
method: .get,
endpoint: endpoint,
endpointName: E.name,
path: "",
batchEndpoints: [],
batchRequestVariant: .unsupported,
batchResponseTypes: [],
requireAllBatchResponses: false,
excludedSubRequestHeaders: [],
jsonKeyedBodyEncoder: nil,
jsonBodyEncoder: nil,
b64: nil,
bytes: nil
)
}
}
// MARK: - HTTP.PreparedRequest<R>.CachedResponse
public extension Publisher where Failure == Error {
func eraseToAnyPublisher<R>() -> AnyPublisher<(ResponseInfoType, R), Error> where Output == HTTP.PreparedRequest<R>.CachedResponse {
return self
.map { ($0.info, $0.convertedData) }
.eraseToAnyPublisher()
}
}
// MARK: - Decoding
public extension Decodable {
fileprivate static func from(_ value: Any) -> Self? {
return (value as? Self)
}
static func decoded(from data: Data, using dependencies: Dependencies = Dependencies()) throws -> Self {
return try data.decoded(as: Self.self, using: dependencies)
}
@ -458,23 +666,31 @@ public extension Publisher where Output == (ResponseInfoType, Data?), Failure ==
func decoded<R>(
with preparedRequest: HTTP.PreparedRequest<R>,
using dependencies: Dependencies
) -> AnyPublisher<(ResponseInfoType, R), Error> {
) -> AnyPublisher<HTTP.PreparedRequest<R>.CachedResponse, Error> {
self
.tryMap { responseInfo, maybeData -> (ResponseInfoType, R) in
.tryMap { responseInfo, maybeData -> HTTP.PreparedRequest<R>.CachedResponse in
// Depending on the 'originalType' we need to process the response differently
let targetData: Any = try {
switch preparedRequest.originalType {
case let erasedBatchResponse as ErasedBatchResponseMap.Type:
let responses: HTTP.BatchResponse = try HTTP.BatchResponse.decodingResponses(
let response: HTTP.BatchResponse = try HTTP.BatchResponse.decodingResponses(
from: maybeData,
as: preparedRequest.batchResponseTypes,
requireAllResults: true,
requireAllResults: preparedRequest.requireAllBatchResponses,
using: dependencies
)
return try erasedBatchResponse.from(
batchEndpoints: preparedRequest.batchEndpoints,
responses: responses
response: response
)
case is HTTP.BatchResponse.Type:
return try HTTP.BatchResponse.decodingResponses(
from: maybeData,
as: preparedRequest.batchResponseTypes,
requireAllResults: preparedRequest.requireAllBatchResponses,
using: dependencies
)
case is NoResponse.Type: return NoResponse()
@ -494,9 +710,11 @@ public extension Publisher where Output == (ResponseInfoType, Data?), Failure ==
}()
// Generate and return the converted data
let convertedData: R = try preparedRequest.responseConverter(responseInfo, targetData)
return (responseInfo, convertedData)
return HTTP.PreparedRequest<R>.CachedResponse(
info: responseInfo,
originalData: targetData,
convertedData: try preparedRequest.responseConverter(responseInfo, targetData)
)
}
.eraseToAnyPublisher()
}
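
To tie the pieces together, a hedged sketch of the call-site flow after this change; `prepared` is assumed to be an existing `HTTP.PreparedRequest<[String]>`.

// Sketch: map converts the decoded body, handleEvents can now also observe subscription.
let transformed: HTTP.PreparedRequest<Int> = prepared
    .map { _, values in values.count }
    .handleEvents(
        receiveSubscription: { SNLog("Request subscribed") },
        receiveOutput: { output in SNLog("Received \(output.1) item(s)") }
    )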

View File

@ -10,6 +10,7 @@ public typealias NoBody = Empty
public typealias NoResponse = Empty
public protocol EndpointType: Hashable {
static var name: String { get }
static var batchRequestVariant: HTTP.BatchRequest.Child.Variant { get }
static var excludedSubRequestHeaders: [HTTPHeader] { get }
@ -25,9 +26,8 @@ public extension EndpointType {
public struct Request<T: Encodable, Endpoint: EndpointType> {
public let method: HTTPMethod
public let server: String
public let target: any RequestTarget
public let endpoint: Endpoint
public let queryParameters: [HTTPQueryParam: String]
public let headers: [HTTPHeader: String]
/// This is the body value sent during the request
///
@ -39,27 +39,21 @@ public struct Request<T: Encodable, Endpoint: EndpointType> {
public init(
method: HTTPMethod = .get,
server: String,
endpoint: Endpoint,
queryParameters: [HTTPQueryParam: String] = [:],
target: any RequestTarget,
headers: [HTTPHeader: String] = [:],
body: T? = nil
) {
self.method = method
self.server = server
self.endpoint = endpoint
self.queryParameters = queryParameters
self.target = target
self.headers = headers
self.body = body
}
// MARK: - Internal Methods
private var url: URL? {
return URL(string: "\(server)\(urlPathAndParamsString)")
}
private func bodyData() throws -> Data? {
private func bodyData(using dependencies: Dependencies) throws -> Data? {
// Note: Need to differentiate between JSON, b64 string and bytes body values to ensure they are
// encoded correctly so the server knows how to handle them
switch body {
@ -78,34 +72,20 @@ public struct Request<T: Encodable, Endpoint: EndpointType> {
// Having no body is fine so just return nil
guard let body: T = body else { return nil }
return try JSONEncoder().encode(body)
return try JSONEncoder(using: dependencies).encode(body)
}
}
// MARK: - Request Generation
public var urlPathAndParamsString: String {
return [
"/\(endpoint.path)",
queryParameters
.map { key, value in "\(key)=\(value)" }
.joined(separator: "&")
]
.compactMap { $0 }
.filter { !$0.isEmpty }
.joined(separator: "?")
}
public func generateUrlRequest() throws -> URLRequest {
guard let url: URL = url else { throw HTTPError.invalidURL }
public func generateUrlRequest(using dependencies: Dependencies) throws -> URLRequest {
guard let url: URL = target.url else { throw HTTPError.invalidURL }
var urlRequest: URLRequest = URLRequest(url: url)
urlRequest.httpMethod = method.rawValue
urlRequest.allHTTPHeaderFields = headers.toHTTPHeaders()
urlRequest.httpBody = try bodyData()
urlRequest.httpBody = try bodyData(using: dependencies)
return urlRequest
}
}
extension Request: Equatable where T: Equatable {}

View File

@ -0,0 +1,82 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
public protocol RequestTarget: Equatable {
var url: URL? { get }
var urlPathAndParamsString: String { get }
}
public protocol ServerRequestTarget: RequestTarget {
var server: String { get }
var x25519PublicKey: String { get }
}
public extension ServerRequestTarget {
func pathFor(path: String, queryParams: [HTTPQueryParam: String]) -> String {
return [
"/\(path)",
queryParams
.map { key, value in "\(key)=\(value)" }
.joined(separator: "&")
]
.compactMap { $0 }
.filter { !$0.isEmpty }
.joined(separator: "?")
}
}
// MARK: - ServerTarget
public extension HTTP {
struct ServerTarget: ServerRequestTarget {
public let server: String
let path: String
let queryParameters: [HTTPQueryParam: String]
public let x25519PublicKey: String
public var url: URL? { URL(string: "\(server)\(urlPathAndParamsString)") }
public var urlPathAndParamsString: String { pathFor(path: path, queryParams: queryParameters) }
// MARK: - Initialization
public init(
server: String,
path: String,
queryParameters: [HTTPQueryParam: String],
x25519PublicKey: String
) {
self.server = server
self.path = path
self.queryParameters = queryParameters
self.x25519PublicKey = x25519PublicKey
}
}
}
// MARK: Request - ServerTarget
public extension Request {
init(
method: HTTPMethod = .get,
server: String,
endpoint: Endpoint,
queryParameters: [HTTPQueryParam: String] = [:],
headers: [HTTPHeader: String] = [:],
x25519PublicKey: String,
body: T? = nil
) {
self = Request(
method: method,
endpoint: endpoint,
target: HTTP.ServerTarget(
server: server,
path: endpoint.path,
queryParameters: queryParameters,
x25519PublicKey: x25519PublicKey
),
headers: headers,
body: body
)
}
}
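
For completeness, a hedged sketch of the server-targeted convenience initializer; the server address, public key and endpoint are placeholders.

// Sketch only: a GET request against a server target.
let roomsRequest: Request<NoBody, OpenGroupAPI.Endpoint> = Request(
    server: "https://open.example.org",
    endpoint: .rooms,
    queryParameters: [:],
    x25519PublicKey: serverPublicKey
)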

View File

@ -0,0 +1,19 @@
// Copyright © 2023 Rangeproof Pty Ltd. All rights reserved.
import Foundation
public extension JSONDecoder {
convenience init(using dependencies: Dependencies = Dependencies()) {
self.init()
self.userInfo = [ Dependencies.userInfoKey: dependencies ]
}
}
public extension Decoder {
var dependencies: Dependencies {
(
(self.userInfo[Dependencies.userInfoKey] as? Dependencies) ??
Dependencies()
)
}
}
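
A short sketch of the intended usage; the response type and data are placeholders. Building the decoder this way makes the injected `Dependencies` visible to any `init(from:)` through `decoder.dependencies`.

// Sketch: decode a response while threading dependencies through Codable.
let response: SomeResponse = try JSONDecoder(using: dependencies)
    .decode(SomeResponse.self, from: responseData)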

View File

@ -3,6 +3,11 @@
import Foundation
public extension JSONEncoder {
convenience init(using dependencies: Dependencies = Dependencies()) {
self.init()
self.userInfo = [ Dependencies.userInfoKey: dependencies ]
}
func with(outputFormatting: JSONEncoder.OutputFormatting) -> JSONEncoder {
let result: JSONEncoder = self
result.outputFormatting = outputFormatting
@ -10,3 +15,12 @@ public extension JSONEncoder {
return result
}
}
public extension Encoder {
var dependencies: Dependencies {
(
(self.userInfo[Dependencies.userInfoKey] as? Dependencies) ??
Dependencies()
)
}
}
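
And the symmetric call on the encoding side; `requestBody` is a placeholder Encodable value. Any custom `encode(to:)` in the payload can read the same dependencies back via `encoder.dependencies`.

// Sketch: encode a body with dependencies attached, reusing the existing with(outputFormatting:) helper.
let bodyData: Data = try JSONEncoder(using: dependencies)
    .with(outputFormatting: .sortedKeys)
    .encode(requestBody)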

View File

@ -16,6 +16,7 @@ class BatchRequestSpec: QuickSpec {
enum TestEndpoint1: EndpointType {
case endpoint1
static var name: String { "TestEndpoint1" }
static var batchRequestVariant: HTTP.BatchRequest.Child.Variant { .sogs }
static var excludedSubRequestHeaders: [HTTPHeader] { [.testHeader] }
@ -25,6 +26,7 @@ class BatchRequestSpec: QuickSpec {
enum TestEndpoint2: EndpointType {
case endpoint2
static var name: String { "TestEndpoint2" }
static var batchRequestVariant: HTTP.BatchRequest.Child.Variant { .storageServer }
static var excludedSubRequestHeaders: [HTTPHeader] { [] }
@ -38,8 +40,19 @@ class BatchRequestSpec: QuickSpec {
// MARK: - Spec
override func spec() {
var dependencies: TestDependencies!
var request: HTTP.BatchRequest!
describe("a BatchRequest.Child") {
var request: HTTP.BatchRequest!
beforeEach {
dependencies = TestDependencies()
}
afterEach {
dependencies = nil
request = nil
}
// MARK: - when encoding
context("when encoding") {
@ -60,7 +73,7 @@ class BatchRequestSpec: QuickSpec {
requests: [
HTTP.PreparedRequest<NoResponse>(
request: httpRequest,
urlRequest: try! httpRequest.generateUrlRequest(),
urlRequest: try! httpRequest.generateUrlRequest(using: dependencies),
publicKey: "",
responseType: NoResponse.self,
timeout: 0
@ -94,7 +107,7 @@ class BatchRequestSpec: QuickSpec {
requests: [
HTTP.PreparedRequest<NoResponse>(
request: httpRequest,
urlRequest: try! httpRequest.generateUrlRequest(),
urlRequest: try! httpRequest.generateUrlRequest(using: dependencies),
publicKey: "",
responseType: NoResponse.self,
timeout: 0

View File

@ -16,6 +16,7 @@ class PreparedRequestSpec: QuickSpec {
enum TestEndpoint: EndpointType {
case endpoint
static var name: String { "TestEndpoint" }
static var batchRequestVariant: HTTP.BatchRequest.Child.Variant { .storageServer }
static var excludedSubRequestHeaders: [HTTPHeader] { [.testHeader] }
@ -29,9 +30,21 @@ class PreparedRequestSpec: QuickSpec {
// MARK: - Spec
override func spec() {
var dependencies: TestDependencies!
var urlRequest: URLRequest?
var request: Request<NoBody, TestEndpoint>!
describe("a PreparedRequest") {
var urlRequest: URLRequest?
var request: Request<NoBody, TestEndpoint>!
beforeEach {
dependencies = TestDependencies()
}
afterEach {
dependencies = nil
urlRequest = nil
request = nil
}
// MARK: - when generating a URLRequest
context("when generating a URLRequest") {
@ -48,7 +61,7 @@ class PreparedRequestSpec: QuickSpec {
],
body: nil
)
urlRequest = try? request.generateUrlRequest()
urlRequest = try? request.generateUrlRequest(using: dependencies)
expect(urlRequest?.url?.absoluteString).to(equal("testServer/endpoint"))
expect(urlRequest?.httpMethod).to(equal("POST"))
@ -70,7 +83,7 @@ class PreparedRequestSpec: QuickSpec {
],
body: nil
)
urlRequest = try? request.generateUrlRequest()
urlRequest = try? request.generateUrlRequest(using: dependencies)
expect(TestEndpoint.excludedSubRequestHeaders).to(equal([HTTPHeader.testHeader]))
expect(urlRequest?.allHTTPHeaderFields?.keys).to(contain([HTTPHeader.testHeader]))

View File

@ -66,13 +66,19 @@ extension HTTP.BatchSubResponse {
// MARK: - Encodable Convenience
extension Mocked where Self: Encodable {
func encoded() -> Data { try! JSONEncoder().with(outputFormatting: .sortedKeys).encode(self) }
func encoded(using dependencies: Dependencies) -> Data {
try! JSONEncoder(using: dependencies).with(outputFormatting: .sortedKeys).encode(self)
}
}
extension MockedGeneric where Self: Encodable {
func encoded() -> Data { try! JSONEncoder().with(outputFormatting: .sortedKeys).encode(self) }
func encoded(using dependencies: Dependencies) -> Data {
try! JSONEncoder(using: dependencies).with(outputFormatting: .sortedKeys).encode(self)
}
}
extension Array where Element: Encodable {
func encoded() -> Data { try! JSONEncoder().with(outputFormatting: .sortedKeys).encode(self) }
func encoded(using dependencies: Dependencies) -> Data {
try! JSONEncoder(using: dependencies).with(outputFormatting: .sortedKeys).encode(self)
}
}