// session-ios/SessionMessagingKit/Sending & Receiving/Pollers/Poller.swift
// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.
import Foundation
import PromiseKit
import Sodium
import SessionSnodeKit
@objc(LKPoller)
public final class Poller : NSObject {
    // NOTE(review): Not referenced anywhere in this class; presumably kept for a
    // same-file extension or legacy side effect of `shared()` — confirm before removing.
    private let storage = OWSPrimaryStorage.shared()
    /// Whether the poll loop is currently active; guards re-entrancy in `startIfNeeded()`.
    private var isPolling = false
    /// Snodes already polled in the current swarm pass; cleared each time the swarm is re-fetched.
    private var usedSnodes = Set<Snode>()
    /// Number of consecutive polls against the current snode (see `maxPollCount`).
    private var pollCount = 0

    // MARK: Settings
    /// Delay between successive polls of the same snode.
    private static let pollInterval: TimeInterval = 1.5
    /// Delay before restarting the whole polling cycle after it completes or fails.
    private static let retryInterval: TimeInterval = 0.25
    /// After polling a given snode this many times we always switch to a new one.
    ///
    /// The reason for doing this is that sometimes a snode will be giving us successful responses while
    /// it isn't actually getting messages from other snodes.
    private static let maxPollCount: UInt = 6

    // MARK: Error
    private enum Error : LocalizedError {
        case pollLimitReached

        // `LocalizedError`'s requirement is `errorDescription`, not `localizedDescription`;
        // implementing the former ensures the message survives the NSError bridge
        // (`error.localizedDescription` is derived from it).
        var errorDescription: String? {
            switch self {
                case .pollLimitReached: return "Poll limit reached for current snode."
            }
        }
    }

    // MARK: Public API

    /// Starts the poll loop if it isn't already running. Safe to call repeatedly.
    @objc public func startIfNeeded() {
        guard !isPolling else { return }
        SNLog("Started polling.")
        isPolling = true
        setUpPolling()
    }

    /// Stops the poll loop and forgets which snodes were already used this pass.
    @objc public func stop() {
        SNLog("Stopped polling.")
        isPolling = false
        usedSnodes.removeAll()
    }

    // MARK: Private API

    /// One full polling cycle: fetch the user's swarm, poll snodes from it until exhausted
    /// (or the poll limit forces a switch), then reschedule itself after `retryInterval`.
    private func setUpPolling() {
        guard isPolling else { return }
        Threading.pollerQueue.async {
            let _ = SnodeAPI.getSwarm(for: getUserHexEncodedPublicKey()).then(on: Threading.pollerQueue) { [weak self] _ -> Promise<Void> in
                guard let strongSelf = self else { return Promise { $0.fulfill(()) } }
                // New swarm fetch → every snode is fair game again.
                strongSelf.usedSnodes.removeAll()
                let (promise, seal) = Promise<Void>.pending()
                strongSelf.pollNextSnode(seal: seal)
                return promise
            }.ensure(on: Threading.pollerQueue) { [weak self] in // Timers don't do well on background queues
                guard let strongSelf = self, strongSelf.isPolling else { return }
                Timer.scheduledTimerOnMainThread(withTimeInterval: Poller.retryInterval, repeats: false) { _ in
                    guard let strongSelf = self else { return }
                    strongSelf.setUpPolling()
                }
            }
        }
    }

    /// Picks a random not-yet-used snode from the cached swarm and polls it.
    /// On failure the snode is dropped (unless we merely hit the poll limit) and the
    /// next snode is tried; `seal` is fulfilled once the swarm is exhausted.
    private func pollNextSnode(seal: Resolver<Void>) {
        let userPublicKey = getUserHexEncodedPublicKey()
        let swarm = SnodeAPI.swarmCache[userPublicKey] ?? []
        let unusedSnodes = swarm.subtracting(usedSnodes)
        if !unusedSnodes.isEmpty {
            // randomElement() uses the system's default random generator, which is cryptographically secure
            let nextSnode = unusedSnodes.randomElement()!
            usedSnodes.insert(nextSnode)
            poll(nextSnode, seal: seal).done2 {
                seal.fulfill(())
            }.catch2 { [weak self] error in
                if let error = error as? Error, error == .pollLimitReached {
                    // Deliberate rotation, not a faulty snode: just reset the counter.
                    self?.pollCount = 0
                } else {
                    SNLog("Polling \(nextSnode) failed; dropping it and switching to next snode.")
                    SnodeAPI.dropSnodeFromSwarmIfNeeded(nextSnode, publicKey: userPublicKey)
                }
                Threading.pollerQueue.async {
                    self?.pollNextSnode(seal: seal)
                }
            }
        } else {
            // Swarm exhausted for this pass; let `setUpPolling()` schedule the next cycle.
            seal.fulfill(())
        }
    }

    /// Repeatedly polls a single snode for raw messages, enqueuing a `MessageReceiveJob`
    /// for each parsed envelope, until polling stops or `maxPollCount` is reached
    /// (at which point `Error.pollLimitReached` is thrown to trigger a snode switch).
    private func poll(_ snode: Snode, seal longTermSeal: Resolver<Void>) -> Promise<Void> {
        guard isPolling else { return Promise { $0.fulfill(()) } }
        let userPublicKey = getUserHexEncodedPublicKey()
        return SnodeAPI.getRawMessages(from: snode, associatedWith: userPublicKey)
            .then(on: Threading.pollerQueue) { [weak self] rawResponse -> Promise<Void> in
                guard let strongSelf = self, strongSelf.isPolling else { return Promise { $0.fulfill(()) } }
                let messages: [SnodeReceivedMessage] = SnodeAPI.parseRawMessagesResponse(rawResponse, from: snode, associatedWith: userPublicKey)
                if !messages.isEmpty {
                    SNLog("Received \(messages.count) new message(s).")
                    GRDBStorage.shared.write { db in
                        messages.forEach { message in
                            guard let envelope = SNProtoEnvelope.from(message) else { return }
                            // Extract the threadId and add that to the messageReceive job for
                            // multi-threading and garbage collection purposes. A nil threadId
                            // is allowed; the job is enqueued without one.
                            let threadId: String? = MessageReceiver.extractSenderPublicKey(db, from: envelope)
                            do {
                                JobRunner.add(
                                    db,
                                    job: Job(
                                        variant: .messageReceive,
                                        behaviour: .runOnce,
                                        threadId: threadId,
                                        details: MessageReceiveJob.Details(
                                            data: try envelope.serializedData(),
                                            serverHash: message.info.hash,
                                            isBackgroundPoll: false
                                        )
                                    )
                                )
                                // Persist the received message after the MessageReceiveJob is created
                                try message.info.save(db)
                            }
                            catch {
                                SNLog("Failed to deserialize envelope due to error: \(error).")
                            }
                        }
                    }
                }
                strongSelf.pollCount += 1
                guard strongSelf.pollCount < Poller.maxPollCount else {
                    throw Error.pollLimitReached
                }
                // Schedule the next poll of the same snode after `pollInterval`.
                return withDelay(Poller.pollInterval, completionQueue: Threading.pollerQueue) {
                    guard let strongSelf = self, strongSelf.isPolling else { return Promise { $0.fulfill(()) } }
                    return strongSelf.poll(snode, seal: longTermSeal)
                }
            }
    }
}