// Copyright © 2022 Rangeproof Pty Ltd. All rights reserved.

import Foundation
import GRDB
import DifferenceKit

// MARK: - PagedDatabaseObserver

/// This type manages observation and paging for the provided dataQuery
///
/// **Note:** We **MUST** have accurate `filterSQL` and `orderSQL` values, otherwise the indexing won't work
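///
/// A minimal usage sketch; the `Message` table, `MessageViewModel` type, `threadId`/`focusedMessageId`
/// values and the SQL snippets below are hypothetical placeholders rather than part of this module:
/// ```
/// let observer: PagedDatabaseObserver<Message, MessageViewModel> = PagedDatabaseObserver(
///     pagedTable: Message.self,
///     pageSize: 50,
///     idColumn: .id,
///     observedChanges: [
///         PagedData.ObservedChanges(
///             table: Message.self,
///             columns: [.body, .isDeleted]
///         )
///     ],
///     filterSQL: SQL("message.\"threadId\" = \(threadId)"),
///     orderSQL: SQL("message.\"sortId\" DESC"),
///     dataQuery: { rowIds in MessageViewModel.query(rowIds: rowIds) },
///     onChangeUnsorted: { updatedData, updatedPageInfo in
///         // Sort the data and forward it to the UI (eventually via
///         // `PagedData.processAndTriggerUpdates`)
///     }
/// )
/// observer.load(.initialPageAround(id: focusedMessageId))
/// ```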
public class PagedDatabaseObserver<ObservedTable, T>: TransactionObserver where ObservedTable: TableRecord & ColumnExpressible & Identifiable, T: FetchableRecordWithRowId & Identifiable {
    private let commitProcessingQueue: DispatchQueue = DispatchQueue(
        label: "PagedDatabaseObserver.commitProcessingQueue",
        qos: .userInitiated,
        attributes: [] // Must be serial in order to avoid updates getting processed in the wrong order
    )
    
    // MARK: - Variables
    
    private let pagedTableName: String
    private let idColumnName: String
    public var pageInfo: Atomic<PagedData.PageInfo>
    
    private let observedTableChangeTypes: [String: PagedData.ObservedChanges]
    private let allObservedTableNames: Set<String>
    private let observedInserts: Set<String>
    private let observedUpdateColumns: [String: Set<String>]
    private let observedDeletes: Set<String>
    
    private let joinSQL: SQL?
    private let filterSQL: SQL
    private let groupSQL: SQL?
    private let orderSQL: SQL
    private let dataQuery: ([Int64]) -> any FetchRequest<T>
    private let associatedRecords: [ErasedAssociatedRecord]
    
    private var dataCache: Atomic<DataCache<T>> = Atomic(DataCache())
    private var isLoadingMoreData: Atomic<Bool> = Atomic(false)
    private let changesInCommit: Atomic<Set<PagedData.TrackedChange>> = Atomic([])
    private let onChangeUnsorted: (([T], PagedData.PageInfo) -> ())
    
    // MARK: - Initialization
    
    public init(
        pagedTable: ObservedTable.Type,
        pageSize: Int,
        idColumn: ObservedTable.Columns,
        observedChanges: [PagedData.ObservedChanges],
        joinSQL: SQL? = nil,
        filterSQL: SQL,
        groupSQL: SQL? = nil,
        orderSQL: SQL,
        dataQuery: @escaping ([Int64]) -> any FetchRequest<T>,
        associatedRecords: [ErasedAssociatedRecord] = [],
        onChangeUnsorted: @escaping ([T], PagedData.PageInfo) -> ()
    ) {
        let associatedTables: Set<String> = associatedRecords.map { $0.databaseTableName }.asSet()
        assert(!associatedTables.contains(pagedTable.databaseTableName), "The paged table cannot also exist as an associatedRecord")
        
        self.pagedTableName = pagedTable.databaseTableName
        self.idColumnName = idColumn.name
        self.pageInfo = Atomic(PagedData.PageInfo(pageSize: pageSize))
        self.joinSQL = joinSQL
        self.filterSQL = filterSQL
        self.groupSQL = groupSQL
        self.orderSQL = orderSQL
        self.dataQuery = dataQuery
        self.associatedRecords = associatedRecords
            .map { $0.settingPagedTableName(pagedTableName: pagedTable.databaseTableName) }
        self.onChangeUnsorted = onChangeUnsorted
        
        // Combine the various observed changes into a single set
        self.observedTableChangeTypes = observedChanges
            .reduce(into: [:]) { result, next in result[next.databaseTableName] = next }
        let allObservedChanges: [PagedData.ObservedChanges] = observedChanges
            .appending(contentsOf: associatedRecords.flatMap { $0.observedChanges })
        self.allObservedTableNames = allObservedChanges
            .map { $0.databaseTableName }
            .asSet()
        self.observedInserts = allObservedChanges
            .filter { $0.events.contains(.insert) }
            .map { $0.databaseTableName }
            .asSet()
        self.observedUpdateColumns = allObservedChanges
            .filter { $0.events.contains(.update) }
            .reduce(into: [:]) { (prev: inout [String: Set<String>], next: PagedData.ObservedChanges) in
                guard !next.columns.isEmpty else { return }
                
                prev[next.databaseTableName] = next.columns.asSet()
            }
        self.observedDeletes = allObservedChanges
            .filter { $0.events.contains(.delete) }
            .map { $0.databaseTableName }
            .asSet()
    }
    
    // MARK: - TransactionObserver
    
    public func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool {
        switch eventKind {
            case .insert(let tableName): return self.observedInserts.contains(tableName)
            case .delete(let tableName): return self.observedDeletes.contains(tableName)
                
            case .update(let tableName, let columnNames):
                return (self.observedUpdateColumns[tableName]?
                    .intersection(columnNames)
                    .isEmpty == false)
        }
    }
    
    public func databaseDidChange(with event: DatabaseEvent) {
        // This will get called whenever `observes(eventsOfKind:)` returns true and will
        // include all changes which occurred in the commit, so we need to ignore any
        // non-observed tables. Unfortunately we also won't know whether the changes to
        // observed tables are actually relevant yet, as changes only include table and
        // column info at this stage
        guard allObservedTableNames.contains(event.tableName) else { return }
        
        // When generating the tracked change we need to check if the change was a deletion
        // to a related table (if so then once the change is performed there won't be a way
        // to associate the deleted related record to the original, so we need to retrieve
        // the association in here)
        let trackedChange: PagedData.TrackedChange = {
            guard
                event.tableName != pagedTableName,
                event.kind == .delete,
                let observedChange: PagedData.ObservedChanges = observedTableChangeTypes[event.tableName],
                let joinToPagedType: SQL = observedChange.joinToPagedType
            else { return PagedData.TrackedChange(event: event) }
            
            // Retrieve the pagedRowId for the related value that is getting deleted
            let pagedRowIds: [Int64] = Storage.shared
                .read { db in
                    PagedData.pagedRowIdsForRelatedRowIds(
                        db,
                        tableName: event.tableName,
                        pagedTableName: pagedTableName,
                        relatedRowIds: [event.rowID],
                        joinToPagedType: joinToPagedType
                    )
                }
                .defaulting(to: [])
            
            return PagedData.TrackedChange(event: event, pagedRowIdsForRelatedDeletion: pagedRowIds)
        }()
        
        // The 'event' object only exists during this method call so we need to copy the
        // info from it, otherwise it will cease to exist after this method call finishes
        changesInCommit.mutate { $0.insert(trackedChange) }
    }
    
    /// We will process all updates which come through this method even if 'onChange' is null because if the UI stops observing and then starts
    /// again later we don't want to have missed any changes which happened while the UI wasn't subscribed (and doing a full re-query seems painful...)
    ///
    /// **Note:** This function is generally called within the DBWrite thread but we don't actually need write access to process the commit; in order
    /// to avoid blocking the DBWrite thread we dispatch to a serial `commitProcessingQueue` to process the incoming changes (in the past not doing
    /// so was resulting in hanging when there was a lot of activity happening)
    public func databaseDidCommit(_ db: Database) {
        // If there were no pending changes in the commit then do nothing
        guard !self.changesInCommit.wrappedValue.isEmpty else { return }
        
        // Since we can't be sure the behaviours of 'databaseDidChange' and 'databaseDidCommit' won't change in
        // the future we extract and clear the values in 'changesInCommit'; since it's 'Atomic<T>' this prevents
        // different threads from modifying the data in a way which would result in us missing a change
        var committedChanges: Set<PagedData.TrackedChange> = []
        
        self.changesInCommit.mutate { cachedChanges in
            committedChanges = cachedChanges
            cachedChanges.removeAll()
        }
        
        commitProcessingQueue.async { [weak self] in
            self?.processDatabaseCommit(committedChanges: committedChanges)
        }
    }
    
    private func processDatabaseCommit(committedChanges: Set<PagedData.TrackedChange>) {
        typealias AssociatedDataInfo = [(hasChanges: Bool, data: ErasedAssociatedRecord)]
        typealias UpdatedData = (cache: DataCache<T>, pageInfo: PagedData.PageInfo, hasChanges: Bool, associatedData: AssociatedDataInfo)
        
        // Store the instance variables locally to avoid unwrapping
        let dataCache: DataCache<T> = self.dataCache.wrappedValue
        let pageInfo: PagedData.PageInfo = self.pageInfo.wrappedValue
        let joinSQL: SQL? = self.joinSQL
        let orderSQL: SQL = self.orderSQL
        let filterSQL: SQL = self.filterSQL
        let associatedRecords: [ErasedAssociatedRecord] = self.associatedRecords
        let getAssociatedDataInfo: (Database, PagedData.PageInfo) -> AssociatedDataInfo = { db, updatedPageInfo in
            associatedRecords.map { associatedRecord in
                let hasChanges: Bool = associatedRecord.tryUpdateForDatabaseCommit(
                    db,
                    changes: committedChanges,
                    joinSQL: joinSQL,
                    orderSQL: orderSQL,
                    filterSQL: filterSQL,
                    pageInfo: updatedPageInfo
                )
                
                return (hasChanges, associatedRecord)
            }
        }
        
        // Determine if there were any direct or related data changes
        let directChanges: Set<PagedData.TrackedChange> = committedChanges
            .filter { $0.tableName == pagedTableName }
        let relatedChanges: [String: [PagedData.TrackedChange]] = committedChanges
            .filter { $0.tableName != pagedTableName }
            .filter { $0.kind != .delete }
            .reduce(into: [:]) { result, next in
                guard observedTableChangeTypes[next.tableName] != nil else { return }
                
                result[next.tableName] = (result[next.tableName] ?? []).appending(next)
            }
        let relatedDeletions: [PagedData.TrackedChange] = committedChanges
            .filter { $0.tableName != pagedTableName }
            .filter { $0.kind == .delete }
        
        // Process and retrieve the updated data
        let updatedData: UpdatedData = Storage.shared
            .read { db -> UpdatedData in
                // If there aren't any direct or related changes then early-out
                guard !directChanges.isEmpty || !relatedChanges.isEmpty || !relatedDeletions.isEmpty else {
                    return (dataCache, pageInfo, false, getAssociatedDataInfo(db, pageInfo))
                }
                
                // Store mutable copies of the dataCache and pageInfo for updating
                var updatedDataCache: DataCache<T> = dataCache
                var updatedPageInfo: PagedData.PageInfo = pageInfo
                let deletionChanges: [Int64] = directChanges
                    .filter { $0.kind == .delete }
                    .map { $0.rowId }
                let oldDataCount: Int = dataCache.count
                
                // First remove any items which have been deleted
                if !deletionChanges.isEmpty {
                    updatedDataCache = updatedDataCache.deleting(rowIds: deletionChanges)
                    
                    // Make sure there were actually changes
                    if updatedDataCache.count != oldDataCount {
                        let dataSizeDiff: Int = (updatedDataCache.count - oldDataCount)
                        
                        updatedPageInfo = PagedData.PageInfo(
                            pageSize: updatedPageInfo.pageSize,
                            pageOffset: updatedPageInfo.pageOffset,
                            currentCount: (updatedPageInfo.currentCount + dataSizeDiff),
                            totalCount: (updatedPageInfo.totalCount + dataSizeDiff)
                        )
                    }
                }
                
                // If there are no inserted/updated rows then early-out
                let changesToQuery: [PagedData.TrackedChange] = directChanges
                    .filter { $0.kind != .delete }
                
                guard !changesToQuery.isEmpty || !relatedChanges.isEmpty || !relatedDeletions.isEmpty else {
                    let associatedData: AssociatedDataInfo = getAssociatedDataInfo(db, updatedPageInfo)
                    return (updatedDataCache, updatedPageInfo, !deletionChanges.isEmpty, associatedData)
                }
                
                // Next we need to determine if any related changes were associated to the pagedData we are
                // observing; if they aren't (and there were no other direct changes) we can early-out
                let pagedRowIdsForRelatedChanges: Set<Int64> = {
                    guard !relatedChanges.isEmpty else { return [] }
                    
                    return relatedChanges
                        .reduce(into: []) { result, next in
                            guard
                                let observedChange: PagedData.ObservedChanges = observedTableChangeTypes[next.key],
                                let joinToPagedType: SQL = observedChange.joinToPagedType
                            else { return }
                            
                            let pagedRowIds: [Int64] = PagedData.pagedRowIdsForRelatedRowIds(
                                db,
                                tableName: next.key,
                                pagedTableName: pagedTableName,
                                relatedRowIds: Array(next.value.map { $0.rowId }.asSet()),
                                joinToPagedType: joinToPagedType
                            )
                            
                            result.append(contentsOf: pagedRowIds)
                        }
                        .asSet()
                }()
                
                guard !changesToQuery.isEmpty || !pagedRowIdsForRelatedChanges.isEmpty || !relatedDeletions.isEmpty else {
                    let associatedData: AssociatedDataInfo = getAssociatedDataInfo(db, updatedPageInfo)
                    return (updatedDataCache, updatedPageInfo, !deletionChanges.isEmpty, associatedData)
                }
                
                // Fetch the indexes of the rowIds so we can determine whether they should be added to the screen
                let directRowIds: Set<Int64> = changesToQuery.map { $0.rowId }.asSet()
                let pagedRowIdsForRelatedDeletions: Set<Int64> = relatedDeletions
                    .compactMap { $0.pagedRowIdsForRelatedDeletion }
                    .flatMap { $0 }
                    .asSet()
                let itemIndexes: [PagedData.RowIndexInfo] = PagedData.indexes(
                    db,
                    rowIds: Array(directRowIds),
                    tableName: pagedTableName,
                    requiredJoinSQL: joinSQL,
                    orderSQL: orderSQL,
                    filterSQL: filterSQL
                )
                let relatedChangeIndexes: [PagedData.RowIndexInfo] = PagedData.indexes(
                    db,
                    rowIds: Array(pagedRowIdsForRelatedChanges),
                    tableName: pagedTableName,
                    requiredJoinSQL: joinSQL,
                    orderSQL: orderSQL,
                    filterSQL: filterSQL
                )
                let relatedDeletionIndexes: [PagedData.RowIndexInfo] = PagedData.indexes(
                    db,
                    rowIds: Array(pagedRowIdsForRelatedDeletions),
                    tableName: pagedTableName,
                    requiredJoinSQL: joinSQL,
                    orderSQL: orderSQL,
                    filterSQL: filterSQL
                )
                
                // Determine if the indexes for the row ids should be displayed on the screen and remove any
                // which shouldn't - values less than 'currentCount' or if there is at least one value less than
                // 'currentCount' and the indexes are sequential (ie. more than the current loaded content was
                // added at once)
                func determineValidChanges(for indexInfo: [PagedData.RowIndexInfo]) -> [Int64] {
                    let indexes: [Int64] = Array(indexInfo
                        .map { $0.rowIndex }
                        .sorted()
                        .asSet())
                    let indexesAreSequential: Bool = (indexes.map { $0 - 1 }.dropFirst() == indexes.dropLast())
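                    // A hypothetical illustration of the check above: for indexes [3, 4, 5] the
                    // shifted array ([2, 3, 4].dropFirst() == [3, 4]) matches dropLast() == [3, 4],
                    // so the batch is sequential; for [3, 5, 6] the two arrays differ ([4, 5] vs
                    // [3, 5]) and the batch is treated as non-sequential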
                    let hasOneValidIndex: Bool = indexInfo.contains(where: { info -> Bool in
                        info.rowIndex >= updatedPageInfo.pageOffset && (
                            info.rowIndex < updatedPageInfo.currentCount || (
                                updatedPageInfo.currentCount < updatedPageInfo.pageSize &&
                                info.rowIndex <= (updatedPageInfo.pageOffset + updatedPageInfo.pageSize)
                            )
                        )
                    })
                    
                    return (indexesAreSequential && hasOneValidIndex ?
                        indexInfo.map { $0.rowId } :
                        indexInfo
                            .filter { info -> Bool in
                                info.rowIndex >= updatedPageInfo.pageOffset && (
                                    info.rowIndex < updatedPageInfo.currentCount || (
                                        updatedPageInfo.currentCount < updatedPageInfo.pageSize &&
                                        info.rowIndex <= (updatedPageInfo.pageOffset + updatedPageInfo.pageSize)
                                    )
                                )
                            }
                            .map { info -> Int64 in info.rowId }
                    )
                }
                let validChangeRowIds: [Int64] = determineValidChanges(for: itemIndexes)
                let validRelatedChangeRowIds: [Int64] = determineValidChanges(for: relatedChangeIndexes)
                let validRelatedDeletionRowIds: [Int64] = determineValidChanges(for: relatedDeletionIndexes)
                let countBefore: Int = itemIndexes.filter { $0.rowIndex < updatedPageInfo.pageOffset }.count
                
                // If the number of indexes doesn't match the number of rowIds then it means something changed
                // resulting in an item being filtered out
                func performRemovalsIfNeeded(for rowIds: Set<Int64>, indexes: [PagedData.RowIndexInfo]) {
                    let uniqueIndexes: Set<Int64> = indexes.map { $0.rowId }.asSet()
                    
                    // If they have the same count then nothing was filtered out so do nothing
                    guard rowIds.count != uniqueIndexes.count else { return }
                    
                    // Otherwise something was probably removed so try to remove it from the cache
                    let rowIdsRemoved: Set<Int64> = rowIds.subtracting(uniqueIndexes)
                    let preDeletionCount: Int = updatedDataCache.count
                    updatedDataCache = updatedDataCache.deleting(rowIds: Array(rowIdsRemoved))
                    
                    // Lastly make sure there were actually changes before updating the page info
                    guard updatedDataCache.count != preDeletionCount else { return }
                    
                    let dataSizeDiff: Int = (updatedDataCache.count - preDeletionCount)
                    
                    updatedPageInfo = PagedData.PageInfo(
                        pageSize: updatedPageInfo.pageSize,
                        pageOffset: updatedPageInfo.pageOffset,
                        currentCount: (updatedPageInfo.currentCount + dataSizeDiff),
                        totalCount: (updatedPageInfo.totalCount + dataSizeDiff)
                    )
                }
                
                // Actually perform any required removals
                performRemovalsIfNeeded(for: directRowIds, indexes: itemIndexes)
                performRemovalsIfNeeded(for: pagedRowIdsForRelatedChanges, indexes: relatedChangeIndexes)
                performRemovalsIfNeeded(for: pagedRowIdsForRelatedDeletions, indexes: relatedDeletionIndexes)
                
                // Update the offset and totalCount even if the rows are outside of the current page (we need
                // to in order to ensure the 'load more' sections are accurate)
                updatedPageInfo = PagedData.PageInfo(
                    pageSize: updatedPageInfo.pageSize,
                    pageOffset: (updatedPageInfo.pageOffset + countBefore),
                    currentCount: updatedPageInfo.currentCount,
                    totalCount: (
                        updatedPageInfo.totalCount +
                        changesToQuery
                            .filter { $0.kind == .insert }
                            .filter { validChangeRowIds.contains($0.rowId) }
                            .count
                    )
                )
                
                // If there are no valid row ids then early-out (at this point the pageInfo would have changed
                // so we want to flag 'hasChanges' as true)
                guard !validChangeRowIds.isEmpty || !validRelatedChangeRowIds.isEmpty || !validRelatedDeletionRowIds.isEmpty else {
                    let associatedData: AssociatedDataInfo = getAssociatedDataInfo(db, updatedPageInfo)
                    return (updatedDataCache, updatedPageInfo, true, associatedData)
                }
                
                // Fetch the inserted/updated rows
                let targetRowIds: [Int64] = Array((validChangeRowIds + validRelatedChangeRowIds + validRelatedDeletionRowIds).asSet())
                let updatedItems: [T] = {
                    do { return try dataQuery(targetRowIds).fetchAll(db) }
                    catch {
                        SNLog("[PagedDatabaseObserver] Error fetching data during change: \(error)")
                        return []
                    }
                }()
                
                updatedDataCache = updatedDataCache.upserting(items: updatedItems)
                
                // Update the currentCount for the upserted data
                let dataSizeDiff: Int = (updatedDataCache.count - oldDataCount)
                updatedPageInfo = PagedData.PageInfo(
                    pageSize: updatedPageInfo.pageSize,
                    pageOffset: updatedPageInfo.pageOffset,
                    currentCount: (updatedPageInfo.currentCount + dataSizeDiff),
                    totalCount: updatedPageInfo.totalCount
                )
                
                // Return the final updated data
                let associatedData: AssociatedDataInfo = getAssociatedDataInfo(db, updatedPageInfo)
                return (updatedDataCache, updatedPageInfo, true, associatedData)
            }
            .defaulting(to: (cache: dataCache, pageInfo: pageInfo, hasChanges: false, associatedData: []))
        
        // Now that we have all of the changes, check if there were actually any changes
        guard updatedData.hasChanges || updatedData.associatedData.contains(where: { hasChanges, _ in hasChanges }) else {
            return
        }
        
        // If the associated data changed then update the final data cache with the updated associated data
        var finalUpdatedDataCache: DataCache<T> = updatedData.cache
        
        updatedData.associatedData.forEach { hasChanges, associatedData in
            guard updatedData.hasChanges || hasChanges else { return }
            
            finalUpdatedDataCache = associatedData.updateAssociatedData(to: finalUpdatedDataCache)
        }
        
        // Update the cache, pageInfo and the change callback
        self.dataCache.mutate { $0 = finalUpdatedDataCache }
        self.pageInfo.mutate { $0 = updatedData.pageInfo }
        
        // Trigger the unsorted change callback (the actual UI update triggering should eventually be run on
        // the main thread via the `PagedData.processAndTriggerUpdates` function)
        self.onChangeUnsorted(finalUpdatedDataCache.values, updatedData.pageInfo)
    }
    
    public func databaseDidRollback(_ db: Database) {}
    
    // MARK: - Functions
    
    fileprivate func load(_ target: PagedData.PageInfo.InternalTarget) {
        // Only allow a single page load at a time
        guard !self.isLoadingMoreData.wrappedValue else { return }
        
        // Prevent more fetching until we have completed adding the page
        self.isLoadingMoreData.mutate { $0 = true }
        
        let currentPageInfo: PagedData.PageInfo = self.pageInfo.wrappedValue
        
        if case .initialPageAround(_) = target, currentPageInfo.currentCount > 0 {
            SNLog("Unable to load initialPageAround if there is already data")
            self.isLoadingMoreData.mutate { $0 = false } // Reset the flag so future loads aren't blocked
            return
        }
        
        // Store locally to avoid giant capture code
        let pagedTableName: String = self.pagedTableName
        let idColumnName: String = self.idColumnName
        let joinSQL: SQL? = self.joinSQL
        let filterSQL: SQL = self.filterSQL
        let groupSQL: SQL? = self.groupSQL
        let orderSQL: SQL = self.orderSQL
        let dataQuery: ([Int64]) -> any FetchRequest<T> = self.dataQuery
        
        let loadedPage: (data: [T]?, pageInfo: PagedData.PageInfo, failureCallback: (() -> ())?)? = Storage.shared.read { [weak self] db in
            typealias QueryInfo = (limit: Int, offset: Int, updatedCacheOffset: Int)
            let totalCount: Int = PagedData.totalCount(
                db,
                tableName: pagedTableName,
                requiredJoinSQL: joinSQL,
                filterSQL: filterSQL
            )
            
            let (queryInfo, callback): (QueryInfo?, (() -> ())?) = {
                switch target {
                    case .initialPageAround(let targetId):
                        // If we want to focus on a specific item then we need to find its index in
                        // the queried data
                        let maybeIndex: Int? = PagedData.index(
                            db,
                            for: targetId,
                            tableName: pagedTableName,
                            idColumn: idColumnName,
                            requiredJoinSQL: joinSQL,
                            orderSQL: orderSQL,
                            filterSQL: filterSQL
                        )
                        
                        // If we couldn't find the targetId then just load the first page
                        guard let targetIndex: Int = maybeIndex else {
                            return ((currentPageInfo.pageSize, 0, 0), nil)
                        }
                        
                        let updatedOffset: Int = {
                            // If the focused item is within the first or last half of the page
                            // then we still want to retrieve a full page so calculate the offset
                            // needed to do so (snapping to the ends)
                            let halfPageSize: Int = Int(floor(Double(currentPageInfo.pageSize) / 2))
                            
                            guard targetIndex > halfPageSize else { return 0 }
                            guard targetIndex < (totalCount - halfPageSize) else {
                                return max(0, (totalCount - currentPageInfo.pageSize))
                            }
                            
                            return (targetIndex - halfPageSize)
                        }()
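                        // A hypothetical illustration of the offset maths above: with pageSize 20,
                        // totalCount 200 and targetIndex 50, halfPageSize is 10 and updatedOffset
                        // is 40, so rows 40-59 are loaded with the target roughly in the middle; a
                        // targetIndex of 5 snaps to offset 0 and one of 195 snaps to offset 180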
                        
                        return ((currentPageInfo.pageSize, updatedOffset, updatedOffset), nil)
                        
                    case .pageBefore:
                        let updatedOffset: Int = max(0, (currentPageInfo.pageOffset - currentPageInfo.pageSize))
                        
                        return (
                            (
                                currentPageInfo.pageSize,
                                updatedOffset,
                                updatedOffset
                            ),
                            nil
                        )
                        
                    case .pageAfter:
                        return (
                            (
                                currentPageInfo.pageSize,
                                (currentPageInfo.pageOffset + currentPageInfo.currentCount),
                                currentPageInfo.pageOffset
                            ),
                            nil
                        )
                        
                    case .untilInclusive(let targetId, let padding):
                        // If we want to focus on a specific item then we need to find its index in
                        // the queried data
                        let maybeIndex: Int? = PagedData.index(
                            db,
                            for: targetId,
                            tableName: pagedTableName,
                            idColumn: idColumnName,
                            requiredJoinSQL: joinSQL,
                            orderSQL: orderSQL,
                            filterSQL: filterSQL
                        )
                        let cacheCurrentEndIndex: Int = (currentPageInfo.pageOffset + currentPageInfo.currentCount)
                        
                        // If we couldn't find the targetId or it's already in the cache then do nothing
                        guard
                            let targetIndex: Int = maybeIndex.map({ max(0, min(totalCount, $0)) }),
                            (
                                targetIndex < currentPageInfo.pageOffset ||
                                targetIndex >= cacheCurrentEndIndex
                            )
                        else { return (nil, nil) }
                        
                        // If the target is before the cached data then load before
                        if targetIndex < currentPageInfo.pageOffset {
                            let finalIndex: Int = max(0, (targetIndex - abs(padding)))
                            
                            return (
                                (
                                    (currentPageInfo.pageOffset - finalIndex),
                                    finalIndex,
                                    finalIndex
                                ),
                                nil
                            )
                        }
                        
                        // Otherwise load after (targetIndex is 0-indexed so we need to add 1 for this to
                        // have the correct 'limit' value)
                        let finalIndex: Int = min(totalCount, (targetIndex + 1 + abs(padding)))
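                        // A hypothetical illustration: with rows 0-19 cached (cacheCurrentEndIndex
                        // 20), targetIndex 30 and padding 5, finalIndex is 36, so the query below
                        // loads 16 rows (36 - 20) starting at offset 20 to bridge the gap inclusively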
                        
                        return (
                            (
                                (finalIndex - cacheCurrentEndIndex),
                                cacheCurrentEndIndex,
                                currentPageInfo.pageOffset
                            ),
                            nil
                        )
                        
                    case .jumpTo(let targetId, let paddingForInclusive):
                        // If we want to focus on a specific item then we need to find its index in
                        // the queried data
                        let maybeIndex: Int? = PagedData.index(
                            db,
                            for: targetId,
                            tableName: pagedTableName,
                            idColumn: idColumnName,
                            requiredJoinSQL: joinSQL,
                            orderSQL: orderSQL,
                            filterSQL: filterSQL
                        )
                        let cacheCurrentEndIndex: Int = (currentPageInfo.pageOffset + currentPageInfo.currentCount)
                        
                        // If we couldn't find the targetId or it's already in the cache then do nothing
                        guard
                            let targetIndex: Int = maybeIndex.map({ max(0, min(totalCount, $0)) }),
                            (
                                targetIndex < currentPageInfo.pageOffset ||
                                targetIndex >= cacheCurrentEndIndex
                            )
                        else { return (nil, nil) }
                        
                        // If the target id is within a single page of the current cached data
                        // then trigger the `untilInclusive` behaviour instead
                        guard
                            abs(targetIndex - cacheCurrentEndIndex) > currentPageInfo.pageSize ||
                            abs(targetIndex - currentPageInfo.pageOffset) > currentPageInfo.pageSize
                        else {
                            let callback: () -> () = {
                                self?.load(.untilInclusive(id: targetId, padding: paddingForInclusive))
                            }
                            return (nil, callback)
                        }
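                        
                        // A hypothetical illustration: with pageSize 20 and rows 40-49 cached, a
                        // target at index 55 is within a page of both cache boundaries so the guard
                        // above falls back to the cheaper 'untilInclusive' load, whereas a target
                        // at index 150 continues on to the reset logic below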
                        
                        // If the targetId is further than 1 pageSize away then discard the current
                        // cached data and trigger a fresh `initialPageAround`
                        let callback: () -> () = {
                            self?.dataCache.mutate { $0 = DataCache() }
                            self?.associatedRecords.forEach { $0.clearCache(db) }
                            self?.pageInfo.mutate {
                                $0 = PagedData.PageInfo(
                                    pageSize: currentPageInfo.pageSize,
                                    pageOffset: 0,
                                    currentCount: 0,
                                    totalCount: 0
                                )
                            }
                            self?.load(.initialPageAround(id: targetId))
                        }
                        
                        return (nil, callback)
                        
                    case .reloadCurrent:
                        return (
                            (
                                currentPageInfo.currentCount,
                                currentPageInfo.pageOffset,
                                currentPageInfo.pageOffset
                            ),
                            nil
                        )
                }
            }()
            
            // If there is no queryInfo then we already have the data we need so
            // early-out (may as well update the 'totalCount' since it may be relevant)
            guard let queryInfo: QueryInfo = queryInfo else {
                return (
                    nil,
                    PagedData.PageInfo(
                        pageSize: currentPageInfo.pageSize,
                        pageOffset: currentPageInfo.pageOffset,
                        currentCount: currentPageInfo.currentCount,
                        totalCount: totalCount
                    ),
                    callback
                )
            }
            
            // Fetch the desired data
            let pageRowIds: [Int64]
            let newData: [T]
            let updatedLimitInfo: PagedData.PageInfo
            
            do {
                pageRowIds = try PagedData.rowIds(
                    db,
                    tableName: pagedTableName,
                    requiredJoinSQL: joinSQL,
                    filterSQL: filterSQL,
                    groupSQL: groupSQL,
                    orderSQL: orderSQL,
                    limit: queryInfo.limit,
                    offset: queryInfo.offset
                )
                newData = try dataQuery(pageRowIds).fetchAll(db)
                updatedLimitInfo = PagedData.PageInfo(
                    pageSize: currentPageInfo.pageSize,
                    pageOffset: queryInfo.updatedCacheOffset,
                    currentCount: {
                        switch target {
                            case .reloadCurrent: return currentPageInfo.currentCount
                            default: return (currentPageInfo.currentCount + newData.count)
                        }
                    }(),
                    totalCount: totalCount
                )
                
                // Update the associatedRecords for the newly retrieved data
                let newDataRowIds: [Int64] = newData.map { $0.rowId }
                try self?.associatedRecords.forEach { record in
                    record.updateCache(
                        db,
                        rowIds: try PagedData.associatedRowIds(
                            db,
                            tableName: record.databaseTableName,
                            pagedTableName: pagedTableName,
                            pagedTypeRowIds: newDataRowIds,
                            joinToPagedType: record.joinToPagedType
                        ),
                        hasOtherChanges: false
                    )
                }
            }
            catch {
                SNLog("[PagedDatabaseObserver] Error loading data: \(error)")
                throw error
            }
            
            return (newData, updatedLimitInfo, nil)
        }
        
        // Unwrap the updated data
        guard
            let loadedPageData: [T] = loadedPage?.data,
            let updatedPageInfo: PagedData.PageInfo = loadedPage?.pageInfo
        else {
            // It's possible to get updated page info without having updated data; in that case
            // we do want to update the cache but probably don't need to trigger the change callback
            if let updatedPageInfo: PagedData.PageInfo = loadedPage?.pageInfo {
                self.pageInfo.mutate { $0 = updatedPageInfo }
            }
            
            self.isLoadingMoreData.mutate { $0 = false }
            loadedPage?.failureCallback?()
            return
        }
        
        // Attach any associated data to the loadedPageData
        var associatedLoadedData: DataCache<T> = DataCache(items: loadedPageData)
        
        self.associatedRecords.forEach { record in
            associatedLoadedData = record.updateAssociatedData(to: associatedLoadedData)
        }
        
        // Update the cache and pageInfo
        self.dataCache.mutate { $0 = $0.upserting(items: associatedLoadedData.values) }
        self.pageInfo.mutate { $0 = updatedPageInfo }
        
        let triggerUpdates: () -> () = { [weak self, dataCache = self.dataCache.wrappedValue] in
            self?.onChangeUnsorted(dataCache.values, updatedPageInfo)
            self?.isLoadingMoreData.mutate { $0 = false }
        }
        
        // Make sure the updates run on the main thread
        guard Thread.isMainThread else {
            DispatchQueue.main.async { triggerUpdates() }
            return
        }
        
        triggerUpdates()
    }
    
    public func reload() {
        self.load(.reloadCurrent)
    }
}

// MARK: - Convenience

public extension PagedDatabaseObserver {
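    /// A hypothetical example (`messageId` is a placeholder): jump straight to a specific
    /// record and then page backwards from it:
    /// ```
    /// observer.load(.jumpTo(id: messageId, paddingForInclusive: 5))
    /// observer.load(.pageBefore)
    /// ```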
    func load(_ target: PagedData.PageInfo.Target<ObservedTable.ID>) where ObservedTable.ID: SQLExpressible {
        self.load(target.internalTarget)
    }
    
    func load<ID>(_ target: PagedData.PageInfo.Target<ID>) where ObservedTable.ID == Optional<ID>, ID: SQLExpressible {
        self.load(target.internalTarget)
    }
}

// MARK: - FetchableRecordWithRowId

public protocol FetchableRecordWithRowId: FetchableRecord {
    var rowId: Int64 { get }
}

// MARK: - ErasedAssociatedRecord

public protocol ErasedAssociatedRecord {
    var databaseTableName: String { get }
    var pagedTableName: String { get }
    var observedChanges: [PagedData.ObservedChanges] { get }
    var joinToPagedType: SQL { get }
    
    func settingPagedTableName(pagedTableName: String) -> Self
    func tryUpdateForDatabaseCommit(
        _ db: Database,
        changes: Set<PagedData.TrackedChange>,
        joinSQL: SQL?,
        orderSQL: SQL,
        filterSQL: SQL,
        pageInfo: PagedData.PageInfo
    ) -> Bool
    @discardableResult func updateCache(_ db: Database, rowIds: [Int64], hasOtherChanges: Bool) -> Bool
    func clearCache(_ db: Database)
    func updateAssociatedData<O>(to unassociatedCache: DataCache<O>) -> DataCache<O>
}

// MARK: - DataCache

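/// A copy-on-write style cache keyed by rowId with a secondary id-to-rowId lookup; a
/// hypothetical usage sketch (the `MessageViewModel` type and `fetchedItems` are placeholders):
/// ```
/// var cache: DataCache<MessageViewModel> = DataCache()
/// cache = cache.upserting(items: fetchedItems)  // insert/replace values keyed by rowId
/// cache = cache.deleting(rowIds: [12, 14])      // drop values for deleted rows
/// let visibleValues: [MessageViewModel] = cache.values
/// ```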
public struct DataCache<T: FetchableRecordWithRowId & Identifiable> {
    /// This is a map of `[RowId: Value]`
    public let data: [Int64: T]
    
    /// This is a map of `[(Identifiable)id: RowId]` and can be used to find the RowId for
    /// a cached value given its `Identifiable` `id` value
    public let lookup: [AnyHashable: Int64]
    
    public var count: Int { data.count }
    public var values: [T] { Array(data.values) }
    
    // MARK: - Initialization
    
    public init(
        data: [Int64: T] = [:],
        lookup: [AnyHashable: Int64] = [:]
    ) {
        self.data = data
        self.lookup = lookup
    }
    
    fileprivate init(items: [T]) {
        self = DataCache().upserting(items: items)
    }
    
    // MARK: - Functions
    
    public func deleting(rowIds: [Int64]) -> DataCache<T> {
        var updatedData: [Int64: T] = self.data
        var updatedLookup: [AnyHashable: Int64] = self.lookup
        
        rowIds.forEach { rowId in
            if let cachedItem: T = updatedData.removeValue(forKey: rowId) {
                updatedLookup.removeValue(forKey: cachedItem.id)
            }
        }
        
        return DataCache(
            data: updatedData,
            lookup: updatedLookup
        )
    }
    
    public func upserting(_ item: T) -> DataCache<T> {
        return upserting(items: [item])
    }
    
    public func upserting(items: [T]) -> DataCache<T> {
        guard !items.isEmpty else { return self }
        
        var updatedData: [Int64: T] = self.data
        var updatedLookup: [AnyHashable: Int64] = self.lookup
        
        items.forEach { item in
            updatedData[item.rowId] = item
            updatedLookup[item.id] = item.rowId
        }
        
        return DataCache(
            data: updatedData,
            lookup: updatedLookup
        )
    }
}

// MARK: - PagedData

public enum PagedData {
    public static let autoLoadNextPageDelay: DispatchTimeInterval = .milliseconds(400)
    
    // MARK: - PageInfo
    
    public struct PageInfo {
        /// This type is identical to the 'Target' type but has its 'SQLExpressible' requirement removed
        fileprivate enum InternalTarget {
            case initialPageAround(id: SQLExpression)
            case pageBefore
            case pageAfter
            case untilInclusive(id: SQLExpression, padding: Int)
            case jumpTo(id: SQLExpression, paddingForInclusive: Int)
            case reloadCurrent
        }
        
        public enum Target<ID: SQLExpressible> {
            /// This will attempt to load a page of data around a specified id
            ///
            /// **Note:** This target will only work if there is no other data in the cache
            case initialPageAround(id: ID)
            
            /// This will attempt to load a page of data before the first item in the cache
            case pageBefore
            
            /// This will attempt to load a page of data after the last item in the cache
            case pageAfter
            
            /// This will attempt to load all data between what is currently in the cache until the
            /// specified id (plus the padding amount)
            ///
            /// **Note:** If the id is already within the cache then this will do nothing (even if
            /// the padding would mean more data should be loaded)
            case untilInclusive(id: ID, padding: Int)
            
            /// This will jump to the specified id, loading a page around it and clearing out any
            /// data that was previously cached
            ///
            /// **Note:** If the id is within 1 pageSize of the currently cached data then this
            /// will behave as per the `untilInclusive(id:padding:)` type
            case jumpTo(id: ID, paddingForInclusive: Int)
            
            fileprivate var internalTarget: InternalTarget {
                switch self {
                    case .initialPageAround(let id): return .initialPageAround(id: id.sqlExpression)
                    case .pageBefore: return .pageBefore
                    case .pageAfter: return .pageAfter
                    case .untilInclusive(let id, let padding):
                        return .untilInclusive(id: id.sqlExpression, padding: padding)
                        
                    case .jumpTo(let id, let paddingForInclusive):
                        return .jumpTo(id: id.sqlExpression, paddingForInclusive: paddingForInclusive)
                }
            }
        }
        
        public let pageSize: Int
        public let pageOffset: Int
        public let currentCount: Int
        public let totalCount: Int
        
        // MARK: - Initialization
        
        public init(
            pageSize: Int,
            pageOffset: Int = 0,
            currentCount: Int = 0,
            totalCount: Int = 0
        ) {
            self.pageSize = pageSize
            self.pageOffset = pageOffset
            self.currentCount = currentCount
            self.totalCount = totalCount
        }
    }
    
    // MARK: - ObservedChanges
    
    /// This type contains the information needed to define what changes should be included when observing
    /// changes to a database
    ///
    /// - Parameters:
    ///   - table: The table whose changes should be observed
    ///   - events: The database events which should be observed
    ///   - columns: The specific columns which should trigger changes (**Note:** These only apply to `update` changes)
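    ///
    /// For example, a hypothetical observation of a `Message` table's `body` column which also
    /// defines how a `message` row joins back to a paged `thread` table (both tables and the
    /// SQL are placeholders):
    /// ```
    /// PagedData.ObservedChanges(
    ///     table: Message.self,
    ///     columns: [.body],
    ///     joinToPagedType: SQL("JOIN message ON message.\"threadId\" = thread.\"id\"")
    /// )
    /// ```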
    public struct ObservedChanges {
        public let databaseTableName: String
        public let events: [DatabaseEvent.Kind]
        public let columns: [String]
        public let joinToPagedType: SQL?
        
        public init<T: TableRecord & ColumnExpressible>(
            table: T.Type,
            events: [DatabaseEvent.Kind] = [.insert, .update, .delete],
            columns: [T.Columns],
            joinToPagedType: SQL? = nil
        ) {
            self.databaseTableName = table.databaseTableName
            self.events = events
            self.columns = columns.map { $0.name }
            self.joinToPagedType = joinToPagedType
        }
    }
    
    // MARK: - TrackedChange
    
    public struct TrackedChange: Hashable {
        let tableName: String
        let kind: DatabaseEvent.Kind
        let rowId: Int64
        let pagedRowIdsForRelatedDeletion: [Int64]?
        
        init(event: DatabaseEvent, pagedRowIdsForRelatedDeletion: [Int64]? = nil) {
            self.tableName = event.tableName
            self.kind = event.kind
            self.rowId = event.rowID
            self.pagedRowIdsForRelatedDeletion = pagedRowIdsForRelatedDeletion
        }
    }
    
    fileprivate struct RowIndexInfo: Decodable, FetchableRecord {
        let rowId: Int64
        let rowIndex: Int64
    }
    
    // MARK: - Convenience Functions
    
    // FIXME: Would be good to clean this up further in the future (should be able to do more processing on BG threads)
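    /// A hypothetical call site (the section types and stored properties are placeholders); this
    /// is typically invoked from the `onChangeUnsorted` callback once the updated data has been
    /// sorted into sections:
    /// ```
    /// PagedData.processAndTriggerUpdates(
    ///     updatedData: sortedSections,
    ///     currentDataRetriever: { [weak self] in self?.currentSections },
    ///     onDataChange: onDataChange,
    ///     onUnobservedDataChange: { [weak self] updatedData, changeset in
    ///         self?.unobservedChanges = (updatedData, changeset)
    ///     }
    /// )
    /// ```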
    public static func processAndTriggerUpdates<SectionModel: DifferentiableSection>(
        updatedData: [SectionModel]?,
        currentDataRetriever: @escaping (() -> [SectionModel]?),
        onDataChange: (([SectionModel], StagedChangeset<[SectionModel]>) -> ())?,
        onUnobservedDataChange: @escaping (([SectionModel], StagedChangeset<[SectionModel]>) -> Void)
    ) {
        guard let updatedData: [SectionModel] = updatedData else { return }
        
        // Note: While it would be nice to generate the changeset on a background thread it introduces
        // a multi-threading issue where a data change can come in while the table is processing multiple
        // updates resulting in the data being in a partially updated state (which makes the subsequent
        // table reload crash due to inconsistent state)
        let performUpdates = {
            guard let currentData: [SectionModel] = currentDataRetriever() else { return }
            
            let changeset: StagedChangeset<[SectionModel]> = StagedChangeset(
                source: currentData,
                target: updatedData
            )
            
            /// If we have the callback then trigger it, otherwise just store the changes to be sent to the callback if we ever
            /// start observing again (when we have the callback it needs to do the data updating as it's tied to UI updates
            /// and can cause crashes if not updated in the correct order)
            ///
            /// **Note:** We do this even if the 'changeset' is empty because if this change reverts a previous change we
            /// need to ensure the `onUnobservedDataChange` gets cleared so it doesn't end up in an invalid state
            guard let onDataChange: (([SectionModel], StagedChangeset<[SectionModel]>) -> ()) = onDataChange else {
                onUnobservedDataChange(updatedData, changeset)
                return
            }
            
            // No need to do anything if there were no changes
            guard !changeset.isEmpty else { return }
            
            onDataChange(updatedData, changeset)
        }
        
        // No need to dispatch to the next run loop if we are already on the main thread
        guard !Thread.isMainThread else {
            performUpdates()
            return
        }
        
        // Run any changes on the main thread (as they will generally trigger UI updates)
        DispatchQueue.main.async {
            performUpdates()
        }
    }
    
    // MARK: - Internal Functions
    
    fileprivate static func totalCount(
        _ db: Database,
        tableName: String,
        requiredJoinSQL: SQL? = nil,
        filterSQL: SQL
    ) -> Int {
        let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
        let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
        let request: SQLRequest<Int> = """
            SELECT \(tableNameLiteral).rowId
            FROM \(tableNameLiteral)
            \(finalJoinSQL)
            WHERE \(filterSQL)
        """
        
        return (try? request.fetchCount(db))
            .defaulting(to: 0)
    }
    
    fileprivate static func rowIds(
        _ db: Database,
        tableName: String,
        requiredJoinSQL: SQL? = nil,
        filterSQL: SQL,
        groupSQL: SQL? = nil,
        orderSQL: SQL,
        limit: Int,
        offset: Int
    ) throws -> [Int64] {
        let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
        let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
        let finalGroupSQL: SQL = (groupSQL ?? "")
        let request: SQLRequest<Int64> = """
            SELECT \(tableNameLiteral).rowId
            FROM \(tableNameLiteral)
            \(finalJoinSQL)
            WHERE \(filterSQL)
            \(finalGroupSQL)
            ORDER BY \(orderSQL)
            LIMIT \(limit) OFFSET \(offset)
        """
        
        return try request.fetchAll(db)
    }
    
    fileprivate static func index<ID: SQLExpressible>(
        _ db: Database,
        for id: ID,
        tableName: String,
        idColumn: String,
        requiredJoinSQL: SQL? = nil,
        orderSQL: SQL,
        filterSQL: SQL
    ) -> Int? {
        let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
        let idColumnLiteral: SQL = SQL(stringLiteral: idColumn)
        let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
        let request: SQLRequest<Int> = """
            SELECT
                (data.rowIndex - 1) AS rowIndex -- Converting from 1-Indexed to 0-indexed
            FROM (
                SELECT
                    \(tableNameLiteral).\(idColumnLiteral) AS \(idColumnLiteral),
                    ROW_NUMBER() OVER (ORDER BY \(orderSQL)) AS rowIndex
                FROM \(tableNameLiteral)
                \(finalJoinSQL)
                WHERE \(filterSQL)
            ) AS data
            WHERE \(SQL("data.\(idColumnLiteral) = \(id)"))
        """
        
        return try? request.fetchOne(db)
    }
    
    /// Returns the indexes the requested rowIds will have in the paged query
    ///
    /// **Note:** If the `associatedRecord` is null then the index for the rowId of the paged data type will be returned
    fileprivate static func indexes(
        _ db: Database,
        rowIds: [Int64],
        tableName: String,
        requiredJoinSQL: SQL? = nil,
        orderSQL: SQL,
        filterSQL: SQL
    ) -> [RowIndexInfo] {
        guard !rowIds.isEmpty else { return [] }
        
        let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
        let finalJoinSQL: SQL = (requiredJoinSQL ?? "")
        let request: SQLRequest<RowIndexInfo> = """
            SELECT
                data.rowId AS rowId,
                (data.rowIndex - 1) AS rowIndex -- Converting from 1-Indexed to 0-indexed
            FROM (
                SELECT
                    \(tableNameLiteral).rowid AS rowid,
                    ROW_NUMBER() OVER (ORDER BY \(orderSQL)) AS rowIndex
                FROM \(tableNameLiteral)
                \(finalJoinSQL)
                WHERE \(filterSQL)
            ) AS data
            WHERE \(SQL("data.rowid IN \(rowIds)"))
        """
        
        return (try? request.fetchAll(db))
            .defaulting(to: [])
    }
    
    /// Returns the rowIds for the associated types based on the specified pagedTypeRowIds
    fileprivate static func associatedRowIds(
        _ db: Database,
        tableName: String,
        pagedTableName: String,
        pagedTypeRowIds: [Int64],
        joinToPagedType: SQL
    ) throws -> [Int64] {
        guard !pagedTypeRowIds.isEmpty else { return [] }
        
        let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
        let pagedTableNameLiteral: SQL = SQL(stringLiteral: pagedTableName)
        let request: SQLRequest<Int64> = """
            SELECT \(tableNameLiteral).rowid AS rowid
            FROM \(pagedTableNameLiteral)
            \(joinToPagedType)
            WHERE \(pagedTableNameLiteral).rowId IN \(pagedTypeRowIds)
        """
        
        return try request.fetchAll(db)
    }
    
    /// Returns the rowIds for the paged type based on the specified relatedRowIds
    fileprivate static func pagedRowIdsForRelatedRowIds(
        _ db: Database,
        tableName: String,
        pagedTableName: String,
        relatedRowIds: [Int64],
        joinToPagedType: SQL
    ) -> [Int64] {
        guard !relatedRowIds.isEmpty else { return [] }
        
        let tableNameLiteral: SQL = SQL(stringLiteral: tableName)
        let pagedTableNameLiteral: SQL = SQL(stringLiteral: pagedTableName)
        let request: SQLRequest<Int64> = """
            SELECT \(pagedTableNameLiteral).rowid AS rowid
            FROM \(pagedTableNameLiteral)
            \(joinToPagedType)
            WHERE \(tableNameLiteral).rowId IN \(relatedRowIds)
        """
        
        return (try? request.fetchAll(db))
            .defaulting(to: [])
    }
}

// MARK: - AssociatedRecord

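/// A hypothetical example which tracks a `Reaction` table against paged `MessageViewModel`
/// data (all of the types, queries and SQL here are placeholders rather than part of this module):
/// ```
/// AssociatedRecord<ReactionViewModel, MessageViewModel>(
///     trackedAgainst: Reaction.self,
///     observedChanges: [
///         PagedData.ObservedChanges(table: Reaction.self, columns: [.emoji])
///     ],
///     dataQuery: { additionalFilters in ReactionViewModel.query(filters: additionalFilters) },
///     joinToPagedType: SQL("JOIN reaction ON reaction.\"messageId\" = message.\"id\""),
///     associateData: { reactionCache, pagedCache in
///         // Merge the cached reaction values into their paged message values here
///         pagedCache
///     }
/// )
/// ```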
public class AssociatedRecord<T, PagedType>: ErasedAssociatedRecord where T: FetchableRecordWithRowId & Identifiable, PagedType: FetchableRecordWithRowId & Identifiable {
    public let databaseTableName: String
    public private(set) var pagedTableName: String = ""
    public let observedChanges: [PagedData.ObservedChanges]
    public let joinToPagedType: SQL
    
    fileprivate let dataCache: Atomic<DataCache<T>> = Atomic(DataCache())
    fileprivate let dataQuery: (SQL?) -> any FetchRequest<T>
    fileprivate let associateData: (DataCache<T>, DataCache<PagedType>) -> DataCache<PagedType>
    
    // MARK: - Initialization
    
    public init<Table: TableRecord>(
        trackedAgainst: Table.Type,
        observedChanges: [PagedData.ObservedChanges],
        dataQuery: @escaping (SQL?) -> any FetchRequest<T>,
        joinToPagedType: SQL,
        associateData: @escaping (DataCache<T>, DataCache<PagedType>) -> DataCache<PagedType>
    ) {
        self.databaseTableName = trackedAgainst.databaseTableName
        self.observedChanges = observedChanges
        self.dataQuery = dataQuery
        self.joinToPagedType = joinToPagedType
        self.associateData = associateData
    }
    
    // MARK: - AssociatedRecord
    
    public func settingPagedTableName(pagedTableName: String) -> Self {
        self.pagedTableName = pagedTableName
        return self
    }
    
    public func tryUpdateForDatabaseCommit(
        _ db: Database,
        changes: Set<PagedData.TrackedChange>,
        joinSQL: SQL?,
        orderSQL: SQL,
        filterSQL: SQL,
        pageInfo: PagedData.PageInfo
    ) -> Bool {
        // Ignore any changes which aren't relevant to this type
        let relevantChanges: Set<PagedData.TrackedChange> = changes
            .filter { $0.tableName == databaseTableName }
        
        guard !relevantChanges.isEmpty else { return false }
        
        // First remove any items which have been deleted
        let oldCount: Int = self.dataCache.wrappedValue.count
        let deletionChanges: [Int64] = relevantChanges
            .filter { $0.kind == .delete }
            .map { $0.rowId }
        
        dataCache.mutate { $0 = $0.deleting(rowIds: deletionChanges) }
        
        // Get an updated count to avoid locking the dataCache unnecessarily
        let countAfterDeletions: Int = self.dataCache.wrappedValue.count
        
        // If there are no inserted/updated rows then trigger the update callback and stop here
        let rowIdsToQuery: [Int64] = relevantChanges
            .filter { $0.kind != .delete }
            .map { $0.rowId }
        
        guard !rowIdsToQuery.isEmpty else { return (oldCount != countAfterDeletions) }
        
        // Fetch the indexes of the rowIds so we can determine whether they should be added to the screen
        let pagedRowIds: [Int64] = PagedData.pagedRowIdsForRelatedRowIds(
            db,
            tableName: databaseTableName,
            pagedTableName: pagedTableName,
            relatedRowIds: rowIdsToQuery,
            joinToPagedType: joinToPagedType
        )
        
        // If the associated data change isn't related to the paged type then no need to continue
        guard !pagedRowIds.isEmpty else { return (oldCount != countAfterDeletions) }
        
        let pagedItemIndexes: [PagedData.RowIndexInfo] = PagedData.indexes(
            db,
            rowIds: pagedRowIds,
            tableName: pagedTableName,
            requiredJoinSQL: joinSQL,
            orderSQL: orderSQL,
            filterSQL: filterSQL
        )
        
        // If we can't get the item indexes for the paged row ids then it's likely related to data
        // which was filtered out (eg. message attachment related to a different thread)
        guard !pagedItemIndexes.isEmpty else { return (oldCount != countAfterDeletions) }
        
        /// **Note:** `PagedData.indexes` works by returning the index of a row in a given query; unfortunately when
        /// dealing with associated data it's possible for multiple associated data values to connect to an individual paged result,
        /// this throws off the indexes so we can't actually tell which `rowIdsToQuery` value is associated to which
        /// `pagedItemIndexes` value
        ///
        /// Instead of following the pattern the `PagedDatabaseObserver` does where we get the proper `validRowIds` we
        /// basically have to check if there is a single valid index, and if so retrieve and store all data related to the changes for this
        /// commit - this will mean in some cases we cache data which is actually unrelated to the filtered paged data
        let hasOneValidIndex: Bool = pagedItemIndexes.contains(where: { info -> Bool in
            info.rowIndex >= pageInfo.pageOffset && (
                info.rowIndex < pageInfo.currentCount || (
                    pageInfo.currentCount < pageInfo.pageSize &&
                    info.rowIndex <= (pageInfo.pageOffset + pageInfo.pageSize)
                )
            )
        })
        
        // Don't bother continuing if we don't have a valid index
        guard hasOneValidIndex else { return (oldCount != countAfterDeletions) }
        
        // Attempt to update the cache with the `rowIdsToQuery` array
        return updateCache(
            db,
            rowIds: rowIdsToQuery,
            hasOtherChanges: (oldCount != countAfterDeletions)
        )
    }
    
    @discardableResult public func updateCache(_ db: Database, rowIds: [Int64], hasOtherChanges: Bool = false) -> Bool {
        // If there are no rowIds then stop here
        guard !rowIds.isEmpty else { return hasOtherChanges }
        
        // Fetch the inserted/updated rows
        let additionalFilters: SQL = SQL(rowIds.contains(Column.rowID))
        
        do {
            let updatedItems: [T] = try dataQuery(additionalFilters)
                .fetchAll(db)
            
            // If the inserted/updated rows were irrelevant (eg. associated to another thread, a quote or a link
            // preview) then trigger the update callback (if there were deletions) and stop here
            guard !updatedItems.isEmpty else { return hasOtherChanges }
            
            // Process the upserted data (assume at least one value changed)
            dataCache.mutate { $0 = $0.upserting(items: updatedItems) }
            
            return true
        }
        catch {
            SNLog("[PagedDatabaseObserver] Error loading associated data: \(error)")
            return hasOtherChanges
        }
    }
    
    public func clearCache(_ db: Database) {
        dataCache.mutate { $0 = DataCache() }
    }
    
    public func updateAssociatedData<O>(to unassociatedCache: DataCache<O>) -> DataCache<O> {
        guard let typedCache: DataCache<PagedType> = unassociatedCache as? DataCache<PagedType> else {
            return unassociatedCache
        }
        
        return (associateData(dataCache.wrappedValue, typedCache) as? DataCache<O>)
            .defaulting(to: unassociatedCache)
    }
}