store snode list in db, use it if possible on app start

if there are not enough snodes, or no snodes at all, a request to the seed node is
made instead
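
A minimal sketch of the startup flow this message describes, assuming TypeScript and the helpers added in this commit; the bootstrapSnodePool wrapper, the import paths, and the exact call order are illustrative only (the diff below folds this logic into refreshRandomPool in snodePool.ts):

import * as Data from '../../ts/data/data';
import { refreshRandomPoolDetail } from '../../ts/session/snode_api/snodePool';

// same threshold snodePool.ts uses before falling back to a seed node
const minSnodePoolCount = 12;

// hypothetical wrapper, for illustration only
async function bootstrapSnodePool(): Promise<Array<Data.Snode>> {
  // 1. try the snode pool persisted in the db by a previous run
  const fromDb = await Data.getSnodePoolFromDb();
  if (fromDb && fromDb.length >= minSnodePoolCount) {
    return fromDb; // enough snodes: no seed node request needed on app start
  }
  // 2. not enough snodes, or none at all: ask a seed node instead
  const fromSeed = await refreshRandomPoolDetail(window.getSeedNodeList() || []);
  // 3. persist the fresh pool so the next app start can skip the seed node
  await Data.updateSnodePoolOnDb(JSON.stringify(fromSeed));
  return fromSeed;
}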
This commit is contained in:
Audric Ackermann 2021-06-08 14:35:30 +10:00
parent c992deb6f8
commit 0a208c0d15
No known key found for this signature in database
GPG Key ID: 999F434D76324AD4
11 changed files with 106 additions and 72 deletions

View File

@ -46,12 +46,15 @@ export type GuardNode = {
ed25519PubKey: string;
};
export type SwarmNode = {
address: string;
export interface Snode {
ip: string;
port: string;
pubkey_ed25519: string;
port: number;
pubkey_x25519: string;
pubkey_ed25519: string;
}
export type SwarmNode = Snode & {
address: string;
};
export type ServerToken = {
@ -976,3 +979,20 @@ export async function getMessagesWithFileAttachments(
limit: options?.limit,
});
}
export const SNODE_POOL_ITEM_ID = 'SNODE_POOL_ITEM_ID';
export async function getSnodePoolFromDb(): Promise<Array<Snode> | null> {
// this is currently all stored as a big string as we don't really need to do anything with them (no filtering or anything)
// everything is handled in memory and just written to disk
const snodesJson = await exports.getItemById(SNODE_POOL_ITEM_ID);
if (!snodesJson || !snodesJson.value) {
return null;
}
return JSON.parse(snodesJson.value);
}
export async function updateSnodePoolOnDb(snodesAsJsonString: string): Promise<void> {
await exports.createOrUpdateItem({ id: SNODE_POOL_ITEM_ID, value: snodesAsJsonString });
}
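
A hedged round-trip sketch of the two helpers above; the pool is stored as a single JSON string under SNODE_POOL_ITEM_ID and parsed back into Array<Snode> on read. The sample snode values and the wrapper function are made up for illustration:

import { getSnodePoolFromDb, updateSnodePoolOnDb, Snode } from './data';

async function snodePoolRoundTripExample(): Promise<void> {
  // made-up data, shaped like the Snode interface added above
  const examplePool: Array<Snode> = [
    { ip: '1.2.3.4', port: 22021, pubkey_x25519: 'hex...', pubkey_ed25519: 'hex...' },
  ];
  // write: the caller serializes the whole in-memory pool itself
  await updateSnodePoolOnDb(JSON.stringify(examplePool));
  // read: returns the parsed array, or null when nothing has been stored yet
  const restored = await getSnodePoolFromDb();
  window?.log?.info('snodes restored from db:', restored?.length ?? 0);
}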

View File

@ -296,6 +296,7 @@ export async function queueAttachmentDownloads(
count += await processQuoteAttachments(message, conversation);
// I don't think we rely on this for anything
if (await processGroupAvatar(message, conversation)) {
count += 1;
}

View File

@ -1,4 +1,4 @@
import { getGuardNodes, updateGuardNodes } from '../../../ts/data/data';
import { getGuardNodes, Snode, updateGuardNodes } from '../../../ts/data/data';
import * as SnodePool from '../snode_api/snodePool';
import _ from 'lodash';
import { default as insecureNodeFetch } from 'node-fetch';
@ -9,7 +9,7 @@ import { allowOnlyOneAtATime } from '../utils/Promise';
const desiredGuardCount = 3;
const minimumGuardCount = 2;
export type SnodePath = Array<SnodePool.Snode>;
export type SnodePath = Array<Snode>;
const onionRequestHops = 3;
let onionPaths: Array<SnodePath> = [];
@ -56,7 +56,7 @@ const pathFailureThreshold = 3;
// This array is meant to store nodes with full info,
// so using GuardNode would not be correct (there is
// some naming issue here it seems)
let guardNodes: Array<SnodePool.Snode> = [];
let guardNodes: Array<Snode> = [];
export const ed25519Str = (ed25519Key: string) => `(...${ed25519Key.substr(58)})`;
@ -119,7 +119,7 @@ export async function dropSnodeFromPath(snodeEd25519: string) {
onionPaths[pathWithSnodeIndex] = pathtoPatchUp;
}
export async function getOnionPath(toExclude?: SnodePool.Snode): Promise<Array<SnodePool.Snode>> {
export async function getOnionPath(toExclude?: Snode): Promise<Array<Snode>> {
let attemptNumber = 0;
while (onionPaths.length < minimumGuardCount) {
window?.log?.error(
@ -215,14 +215,14 @@ async function dropPathStartingWithGuardNode(guardNodeEd25519: string) {
guardNodes = guardNodes.filter(g => g.pubkey_ed25519 !== guardNodeEd25519);
pathFailureCount[guardNodeEd25519] = 0;
SnodePool.dropSnodeFromSnodePool(guardNodeEd25519);
await SnodePool.dropSnodeFromSnodePool(guardNodeEd25519);
// write the updated guard nodes to the db.
// the next call to getOnionPath will trigger a rebuild of the path
await updateGuardNodes(edKeys);
}
async function testGuardNode(snode: SnodePool.Snode) {
async function testGuardNode(snode: Snode) {
window?.log?.info(`Testing a candidate guard node ${ed25519Str(snode.pubkey_ed25519)}`);
// Send a post request and make sure it is OK
@ -276,7 +276,7 @@ async function testGuardNode(snode: SnodePool.Snode) {
/**
* Only exported for testing purposes. DO NOT use this directly
*/
export async function selectGuardNodes(): Promise<Array<SnodePool.Snode>> {
export async function selectGuardNodes(): Promise<Array<Snode>> {
// `getRandomSnodePool` is expected to refresh itself on low nodes
const nodePool = await SnodePool.getRandomSnodePool();
if (nodePool.length < desiredGuardCount) {
@ -289,7 +289,7 @@ export async function selectGuardNodes(): Promise<Array<SnodePool.Snode>> {
const shuffled = _.shuffle(nodePool);
let selectedGuardNodes: Array<SnodePool.Snode> = [];
let selectedGuardNodes: Array<Snode> = [];
// The use of await inside while is intentional:
// we only want to repeat if the await fails
@ -308,7 +308,7 @@ export async function selectGuardNodes(): Promise<Array<SnodePool.Snode>> {
const goodNodes = _.zip(idxOk, candidateNodes)
.filter(x => x[0])
.map(x => x[1]) as Array<SnodePool.Snode>;
.map(x => x[1]) as Array<Snode>;
selectedGuardNodes = _.concat(selectedGuardNodes, goodNodes);
}

View File

@ -7,12 +7,12 @@ import {
snodeHttpsAgent,
SnodeResponse,
} from '../snode_api/onions';
import { Snode } from '../snode_api/snodePool';
import _, { toNumber } from 'lodash';
import { default as insecureNodeFetch } from 'node-fetch';
import { PROTOCOLS } from '../constants';
import { toHex } from '../utils/String';
import pRetry from 'p-retry';
import { Snode } from '../../data/data';
// FIXME audric we should soon be able to get rid of that
const FILESERVER_HOSTS = [

View File

@ -11,7 +11,7 @@ const { remote } = Electron;
import { snodeRpc } from './lokiRpc';
import { getRandomSnode, getRandomSnodePool, requiredSnodesForAgreement, Snode } from './snodePool';
import { getRandomSnode, getRandomSnodePool, requiredSnodesForAgreement } from './snodePool';
import { Constants } from '..';
import { getSodium, sha256 } from '../crypto';
import _, { range } from 'lodash';
@ -23,6 +23,7 @@ import {
stringToUint8Array,
toHex,
} from '../utils/String';
import { Snode } from '../../data/data';
// ONS name can have [a-zA-Z0-9_-] except that - is not allowed as start or end
// do not define a regex but rather create it on the fly to avoid https://stackoverflow.com/questions/3891641/regex-test-only-works-every-other-time

View File

@ -1,6 +1,5 @@
import { default as insecureNodeFetch } from 'node-fetch';
import { Snode } from './snodePool';
import { Snode } from '../../data/data';
import { lokiOnionFetch, snodeHttpsAgent, SnodeResponse } from './onions';

View File

@ -1,12 +1,7 @@
import { default as insecureNodeFetch } from 'node-fetch';
import https from 'https';
import {
dropSnodeFromSnodePool,
dropSnodeFromSwarmIfNeeded,
Snode,
updateSwarmFor,
} from './snodePool';
import { dropSnodeFromSnodePool, dropSnodeFromSwarmIfNeeded, updateSwarmFor } from './snodePool';
import ByteBuffer from 'bytebuffer';
import { OnionPaths } from '../onions';
import { fromBase64ToArrayBuffer, toHex } from '../utils/String';
@ -17,6 +12,8 @@ import { hrefPnServerDev, hrefPnServerProd } from '../../pushnotification/PnServ
// hold the ed25519 key of a snode against the number of times it failed. Used to remove a snode only after a few failures (snodeFailureThreshold failures)
let snodeFailureCount: Record<string, number> = {};
import { Snode } from '../../data/data';
// tslint:disable-next-line: variable-name
export const TEST_resetSnodeFailureCount = () => {
snodeFailureCount = {};
@ -590,7 +587,7 @@ export async function incrementBadSnodeCountOrDrop({
}
window?.log?.info(`Dropping ${snodeEd25519} from snodepool`);
dropSnodeFromSnodePool(snodeEd25519);
await dropSnodeFromSnodePool(snodeEd25519);
// the snode was ejected from the pool so it won't be used again.
// in case of snode pool refresh, we need to be able to try to contact this node again so reset its failure count to 0.
snodeFailureCount[snodeEd25519] = 0;

View File

@ -28,16 +28,8 @@ const minSnodePoolCount = 12;
*/
export const requiredSnodesForAgreement = 24;
export interface Snode {
ip: string;
port: number;
pubkey_x25519: string;
pubkey_ed25519: string;
version: string;
}
// This should be renamed to `allNodes` or something
let randomSnodePool: Array<Snode> = [];
let randomSnodePool: Array<Data.Snode> = [];
// We only store nodes' identifiers here,
const swarmCache: Map<string, Array<string>> = new Map();
@ -48,7 +40,9 @@ export type SeedNode = {
};
// just get the filtered list
async function tryGetSnodeListFromLokidSeednode(seedNodes: Array<SeedNode>): Promise<Array<Snode>> {
async function tryGetSnodeListFromLokidSeednode(
seedNodes: Array<SeedNode>
): Promise<Array<Data.Snode>> {
window?.log?.info('tryGetSnodeListFromLokidSeednode starting...');
if (!seedNodes.length) {
@ -111,10 +105,11 @@ async function tryGetSnodeListFromLokidSeednode(seedNodes: Array<SeedNode>): Pro
* Use `dropSnodeFromSwarmIfNeeded` for that
* @param snodeEd25519 the snode ed25519 to drop from the snode pool
*/
export function dropSnodeFromSnodePool(snodeEd25519: string) {
export async function dropSnodeFromSnodePool(snodeEd25519: string) {
const exists = _.some(randomSnodePool, x => x.pubkey_ed25519 === snodeEd25519);
if (exists) {
_.remove(randomSnodePool, x => x.pubkey_ed25519 === snodeEd25519);
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
window?.log?.warn(
`Marking ${ed25519Str(snodeEd25519)} as unreachable, ${
@ -128,7 +123,7 @@ export function dropSnodeFromSnodePool(snodeEd25519: string) {
*
* @param excludingEd25519Snode can be used to exclude some nodes from the random list. Useful to rebuild a path excluding nodes already in a path
*/
export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Snode> {
export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Data.Snode> {
// resolve random snode
if (randomSnodePool.length === 0) {
// Should not this be saved to the database?
@ -140,7 +135,7 @@ export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Pro
}
// We know the pool can't be empty at this point
if (!excludingEd25519Snode) {
return _.sample(randomSnodePool) as Snode;
return _.sample(randomSnodePool) as Data.Snode;
}
// we have to double check that, even after removing the nodes to exclude, we still have some nodes in the list
@ -157,42 +152,37 @@ export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Pro
// used for tests
throw new Error('SeedNodeError');
}
return _.sample(snodePoolExcluding) as Snode;
return _.sample(snodePoolExcluding) as Data.Snode;
}
/**
* This function forces the snode pool to be refreshed from a random seed node again.
* This should be called once a day or so while the app is kept open.
*/
export async function forceRefreshRandomSnodePool(): Promise<Array<Snode>> {
await refreshRandomPool();
export async function forceRefreshRandomSnodePool(): Promise<Array<Data.Snode>> {
await refreshRandomPool(true);
return randomSnodePool;
}
export async function getRandomSnodePool(): Promise<Array<Snode>> {
export async function getRandomSnodePool(): Promise<Array<Data.Snode>> {
if (randomSnodePool.length === 0) {
await refreshRandomPool();
}
return randomSnodePool;
}
// not cacheable because we write to this.randomSnodePool elsewhere
export function getNodesMinVersion(minVersion: string): Array<Snode> {
return randomSnodePool.filter((node: any) => node.version && semver.gt(node.version, minVersion));
}
async function getSnodeListFromLokidSeednode(
seedNodes: Array<SeedNode>,
retries = 0
): Promise<Array<Snode>> {
): Promise<Array<Data.Snode>> {
const SEED_NODE_RETRIES = 3;
window?.log?.info('getSnodeListFromLokidSeednode starting...');
if (!seedNodes.length) {
window?.log?.info('loki_snode_api::getSnodeListFromLokidSeednode - seedNodes are empty');
return [];
}
let snodes: Array<Snode> = [];
let snodes: Array<Data.Snode> = [];
try {
snodes = await tryGetSnodeListFromLokidSeednode(seedNodes);
} catch (e) {
@ -225,7 +215,9 @@ async function getSnodeListFromLokidSeednode(
* Exported only for tests. This is not to be used by the app directly
* @param seedNodes the seednodes to use to fetch snodes details
*/
export async function refreshRandomPoolDetail(seedNodes: Array<SeedNode>): Promise<Array<Snode>> {
export async function refreshRandomPoolDetail(
seedNodes: Array<SeedNode>
): Promise<Array<Data.Snode>> {
let snodes = [];
try {
window?.log?.info(`refreshRandomPoolDetail with seedNodes.length ${seedNodes.length}`);
@ -265,8 +257,10 @@ export async function refreshRandomPoolDetail(seedNodes: Array<SeedNode>): Promi
* This function runs only once at a time, and fetches the snode pool from a random seed node,
* or if we have enough snodes, fetches the snode pool from one of the snode.
*/
export async function refreshRandomPool(): Promise<void> {
if (!window.getSeedNodeList() || !window.getSeedNodeList()?.length) {
export async function refreshRandomPool(forceRefresh = false): Promise<void> {
const seedNodes = window.getSeedNodeList();
if (!seedNodes || !seedNodes.length) {
window?.log?.error(
'LokiSnodeAPI:::refreshRandomPool - getSeedNodeList has not been loaded yet'
);
@ -274,12 +268,30 @@ export async function refreshRandomPool(): Promise<void> {
return;
}
// tslint:disable-next-line:no-parameter-reassignment
const seedNodes = window.getSeedNodeList();
window?.log?.info("right before allowOnlyOneAtATime 'refreshRandomPool'");
return allowOnlyOneAtATime('refreshRandomPool', async () => {
window?.log?.info("running allowOnlyOneAtATime 'refreshRandomPool'");
// if we have forceRefresh set, we want to request snodes from snodes or from the seed server.
if (randomSnodePool.length === 0 && !forceRefresh) {
const fetchedFromDb = await Data.getSnodePoolFromDb();
// write to memory only if it is valid.
// if the size is not enough, we will contact a seed node.
if (fetchedFromDb?.length) {
window?.log?.info(`refreshRandomPool: fetched from db ${fetchedFromDb.length} snodes.`);
randomSnodePool = fetchedFromDb;
if (randomSnodePool.length < minSnodePoolCount) {
window?.log?.warn('refreshRandomPool: not enough snodes in db, going to fetch from seed');
}
return;
} else {
window?.log?.warn('refreshRandomPool: did not find snodes in db.');
}
}
// we don't have enough nodes to fetch the pool from, so call the seed node instead.
if (randomSnodePool.length < minSnodePoolCount) {
window?.log?.info(
@ -287,7 +299,7 @@ export async function refreshRandomPool(): Promise<void> {
);
randomSnodePool = await exports.refreshRandomPoolDetail(seedNodes);
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
return;
}
try {
@ -308,6 +320,7 @@ export async function refreshRandomPool(): Promise<void> {
}
window?.log?.info('updating snode list with snode pool length:', commonNodes.length);
randomSnodePool = commonNodes;
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
},
{
retries: 3,
@ -328,6 +341,7 @@ export async function refreshRandomPool(): Promise<void> {
// fallback to a seed node fetch of the snode pool
randomSnodePool = await exports.refreshRandomPoolDetail(seedNodes);
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
}
});
}
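
Per the doc comment earlier in this file, forceRefreshRandomSnodePool is meant to run about once a day while the app stays open; with forceRefresh set it bypasses the db cache and re-persists whatever it fetched. A minimal caller sketch, where the setInterval scheduling is an assumption and not part of this commit:

import { forceRefreshRandomSnodePool } from './snodePool';

// hypothetical scheduler: refresh the pool roughly once a day while the app is open
const ONE_DAY_MS = 24 * 60 * 60 * 1000;
setInterval(() => {
  void forceRefreshRandomSnodePool().catch(e => {
    window?.log?.warn('forceRefreshRandomSnodePool failed:', e);
  });
}, ONE_DAY_MS);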
@ -352,8 +366,8 @@ export async function dropSnodeFromSwarmIfNeeded(
await internalUpdateSwarmFor(pubkey, updatedSwarm);
}
export async function updateSwarmFor(pubkey: string, snodes: Array<Snode>): Promise<void> {
const edkeys = snodes.map((sn: Snode) => sn.pubkey_ed25519);
export async function updateSwarmFor(pubkey: string, snodes: Array<Data.Snode>): Promise<void> {
const edkeys = snodes.map((sn: Data.Snode) => sn.pubkey_ed25519);
await internalUpdateSwarmFor(pubkey, edkeys);
}
@ -382,12 +396,14 @@ export async function getSwarmFromCacheOrDb(pubkey: string): Promise<Array<strin
* This call fetches the swarm from cache or db and extracts only the ones currently reachable.
* If not enough valid snodes are in the swarm, it fetches new snodes for this pubkey from the network.
*/
export async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
export async function getSwarmFor(pubkey: string): Promise<Array<Data.Snode>> {
const nodes = await getSwarmFromCacheOrDb(pubkey);
// See how many are actually still reachable
// the nodes still reachable are the ones still present in the snode pool
const goodNodes = randomSnodePool.filter((n: Snode) => nodes.indexOf(n.pubkey_ed25519) !== -1);
const goodNodes = randomSnodePool.filter(
(n: Data.Snode) => nodes.indexOf(n.pubkey_ed25519) !== -1
);
if (goodNodes.length >= minSwarmSnodeCount) {
return goodNodes;
@ -396,7 +412,7 @@ export async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
// Request new node list from the network
const freshNodes = _.shuffle(await requestSnodesForPubkey(pubkey));
const edkeys = freshNodes.map((n: Snode) => n.pubkey_ed25519);
const edkeys = freshNodes.map((n: Data.Snode) => n.pubkey_ed25519);
await internalUpdateSwarmFor(pubkey, edkeys);
return freshNodes;

View File

@ -1,5 +1,5 @@
import { PubKey } from '../types';
import { getSwarmFor, Snode } from './snodePool';
import { getSwarmFor } from './snodePool';
import { retrieveNextMessages } from './SNodeAPI';
import { SignalService } from '../../protobuf';
import * as Receiver from '../../receiver/receiver';
@ -8,6 +8,7 @@ import {
getLastHashBySnode,
getSeenMessagesByHashList,
saveSeenMessageHashes,
Snode,
updateLastHash,
} from '../../../ts/data/data';

View File

@ -1,5 +1,4 @@
import { Snode } from '../snode_api/snodePool';
import { Snode } from '../../data/data';
type SimpleFunction<T> = (arg: T) => void;
type Return<T> = Promise<T> | T;

View File

@ -17,12 +17,7 @@ import {
} from '../../../../session/snode_api/onions';
import AbortController from 'abort-controller';
import * as Data from '../../../../../ts/data/data';
import { Snode } from '../../../../session/snode_api/snodePool';
import {
pathFailureCount,
SnodePath,
TEST_getTestguardNodes,
} from '../../../../session/onions/onionPath';
import { pathFailureCount, SnodePath } from '../../../../session/onions/onionPath';
chai.use(chaiAsPromised as any);
chai.should();
@ -60,10 +55,10 @@ describe('OnionPathsErrors', () => {
// tslint:disable-next-line: one-variable-per-declaration
let guardPubkeys: Array<string>,
otherNodesPubkeys: Array<string>,
guardNodesArray: Array<Snode>,
guardSnode1: Snode,
otherNodesArray: Array<Snode>,
fakeSnodePool: Array<Snode>,
guardNodesArray: Array<Data.Snode>,
guardSnode1: Data.Snode,
otherNodesArray: Array<Data.Snode>,
fakeSnodePool: Array<Data.Snode>,
associatedWith: string,
fakeSwarmForAssociatedWith: Array<string>;
@ -119,6 +114,11 @@ describe('OnionPathsErrors', () => {
// those are still doing what they do, but we spy on their execution
updateSwarmSpy = sandbox.stub(Data, 'updateSwarmNodesForPubkey').resolves();
sandbox
.stub(Data, 'getItemById')
.withArgs(Data.SNODE_POOL_ITEM_ID)
.resolves({ id: Data.SNODE_POOL_ITEM_ID, value: '' });
sandbox.stub(Data, 'createOrUpdateItem').resolves();
dropSnodeFromSnodePool = sandbox.spy(SNodeAPI.SnodePool, 'dropSnodeFromSnodePool');
dropSnodeFromSwarmIfNeededSpy = sandbox.spy(SNodeAPI.SnodePool, 'dropSnodeFromSwarmIfNeeded');
dropSnodeFromPathSpy = sandbox.spy(OnionPaths, 'dropSnodeFromPath');
@ -296,7 +296,7 @@ describe('OnionPathsErrors', () => {
it('throws a non-retryable error we get a 421 status code with a new swarm', async () => {
const targetNode = otherNodesPubkeys[0];
const resultExpected: Array<Snode> = [
const resultExpected: Array<Data.Snode> = [
otherNodesArray[4],
otherNodesArray[5],
otherNodesArray[6],