Session 1.7.1 (#1908)

* Keep line breaks when message has some new lines

Relates #1758 and #1898 and #1901

* Fix path building (#1903)

* Keep line breaks when message has some new lines

Relates #1758 and #1898 and #1901

* fix link preview cropped when received

* make sure we fetch from seed if we end up with too few snodes

* try to download recent previews if we just trusted a user

* throw if we need to rebuild path while fetching snode list from snode

* fix up: no refetching of snode list while we are already fetching it

* added test for fetch from db or seed

* fetch snode list from snode every hour

also make sure the path building does not try to get more snodes. It
just throws

* do not drop a path if an opengroup room is down and we get errors back

* do not throw an error if the snode in error is not in any path

* fix tests

* bump to v1.7.1
This commit is contained in:
Audric Ackermann 2021-09-13 18:05:49 +10:00 committed by GitHub
parent 9a5c72c898
commit 797bf0650f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 1389 additions and 908 deletions

View File

@ -75,18 +75,6 @@
appendStack(this, error);
}
function SeedNodeError(message) {
this.name = 'SeedNodeError';
this.message = message;
Error.call(this, message);
// Maintains proper stack trace, where our error was thrown (only available on V8)
// via https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error
if (Error.captureStackTrace) {
Error.captureStackTrace(this);
}
}
function HTTPError(message, response) {
this.name = 'HTTPError';
this.message = `${response.status} Error: ${message}`;
@ -114,7 +102,6 @@
window.textsecure.SendMessageNetworkError = SendMessageNetworkError;
window.textsecure.ReplayableError = ReplayableError;
window.textsecure.EmptySwarmError = EmptySwarmError;
window.textsecure.SeedNodeError = SeedNodeError;
window.textsecure.HTTPError = HTTPError;
window.textsecure.NotFoundError = NotFoundError;
window.textsecure.TimestampError = TimestampError;

View File

@ -7,7 +7,6 @@ export interface LibTextsecure {
SendMessageNetworkError: any;
ReplayableError: any;
EmptySwarmError: any;
SeedNodeError: any;
HTTPError: any;
NotFoundError: any;
TimestampError: any;

View File

@ -2,7 +2,7 @@
"name": "session-desktop",
"productName": "Session",
"description": "Private messaging from your desktop",
"version": "1.7.0",
"version": "1.7.1",
"license": "GPL-3.0",
"author": {
"name": "Loki Project",
@ -35,7 +35,7 @@
"build-protobuf": "yarn build-module-protobuf",
"clean-protobuf": "yarn clean-module-protobuf",
"test": "yarn test-node",
"test-node": "mocha --recursive --exit --timeout 10000 test/app test/modules \"./ts/test/**/*_test.js\" ",
"test-node": "mocha --recursive --exit --timeout 10000 \"./ts/test/**/*_test.js\" ",
"eslint-full": "eslint .",
"lint-full": "yarn format-full && yarn lint-files-full",
"lint-files-full": "yarn eslint-full && yarn tslint",

View File

@ -174,18 +174,12 @@
.module-message__link-preview {
cursor: pointer;
margin-inline-start: -12px;
margin-inline-end: -12px;
margin-top: -10px;
margin-bottom: 5px;
border-top-left-radius: $session_message-container-border-radius;
border-top-right-radius: $session_message-container-border-radius;
}
.module-message__link-preview__content {
padding: 8px;
border-top-left-radius: $session_message-container-border-radius;
border-top-right-radius: $session_message-container-border-radius;
background-color: $color-white;
display: flex;
flex-direction: row;
@ -1027,8 +1021,12 @@
}
.module-image__image {
object-fit: cover;
object-fit: contain;
cursor: pointer;
&-cover {
object-fit: cover;
}
}
.module-image__bottom-overlay {
@ -1489,9 +1487,10 @@
.module-staged-link-preview--is-loading {
align-items: center;
justify-content: center;
}
.module-staged-link-preview__loading {
color: var(--color-sent-message-text);
color: var(--color-text);
font-size: 14px;
text-align: center;

View File

@ -1281,6 +1281,10 @@ input {
}
}
.module-message__text {
white-space: pre-wrap;
}
.session-info-box {
display: flex;
flex-direction: column;

View File

@ -424,15 +424,6 @@
// Module: Staged Link Preview
.module-staged-link-preview__loading {
color: $color-gray-25;
}
.module-staged-link-preview__title {
color: $color-gray-05;
}
.module-staged-link-preview__location {
color: $color-gray-25;
}
.module-staged-link-preview__close-button {
@include color-svg('../images/x-16.svg', $color-gray-25);
}

View File

@ -21,6 +21,7 @@ type Props = {
darkOverlay?: boolean;
playIconOverlay?: boolean;
softCorners?: boolean;
forceSquare?: boolean;
onClick?: (attachment: AttachmentTypeWithPath | AttachmentType) => void;
onClickClose?: (attachment: AttachmentTypeWithPath | AttachmentType) => void;
@ -42,6 +43,7 @@ export const Image = (props: Props) => {
overlayText,
playIconOverlay,
softCorners,
forceSquare,
url,
width,
} = props;
@ -84,13 +86,17 @@ export const Image = (props: Props) => {
canClick ? 'module-image__with-click-handler' : null,
softCorners ? 'module-image--soft-corners' : null
)}
style={{
maxHeight: `${height}px`,
maxWidth: `${width}px`,
}}
>
{pending || loading ? (
<div
className="module-image__loading-placeholder"
style={{
height: `${height}px`,
width: `${width}px`,
maxHeight: `${height}px`,
maxWidth: `${width}px`,
lineHeight: `${height}px`,
textAlign: 'center',
}}
@ -100,10 +106,17 @@ export const Image = (props: Props) => {
) : (
<img
onError={onErrorUrlFilterering}
className="module-image__image"
className={classNames(
'module-image__image',
forceSquare ? 'module-image__image-cover' : ''
)}
alt={alt}
height={height}
width={width}
style={{
maxHeight: `${height}px`,
maxWidth: `${width}px`,
width: forceSquare ? `${width}px` : '',
height: forceSquare ? `${height}px` : '',
}}
src={srcData}
onDragStart={onDragStart}
/>

View File

@ -79,6 +79,7 @@ export const StagedAttachmentList = (props: Props) => {
playIconOverlay={isVideoAttachment(attachment)}
height={IMAGE_HEIGHT}
width={IMAGE_WIDTH}
forceSquare={true}
url={getUrl(attachment)}
closeButton={true}
onClick={clickCallback}

View File

@ -4,6 +4,7 @@ import classNames from 'classnames';
import { Image } from './Image';
import { AttachmentType, isImageAttachment } from '../../types/Attachment';
import { SessionSpinner } from '../session/SessionSpinner';
type Props = {
isLoaded: boolean;
@ -24,16 +25,16 @@ export const StagedLinkPreview = (props: Props) => {
return <></>;
}
const isLoading = !isLoaded;
return (
<div
className={classNames(
'module-staged-link-preview',
!isLoaded ? 'module-staged-link-preview--is-loading' : null
isLoading ? 'module-staged-link-preview--is-loading' : null
)}
>
{!isLoaded ? (
<div className="module-staged-link-preview__loading">{window.i18n('loading')}</div>
) : null}
{isLoading ? <SessionSpinner loading={isLoading} /> : null}
{isLoaded && image && isImage ? (
<div className="module-staged-link-preview__icon-container">
<Image

View File

@ -50,10 +50,14 @@ export const ClickToTrustSender = (props: { messageId: string }) => {
await Promise.all(
messagesInConvo.map(async message => {
const msgAttachments = message.get('attachments');
const messagePreviews = message.get('preview');
if (message.get('direction') !== 'incoming') {
return;
}
if (!msgAttachments || msgAttachments.length === 0) {
if (
(!msgAttachments || msgAttachments.length === 0) &&
(!messagePreviews || messagePreviews.length === 0)
) {
return;
}
@ -72,6 +76,28 @@ export const ClickToTrustSender = (props: { messageId: string }) => {
})
);
const preview = await Promise.all(
(messagePreviews || []).map(async (item: any, index: any) => {
if (!item.image) {
return item;
}
const image = message.isTrustedForAttachmentDownload()
? await AttachmentDownloads.addJob(item.image, {
messageId: message.id,
type: 'preview',
index,
isOpenGroupV2: false,
openGroupV2Details: undefined,
})
: null;
return { ...item, image };
})
);
message.set({ preview });
message.set({ attachments: downloadedAttachments });
await message.commit();
})

View File

@ -177,10 +177,12 @@ export const MessageContent = (props: Props) => {
handleImageError={handleImageError}
/>
{hasContentAfterAttachmentAndQuote ? (
<Flex padding="7px" container={true} flexDirection="column">
<>
<MessagePreview messageId={props.messageId} handleImageError={handleImageError} />
<MessageText messageId={props.messageId} />
</Flex>
<Flex padding="7px" container={true} flexDirection="column">
<MessageText messageId={props.messageId} />
</Flex>
</>
) : null}
</IsMessageVisibleContext.Provider>
</InView>

View File

@ -14,9 +14,12 @@ import {
removeConversation,
removeOneOpenGroupV1Message,
} from '../../data/data';
import { OnionPaths } from '../../session/onions';
import { getMessageQueue } from '../../session/sending';
import { useDispatch, useSelector } from 'react-redux';
// tslint:disable: no-submodule-imports
import useInterval from 'react-use/lib/useInterval';
import useTimeoutFn from 'react-use/lib/useTimeoutFn';
import { getOurNumber } from '../../state/selectors/user';
import {
getOurPrimaryConversation,
@ -24,7 +27,6 @@ import {
} from '../../state/selectors/conversations';
import { applyTheme } from '../../state/ducks/theme';
import { getFocusedSection } from '../../state/selectors/section';
import { useInterval } from '../../hooks/useInterval';
import { clearSearch } from '../../state/ducks/search';
import { SectionType, showLeftPaneSection } from '../../state/ducks/section';
@ -204,11 +206,6 @@ const triggerAvatarReUploadIfNeeded = async () => {
* This function is called only once: on app startup with a logged in user
*/
const doAppStartUp = () => {
if (window.lokiFeatureFlags.useOnionRequests || window.lokiFeatureFlags.useFileOnionRequests) {
// Initialize paths for onion requests
void OnionPaths.buildNewOnionPathsOneAtATime();
}
// init the messageQueue. In the constructor, we add all not send messages
// this call does nothing except calling the constructor, which will continue sending message in the pipeline
void getMessageQueue().processAllPending();
@ -271,8 +268,15 @@ export const ActionsPanel = () => {
}, DURATION.DAYS * 2);
useInterval(() => {
// trigger an updates from the snodes every hour
void forceRefreshRandomSnodePool();
}, DURATION.DAYS * 1);
}, DURATION.HOURS * 1);
useTimeoutFn(() => {
// trigger an updates from the snodes after 5 minutes, once
void forceRefreshRandomSnodePool();
}, DURATION.MINUTES * 5);
useInterval(() => {
// this won't be run every days, but if the app stays open for more than 10 days

View File

@ -94,8 +94,8 @@ async function joinOpenGroupV2(room: OpenGroupV2Room, fromConfigMessage: boolean
await forceSyncConfigurationNowIfNeeded();
}
} catch (e) {
window?.log?.error('Could not join open group v2', e);
throw new Error(e);
window?.log?.error('Could not join open group v2', e.message);
throw e;
}
}
@ -157,7 +157,7 @@ export async function joinOpenGroupV2WithUIEvents(
}
}
} catch (error) {
window?.log?.warn('got error while joining open group:', error);
window?.log?.warn('got error while joining open group:', error.message);
if (showToasts) {
ToastUtils.pushToastError('connectToServerFail', window.i18n('connectToServerFail'));
}

View File

@ -211,7 +211,7 @@ export class OpenGroupManagerV2 {
return conversation;
} catch (e) {
window?.log?.warn('Failed to join open group v2', e);
window?.log?.warn('Failed to join open group v2', e.message);
await removeV2OpenGroupRoom(conversationId);
// throw new Error(window.i18n('connectToServerFail'));
return undefined;

View File

@ -348,7 +348,7 @@ export class OpenGroupServerPoller {
// ==> At this point all those results need to trigger conversation updates, so update what we have to update
await handleCompactPollResults(this.serverUrl, compactFetchResults);
} catch (e) {
window?.log?.warn('Got error while compact fetch:', e);
window?.log?.warn('Got error while compact fetch:', e.message);
} finally {
this.isPolling = false;
}

View File

@ -1,4 +1,4 @@
import { getGuardNodes, Snode, updateGuardNodes } from '../../../ts/data/data';
import * as Data from '../../../ts/data/data';
import * as SnodePool from '../snode_api/snodePool';
import _ from 'lodash';
import { default as insecureNodeFetch } from 'node-fetch';
@ -14,8 +14,10 @@ import { updateOnionPaths } from '../../state/ducks/onion';
import { ERROR_CODE_NO_CONNECT } from '../snode_api/SNodeAPI';
import { getStoragePubKey } from '../types/PubKey';
import { OnionPaths } from './';
const ONION_REQUEST_HOPS = 3;
export let onionPaths: Array<Array<Snode>> = [];
export let onionPaths: Array<Array<Data.Snode>> = [];
/**
* Used for testing only
@ -59,17 +61,18 @@ const pathFailureThreshold = 3;
// This array is meant to store nodes will full info,
// so using GuardNode would not be correct (there is
// some naming issue here it seems)
export let guardNodes: Array<Snode> = [];
export let guardNodes: Array<Data.Snode> = [];
export const ed25519Str = (ed25519Key: string) => `(...${ed25519Key.substr(58)})`;
let buildNewOnionPathsWorkerRetry = 0;
export async function buildNewOnionPathsOneAtATime() {
// this function may be called concurrently make sure we only have one inflight
return allowOnlyOneAtATime('buildNewOnionPaths', async () => {
buildNewOnionPathsWorkerRetry = 0;
await buildNewOnionPathsWorker();
try {
await buildNewOnionPathsWorker();
} catch (e) {
window?.log?.warn(`buildNewOnionPathsWorker failed with ${e.message}`);
}
});
}
@ -91,10 +94,11 @@ export async function dropSnodeFromPath(snodeEd25519: string) {
);
if (pathWithSnodeIndex === -1) {
window?.log?.warn(
`Could not drop ${ed25519Str(snodeEd25519)} from path index: ${pathWithSnodeIndex}`
);
throw new Error(`Could not drop snode ${ed25519Str(snodeEd25519)} from path: not in any paths`);
window?.log?.warn(`Could not drop ${ed25519Str(snodeEd25519)} as it is not in any paths`);
// this can happen for instance if the snode given is the destination snode.
// like a `retrieve` request returns node not found being the request the snode is made to.
// in this case, nothing bad is happening for the path. We just have to use another snode to do the request
return;
}
window?.log?.info(
`dropping snode ${ed25519Str(snodeEd25519)} from path index: ${pathWithSnodeIndex}`
@ -121,15 +125,24 @@ export async function dropSnodeFromPath(snodeEd25519: string) {
onionPaths[pathWithSnodeIndex] = pathtoPatchUp;
}
export async function getOnionPath(toExclude?: Snode): Promise<Array<Snode>> {
export async function getOnionPath({
toExclude,
}: {
toExclude?: Data.Snode;
}): Promise<Array<Data.Snode>> {
let attemptNumber = 0;
// the buildNewOnionPathsOneAtATime will try to fetch from seed if it needs more snodes
while (onionPaths.length < minimumGuardCount) {
window?.log?.info(
`Must have at least ${minimumGuardCount} good onion paths, actual: ${onionPaths.length}, attempt #${attemptNumber} fetching more...`
`getOnionPath: Must have at least ${minimumGuardCount} good onion paths, actual: ${onionPaths.length}, attempt #${attemptNumber}`
);
// eslint-disable-next-line no-await-in-loop
await buildNewOnionPathsOneAtATime();
try {
// eslint-disable-next-line no-await-in-loop
await buildNewOnionPathsOneAtATime();
} catch (e) {
window?.log?.warn(`buildNewOnionPathsOneAtATime failed with ${e.message}`);
}
// should we add a delay? buildNewOnionPathsOneA tATime should act as one
// reload goodPaths now
@ -141,7 +154,7 @@ export async function getOnionPath(toExclude?: Snode): Promise<Array<Snode>> {
}
}
if (onionPaths.length <= 0) {
if (onionPaths.length === 0) {
if (!_.isEmpty(window.inboxStore?.getState().onionPaths.snodePaths)) {
window.inboxStore?.dispatch(updateOnionPaths([]));
}
@ -156,23 +169,29 @@ export async function getOnionPath(toExclude?: Snode): Promise<Array<Snode>> {
}
}
const onionPathsWithoutExcluded = toExclude
? onionPaths.filter(
path => !_.some(path, node => node.pubkey_ed25519 === toExclude.pubkey_ed25519)
)
: onionPaths;
if (!onionPathsWithoutExcluded) {
window?.log?.error('LokiSnodeAPI::getOnionPath - no path in', onionPathsWithoutExcluded);
return [];
if (!toExclude) {
// no need to exclude a node, then just return a random path from the list of path
if (!onionPaths || onionPaths.length === 0) {
throw new Error('No onion paths available');
}
const randomPathNoExclude = _.sample(onionPaths);
if (!randomPathNoExclude) {
throw new Error('No onion paths available');
}
return randomPathNoExclude;
}
// here we got a snode to exclude from the returned path
const onionPathsWithoutExcluded = onionPaths.filter(
path => !_.some(path, node => node.pubkey_ed25519 === toExclude.pubkey_ed25519)
);
if (!onionPathsWithoutExcluded || onionPathsWithoutExcluded.length === 0) {
throw new Error('No onion paths available after filtering');
}
const randomPath = _.sample(onionPathsWithoutExcluded);
if (!randomPath) {
throw new Error('No onion paths available after filtering');
}
return randomPath;
}
@ -185,11 +204,11 @@ export async function incrementBadPathCountOrDrop(snodeEd25519: string) {
);
if (pathWithSnodeIndex === -1) {
window?.log?.info('Did not find any path containing this snode');
// this can only be bad. throw an abortError so we use another path if needed
throw new pRetry.AbortError(
'incrementBadPathCountOrDrop: Did not find any path containing this snode'
);
window?.log?.info('incrementBadPathCountOrDrop: Did not find any path containing this snode');
// this might happen if the snodeEd25519 is the one of the target snode, just increment the target snode count by 1
await incrementBadSnodeCountOrDrop({ snodeEd25519 });
return;
}
const guardNodeEd25519 = onionPaths[pathWithSnodeIndex][0].pubkey_ed25519;
@ -210,7 +229,7 @@ export async function incrementBadPathCountOrDrop(snodeEd25519: string) {
// a guard node is dropped when the path is dropped completely (in dropPathStartingWithGuardNode)
for (let index = 1; index < pathWithIssues.length; index++) {
const snode = pathWithIssues[index];
await incrementBadSnodeCountOrDrop({ snodeEd25519: snode.pubkey_ed25519, guardNodeEd25519 });
await incrementBadSnodeCountOrDrop({ snodeEd25519: snode.pubkey_ed25519 });
}
if (newPathFailureCount >= pathFailureThreshold) {
@ -226,7 +245,8 @@ export async function incrementBadPathCountOrDrop(snodeEd25519: string) {
* @param ed25519Key the guard node ed25519 pubkey
*/
async function dropPathStartingWithGuardNode(guardNodeEd25519: string) {
// we are dropping it. Reset the counter in case this same guard gets choosen later
await SnodePool.dropSnodeFromSnodePool(guardNodeEd25519);
const failingPathIndex = onionPaths.findIndex(p => p[0].pubkey_ed25519 === guardNodeEd25519);
if (failingPathIndex === -1) {
window?.log?.warn('No such path starts with this guard node ');
@ -241,22 +261,22 @@ async function dropPathStartingWithGuardNode(guardNodeEd25519: string) {
// make sure to drop the guard node even if the path starting with this guard node is not found
guardNodes = guardNodes.filter(g => g.pubkey_ed25519 !== guardNodeEd25519);
// write the updates guard nodes to the db.
await internalUpdateGuardNodes(guardNodes);
// we are dropping it. Reset the counter in case this same guard gets choosen later
pathFailureCount[guardNodeEd25519] = 0;
await SnodePool.dropSnodeFromSnodePool(guardNodeEd25519);
// write the updates guard nodes to the db.
// the next call to getOnionPath will trigger a rebuild of the path
await internalUpdateGuardNodes(guardNodes);
// trigger path rebuilding for the dropped path. This will throw if anything happens
await buildNewOnionPathsOneAtATime();
}
async function internalUpdateGuardNodes(updatedGuardNodes: Array<Snode>) {
async function internalUpdateGuardNodes(updatedGuardNodes: Array<Data.Snode>) {
const edKeys = updatedGuardNodes.map(n => n.pubkey_ed25519);
await updateGuardNodes(edKeys);
await Data.updateGuardNodes(edKeys);
}
async function testGuardNode(snode: Snode) {
export async function TEST_testGuardNode(snode: Data.Snode) {
window?.log?.info(`Testing a candidate guard node ${ed25519Str(snode.pubkey_ed25519)}`);
// Send a post request and make sure it is OK
@ -316,13 +336,16 @@ async function testGuardNode(snode: Snode) {
}
/**
* Only exported for testing purpose. DO NOT use this directly
* Only exported for testing purpose.
* If the random snode p
*/
export async function selectGuardNodes(): Promise<Array<Snode>> {
// `getRandomSnodePool` is expected to refresh itself on low nodes
const nodePool = await SnodePool.getRandomSnodePool();
window.log.info('selectGuardNodes snodePool:', nodePool.length);
if (nodePool.length < desiredGuardCount) {
export async function selectGuardNodes(): Promise<Array<Data.Snode>> {
// `getSnodePoolFromDBOrFetchFromSeed` does not refetch stuff. It just throws.
// this is to avoid having circular dependencies of path building, needing new snodes, which needs new paths building...
const nodePool = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
window.log.info(`selectGuardNodes snodePool length: ${nodePool.length}`);
if (nodePool.length < SnodePool.minSnodePoolCount) {
window?.log?.error(
`Could not select guard nodes. Not enough nodes in the pool: ${nodePool.length}`
);
@ -333,7 +356,7 @@ export async function selectGuardNodes(): Promise<Array<Snode>> {
const shuffled = _.shuffle(nodePool);
let selectedGuardNodes: Array<Snode> = [];
let selectedGuardNodes: Array<Data.Snode> = [];
let attempts = 0;
@ -345,14 +368,10 @@ export async function selectGuardNodes(): Promise<Array<Snode>> {
window?.log?.error('selectedGuardNodes: offline');
throw new Error('selectedGuardNodes: offline');
}
if (shuffled.length < desiredGuardCount) {
window?.log?.error('Not enough nodes in the pool');
break;
}
const candidateNodes = shuffled.splice(0, desiredGuardCount);
if (attempts > 10) {
if (attempts > 5) {
// too many retries. something is wrong.
window.log.info(`selectGuardNodes stopping after attempts: ${attempts}`);
throw new Error(`selectGuardNodes stopping after attempts: ${attempts}`);
@ -361,122 +380,125 @@ export async function selectGuardNodes(): Promise<Array<Snode>> {
// Test all three nodes at once, wait for all to resolve or reject
// eslint-disable-next-line no-await-in-loop
const idxOk = (await Promise.allSettled(candidateNodes.map(testGuardNode))).flatMap(p =>
p.status === 'fulfilled' ? p.value : null
);
const idxOk = (
await Promise.allSettled(candidateNodes.map(OnionPaths.TEST_testGuardNode))
).flatMap(p => (p.status === 'fulfilled' ? p.value : null));
const goodNodes = _.zip(idxOk, candidateNodes)
.filter(x => x[0])
.map(x => x[1]) as Array<Snode>;
.map(x => x[1]) as Array<Data.Snode>;
selectedGuardNodes = _.concat(selectedGuardNodes, goodNodes);
attempts++;
}
if (selectedGuardNodes.length < desiredGuardCount) {
window?.log?.error(`Cound't get enough guard nodes, only have: ${guardNodes.length}`);
}
guardNodes = selectedGuardNodes;
if (guardNodes.length < desiredGuardCount) {
window?.log?.error(`Cound't get enough guard nodes, only have: ${guardNodes.length}`);
throw new Error(`Cound't get enough guard nodes, only have: ${guardNodes.length}`);
}
await internalUpdateGuardNodes(guardNodes);
return guardNodes;
}
async function buildNewOnionPathsWorker() {
window?.log?.info('LokiSnodeAPI::buildNewOnionPaths - building new onion paths...');
let allNodes = await SnodePool.getRandomSnodePool();
/**
* Fetches from db if needed the current guard nodes.
* If we do find in the snode pool (cached or got from seed in here) those guard nodes, use them.
* Otherwise select new guard nodes (might refetch from seed if needed).
*
* This function might throw
*
* This function will not try to fetch snodes from snodes. Only from seed.
* This is to avoid circular dependency of building new path needing new snodes, which needs a new path,...
*/
export async function getGuardNodeOrSelectNewOnes() {
if (guardNodes.length === 0) {
// Not cached, load from DB
const nodes = await getGuardNodes();
const guardNodesFromDb = await Data.getGuardNodes();
if (nodes.length === 0) {
if (guardNodesFromDb.length === 0) {
window?.log?.warn(
'LokiSnodeAPI::buildNewOnionPaths - no guard nodes in DB. Will be selecting new guards nodes...'
'LokiSnodeAPI::getGuardNodeOrSelectNewOnes - no guard nodes in DB. Will be selecting new guards nodes...'
);
} else {
const allNodes = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
// We only store the nodes' keys, need to find full entries:
const edKeys = nodes.map(x => x.ed25519PubKey);
const edKeys = guardNodesFromDb.map(x => x.ed25519PubKey);
guardNodes = allNodes.filter(x => edKeys.indexOf(x.pubkey_ed25519) !== -1);
if (guardNodes.length < edKeys.length) {
window?.log?.warn(
`LokiSnodeAPI::buildNewOnionPaths - could not find some guard nodes: ${guardNodes.length}/${edKeys.length} left`
`LokiSnodeAPI::getGuardNodeOrSelectNewOnes - could not find some guard nodes: ${guardNodes.length}/${edKeys.length} left`
);
}
}
}
// If guard nodes is still empty (the old nodes are now invalid), select new ones:
if (guardNodes.length < desiredGuardCount) {
try {
guardNodes = await exports.selectGuardNodes();
} catch (e) {
window.log.warn('selectGuardNodes throw error. Not retrying.', e);
return;
}
// if an error is thrown, the caller must take care of it.
guardNodes = await OnionPaths.selectGuardNodes();
}
// be sure to fetch again as that list might have been refreshed by selectGuardNodes
allNodes = await SnodePool.getRandomSnodePool();
window?.log?.info(
'LokiSnodeAPI::buildNewOnionPaths - after refetch, snodePool length:',
allNodes.length
);
// TODO: select one guard node and 2 other nodes randomly
let otherNodes = _.differenceBy(allNodes, guardNodes, 'pubkey_ed25519');
if (otherNodes.length <= SnodePool.minSnodePoolCount) {
window?.log?.warn(
'LokiSnodeAPI::buildNewOnionPaths - Too few nodes to build an onion path! Refreshing pool and retrying'
);
await SnodePool.refreshRandomPool();
// this is a recursive call limited to only one call at a time. we use the timeout
// here to make sure we retry this call if we cannot get enough otherNodes
// how to handle failing to rety
buildNewOnionPathsWorkerRetry = buildNewOnionPathsWorkerRetry + 1;
window?.log?.warn(
'buildNewOnionPathsWorker failed to get otherNodes. Current retry:',
buildNewOnionPathsWorkerRetry
);
if (buildNewOnionPathsWorkerRetry >= 3) {
// we failed enough. Something is wrong. Lets get out of that function and get a new fresh call.
window?.log?.warn(
`buildNewOnionPathsWorker failed to get otherNodes even after retries... Exiting after ${buildNewOnionPathsWorkerRetry} retries`
);
return;
} else {
window?.log?.info(
`buildNewOnionPathsWorker failed to get otherNodes. Next attempt: ${buildNewOnionPathsWorkerRetry}`
);
}
await buildNewOnionPathsWorker();
return;
}
otherNodes = _.shuffle(otherNodes);
const guards = _.shuffle(guardNodes);
// Create path for every guard node:
const nodesNeededPerPaths = ONION_REQUEST_HOPS - 1;
// Each path needs nodesNeededPerPaths nodes in addition to the guard node:
const maxPath = Math.floor(Math.min(guards.length, otherNodes.length / nodesNeededPerPaths));
window?.log?.info(
`Building ${maxPath} onion paths based on guard nodes length: ${guards.length}, other nodes length ${otherNodes.length} `
);
// TODO: might want to keep some of the existing paths
onionPaths = [];
for (let i = 0; i < maxPath; i += 1) {
const path = [guards[i]];
for (let j = 0; j < nodesNeededPerPaths; j += 1) {
path.push(otherNodes[i * nodesNeededPerPaths + j]);
}
onionPaths.push(path);
}
window?.log?.info(`Built ${onionPaths.length} onion paths`);
}
async function buildNewOnionPathsWorker() {
return pRetry(
async () => {
window?.log?.info('LokiSnodeAPI::buildNewOnionPaths - building new onion paths...');
// get an up to date list of snodes from cache, from db, or from the a seed node.
let allNodes = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
if (allNodes.length <= SnodePool.minSnodePoolCount) {
throw new Error(`Cannot rebuild path as we do not have enough snodes: ${allNodes.length}`);
}
// make sure we have enough guard nodes to build the paths
// this function will throw if for some reason we cannot do it
await OnionPaths.getGuardNodeOrSelectNewOnes();
// be sure to fetch again as that list might have been refreshed by selectGuardNodes
allNodes = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
window?.log?.info(`LokiSnodeAPI::buildNewOnionPaths, snodePool length: ${allNodes.length}`);
// get all snodes minus the selected guardNodes
if (allNodes.length <= SnodePool.minSnodePoolCount) {
throw new Error('Too few nodes to build an onion path. Even after fetching from seed.');
}
const otherNodes = _.shuffle(_.differenceBy(allNodes, guardNodes, 'pubkey_ed25519'));
const guards = _.shuffle(guardNodes);
// Create path for every guard node:
const nodesNeededPerPaths = ONION_REQUEST_HOPS - 1;
// Each path needs nodesNeededPerPaths nodes in addition to the guard node:
const maxPath = Math.floor(Math.min(guards.length, otherNodes.length / nodesNeededPerPaths));
window?.log?.info(
`Building ${maxPath} onion paths based on guard nodes length: ${guards.length}, other nodes length ${otherNodes.length} `
);
// TODO: might want to keep some of the existing paths
onionPaths = [];
for (let i = 0; i < maxPath; i += 1) {
const path = [guards[i]];
for (let j = 0; j < nodesNeededPerPaths; j += 1) {
path.push(otherNodes[i * nodesNeededPerPaths + j]);
}
onionPaths.push(path);
}
window?.log?.info(`Built ${onionPaths.length} onion paths`);
},
{
retries: 3, // 4 total
factor: 1,
minTimeout: 1000,
onFailedAttempt: e => {
window?.log?.warn(
`buildNewOnionPathsWorker attemp #${e.attemptNumber} failed. ${e.retriesLeft} retries left... Error: ${e.message}`
);
},
}
);
}

View File

@ -72,7 +72,7 @@ const buildSendViaOnionPayload = (url: URL, fetchOptions: OnionFetchOptions): On
export const getOnionPathForSending = async () => {
let pathNodes: Array<Snode> = [];
try {
pathNodes = await OnionPaths.getOnionPath();
pathNodes = await OnionPaths.getOnionPath({});
} catch (e) {
window?.log?.error(`sendViaOnion - getOnionPath Error ${e.code} ${e.message}`);
}
@ -92,39 +92,6 @@ const initOptionsWithDefaults = (options: OnionFetchBasicOptions) => {
return _.defaults(options, defaultFetchBasicOptions);
};
const sendViaOnionToNonSnodeRetryable = async ({
castedDestinationX25519Key,
finalRelayOptions,
payloadObj,
abortSignal,
}: {
castedDestinationX25519Key: string;
finalRelayOptions: FinalRelayOptions;
payloadObj: OnionPayloadObj;
abortSignal?: AbortSignal;
}) => {
const pathNodes = await getOnionPathForSending();
if (!pathNodes) {
throw new Error('getOnionPathForSending is emtpy');
}
/**
* This call handles ejecting a snode or a path if needed. If that happens, it throws a retryable error and the pRetry
* call above will call us again with the same params but a different path.
* If the error is not recoverable, it throws a pRetry.AbortError.
*/
const result: SnodeResponse = await sendOnionRequestHandlingSnodeEject({
nodePath: pathNodes,
destX25519Any: castedDestinationX25519Key,
finalDestOptions: payloadObj,
finalRelayOptions,
abortSignal,
});
return result;
};
/**
*
* This function can be used to make a request via onion to a non snode server.
@ -173,18 +140,28 @@ export const sendViaOnionToNonSnode = async (
try {
result = await pRetry(
async () => {
return sendViaOnionToNonSnodeRetryable({
castedDestinationX25519Key,
const pathNodes = await getOnionPathForSending();
if (!pathNodes) {
throw new Error('getOnionPathForSending is emtpy');
}
/**
* This call handles ejecting a snode or a path if needed. If that happens, it throws a retryable error and the pRetry
* call above will call us again with the same params but a different path.
* If the error is not recoverable, it throws a pRetry.AbortError.
*/
return sendOnionRequestHandlingSnodeEject({
nodePath: pathNodes,
destX25519Any: castedDestinationX25519Key,
finalDestOptions: payloadObj,
finalRelayOptions,
payloadObj,
abortSignal,
});
},
{
retries: 4, // each path can fail 3 times before being dropped, we have 3 paths at most
factor: 1,
minTimeout: 100,
maxTimeout: 4000,
retries: 2, // retry 3 (2+1) times at most
minTimeout: 500,
onFailedAttempt: e => {
window?.log?.warn(
`sendViaOnionToNonSnodeRetryable attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`
@ -193,7 +170,7 @@ export const sendViaOnionToNonSnode = async (
}
);
} catch (e) {
window?.log?.warn('sendViaOnionToNonSnodeRetryable failed ', e);
window?.log?.warn('sendViaOnionToNonSnodeRetryable failed ', e.message);
return null;
}

View File

@ -0,0 +1,306 @@
import { Constants } from '..';
import { default as insecureNodeFetch } from 'node-fetch';
import https from 'https';
import _ from 'lodash';
import fs from 'fs';
import path from 'path';
import tls from 'tls';
import Electron from 'electron';
import { sha256 } from '../crypto';
import * as Data from '../../../ts/data/data';
import pRetry from 'p-retry';
import { SeedNodeAPI } from '.';
const { remote } = Electron;
// tslint:disable: function-name
// A seed node is identified solely by its URL string; it is parsed with
// `new URL(...)` before use (see TEST_fetchSnodePoolFromSeedNodeRetryable).
export type SeedNode = {
  url: string;
};
/**
 * Fetch all snodes from seed nodes.
 * Exported only for tests. This is not to be used by the app directly.
 *
 * On any failure the underlying error is logged and replaced by a generic
 * 'Failed to contact seed node' error.
 * @param seedNodes the seednodes to use to fetch snodes details
 */
export async function fetchSnodePoolFromSeedNodeWithRetries(
  seedNodes: Array<SeedNode>
): Promise<Array<Data.Snode>> {
  try {
    window?.log?.info(`fetchSnodePoolFromSeedNode with seedNodes.length ${seedNodes.length}`);

    const rawSnodes = await getSnodeListFromSeednode(seedNodes);
    // make sure order of the list is random, so we get version in a non-deterministic way
    const shuffled = _.shuffle(rawSnodes);

    // commit changes to be live
    // we'll update the version (in case they upgrade) every cycle
    const fetchSnodePool = shuffled.map(({ public_ip, storage_port, pubkey_x25519, pubkey_ed25519 }) => ({
      ip: public_ip,
      port: storage_port,
      pubkey_x25519,
      pubkey_ed25519,
    }));

    window?.log?.info(
      'SeedNodeAPI::fetchSnodePoolFromSeedNodeWithRetries - Refreshed random snode pool with',
      shuffled.length,
      'snodes'
    );
    return fetchSnodePool;
  } catch (e) {
    window?.log?.warn(
      'LokiSnodeAPI::fetchSnodePoolFromSeedNodeWithRetries - error',
      e.code,
      e.message
    );

    throw new Error('Failed to contact seed node');
  }
}
/**
 * Build an https.Agent doing certificate pinning for a known seed node host,
 * or return undefined for plain-http seeds (no pinning possible there).
 * Throws for any host not in the pinned list.
 */
const getSslAgentForSeedNode = (seedNodeHost: string, isSsl = false) => {
  if (!isSsl) {
    return undefined;
  }

  // Pinning data per known seed node host: local certificate file prefix,
  // sha256 of the public key, and sha256 fingerprint of the exact certificate.
  const pinningByHost: Record<string, { filePrefix: string; pubkey256: string; cert256: string }> = {
    'storage.seed1.loki.network': {
      filePrefix: 'storage-seed-1',
      pubkey256: 'JOsnIcAanVbgECNA8lHtC8f/cqN9m8EP7jKT6XCjeL8=',
      cert256:
        '6E:2B:AC:F3:6E:C1:FF:FF:24:F3:CA:92:C6:94:81:B4:82:43:DF:C7:C6:03:98:B8:F5:6B:7D:30:7B:16:C1:CB',
    },
    'storage.seed3.loki.network': {
      filePrefix: 'storage-seed-3',
      pubkey256: 'mMmZD3lG4Fi7nTC/EWzRVaU3bbCLsH6Ds2FHSTpo0Rk=',
      cert256:
        '24:13:4C:0A:03:D8:42:A6:09:DE:35:76:F4:BD:FB:11:60:DB:F9:88:9F:98:46:B7:60:A6:60:0C:4C:CF:60:72',
    },
    'public.loki.foundation': {
      filePrefix: 'public-loki-foundation',
      pubkey256: 'W+Zv52qlcm1BbdpJzFwxZrE7kfmEboq7h3Dp/+Q3RPg=',
      cert256:
        '40:E4:67:7D:18:6B:4D:08:8D:E9:D5:47:52:25:B8:28:E0:D3:63:99:9B:38:46:7D:92:19:5B:61:B9:AE:0E:EA',
    },
  };

  const pinning = pinningByHost[seedNodeHost];
  if (!pinning) {
    throw new Error(`Unknown seed node: ${seedNodeHost}`);
  }
  const { filePrefix, pubkey256, cert256 } = pinning;

  // tslint:disable: non-literal-fs-path
  // read the cert each time. We only run this request once for each seed node nevertheless.
  const appPath = remote.app.getAppPath();
  const crt = fs.readFileSync(path.join(appPath, `/certificates/${filePrefix}.crt`), 'utf-8');

  // we're creating a new Agent that will now use the certs we have configured
  return new https.Agent({
    // as the seed nodes are using a self signed certificate, we have to provide it here.
    ca: crt,
    // we have to reject them, otherwise our errors returned in the checkServerIdentity are simply not making the call fail.
    // so in production, rejectUnauthorized must be true.
    rejectUnauthorized: true,
    keepAlive: false,
    checkServerIdentity: (host: string, cert: any) => {
      // Make sure the certificate is issued to the host we are connected to
      const hostnameError = tls.checkServerIdentity(host, cert);
      if (hostnameError) {
        return hostnameError;
      }

      // Pin the public key, similar to HPKP pin-sha25 pinning
      if (sha256(cert.pubkey) !== pubkey256) {
        return new Error(
          `Certificate verification error: The public key of '${cert.subject.CN}' does not match our pinned fingerprint`
        );
      }

      // Pin the exact certificate, rather than the pub key
      if (cert.fingerprint256 !== cert256) {
        return new Error(
          `Certificate verification error: The certificate of '${cert.subject.CN}' does not match our pinned fingerprint`
        );
      }
      return undefined;
    },
  });
};
// Shape of one service-node entry as returned by the seed node's
// get_n_service_nodes RPC (the fields requested in getSnodesFromSeedUrl).
export interface SnodeFromSeed {
  // public IP of the snode; '0.0.0.0' entries (no uptime proof yet) are filtered out
  public_ip: string;
  // storage server port to contact the snode on
  storage_port: number;
  // x25519 public key of the snode
  pubkey_x25519: string;
  // ed25519 public key of the snode
  pubkey_ed25519: string;
}
/**
 * This call will try 4 times to contact a seed node (random) and get the snode list from it.
 * If all attempts fail, this function will throw the last error.
 * The returned list is not shuffled when returned.
 */
async function getSnodeListFromSeednode(seedNodes: Array<SeedNode>): Promise<Array<SnodeFromSeed>> {
  const SEED_NODE_RETRIES = 4;

  // one attempt; the empty-seed guard stays inside so it is re-checked per retry
  const fetchOnce = async (): Promise<Array<SnodeFromSeed>> => {
    window?.log?.info('getSnodeListFromSeednode starting...');
    if (!seedNodes.length) {
      window?.log?.info('loki_snode_api::getSnodeListFromSeednode - seedNodes are empty');
      throw new Error('getSnodeListFromSeednode - seedNodes are empty');
    }
    // do not try/catch, we do want exception to bubble up so pRetry, well, retries
    return SeedNodeAPI.TEST_fetchSnodePoolFromSeedNodeRetryable(seedNodes);
  };

  return pRetry(fetchOnce, {
    retries: SEED_NODE_RETRIES - 1,
    factor: 2,
    minTimeout: SeedNodeAPI.getMinTimeout(),
    onFailedAttempt: e => {
      window?.log?.warn(
        `fetchSnodePoolFromSeedNodeRetryable attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left... Error: ${e.message}`
      );
    },
  });
}
/**
 * Minimum backoff (ms) between seed node fetch retries.
 * Kept as a function (rather than a constant) so tests can stub it out.
 */
export function getMinTimeout() {
  const MIN_RETRY_TIMEOUT_MS = 1000;
  return MIN_RETRY_TIMEOUT_MS;
}
/**
 * This function chooses randomly a seed node from seedNodes and tries to get the snodes from it, or throws.
 * This function is to be used with a pRetry caller.
 */
export async function TEST_fetchSnodePoolFromSeedNodeRetryable(
  seedNodes: Array<SeedNode>
): Promise<Array<SnodeFromSeed>> {
  window?.log?.info('fetchSnodePoolFromSeedNodeRetryable starting...');

  if (!seedNodes.length) {
    window?.log?.info('loki_snode_api::fetchSnodePoolFromSeedNodeRetryable - seedNodes are empty');
    throw new Error('fetchSnodePoolFromSeedNodeRetryable: Seed nodes are empty');
  }

  const chosenSeedNode = _.sample(seedNodes);
  if (!chosenSeedNode) {
    window?.log?.warn(
      'loki_snode_api::fetchSnodePoolFromSeedNodeRetryable - Could not select random snodes from',
      seedNodes
    );
    throw new Error('fetchSnodePoolFromSeedNodeRetryable: Seed nodes are empty #2');
  }

  const fetchedSnodes = await getSnodesFromSeedUrl(new URL(chosenSeedNode.url));
  if (!fetchedSnodes.length) {
    window?.log?.warn(
      `loki_snode_api::fetchSnodePoolFromSeedNodeRetryable - ${chosenSeedNode.url} did not return any snodes`
    );
    throw new Error(`Failed to contact seed node: ${chosenSeedNode.url}`);
  }

  return fetchedSnodes;
}
/**
 * Try to get the snode list from the given seed node URL, or throws.
 * This function throws for whatever reason might happen (timeout, invalid response, 0 valid snodes returned, ...).
 * This function is to be used inside a pRetry function.
 */
async function getSnodesFromSeedUrl(urlObj: URL): Promise<Array<any>> {
  // Removed limit until there is a way to get snode info
  // for individual nodes (needed for guard nodes); this way
  // we get all active nodes
  window?.log?.info(`getSnodesFromSeedUrl starting with ${urlObj.href}`);

  const params = {
    active_only: true,
    fields: {
      public_ip: true,
      storage_port: true,
      pubkey_x25519: true,
      pubkey_ed25519: true,
    },
  };

  const endpoint = 'json_rpc';
  const url = `${urlObj.href}${endpoint}`;
  const body = {
    jsonrpc: '2.0',
    id: '0',
    method: 'get_n_service_nodes',
    params,
  };

  // pin the certificate when talking to the seed over https
  const sslAgent = getSslAgentForSeedNode(
    urlObj.hostname,
    urlObj.protocol !== Constants.PROTOCOLS.HTTP
  );

  const fetchOptions = {
    method: 'POST',
    timeout: 5000,
    body: JSON.stringify(body),
    headers: {
      'User-Agent': 'WhatsApp',
      'Accept-Language': 'en-us',
    },
    agent: sslAgent,
  };
  window?.log?.info('insecureNodeFetch => plaintext for getSnodesFromSeedUrl');

  const response = await insecureNodeFetch(url, fetchOptions);

  if (response.status !== 200) {
    window?.log?.error(
      `loki_snode_api:::getSnodesFromSeedUrl - invalid response from seed ${urlObj.toString()}:`,
      response
    );
    throw new Error(
      `getSnodesFromSeedUrl: status is not 200 ${response.status} from ${urlObj.href}`
    );
  }

  if (response.headers.get('Content-Type') !== 'application/json') {
    window?.log?.error('Response is not json');
    throw new Error(`getSnodesFromSeedUrl: response is not json Content-Type from ${urlObj.href}`);
  }

  // FIX: guard only the JSON parsing itself. Previously the whole result
  // handling lived inside this try/catch, so the deliberate
  // "json.result is empty" and "Did not get a single valid snode" errors
  // below were swallowed and re-thrown as a misleading
  // "cannot parse content as JSON" error (with a bogus 'Invalid json
  // response' log even though the body parsed fine).
  let json: any;
  try {
    json = await response.json();
  } catch (e) {
    window?.log?.error('Invalid json response');
    throw new Error(`getSnodesFromSeedUrl: cannot parse content as JSON from ${urlObj.href}`);
  }

  const result = json.result;
  if (!result) {
    window?.log?.error(
      `loki_snode_api:::getSnodesFromSeedUrl - invalid result from seed ${urlObj.toString()}:`,
      response
    );
    throw new Error(`getSnodesFromSeedUrl: json.result is empty from ${urlObj.href}`);
  }

  // Filter 0.0.0.0 nodes which haven't submitted uptime proofs
  const validNodes = result.service_node_states.filter(
    (snode: any) => snode.public_ip !== '0.0.0.0'
  );

  if (validNodes.length === 0) {
    throw new Error(`Did not get a single valid snode from ${urlObj.href}`);
  }
  return validNodes;
}

View File

@ -0,0 +1,3 @@
import * as SeedNodeAPI from './SeedNodeAPI';
export { SeedNodeAPI };

View File

@ -1,24 +1,12 @@
// we don't throw or catch here
import { default as insecureNodeFetch } from 'node-fetch';
import https from 'https';
import fs from 'fs';
import path from 'path';
import tls from 'tls';
import Electron from 'electron';
const { remote } = Electron;
import { snodeRpc } from './lokiRpc';
import {
getRandomSnode,
getRandomSnodePool,
getSwarmFor,
minSnodePoolCount,
requiredSnodesForAgreement,
} from './snodePool';
import { Constants } from '..';
import { getSodium, sha256 } from '../crypto';
import { getSodium } from '../crypto';
import _, { range } from 'lodash';
import pRetry from 'p-retry';
import {
@ -32,6 +20,7 @@ import { Snode } from '../../data/data';
import { updateIsOnline } from '../../state/ducks/onion';
import { ed25519Str } from '../onions/onionPath';
import { StringUtils, UserUtils } from '../utils';
import { SnodePool } from '.';
// ONS name can have [a-zA-Z0-9_-] except that - is not allowed as start or end
// do not define a regex but rather create it on the fly to avoid https://stackoverflow.com/questions/3891641/regex-test-only-works-every-other-time
@ -39,161 +28,6 @@ export const onsNameRegex = '^\\w([\\w-]*[\\w])?$';
export const ERROR_CODE_NO_CONNECT = 'ENETUNREACH: No network connection.';
const getSslAgentForSeedNode = (seedNodeHost: string, isSsl = false) => {
let filePrefix = '';
let pubkey256 = '';
let cert256 = '';
if (!isSsl) {
return undefined;
}
switch (seedNodeHost) {
case 'storage.seed1.loki.network':
filePrefix = 'storage-seed-1';
pubkey256 = 'JOsnIcAanVbgECNA8lHtC8f/cqN9m8EP7jKT6XCjeL8=';
cert256 =
'6E:2B:AC:F3:6E:C1:FF:FF:24:F3:CA:92:C6:94:81:B4:82:43:DF:C7:C6:03:98:B8:F5:6B:7D:30:7B:16:C1:CB';
break;
case 'storage.seed3.loki.network':
filePrefix = 'storage-seed-3';
pubkey256 = 'mMmZD3lG4Fi7nTC/EWzRVaU3bbCLsH6Ds2FHSTpo0Rk=';
cert256 =
'24:13:4C:0A:03:D8:42:A6:09:DE:35:76:F4:BD:FB:11:60:DB:F9:88:9F:98:46:B7:60:A6:60:0C:4C:CF:60:72';
break;
case 'public.loki.foundation':
filePrefix = 'public-loki-foundation';
pubkey256 = 'W+Zv52qlcm1BbdpJzFwxZrE7kfmEboq7h3Dp/+Q3RPg=';
cert256 =
'40:E4:67:7D:18:6B:4D:08:8D:E9:D5:47:52:25:B8:28:E0:D3:63:99:9B:38:46:7D:92:19:5B:61:B9:AE:0E:EA';
break;
default:
throw new Error(`Unknown seed node: ${seedNodeHost}`);
}
// tslint:disable: non-literal-fs-path
// read the cert each time. We only run this request once for each seed node nevertheless.
const appPath = remote.app.getAppPath();
const crt = fs.readFileSync(path.join(appPath, `/certificates/${filePrefix}.crt`), 'utf-8');
const sslOptions = {
// as the seed nodes are using a self signed certificate, we have to provide it here.
ca: crt,
// we have to reject them, otherwise our errors returned in the checkServerIdentity are simply not making the call fail.
// so in production, rejectUnauthorized must be true.
rejectUnauthorized: true,
keepAlive: false,
checkServerIdentity: (host: string, cert: any) => {
// Make sure the certificate is issued to the host we are connected to
const err = tls.checkServerIdentity(host, cert);
if (err) {
return err;
}
// Pin the public key, similar to HPKP pin-sha25 pinning
if (sha256(cert.pubkey) !== pubkey256) {
const msg =
'Certificate verification error: ' +
`The public key of '${cert.subject.CN}' ` +
'does not match our pinned fingerprint';
return new Error(msg);
}
// Pin the exact certificate, rather than the pub key
if (cert.fingerprint256 !== cert256) {
const msg =
'Certificate verification error: ' +
`The certificate of '${cert.subject.CN}' ` +
'does not match our pinned fingerprint';
return new Error(msg);
}
return undefined;
},
};
// we're creating a new Agent that will now use the certs we have configured
return new https.Agent(sslOptions);
};
export async function getSnodesFromSeedUrl(urlObj: URL): Promise<Array<any>> {
// Removed limit until there is a way to get snode info
// for individual nodes (needed for guard nodes); this way
// we get all active nodes
window?.log?.info(`getSnodesFromSeedUrl starting with ${urlObj.href}`);
const params = {
active_only: true,
fields: {
public_ip: true,
storage_port: true,
pubkey_x25519: true,
pubkey_ed25519: true,
},
};
const endpoint = 'json_rpc';
const url = `${urlObj.href}${endpoint}`;
const body = {
jsonrpc: '2.0',
id: '0',
method: 'get_n_service_nodes',
params,
};
const sslAgent = getSslAgentForSeedNode(
urlObj.hostname,
urlObj.protocol !== Constants.PROTOCOLS.HTTP
);
const fetchOptions = {
method: 'POST',
timeout: 5000,
body: JSON.stringify(body),
headers: {
'User-Agent': 'WhatsApp',
'Accept-Language': 'en-us',
},
agent: sslAgent,
};
window?.log?.info('insecureNodeFetch => plaintext for getSnodesFromSeedUrl');
const response = await insecureNodeFetch(url, fetchOptions);
if (response.status !== 200) {
window?.log?.error(
`loki_snode_api:::getSnodesFromSeedUrl - invalid response from seed ${urlObj.toString()}:`,
response
);
return [];
}
if (response.headers.get('Content-Type') !== 'application/json') {
window?.log?.error('Response is not json');
return [];
}
try {
const json = await response.json();
// TODO: validate that all of the fields are present?
const result = json.result;
if (!result) {
window?.log?.error(
`loki_snode_api:::getSnodesFromSeedUrl - invalid result from seed ${urlObj.toString()}:`,
response
);
return [];
}
// Filter 0.0.0.0 nodes which haven't submitted uptime proofs
return result.service_node_states.filter((snode: any) => snode.public_ip !== '0.0.0.0');
} catch (e) {
window?.log?.error('Invalid json response');
return [];
}
}
export type SendParams = {
pubKey: string;
ttl: string;
@ -210,7 +44,12 @@ async function requestSnodesForPubkeyWithTargetNodeRetryable(
const params = {
pubKey,
};
const result = await snodeRpc('get_snodes_for_pubkey', params, targetNode, pubKey);
const result = await snodeRpc({
method: 'get_snodes_for_pubkey',
params,
targetNode,
associatedWith: pubKey,
});
if (!result) {
window?.log?.warn(
@ -328,7 +167,7 @@ export async function getSessionIDForOnsName(onsNameCase: string) {
// we do this request with validationCount snodes
const promises = range(0, validationCount).map(async () => {
const targetNode = await getRandomSnode();
const result = await snodeRpc('oxend_request', params, targetNode);
const result = await snodeRpc({ method: 'oxend_request', params, targetNode });
if (!result || result.status !== 200 || !result.body) {
throw new Error('ONSresolve:Failed to resolve ONS');
}
@ -436,22 +275,36 @@ export async function getSessionIDForOnsName(onsNameCase: string) {
/**
* Try to fetch from 3 different snodes an updated list of snodes.
* If we get less than 24 common snodes in those result, we consider the request to failed and an exception is thrown.
* The three snode we make the request to is randomized.
* This function is to be called with a pRetry so that if one snode does not reply anything, another might be choose next time.
* Return the list of nodes all snodes agreed on.
*/
export async function getSnodePoolFromSnodes() {
const existingSnodePool = await getRandomSnodePool();
if (existingSnodePool.length < 3) {
window?.log?.warn('cannot get snodes from snodes; not enough snodes', existingSnodePool.length);
return;
const existingSnodePool = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
if (existingSnodePool.length <= minSnodePoolCount) {
window?.log?.warn(
'getSnodePoolFromSnodes: Cannot get snodes list from snodes; not enough snodes',
existingSnodePool.length
);
throw new Error(
`Cannot get snodes list from snodes; not enough snodes even after refetching from seed', ${existingSnodePool.length}`
);
}
// Note intersectionWith only works with 3 at most array to find the common snodes.
const nodesToRequest = _.sampleSize(existingSnodePool, 3);
const results = await Promise.all(
nodesToRequest.map(async node => {
// this call is already retried if the snode does not reply
// at least when onion requests enabled
return getSnodePoolFromSnode(node);
/**
* this call is already retried if the snode does not reply
* (at least when onion requests are enabled)
* this request might want to rebuild a path if the snode length gets < minSnodePoolCount during the
* retries, so we need to make sure this does not happen.
*
* Remember that here, we are trying to fetch from snodes the updated list of snodes to rebuild a path.
* If we don't disable rebuilding a path below, this gets to a chicken and egg problem.
*/
return TEST_getSnodePoolFromSnode(node);
})
);
@ -466,21 +319,24 @@ export async function getSnodePoolFromSnodes() {
);
// We want the snodes to agree on at least this many snodes
if (commonSnodes.length < requiredSnodesForAgreement) {
throw new Error('inconsistentSnodePools');
throw new Error(
`Inconsistent snode pools. We did not get at least ${requiredSnodesForAgreement} in common`
);
}
return commonSnodes;
}
/**
* Returns a list of uniq snodes got from the specified targetNode.
* Returns a list of unique snodes got from the specified targetNode.
* This function won't try to rebuild a path if at some point we don't have enough snodes.
* This is exported for testing purpose only
*/
export async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Snode>> {
// tslint:disable-next-line: function-name
export async function TEST_getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Snode>> {
const params = {
endpoint: 'get_service_nodes',
params: {
active_only: true,
// limit: 256,
fields: {
public_ip: true,
storage_port: true,
@ -489,7 +345,11 @@ export async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Sn
},
},
};
const result = await snodeRpc('oxend_request', params, targetNode);
const result = await snodeRpc({
method: 'oxend_request',
params,
targetNode,
});
if (!result || result.status !== 200) {
throw new Error('Invalid result');
}
@ -499,7 +359,7 @@ export async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Sn
if (!json || !json.result || !json.result.service_node_states?.length) {
window?.log?.error(
'loki_snode_api:::getSnodePoolFromSnode - invalid result from seed',
'loki_snode_api:::getSnodePoolFromSnode - invalid result from snode',
result.body
);
return [];
@ -513,7 +373,6 @@ export async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Sn
port: snode.storage_port,
pubkey_x25519: snode.pubkey_x25519,
pubkey_ed25519: snode.pubkey_ed25519,
version: '',
})) as Array<Snode>;
// the list returned by the snode is already made of unique snodes
@ -528,7 +387,12 @@ export async function storeOnNode(targetNode: Snode, params: SendParams): Promis
try {
// no retry here. If an issue is with the path this is handled in lokiOnionFetch
// if there is an issue with the targetNode, we still send a few times this request to a few snodes in // already so it's handled
const result = await snodeRpc('store', params, targetNode, params.pubKey);
const result = await snodeRpc({
method: 'store',
params,
targetNode,
associatedWith: params.pubKey,
});
if (!result || result.status !== 200) {
return false;
@ -558,7 +422,7 @@ export async function retrieveNextMessages(
// let exceptions bubble up
// no retry for this one as this a call we do every few seconds while polling for messages
const result = await snodeRpc('retrieve', params, targetNode, associatedWith);
const result = await snodeRpc({ method: 'retrieve', params, targetNode, associatedWith });
if (!result) {
window?.log?.warn(
@ -601,7 +465,7 @@ export async function retrieveNextMessages(
*/
// tslint:disable-next-line: variable-name
export const TEST_getNetworkTime = async (snode: Snode): Promise<string | number> => {
const response: any = await snodeRpc('info', {}, snode);
const response: any = await snodeRpc({ method: 'info', params: {}, targetNode: snode });
const body = JSON.parse(response.body);
const timestamp = body?.timestamp;
if (!timestamp) {
@ -650,12 +514,12 @@ export const forceNetworkDeletion = async (): Promise<Array<string> | null> => {
timestamp,
signature: signatureBase64,
};
const ret = await snodeRpc(
'delete_all',
deleteMessageParams,
snodeToMakeRequestTo,
userX25519PublicKey
);
const ret = await snodeRpc({
method: 'delete_all',
params: deleteMessageParams,
targetNode: snodeToMakeRequestTo,
associatedWith: userX25519PublicKey,
});
if (!ret) {
throw new Error(

View File

@ -14,13 +14,17 @@ interface FetchOptions {
* A small wrapper around node-fetch which deserializes response
* returns insecureNodeFetch response or false
*/
async function lokiFetch(
url: string,
options: FetchOptions,
targetNode?: Snode,
associatedWith?: string,
test?: string
): Promise<undefined | SnodeResponse> {
async function lokiFetch({
options,
url,
associatedWith,
targetNode,
}: {
url: string;
options: FetchOptions;
targetNode?: Snode;
associatedWith?: string;
}): Promise<undefined | SnodeResponse> {
const timeout = 10000;
const method = options.method || 'GET';
@ -38,7 +42,11 @@ async function lokiFetch(
? true
: window.lokiFeatureFlags?.useOnionRequests;
if (useOnionRequests && targetNode) {
const fetchResult = await lokiOnionFetch(targetNode, fetchOptions.body, associatedWith, test);
const fetchResult = await lokiOnionFetch({
targetNode,
body: fetchOptions.body,
associatedWith,
});
if (!fetchResult) {
return undefined;
}
@ -84,10 +92,17 @@ async function lokiFetch(
* The
*/
export async function snodeRpc(
method: string,
params: any,
targetNode: Snode,
associatedWith?: string //the user pubkey this call is for. if the onion request fails, this is used to handle the error for this user swarm for instance
{
method,
params,
targetNode,
associatedWith,
}: {
method: string;
params: any;
targetNode: Snode;
associatedWith?: string;
} //the user pubkey this call is for. if the onion request fails, this is used to handle the error for this user swarm for instance
): Promise<undefined | SnodeResponse> {
const url = `https://${targetNode.ip}:${targetNode.port}/storage_rpc/v1`;
@ -115,5 +130,10 @@ export async function snodeRpc(
},
};
return lokiFetch(url, fetchOptions, targetNode, associatedWith, method);
return lokiFetch({
url,
options: fetchOptions,
targetNode,
associatedWith,
});
}

View File

@ -14,6 +14,7 @@ let snodeFailureCount: Record<string, number> = {};
import { Snode } from '../../data/data';
import { ERROR_CODE_NO_CONNECT } from './SNodeAPI';
import { Onions } from '.';
export const resetSnodeFailureCount = () => {
snodeFailureCount = {};
@ -107,6 +108,9 @@ async function buildOnionCtxs(
finalRelayOptions?: FinalRelayOptions
) {
const ctxes = [destCtx];
if (!nodePath) {
throw new Error('buildOnionCtxs needs a valid path');
}
// from (3) 2 to 0
const firstPos = nodePath.length - 1;
@ -194,6 +198,9 @@ async function buildOnionGuardNodePayload(
return encodeCiphertextPlusJson(guardCtx.ciphertext, guardPayloadObj);
}
/**
* 406 is a clock out of sync error
*/
function process406Error(statusCode: number) {
if (statusCode === 406) {
// clock out of sync
@ -209,17 +216,18 @@ function processOxenServerError(_statusCode: number, body?: string) {
}
}
/**
* 421 is a invalid swarm error
*/
async function process421Error(
statusCode: number,
body: string,
guardNodeEd25519: string,
associatedWith?: string,
lsrpcEd25519Key?: string
) {
if (statusCode === 421) {
await handle421InvalidSwarm({
snodeEd25519: lsrpcEd25519Key,
guardNodeEd25519,
body,
associatedWith,
});
@ -235,13 +243,11 @@ async function process421Error(
async function processOnionRequestErrorAtDestination({
statusCode,
body,
guardNodeEd25519,
destinationEd25519,
associatedWith,
}: {
statusCode: number;
body: string;
guardNodeEd25519: string;
destinationEd25519?: string;
associatedWith?: string;
}) {
@ -251,56 +257,58 @@ async function processOnionRequestErrorAtDestination({
window?.log?.info('processOnionRequestErrorAtDestination. statusCode nok:', statusCode);
process406Error(statusCode);
await process421Error(statusCode, body, guardNodeEd25519, associatedWith, destinationEd25519);
await process421Error(statusCode, body, associatedWith, destinationEd25519);
processOxenServerError(statusCode, body);
if (destinationEd25519) {
await processAnyOtherErrorAtDestination(
statusCode,
body,
guardNodeEd25519,
destinationEd25519,
associatedWith
);
await processAnyOtherErrorAtDestination(statusCode, body, destinationEd25519, associatedWith);
}
}
async function handleNodeNotFound({
ed25519NotFound,
associatedWith,
}: {
ed25519NotFound: string;
associatedWith?: string;
}) {
const shortNodeNotFound = ed25519Str(ed25519NotFound);
window?.log?.warn('Handling NODE NOT FOUND with: ', shortNodeNotFound);
if (associatedWith) {
await dropSnodeFromSwarmIfNeeded(associatedWith, ed25519NotFound);
}
await dropSnodeFromSnodePool(ed25519NotFound);
snodeFailureCount[ed25519NotFound] = 0;
// try to remove the not found snode from any of the paths if it's there.
// it may not be here, as the snode note found might be the target snode of the request.
await OnionPaths.dropSnodeFromPath(ed25519NotFound);
}
async function processAnyOtherErrorOnPath(
status: number,
guardNodeEd25519: string,
ciphertext?: string,
associatedWith?: string
) {
// this test checks for on error in your path.
if (
// response.status === 502 ||
// response.status === 503 ||
// response.status === 504 ||
// response.status === 404 ||
status !== 200 // this is pretty strong. a 400 (Oxen server error) will be handled as a bad path.
) {
// this test checks for an error in your path.
if (status !== 200) {
window?.log?.warn(`[path] Got status: ${status}`);
//
let nodeNotFound;
// If we have a specific node in fault we can exclude just this node.
if (ciphertext?.startsWith(NEXT_NODE_NOT_FOUND_PREFIX)) {
nodeNotFound = ciphertext.substr(NEXT_NODE_NOT_FOUND_PREFIX.length);
const nodeNotFound = ciphertext.substr(NEXT_NODE_NOT_FOUND_PREFIX.length);
// we are checking errors on the path, a nodeNotFound on the path should trigger a rebuild
await handleNodeNotFound({ ed25519NotFound: nodeNotFound, associatedWith });
} else {
// Otherwise we increment the whole path failure count
await incrementBadPathCountOrDrop(guardNodeEd25519);
}
processOxenServerError(status, ciphertext);
// If we have a specific node in fault we can exclude just this node.
// Otherwise we increment the whole path failure count
if (nodeNotFound) {
window?.log?.warn('node not found error with: ', ed25519Str(nodeNotFound));
await exports.incrementBadSnodeCountOrDrop({
snodeEd25519: nodeNotFound,
guardNodeEd25519,
associatedWith,
});
// we are checking errors on the path, a nodeNotFound on the path should trigger a rebuild
} else {
await incrementBadPathCountOrDrop(guardNodeEd25519);
}
throw new Error(`Bad Path handled. Retry this request. Status: ${status}`);
}
}
@ -308,7 +316,6 @@ async function processAnyOtherErrorOnPath(
async function processAnyOtherErrorAtDestination(
status: number,
body: string,
guardNodeEd25519: string,
destinationEd25519: string,
associatedWith?: string
) {
@ -320,31 +327,23 @@ async function processAnyOtherErrorAtDestination(
) {
window?.log?.warn(`[path] Got status at destination: ${status}`);
let nodeNotFound;
if (body?.startsWith(NEXT_NODE_NOT_FOUND_PREFIX)) {
nodeNotFound = body.substr(NEXT_NODE_NOT_FOUND_PREFIX.length);
const nodeNotFound = body.substr(NEXT_NODE_NOT_FOUND_PREFIX.length);
// if we get a nodeNotFound at the destination. it means the targetNode to which we made the request is not found.
await handleNodeNotFound({
ed25519NotFound: nodeNotFound,
associatedWith,
});
if (nodeNotFound) {
await exports.incrementBadSnodeCountOrDrop({
snodeEd25519: destinationEd25519,
guardNodeEd25519,
associatedWith,
});
// if we get a nodeNotFound at the desitnation. it means the targetNode to which we made the request is not found.
// We have to retry with another targetNode so it's not just rebuilding the path. We have to go one lever higher (lokiOnionFetch).
// status is 502 for a node not found
throw new pRetry.AbortError(
`Bad Path handled. Retry this request with another targetNode. Status: ${status}`
);
}
// We have to retry with another targetNode so it's not just rebuilding the path. We have to go one lever higher (lokiOnionFetch).
// status is 502 for a node not found
throw new pRetry.AbortError(
`Bad Path handled. Retry this request with another targetNode. Status: ${status}`
);
}
// If we have a specific node in fault we can exclude just this node.
// Otherwise we increment the whole path failure count
// if (nodeNotFound) {
await exports.incrementBadSnodeCountOrDrop({
await Onions.incrementBadSnodeCountOrDrop({
snodeEd25519: destinationEd25519,
guardNodeEd25519,
associatedWith,
});
@ -363,13 +362,7 @@ async function processOnionRequestErrorOnPath(
window?.log?.warn('errorONpath:', ciphertext);
}
process406Error(httpStatusCode);
await process421Error(
httpStatusCode,
ciphertext,
guardNodeEd25519,
associatedWith,
lsrpcEd25519Key
);
await process421Error(httpStatusCode, ciphertext, associatedWith, lsrpcEd25519Key);
await processAnyOtherErrorOnPath(httpStatusCode, guardNodeEd25519, ciphertext, associatedWith);
}
@ -416,7 +409,6 @@ export async function processOnionResponse({
abortSignal,
associatedWith,
lsrpcEd25519Key,
test,
}: {
response?: { text: () => Promise<string>; status: number };
symmetricKey?: ArrayBuffer;
@ -424,7 +416,6 @@ export async function processOnionResponse({
lsrpcEd25519Key?: string;
abortSignal?: AbortSignal;
associatedWith?: string;
test?: string;
}): Promise<SnodeResponse> {
let ciphertext = '';
@ -455,7 +446,7 @@ export async function processOnionResponse({
let ciphertextBuffer;
try {
const decoded = await exports.decodeOnionResult(symmetricKey, ciphertext, test);
const decoded = await exports.decodeOnionResult(symmetricKey, ciphertext);
plaintext = decoded.plaintext;
ciphertextBuffer = decoded.ciphertextBuffer;
@ -492,7 +483,6 @@ export async function processOnionResponse({
await processOnionRequestErrorAtDestination({
statusCode: status,
body: jsonRes?.body, // this is really important. the `.body`. the .body should be a string. for isntance for nodeNotFound but is most likely a dict (Record<string,any>))
guardNodeEd25519: guardNode.pubkey_ed25519,
destinationEd25519: lsrpcEd25519Key,
associatedWith,
});
@ -529,11 +519,9 @@ export type DestinationContext = {
async function handle421InvalidSwarm({
body,
snodeEd25519,
guardNodeEd25519,
associatedWith,
}: {
body: string;
guardNodeEd25519: string;
snodeEd25519?: string;
associatedWith?: string;
}) {
@ -571,7 +559,7 @@ async function handle421InvalidSwarm({
await dropSnodeFromSwarmIfNeeded(associatedWith, snodeEd25519);
}
}
await exports.incrementBadSnodeCountOrDrop({ snodeEd25519, guardNodeEd25519, associatedWith });
await Onions.incrementBadSnodeCountOrDrop({ snodeEd25519, associatedWith });
// this is important we throw so another retry is made and we exit the handling of that reponse
throw new pRetry.AbortError(exceptionMessage);
@ -591,47 +579,26 @@ async function handle421InvalidSwarm({
*/
export async function incrementBadSnodeCountOrDrop({
snodeEd25519,
guardNodeEd25519,
associatedWith,
}: {
snodeEd25519: string;
guardNodeEd25519: string;
associatedWith?: string;
}) {
if (!guardNodeEd25519) {
window?.log?.warn('We need a guardNodeEd25519 at all times');
}
const oldFailureCount = snodeFailureCount[snodeEd25519] || 0;
const newFailureCount = oldFailureCount + 1;
snodeFailureCount[snodeEd25519] = newFailureCount;
if (newFailureCount >= snodeFailureThreshold) {
window?.log?.warn(`Failure threshold reached for: ${ed25519Str(snodeEd25519)}; dropping it.`);
window?.log?.warn(
`Failure threshold reached for snode: ${ed25519Str(snodeEd25519)}; dropping it.`
);
if (associatedWith) {
window?.log?.warn(
`Dropping ${ed25519Str(snodeEd25519)} from swarm of ${ed25519Str(associatedWith)}`
);
await dropSnodeFromSwarmIfNeeded(associatedWith, snodeEd25519);
}
window?.log?.info(`Dropping ${ed25519Str(snodeEd25519)} from snodepool`);
await dropSnodeFromSnodePool(snodeEd25519);
// the snode was ejected from the pool so it won't be used again.
// in case of snode pool refresh, we need to be able to try to contact this node again so reset its failure count to 0.
snodeFailureCount[snodeEd25519] = 0;
try {
await OnionPaths.dropSnodeFromPath(snodeEd25519);
} catch (e) {
window?.log?.warn(
'dropSnodeFromPath, got error while patching up... incrementing the whole path as bad',
e.message
);
// If dropSnodeFromPath throws, it means there is an issue patching up the path, increment the whole path issues count
// but using the guardNode we got instead of the snodeEd25519.
//
await OnionPaths.incrementBadPathCountOrDrop(guardNodeEd25519);
}
await OnionPaths.dropSnodeFromPath(snodeEd25519);
} else {
window?.log?.warn(
`Couldn't reach snode at: ${ed25519Str(
@ -652,7 +619,6 @@ export const sendOnionRequestHandlingSnodeEject = async ({
abortSignal,
associatedWith,
finalRelayOptions,
test,
}: {
nodePath: Array<Snode>;
destX25519Any: string;
@ -664,7 +630,6 @@ export const sendOnionRequestHandlingSnodeEject = async ({
finalRelayOptions?: FinalRelayOptions;
abortSignal?: AbortSignal;
associatedWith?: string;
test?: string;
}): Promise<SnodeResponse> => {
// this sendOnionRequest() call has to be the only one like this.
// If you need to call it, call it through sendOnionRequestHandlingSnodeEject because this is the one handling path rebuilding and known errors
@ -678,14 +643,21 @@ export const sendOnionRequestHandlingSnodeEject = async ({
finalDestOptions,
finalRelayOptions,
abortSignal,
test,
});
response = result.response;
if (
!_.isEmpty(finalRelayOptions) &&
response.status === 502 &&
response.statusText === 'Bad Gateway'
) {
      // it's an opengroup server and it is not responding. Consider this as an ENETUNREACH
throw new pRetry.AbortError('ENETUNREACH');
}
decodingSymmetricKey = result.decodingSymmetricKey;
} catch (e) {
window.log.warn('sendOnionRequest', e);
if (e.code === 'ENETUNREACH') {
window?.log?.warn('sendOnionRequest error message: ', e.message);
if (e.code === 'ENETUNREACH' || e.message === 'ENETUNREACH') {
throw e;
}
}
@ -698,7 +670,6 @@ export const sendOnionRequestHandlingSnodeEject = async ({
lsrpcEd25519Key: finalDestOptions?.destination_ed25519_hex,
abortSignal,
associatedWith,
test,
});
return processed;
@ -731,7 +702,6 @@ const sendOnionRequest = async ({
};
finalRelayOptions?: FinalRelayOptions;
abortSignal?: AbortSignal;
test?: string;
}) => {
// get destination pubkey in array buffer format
let destX25519hex = destX25519Any;
@ -826,8 +796,7 @@ async function sendOnionRequestSnodeDest(
onionPath: Array<Snode>,
targetNode: Snode,
plaintext?: string,
associatedWith?: string,
test?: string
associatedWith?: string
) {
return sendOnionRequestHandlingSnodeEject({
nodePath: onionPath,
@ -837,7 +806,6 @@ async function sendOnionRequestSnodeDest(
body: plaintext,
},
associatedWith,
test,
});
}
@ -845,37 +813,30 @@ export function getPathString(pathObjArr: Array<{ ip: string; port: number }>):
return pathObjArr.map(node => `${node.ip}:${node.port}`).join(', ');
}
async function onionFetchRetryable(
targetNode: Snode,
body?: string,
associatedWith?: string,
test?: string
): Promise<SnodeResponse> {
// Get a path excluding `targetNode`:
const path = await OnionPaths.getOnionPath(targetNode);
const result = await sendOnionRequestSnodeDest(path, targetNode, body, associatedWith, test);
return result;
}
/**
* If the fetch throws a retryable error we retry this call with a new path at most 3 times. If another error happens, we return it. If we have a result we just return it.
*/
export async function lokiOnionFetch(
targetNode: Snode,
body?: string,
associatedWith?: string,
test?: string
): Promise<SnodeResponse | undefined> {
export async function lokiOnionFetch({
targetNode,
associatedWith,
body,
}: {
targetNode: Snode;
body?: string;
associatedWith?: string;
}): Promise<SnodeResponse | undefined> {
try {
const retriedResult = await pRetry(
async () => {
return onionFetchRetryable(targetNode, body, associatedWith, test);
// Get a path excluding `targetNode`:
const path = await OnionPaths.getOnionPath({ toExclude: targetNode });
const result = await sendOnionRequestSnodeDest(path, targetNode, body, associatedWith);
return result;
},
{
retries: 4,
retries: 3,
factor: 1,
minTimeout: 1000,
maxTimeout: 2000,
minTimeout: 100,
onFailedAttempt: e => {
window?.log?.warn(
`onionFetchRetryable attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`

View File

@ -1,15 +1,14 @@
import _ from 'lodash';
import { getSnodePoolFromSnodes, getSnodesFromSeedUrl, requestSnodesForPubkey } from './SNodeAPI';
import { getSnodePoolFromSnodes, requestSnodesForPubkey } from './SNodeAPI';
import * as Data from '../../../ts/data/data';
import { allowOnlyOneAtATime } from '../utils/Promise';
import pRetry from 'p-retry';
import { ed25519Str } from '../onions/onionPath';
import { OnionPaths } from '../onions';
import { Onions } from '.';
import { Onions, SnodePool } from '.';
import { SeedNodeAPI } from '../seed_node_api';
/**
 * If we get fewer than this many snodes in a swarm, we fetch new snodes for this pubkey
*/
@ -21,6 +20,11 @@ const minSwarmSnodeCount = 3;
*/
export const minSnodePoolCount = 12;
/**
 * If we get fewer than this many snodes (24), let's try to get an updated list from them while we can
*/
export const minSnodePoolCountBeforeRefreshFromSnodes = minSnodePoolCount * 2;
/**
* If we do a request to fetch nodes from snodes and they don't return at least
* the same `requiredSnodesForAgreement` snodes we consider that this is not a valid return.
@ -29,65 +33,17 @@ export const minSnodePoolCount = 12;
*/
export const requiredSnodesForAgreement = 24;
// This should be renamed to `allNodes` or something
export let randomSnodePool: Array<Data.Snode> = [];
let randomSnodePool: Array<Data.Snode> = [];
// tslint:disable-next-line: function-name
export function TEST_resetState() {
randomSnodePool = [];
swarmCache.clear();
}
// We only store nodes' identifiers here,
const swarmCache: Map<string, Array<string>> = new Map();
export type SeedNode = {
url: string;
};
// just get the filtered list
async function tryGetSnodeListFromLokidSeednode(
seedNodes: Array<SeedNode>
): Promise<Array<Data.Snode>> {
window?.log?.info('tryGetSnodeListFromLokidSeednode starting...');
if (!seedNodes.length) {
window?.log?.info('loki_snode_api::tryGetSnodeListFromLokidSeednode - seedNodes are empty');
return [];
}
const seedNode = _.sample(seedNodes);
if (!seedNode) {
window?.log?.warn(
'loki_snode_api::tryGetSnodeListFromLokidSeednode - Could not select random snodes from',
seedNodes
);
return [];
}
let snodes = [];
try {
const tryUrl = new URL(seedNode.url);
snodes = await getSnodesFromSeedUrl(tryUrl);
// throw before clearing the lock, so the retries can kick in
if (snodes.length === 0) {
window?.log?.warn(
`loki_snode_api::tryGetSnodeListFromLokidSeednode - ${seedNode.url} did not return any snodes`
);
// does this error message need to be exactly this?
throw new window.textsecure.SeedNodeError('Failed to contact seed node');
}
return snodes;
} catch (e) {
window?.log?.warn(
'LokiSnodeAPI::tryGetSnodeListFromLokidSeednode - error',
e.code,
e.message,
'on',
seedNode
);
if (snodes.length === 0) {
throw new window.textsecure.SeedNodeError('Failed to contact seed node');
}
}
return [];
}
/**
* Drop a snode from the snode pool. This does not update the swarm containing this snode.
* Use `dropSnodeFromSwarmIfNeeded` for that
@ -97,28 +53,34 @@ export async function dropSnodeFromSnodePool(snodeEd25519: string) {
const exists = _.some(randomSnodePool, x => x.pubkey_ed25519 === snodeEd25519);
if (exists) {
_.remove(randomSnodePool, x => x.pubkey_ed25519 === snodeEd25519);
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
window?.log?.warn(
`Marking ${ed25519Str(snodeEd25519)} as unreachable, ${
`Droppping ${ed25519Str(snodeEd25519)} from snode pool. ${
randomSnodePool.length
} snodes remaining in randomPool`
);
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
}
}
/**
*
* @param excluding can be used to exclude some nodes from the random list. Useful to rebuild a path excluding existing node already in a path
* excludingEd25519Snode can be used to exclude some nodes from the random list.
 * Useful when rebuilding a path, to exclude nodes already present in an existing path
*/
export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Promise<Data.Snode> {
// resolve random snode
if (randomSnodePool.length === 0) {
// Should not this be saved to the database?
await refreshRandomPool();
// make sure we have a few snodes in the pool excluding the one passed as args
const requiredCount = minSnodePoolCount + (excludingEd25519Snode?.length || 0);
if (randomSnodePool.length < requiredCount) {
await getSnodePoolFromDBOrFetchFromSeed(excludingEd25519Snode?.length);
if (randomSnodePool.length === 0) {
throw new window.textsecure.SeedNodeError('Invalid seed node response');
if (randomSnodePool.length < requiredCount) {
window?.log?.warn(
`getRandomSnode: failed to fetch snodes from seed. Current pool: ${randomSnodePool.length}`
);
throw new Error(
`getRandomSnode: failed to fetch snodes from seed. Current pool: ${randomSnodePool.length}, required count: ${requiredCount}`
);
}
}
// We know the pool can't be empty at this point
@ -131,211 +93,156 @@ export async function getRandomSnode(excludingEd25519Snode?: Array<string>): Pro
e => !excludingEd25519Snode.includes(e.pubkey_ed25519)
);
if (!snodePoolExcluding || !snodePoolExcluding.length) {
if (window?.textsecure) {
throw new window.textsecure.SeedNodeError(
'Not enough snodes with excluding length',
excludingEd25519Snode.length
);
}
// used for tests
throw new Error('SeedNodeError');
throw new Error(`Not enough snodes with excluding length ${excludingEd25519Snode.length}`);
}
return _.sample(snodePoolExcluding) as Data.Snode;
}
/**
* This function force the snode poll to be refreshed from a random seed node again.
 * This function forces the snode pool to be refreshed from a random seed node, or from snodes if we have enough of them.
 * This should be called about once a day while the app is kept on.
*/
export async function forceRefreshRandomSnodePool(): Promise<Array<Data.Snode>> {
await refreshRandomPool(true);
try {
await getSnodePoolFromDBOrFetchFromSeed();
window?.log?.info(
`forceRefreshRandomSnodePool: enough snodes to fetch from them, so we try using them ${randomSnodePool.length}`
);
// this function throws if it does not have enough snodes to do it
await tryToGetConsensusWithSnodesWithRetries();
if (randomSnodePool.length < minSnodePoolCountBeforeRefreshFromSnodes) {
throw new Error('forceRefreshRandomSnodePool still too small after refetching from snodes');
}
} catch (e) {
window?.log?.warn(
'forceRefreshRandomSnodePool: Failed to fetch snode pool from snodes. Fetching from seed node instead:',
e.message
);
// if that fails to get enough snodes, even after retries, well we just have to retry later.
try {
await SnodePool.TEST_fetchFromSeedWithRetriesAndWriteToDb();
} catch (e) {
window?.log?.warn(
'forceRefreshRandomSnodePool: Failed to fetch snode pool from seed. Fetching from seed node instead:',
e.message
);
}
}
return randomSnodePool;
}
/**
* Fetches from DB if snode pool is not cached, and returns it if the length is >= 12.
 * If length is < 12, fetches an updated list of snodes from a seed node
*/
export async function getSnodePoolFromDBOrFetchFromSeed(
countToAddToRequirement = 0
): Promise<Array<Data.Snode>> {
if (randomSnodePool && randomSnodePool.length > minSnodePoolCount + countToAddToRequirement) {
return randomSnodePool;
}
const fetchedFromDb = await Data.getSnodePoolFromDb();
if (!fetchedFromDb || fetchedFromDb.length <= minSnodePoolCount + countToAddToRequirement) {
window?.log?.warn(
`getSnodePoolFromDBOrFetchFromSeed: not enough snodes in db (${fetchedFromDb?.length}), Fetching from seed node instead... `
);
// if that fails to get enough snodes, even after retries, well we just have to retry later.
// this call does not throw
await SnodePool.TEST_fetchFromSeedWithRetriesAndWriteToDb();
return randomSnodePool;
}
// write to memory only if it is valid.
randomSnodePool = fetchedFromDb;
return randomSnodePool;
}
export async function getRandomSnodePool(): Promise<Array<Data.Snode>> {
if (randomSnodePool.length === 0) {
await refreshRandomPool();
if (randomSnodePool.length <= minSnodePoolCount) {
await getSnodePoolFromDBOrFetchFromSeed();
}
return randomSnodePool;
}
async function getSnodeListFromLokidSeednode(
seedNodes: Array<SeedNode>,
retries = 0
): Promise<Array<Data.Snode>> {
const SEED_NODE_RETRIES = 3;
window?.log?.info('getSnodeListFromLokidSeednode starting...');
if (!seedNodes.length) {
window?.log?.info('loki_snode_api::getSnodeListFromLokidSeednode - seedNodes are empty');
return [];
}
let snodes: Array<Data.Snode> = [];
try {
snodes = await tryGetSnodeListFromLokidSeednode(seedNodes);
} catch (e) {
window?.log?.warn('loki_snode_api::getSnodeListFromLokidSeednode - error', e.code, e.message);
// handle retries in case of temporary hiccups
if (retries < SEED_NODE_RETRIES) {
setTimeout(async () => {
window?.log?.info(
'loki_snode_api::getSnodeListFromLokidSeednode - Retrying initialising random snode pool, try #',
retries,
'seed nodes total',
seedNodes.length
);
try {
await getSnodeListFromLokidSeednode(seedNodes, retries + 1);
} catch (e) {
window?.log?.warn('getSnodeListFromLokidSeednode failed retr y #', retries, e);
}
}, retries * retries * 5000);
} else {
window?.log?.error('loki_snode_api::getSnodeListFromLokidSeednode - failing');
throw new window.textsecure.SeedNodeError('Failed to contact seed node');
}
}
return snodes;
}
/**
* Fetch all snodes from a seed nodes if we don't have enough snodes to make the request ourself.
* Exported only for tests. This is not to be used by the app directly
* @param seedNodes the seednodes to use to fetch snodes details
 * This function tries to fetch the snode list from seed nodes and handles retries.
 * It will write the updated snode list to the db once it succeeds.
* It also resets the onionpaths failure count and snode failure count.
* This function does not throw.
*/
export async function refreshRandomPoolDetail(
seedNodes: Array<SeedNode>
): Promise<Array<Data.Snode>> {
let snodes = [];
try {
window?.log?.info(`refreshRandomPoolDetail with seedNodes.length ${seedNodes.length}`);
snodes = await getSnodeListFromLokidSeednode(seedNodes);
// make sure order of the list is random, so we get version in a non-deterministic way
snodes = _.shuffle(snodes);
// commit changes to be live
// we'll update the version (in case they upgrade) every cycle
const fetchSnodePool = snodes.map((snode: any) => ({
ip: snode.public_ip,
port: snode.storage_port,
pubkey_x25519: snode.pubkey_x25519,
pubkey_ed25519: snode.pubkey_ed25519,
version: '',
}));
window?.log?.info(
'LokiSnodeAPI::refreshRandomPool - Refreshed random snode pool with',
snodes.length,
'snodes'
);
return fetchSnodePool;
} catch (e) {
window?.log?.warn('LokiSnodeAPI::refreshRandomPool - error', e.code, e.message);
/*
log.error(
'LokiSnodeAPI:::refreshRandomPoolPromise - Giving up trying to contact seed node'
);
*/
if (snodes.length === 0) {
throw new window.textsecure.SeedNodeError('Failed to contact seed node');
}
return [];
}
}
/**
* This function runs only once at a time, and fetches the snode pool from a random seed node,
* or if we have enough snodes, fetches the snode pool from one of the snode.
*/
export async function refreshRandomPool(forceRefresh = false): Promise<void> {
// tslint:disable: function-name
export async function TEST_fetchFromSeedWithRetriesAndWriteToDb() {
const seedNodes = window.getSeedNodeList();
if (!seedNodes || !seedNodes.length) {
window?.log?.error(
'LokiSnodeAPI:::refreshRandomPool - getSeedNodeList has not been loaded yet'
'LokiSnodeAPI:::fetchFromSeedWithRetriesAndWriteToDb - getSeedNodeList has not been loaded yet'
);
return;
}
window?.log?.info("right before allowOnlyOneAtATime 'refreshRandomPool'");
try {
randomSnodePool = await SeedNodeAPI.fetchSnodePoolFromSeedNodeWithRetries(seedNodes);
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
return allowOnlyOneAtATime('refreshRandomPool', async () => {
window?.log?.info("running allowOnlyOneAtATime 'refreshRandomPool'");
OnionPaths.resetPathFailureCount();
Onions.resetSnodeFailureCount();
} catch (e) {
window?.log?.error(
'LokiSnodeAPI:::fetchFromSeedWithRetriesAndWriteToDb - Failed to fetch snode poll from seed node with retries. Error:',
e
);
}
}
// if we have forceRefresh set, we want to request snodes from snodes or from the seed server.
if (randomSnodePool.length === 0 && !forceRefresh) {
const fetchedFromDb = await Data.getSnodePoolFromDb();
// write to memory only if it is valid.
// if the size is not enough. we will contact a seed node.
if (fetchedFromDb?.length) {
window?.log?.info(`refreshRandomPool: fetched from db ${fetchedFromDb.length} snodes.`);
randomSnodePool = fetchedFromDb;
if (randomSnodePool.length <= minSnodePoolCount) {
window?.log?.warn('refreshRandomPool: not enough snodes in db, going to fetch from seed');
} else {
return;
}
} else {
window?.log?.warn('refreshRandomPool: did not find snodes in db.');
/**
 * This function retries a few times to get a consensus among 3 snodes that the snode pool contains at least 24 snodes.
*
* If a consensus cannot be made, this function throws an error and the caller needs to call the fetch snodes from seed.
*
*/
async function tryToGetConsensusWithSnodesWithRetries() {
// let this request try 4 (3+1) times. If all those requests end up without having a consensus,
// fetch the snode pool from one of the seed nodes (see the catch).
return pRetry(
async () => {
const commonNodes = await getSnodePoolFromSnodes();
if (!commonNodes || commonNodes.length < requiredSnodesForAgreement) {
        // throwing triggers a retry if we have some left.
window?.log?.info(
`tryToGetConsensusWithSnodesWithRetries: Not enough common nodes ${commonNodes?.length}`
);
throw new Error('Not enough common nodes.');
}
}
// we don't have nodes to fetch the pool from them, so call the seed node instead.
if (randomSnodePool.length <= minSnodePoolCount) {
window?.log?.info(
`refreshRandomPool: NOT enough snodes to fetch from them ${randomSnodePool.length} <= ${minSnodePoolCount}, so falling back to seedNodes ${seedNodes?.length}`
'Got consensus: updating snode list with snode pool length:',
commonNodes.length
);
randomSnodePool = await exports.refreshRandomPoolDetail(seedNodes);
randomSnodePool = commonNodes;
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
return;
}
try {
window?.log?.info(
`refreshRandomPool: enough snodes to fetch from them, so we try using them ${randomSnodePool.length}`
);
// let this request try 3 (3+1) times. If all those requests end up without having a consensus,
// fetch the snode pool from one of the seed nodes (see the catch).
await pRetry(
async () => {
const commonNodes = await getSnodePoolFromSnodes();
if (!commonNodes || commonNodes.length < requiredSnodesForAgreement) {
// throwing makes trigger a retry if we have some left.
window?.log?.info(`refreshRandomPool: Not enough common nodes ${commonNodes?.length}`);
throw new Error('Not enough common nodes.');
}
window?.log?.info('updating snode list with snode pool length:', commonNodes.length);
randomSnodePool = commonNodes;
OnionPaths.resetPathFailureCount();
Onions.resetSnodeFailureCount();
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
},
{
retries: 3,
factor: 1,
minTimeout: 1000,
onFailedAttempt: e => {
window?.log?.warn(
`getSnodePoolFromSnodes attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`
);
},
}
);
} catch (e) {
window?.log?.warn(
'Failed to fetch snode pool from snodes. Fetching from seed node instead:',
e
);
// fallback to a seed node fetch of the snode pool
randomSnodePool = await exports.refreshRandomPoolDetail(seedNodes);
OnionPaths.resetPathFailureCount();
Onions.resetSnodeFailureCount();
await Data.updateSnodePoolOnDb(JSON.stringify(randomSnodePool));
},
{
retries: 3,
factor: 1,
minTimeout: 1000,
onFailedAttempt: e => {
window?.log?.warn(
`tryToGetConsensusWithSnodesWithRetries attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`
);
},
}
});
);
}
/**
@ -348,6 +255,10 @@ export async function dropSnodeFromSwarmIfNeeded(
snodeToDropEd25519: string
): Promise<void> {
// this call either used the cache or fetch the swarm from the db
window?.log?.warn(
`Dropping ${ed25519Str(snodeToDropEd25519)} from swarm of ${ed25519Str(pubkey)}`
);
const existingSwarm = await getSwarmFromCacheOrDb(pubkey);
if (!existingSwarm.includes(snodeToDropEd25519)) {

View File

@ -286,7 +286,7 @@ export class SwarmPolling {
},
{
minTimeout: 100,
retries: 2,
retries: 1,
onFailedAttempt: e => {
window?.log?.warn(
`retrieveNextMessages attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left...`

View File

@ -56,7 +56,7 @@ export async function allowOnlyOneAtATime(
}
// tslint:disable-next-line: no-dynamic-delete
delete snodeGlobalLocks[name]; // clear lock
throw e;
reject(e);
}
// clear timeout timer
if (timeoutMs) {

View File

@ -0,0 +1,196 @@
// tslint:disable: no-implicit-dependencies max-func-body-length no-unused-expression
import chai from 'chai';
import * as sinon from 'sinon';
import _ from 'lodash';
import { describe } from 'mocha';
import { TestUtils } from '../../../test-utils';
import { Onions, SnodePool } from '../../../../session/snode_api';
import * as Data from '../../../../data/data';
import chaiAsPromised from 'chai-as-promised';
import * as OnionPaths from '../../../../session/onions/onionPath';
import { generateFakeSnodes, generateFakeSnodeWithEdKey } from '../../../test-utils/utils';
import { SeedNodeAPI } from '../../../../session/seed_node_api';
chai.use(chaiAsPromised as any);
chai.should();
const { expect } = chai;
const guard1ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f9161534e';
const guard2ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f91615349';
const guard3ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f9161534a';
const fakeSnodePool: Array<Data.Snode> = [
...generateFakeSnodes(12),
generateFakeSnodeWithEdKey(guard1ed),
generateFakeSnodeWithEdKey(guard2ed),
generateFakeSnodeWithEdKey(guard3ed),
...generateFakeSnodes(3),
];
// tslint:disable: variable-name
// tslint:disable-next-line: max-func-body-length
describe('GuardNodes', () => {
// Initialize new stubbed cache
const sandbox = sinon.createSandbox();
let getSnodePoolFromDBOrFetchFromSeed: sinon.SinonStub;
let fetchFromSeedWithRetriesAndWriteToDb: sinon.SinonStub;
describe('selectGuardNodes', () => {
beforeEach(() => {
OnionPaths.clearTestOnionPath();
TestUtils.stubWindowLog();
TestUtils.stubWindow('getGlobalOnlineStatus', () => true);
Onions.resetSnodeFailureCount();
OnionPaths.resetPathFailureCount();
SnodePool.TEST_resetState();
});
afterEach(() => {
TestUtils.restoreStubs();
sandbox.restore();
});
it('does not fetch from seed if we got 12 or more snodes in the db', async () => {
sandbox.stub(Data, 'getSnodePoolFromDb').resolves(fakeSnodePool);
getSnodePoolFromDBOrFetchFromSeed = sandbox
.stub(SnodePool, 'getSnodePoolFromDBOrFetchFromSeed')
.callThrough();
fetchFromSeedWithRetriesAndWriteToDb = sandbox
.stub(SnodePool, 'TEST_fetchFromSeedWithRetriesAndWriteToDb')
.resolves();
const testGuardNode = sandbox.stub(OnionPaths, 'TEST_testGuardNode').resolves(true);
sandbox.stub(Data, 'updateGuardNodes').resolves();
// run the command
const fetchedGuardNodes = await OnionPaths.selectGuardNodes();
expect(
getSnodePoolFromDBOrFetchFromSeed.callCount,
'getSnodePoolFromDBOrFetchFromSeed should have been called'
).to.be.eq(1);
expect(
fetchFromSeedWithRetriesAndWriteToDb.callCount,
'fetchFromSeedWithRetriesAndWriteToDb should not have been called'
).to.be.eq(0);
expect(
testGuardNode.callCount,
'firstGuardNode should have been called three times'
).to.be.eq(3);
const firstGuardNode = testGuardNode.firstCall.args[0];
const secondGuardNode = testGuardNode.secondCall.args[0];
const thirdGuardNode = testGuardNode.thirdCall.args[0];
expect(fetchedGuardNodes).to.deep.equal([firstGuardNode, secondGuardNode, thirdGuardNode]);
});
it('throws an error if we got enough snodes in the db but none test passes', async () => {
sandbox.stub(Data, 'getSnodePoolFromDb').resolves(fakeSnodePool);
getSnodePoolFromDBOrFetchFromSeed = sandbox
.stub(SnodePool, 'getSnodePoolFromDBOrFetchFromSeed')
.callThrough();
fetchFromSeedWithRetriesAndWriteToDb = sandbox
.stub(SnodePool, 'TEST_fetchFromSeedWithRetriesAndWriteToDb')
.resolves();
const testGuardNode = sandbox.stub(OnionPaths, 'TEST_testGuardNode').resolves(false);
sandbox.stub(Data, 'updateGuardNodes').resolves();
// run the command
let throwedError: string | undefined;
try {
await OnionPaths.selectGuardNodes();
} catch (e) {
throwedError = e.message;
}
expect(
getSnodePoolFromDBOrFetchFromSeed.callCount,
'getSnodePoolFromDBOrFetchFromSeed should have been called'
).to.be.eq(1);
expect(
fetchFromSeedWithRetriesAndWriteToDb.callCount,
'fetchFromSeedWithRetriesAndWriteToDb should not have been called'
).to.be.eq(0);
expect(
testGuardNode.callCount,
'firstGuardNode should have been called three times'
).to.be.eq(18);
expect(throwedError).to.be.equal('selectGuardNodes stopping after attempts: 6');
});
it('throws an error if we have to fetch from seed, fetch from seed enough snode but we still fail', async () => {
const invalidSndodePool = fakeSnodePool.slice(0, 11);
sandbox.stub(Data, 'getSnodePoolFromDb').resolves(invalidSndodePool);
TestUtils.stubWindow('getSeedNodeList', () => [{ url: 'whatever' }]);
getSnodePoolFromDBOrFetchFromSeed = sandbox
.stub(SnodePool, 'getSnodePoolFromDBOrFetchFromSeed')
.callThrough();
fetchFromSeedWithRetriesAndWriteToDb = sandbox
.stub(SeedNodeAPI, 'fetchSnodePoolFromSeedNodeWithRetries')
.resolves(fakeSnodePool);
sandbox.stub(Data, 'updateGuardNodes').resolves();
// run the command
let throwedError: string | undefined;
try {
await OnionPaths.selectGuardNodes();
} catch (e) {
throwedError = e.message;
}
expect(throwedError).to.be.equal('selectGuardNodes stopping after attempts: 6');
});
it('returns valid guardnode if we have to fetch from seed, fetch from seed enough snodes but guard node tests passes', async () => {
const invalidSndodePool = fakeSnodePool.slice(0, 11);
sandbox.stub(Data, 'getSnodePoolFromDb').resolves(invalidSndodePool);
TestUtils.stubWindow('getSeedNodeList', () => [{ url: 'whatever' }]);
const testGuardNode = sandbox.stub(OnionPaths, 'TEST_testGuardNode').resolves(true);
getSnodePoolFromDBOrFetchFromSeed = sandbox
.stub(SnodePool, 'getSnodePoolFromDBOrFetchFromSeed')
.callThrough();
fetchFromSeedWithRetriesAndWriteToDb = sandbox
.stub(SeedNodeAPI, 'fetchSnodePoolFromSeedNodeWithRetries')
.resolves(fakeSnodePool);
sandbox.stub(Data, 'updateGuardNodes').resolves();
// run the command
const guardNodes = await OnionPaths.selectGuardNodes();
expect(guardNodes.length).to.be.equal(3);
expect(testGuardNode.callCount).to.be.equal(3);
});
it('throws if we have to fetch from seed, fetch from seed but not have enough fetched snodes', async () => {
const invalidSndodePool = fakeSnodePool.slice(0, 11);
sandbox.stub(Data, 'getSnodePoolFromDb').resolves(invalidSndodePool);
TestUtils.stubWindow('getSeedNodeList', () => [{ url: 'whatever' }]);
getSnodePoolFromDBOrFetchFromSeed = sandbox
.stub(SnodePool, 'getSnodePoolFromDBOrFetchFromSeed')
.callThrough();
fetchFromSeedWithRetriesAndWriteToDb = sandbox
.stub(SeedNodeAPI, 'fetchSnodePoolFromSeedNodeWithRetries')
.resolves(invalidSndodePool);
sandbox.stub(Data, 'updateGuardNodes').resolves();
// run the command
let throwedError: string | undefined;
try {
await OnionPaths.selectGuardNodes();
} catch (e) {
throwedError = e.message;
}
expect(throwedError).to.be.equal(
'Could not select guard nodes. Not enough nodes in the pool: 11'
);
});
});
});

View File

@ -17,6 +17,7 @@ import {
import AbortController from 'abort-controller';
import * as Data from '../../../../../ts/data/data';
import { pathFailureCount } from '../../../../session/onions/onionPath';
import { SeedNodeAPI } from '../../../../session/seed_node_api';
chai.use(chaiAsPromised as any);
chai.should();
@ -67,7 +68,7 @@ describe('OnionPathsErrors', () => {
beforeEach(async () => {
guardPubkeys = TestUtils.generateFakePubKeys(3).map(n => n.key);
otherNodesPubkeys = TestUtils.generateFakePubKeys(13).map(n => n.key);
otherNodesPubkeys = TestUtils.generateFakePubKeys(20).map(n => n.key);
SNodeAPI.Onions.resetSnodeFailureCount();
@ -78,7 +79,6 @@ describe('OnionPathsErrors', () => {
port: fakePortCurrent,
pubkey_ed25519: ed25519,
pubkey_x25519: ed25519,
version: '',
};
});
guardSnode1 = guardNodesArray[0];
@ -90,7 +90,6 @@ describe('OnionPathsErrors', () => {
port: fakePortCurrent,
pubkey_ed25519: ed25519,
pubkey_x25519: ed25519,
version: '',
};
});
@ -100,14 +99,14 @@ describe('OnionPathsErrors', () => {
fakeSwarmForAssociatedWith = otherNodesPubkeys.slice(0, 6);
// Stubs
sandbox.stub(OnionPaths, 'selectGuardNodes').resolves(guardNodesArray);
sandbox.stub(SNodeAPI.SNodeAPI, 'getSnodePoolFromSnode').resolves(guardNodesArray);
sandbox.stub(SNodeAPI.SNodeAPI, 'TEST_getSnodePoolFromSnode').resolves(guardNodesArray);
TestUtils.stubData('getGuardNodes').resolves([
guardPubkeys[0],
guardPubkeys[1],
guardPubkeys[2],
]);
TestUtils.stubWindow('getSeedNodeList', () => ['seednode1']);
sandbox.stub(SNodeAPI.SnodePool, 'refreshRandomPoolDetail').resolves(fakeSnodePool);
sandbox.stub(SeedNodeAPI, 'fetchSnodePoolFromSeedNodeWithRetries').resolves(fakeSnodePool);
sandbox.stub(Data, 'getSwarmNodesForPubkey').resolves(fakeSwarmForAssociatedWith);
updateGuardNodesStub = sandbox.stub(Data, 'updateGuardNodes').resolves();
@ -128,7 +127,7 @@ describe('OnionPathsErrors', () => {
OnionPaths.resetPathFailureCount();
await OnionPaths.getOnionPath();
await OnionPaths.getOnionPath({});
oldOnionPaths = OnionPaths.TEST_getTestOnionPath();
sandbox
@ -280,7 +279,6 @@ describe('OnionPathsErrors', () => {
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(1);
expect(incrementBadSnodeCountOrDropSpy.firstCall.args[0]).to.deep.eq({
snodeEd25519: targetNode,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
});
@ -325,7 +323,6 @@ describe('OnionPathsErrors', () => {
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(1);
expect(incrementBadSnodeCountOrDropSpy.firstCall.args[0]).to.deep.eq({
snodeEd25519: targetNode,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
});
@ -363,7 +360,6 @@ describe('OnionPathsErrors', () => {
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(1);
expect(incrementBadSnodeCountOrDropSpy.firstCall.args[0]).to.deep.eq({
snodeEd25519: targetNode,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
});
@ -403,7 +399,6 @@ describe('OnionPathsErrors', () => {
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(1);
expect(incrementBadSnodeCountOrDropSpy.firstCall.args[0]).to.deep.eq({
snodeEd25519: targetNode,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
});
@ -470,19 +465,25 @@ describe('OnionPathsErrors', () => {
expect(e.name).to.not.equal('AbortError');
}
expect(updateSwarmSpy.callCount).to.eq(0);
// now we make sure that this bad snode was dropped from this pubkey's swarm
expect(dropSnodeFromSwarmIfNeededSpy.callCount).to.eq(0);
// this specific node failed just once
expect(dropSnodeFromSnodePool.callCount).to.eq(0);
expect(dropSnodeFromPathSpy.callCount).to.eq(0);
expect(incrementBadPathCountOrDropSpy.callCount).to.eq(0);
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(1);
expect(incrementBadSnodeCountOrDropSpy.firstCall.args[0]).to.deep.eq({
snodeEd25519: failingSnode.pubkey_ed25519,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
// this specific node failed just once but it was a node not found error. Force drop it
expect(
dropSnodeFromSwarmIfNeededSpy.callCount,
'dropSnodeFromSwarmIfNeededSpy should have been called'
).to.eq(1);
expect(
dropSnodeFromSnodePool.callCount,
'dropSnodeFromSnodePool should have been called'
).to.eq(1);
expect(dropSnodeFromPathSpy.callCount, 'dropSnodeFromPath should have been called').to.eq(1);
expect(
incrementBadPathCountOrDropSpy.callCount,
'incrementBadPathCountOrDrop should not have been called'
).to.eq(0);
expect(
incrementBadSnodeCountOrDropSpy.callCount,
'incrementBadSnodeCountOrDrop should not have been called'
).to.eq(0);
});
it('throws a retryable error on 502 on last snode', async () => {
@ -505,20 +506,23 @@ describe('OnionPathsErrors', () => {
expect(e.name).to.not.equal('AbortError');
}
expect(updateSwarmSpy.callCount).to.eq(0);
// now we make sure that this bad snode was dropped from this pubkey's swarm
expect(dropSnodeFromSwarmIfNeededSpy.callCount).to.eq(0);
// this specific node failed just once
expect(dropSnodeFromSnodePool.callCount).to.eq(0);
expect(dropSnodeFromPathSpy.callCount).to.eq(0);
// we have a specific failing node so don't increment path errors
expect(incrementBadPathCountOrDropSpy.callCount).to.eq(0);
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(1);
expect(incrementBadSnodeCountOrDropSpy.firstCall.args[0]).to.deep.eq({
snodeEd25519: failingSnode.pubkey_ed25519,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
// this specific node failed just once but it was a node not found error. Force drop it
expect(dropSnodeFromSwarmIfNeededSpy.callCount).to.eq(1);
expect(
dropSnodeFromSnodePool.callCount,
'dropSnodeFromSnodePool should have been called'
).to.eq(1);
expect(dropSnodeFromPathSpy.callCount, 'dropSnodeFromPath should have been called').to.eq(1);
expect(
incrementBadPathCountOrDropSpy.callCount,
'incrementBadPathCountOrDrop should not have been called'
).to.eq(0);
expect(
incrementBadSnodeCountOrDropSpy.callCount,
'incrementBadSnodeCountOrDrop should not have been called'
).to.eq(0);
});
it('drop a snode from pool, swarm and path if it keep failing', async () => {
@ -545,34 +549,23 @@ describe('OnionPathsErrors', () => {
expect(updateSwarmSpy.callCount).to.eq(0);
// now we make sure that this bad snode was dropped from this pubkey's swarm
expect(dropSnodeFromSwarmIfNeededSpy.callCount).to.eq(1);
expect(dropSnodeFromSwarmIfNeededSpy.callCount).to.eq(3);
expect(dropSnodeFromSwarmIfNeededSpy.firstCall.args[0]).to.eq(associatedWith);
expect(dropSnodeFromSwarmIfNeededSpy.firstCall.args[1]).to.eq(failingSnode.pubkey_ed25519);
// this specific node failed just once
expect(dropSnodeFromSnodePool.callCount).to.eq(1);
expect(dropSnodeFromSnodePool.firstCall.args[0]).to.eq(failingSnode.pubkey_ed25519);
expect(dropSnodeFromPathSpy.callCount).to.eq(1);
expect(dropSnodeFromPathSpy.firstCall.args[0]).to.eq(failingSnode.pubkey_ed25519);
// we expect incrementBadSnodeCountOrDropSpy to be called three times with the same failing snode as we know who it is
expect(incrementBadSnodeCountOrDropSpy.callCount).to.eq(3);
expect(incrementBadSnodeCountOrDropSpy.args[0][0]).to.deep.eq({
snodeEd25519: failingSnode.pubkey_ed25519,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
expect(incrementBadSnodeCountOrDropSpy.args[1][0]).to.deep.eq({
snodeEd25519: failingSnode.pubkey_ed25519,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
expect(incrementBadSnodeCountOrDropSpy.args[2][0]).to.deep.eq({
snodeEd25519: failingSnode.pubkey_ed25519,
guardNodeEd25519: guardSnode1.pubkey_ed25519,
associatedWith,
});
expect(incrementBadPathCountOrDropSpy.callCount).to.eq(0);
expect(
dropSnodeFromSnodePool.callCount,
'dropSnodeFromSnodePool should have been called'
).to.eq(3);
expect(dropSnodeFromPathSpy.callCount, 'dropSnodeFromPath should have been called').to.eq(3);
expect(
incrementBadPathCountOrDropSpy.callCount,
'incrementBadPathCountOrDrop should not have been called'
).to.eq(0);
expect(
incrementBadSnodeCountOrDropSpy.callCount,
'incrementBadSnodeCountOrDrop should not have been called'
).to.eq(0);
});
});
it('drop a path if it keep failing without a specific node in fault', async () => {
@ -612,7 +605,6 @@ describe('OnionPathsErrors', () => {
for (let index = 0; index < 6; index++) {
expect(incrementBadSnodeCountOrDropSpy.args[index][0]).to.deep.eq({
snodeEd25519: oldOnionPaths[0][(index % 2) + 1].pubkey_ed25519,
guardNodeEd25519: guardNode.pubkey_ed25519,
});
}

View File

@ -7,11 +7,13 @@ import { describe } from 'mocha';
import { TestUtils } from '../../../test-utils';
import * as SNodeAPI from '../../../../session/snode_api';
import * as Data from '../../../../../ts/data/data';
import chaiAsPromised from 'chai-as-promised';
import * as OnionPaths from '../../../../session/onions/onionPath';
import { Snode } from '../../../../data/data';
import { generateFakeSnodes, generateFakeSnodeWithEdKey } from '../../../test-utils/utils';
import { SeedNodeAPI } from '../../../../session/seed_node_api';
chai.use(chaiAsPromised as any);
chai.should();
@ -26,11 +28,16 @@ const fakeSnodePool: Array<Snode> = [
generateFakeSnodeWithEdKey(guard1ed),
generateFakeSnodeWithEdKey(guard2ed),
generateFakeSnodeWithEdKey(guard3ed),
...generateFakeSnodes(3),
...generateFakeSnodes(9),
];
const fakeGuardNodesEd25519 = [guard1ed, guard2ed, guard3ed];
const fakeGuardNodes = fakeSnodePool.filter(m => fakeGuardNodesEd25519.includes(m.pubkey_ed25519));
const fakeGuardNodesFromDB: Array<Data.GuardNode> = fakeGuardNodesEd25519.map(ed25519PubKey => {
return {
ed25519PubKey,
};
});
// tslint:disable-next-line: max-func-body-length
describe('OnionPaths', () => {
@ -38,38 +45,39 @@ describe('OnionPaths', () => {
const sandbox = sinon.createSandbox();
let oldOnionPaths: Array<Array<Snode>>;
beforeEach(async () => {
// Utils Stubs
OnionPaths.clearTestOnionPath();
sandbox.stub(OnionPaths, 'selectGuardNodes').resolves(fakeGuardNodes);
sandbox.stub(SNodeAPI.SNodeAPI, 'getSnodePoolFromSnode').resolves(fakeGuardNodes);
TestUtils.stubData('getGuardNodes').resolves(fakeGuardNodesEd25519);
TestUtils.stubData('createOrUpdateItem').resolves();
TestUtils.stubWindow('getSeedNodeList', () => ['seednode1']);
// tslint:disable: no-void-expression no-console
TestUtils.stubWindowLog();
sandbox.stub(SNodeAPI.SnodePool, 'refreshRandomPoolDetail').resolves(fakeSnodePool);
SNodeAPI.Onions.resetSnodeFailureCount();
OnionPaths.resetPathFailureCount();
// get a copy of what old ones look like
await OnionPaths.getOnionPath();
oldOnionPaths = OnionPaths.TEST_getTestOnionPath();
if (oldOnionPaths.length !== 3) {
throw new Error(`onion path length not enough ${oldOnionPaths.length}`);
}
// this just triggers a build of the onionPaths
});
afterEach(() => {
TestUtils.restoreStubs();
sandbox.restore();
});
describe('dropSnodeFromPath', () => {
beforeEach(async () => {
// Utils Stubs
OnionPaths.clearTestOnionPath();
sandbox.stub(OnionPaths, 'selectGuardNodes').resolves(fakeGuardNodes);
sandbox.stub(SNodeAPI.SNodeAPI, 'TEST_getSnodePoolFromSnode').resolves(fakeGuardNodes);
sandbox.stub(Data, 'getSnodePoolFromDb').resolves(fakeSnodePool);
TestUtils.stubData('getGuardNodes').resolves(fakeGuardNodesFromDB);
TestUtils.stubData('createOrUpdateItem').resolves();
TestUtils.stubWindow('getSeedNodeList', () => ['seednode1']);
// tslint:disable: no-void-expression no-console
TestUtils.stubWindowLog();
sandbox.stub(SeedNodeAPI, 'fetchSnodePoolFromSeedNodeWithRetries').resolves(fakeSnodePool);
SNodeAPI.Onions.resetSnodeFailureCount();
OnionPaths.resetPathFailureCount();
// get a copy of what old ones look like
await OnionPaths.getOnionPath({});
oldOnionPaths = OnionPaths.TEST_getTestOnionPath();
if (oldOnionPaths.length !== 3) {
throw new Error(`onion path length not enough ${oldOnionPaths.length}`);
}
// this just triggers a build of the onionPaths
});
afterEach(() => {
TestUtils.restoreStubs();
sandbox.restore();
});
describe('with valid snode pool', () => {
it('rebuilds after removing last snode on path', async () => {
await OnionPaths.dropSnodeFromPath(oldOnionPaths[2][2].pubkey_ed25519);

View File

@ -0,0 +1,93 @@
// tslint:disable: no-implicit-dependencies max-func-body-length no-unused-expression
import chai from 'chai';
import * as sinon from 'sinon';
import _ from 'lodash';
import { describe } from 'mocha';
import { TestUtils } from '../../../test-utils';
import { Onions, SnodePool } from '../../../../session/snode_api';
import * as Data from '../../../../data/data';
import chaiAsPromised from 'chai-as-promised';
import * as OnionPaths from '../../../../session/onions/onionPath';
import { generateFakeSnodes, generateFakeSnodeWithEdKey } from '../../../test-utils/utils';
import { SeedNodeAPI } from '../../../../session/seed_node_api';
import { SnodeFromSeed } from '../../../../session/seed_node_api/SeedNodeAPI';
chai.use(chaiAsPromised as any);
chai.should();
const { expect } = chai;
const guard1ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f9161534e';
const guard2ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f91615349';
const guard3ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f9161534a';
const fakeSnodePool: Array<Data.Snode> = [
...generateFakeSnodes(12),
generateFakeSnodeWithEdKey(guard1ed),
generateFakeSnodeWithEdKey(guard2ed),
generateFakeSnodeWithEdKey(guard3ed),
...generateFakeSnodes(3),
];
const fakeSnodePoolFromSeedNode: Array<SnodeFromSeed> = fakeSnodePool.map(m => {
return {
public_ip: m.ip,
storage_port: m.port,
pubkey_x25519: m.pubkey_x25519,
pubkey_ed25519: m.pubkey_ed25519,
};
});
// tslint:disable: variable-name
// tslint:disable-next-line: max-func-body-length
describe('SeedNodeAPI', () => {
// Initialize new stubbed cache
const sandbox = sinon.createSandbox();
describe('getSnodeListFromSeednode', () => {
beforeEach(() => {
// Utils Stubs
OnionPaths.clearTestOnionPath();
TestUtils.stubWindowLog();
Onions.resetSnodeFailureCount();
OnionPaths.resetPathFailureCount();
SnodePool.TEST_resetState();
});
afterEach(() => {
TestUtils.restoreStubs();
sandbox.restore();
});
it('if the cached snode pool has less than 12 snodes, trigger a fetch from the seed nodes with retries', async () => {
const TEST_fetchSnodePoolFromSeedNodeRetryable = sandbox
.stub(SeedNodeAPI, 'TEST_fetchSnodePoolFromSeedNodeRetryable')
.onFirstCall()
.throws()
.onSecondCall()
.resolves(fakeSnodePoolFromSeedNode);
sandbox.stub(SeedNodeAPI, 'getMinTimeout').returns(20);
// run the command
const fetched = await SeedNodeAPI.fetchSnodePoolFromSeedNodeWithRetries([
{ url: 'seednode1' },
]);
const sortedFetch = fetched.sort((a, b) => (a.pubkey_ed25519 > b.pubkey_ed25519 ? -1 : 1));
const sortedFakeSnodePool = fakeSnodePool.sort((a, b) =>
a.pubkey_ed25519 > b.pubkey_ed25519 ? -1 : 1
);
expect(sortedFetch).to.deep.equal(sortedFakeSnodePool);
expect(
TEST_fetchSnodePoolFromSeedNodeRetryable.callCount,
'TEST_fetchSnodePoolFromSeedNodeRetryable called twice as the first one failed'
).to.be.eq(2);
});
});
});

View File

@ -0,0 +1,101 @@
// tslint:disable: no-implicit-dependencies max-func-body-length no-unused-expression
import chai from 'chai';
import * as sinon from 'sinon';
import _ from 'lodash';
import { describe } from 'mocha';
import { TestUtils } from '../../../test-utils';
import { Onions, SnodePool } from '../../../../session/snode_api';
import * as Data from '../../../../data/data';
import chaiAsPromised from 'chai-as-promised';
import * as OnionPaths from '../../../../session/onions/onionPath';
import { generateFakeSnodes, generateFakeSnodeWithEdKey } from '../../../test-utils/utils';
import { SeedNodeAPI } from '../../../../session/seed_node_api';
chai.use(chaiAsPromised as any);
chai.should();
const { expect } = chai;
const guard1ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f9161534e';
const guard2ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f91615349';
const guard3ed = 'e3ec6fcc79e64c2af6a48a9865d4bf4b739ec7708d75f35acc3d478f9161534a';
const fakeSnodePool: Array<Data.Snode> = [
...generateFakeSnodes(12),
generateFakeSnodeWithEdKey(guard1ed),
generateFakeSnodeWithEdKey(guard2ed),
generateFakeSnodeWithEdKey(guard3ed),
...generateFakeSnodes(3),
];
// tslint:disable: variable-name
// tslint:disable-next-line: max-func-body-length
describe('OnionPaths', () => {
// Initialize new stubbed cache
const sandbox = sinon.createSandbox();
describe('getSnodePoolFromDBOrFetchFromSeed', () => {
let getSnodePoolFromDb: sinon.SinonStub;
let fetchFromSeedWithRetriesAndWriteToDb: sinon.SinonStub;
let fetchSnodePoolFromSeedNodeWithRetries: sinon.SinonStub;
beforeEach(() => {
// Utils Stubs
OnionPaths.clearTestOnionPath();
TestUtils.stubWindow('getSeedNodeList', () => ['seednode1']);
TestUtils.stubWindowLog();
Onions.resetSnodeFailureCount();
OnionPaths.resetPathFailureCount();
SnodePool.TEST_resetState();
});
afterEach(() => {
TestUtils.restoreStubs();
sandbox.restore();
});
it('if the cached snode pool has at least 12 snodes, just return it without fetching from seed', async () => {
getSnodePoolFromDb = sandbox.stub(Data, 'getSnodePoolFromDb').resolves(fakeSnodePool);
fetchFromSeedWithRetriesAndWriteToDb = sandbox.stub(
SnodePool,
'TEST_fetchFromSeedWithRetriesAndWriteToDb'
);
const fetched = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
expect(getSnodePoolFromDb.callCount).to.be.eq(1);
expect(fetchFromSeedWithRetriesAndWriteToDb.callCount).to.be.eq(0);
expect(fetched).to.deep.equal(fakeSnodePool);
});
it('if the cached snode pool 12 or less snodes, trigger a fetch from the seed nodes', async () => {
const length12 = fakeSnodePool.slice(0, 12);
expect(length12.length).to.eq(12);
getSnodePoolFromDb = sandbox.stub(Data, 'getSnodePoolFromDb').resolves(length12);
sandbox.stub(Data, 'updateSnodePoolOnDb').resolves();
fetchFromSeedWithRetriesAndWriteToDb = sandbox
.stub(SnodePool, 'TEST_fetchFromSeedWithRetriesAndWriteToDb')
.callThrough();
fetchSnodePoolFromSeedNodeWithRetries = sandbox
.stub(SeedNodeAPI, 'fetchSnodePoolFromSeedNodeWithRetries')
.resolves(fakeSnodePool);
// run the command
const fetched = await SnodePool.getSnodePoolFromDBOrFetchFromSeed();
expect(getSnodePoolFromDb.callCount).to.be.eq(1);
expect(
fetchFromSeedWithRetriesAndWriteToDb.callCount,
'fetchFromSeedWithRetriesAndWriteToDb eq 1'
).to.be.eq(1);
expect(
fetchSnodePoolFromSeedNodeWithRetries.callCount,
'fetchSnodePoolFromSeedNodeWithRetries eq 1'
).to.be.eq(1);
expect(fetched).to.deep.equal(fakeSnodePool);
});
});
});