fix: make sure to include the associatedWith to handle 421

This commit is contained in:
Audric Ackermann 2023-05-26 10:51:02 +10:00
parent 2b3e490ade
commit 7b42c64cf3
18 changed files with 176 additions and 135 deletions

View File

@ -1,25 +1,28 @@
import React from 'react';
import { CSSProperties } from 'styled-components';
export class MessageView extends React.Component {
public render() {
return (
<div className="conversation placeholder">
<div className="conversation-header" />
<div className="container">
<div className="content session-full-logo">
<img
src="images/session/brand.svg"
className="session-brand-logo"
alt="full-brand-logo"
/>
<img
src="images/session/session-text.svg"
className="session-text-logo"
alt="full-brand-logo"
/>
</div>
export const MessageView = () => {
const noDragStyle = { '-webkit-user-drag': 'none' } as CSSProperties;
return (
<div className="conversation placeholder">
<div className="conversation-header" />
<div className="container">
<div className="content session-full-logo">
<img
src="images/session/brand.svg"
className="session-brand-logo"
alt="full-brand-logo"
style={noDragStyle}
/>
<img
src="images/session/session-text.svg"
className="session-text-logo"
alt="full-brand-logo"
style={noDragStyle}
/>
</div>
</div>
);
}
}
</div>
);
};

View File

@ -40,7 +40,10 @@ import { LeftPaneSectionContainer } from './LeftPaneSectionContainer';
import { SettingsKey } from '../../data/settings-key';
import { getLatestReleaseFromFileServer } from '../../session/apis/file_server_api/FileServerApi';
import { forceRefreshRandomSnodePool } from '../../session/apis/snode_api/snodePool';
import {
forceRefreshRandomSnodePool,
getFreshSwarmFor,
} from '../../session/apis/snode_api/snodePool';
import { isDarkTheme } from '../../state/selectors/theme';
import { ThemeStateType } from '../../themes/constants/colors';
import { switchThemeTo } from '../../themes/switchTheme';
@ -198,6 +201,7 @@ const doAppStartUp = async () => {
void triggerSyncIfNeeded();
void getSwarmPollingInstance().start();
void loadDefaultRooms();
void getFreshSwarmFor(UserUtils.getOurPubKeyStrFromCache()); // refresh our swarm on start to speed up the first message fetching event
// TODOLATER make this a job of the JobRunner
debounce(triggerAvatarReUploadIfNeeded, 200);

View File

@ -104,7 +104,18 @@ async function mergeConfigsWithIncomingUpdates(
`printDumpsForDebugging: before merge of ${variant}:`,
StringUtils.toHex(await GenericWrapperActions.dump(variant))
);
for (let index = 0; index < toMerge.length; index++) {
const element = toMerge[index];
window.log.info(
`printDumpsForDebugging: toMerge of ${index}:${element.hash}: ${StringUtils.toHex(
element.data
)} `,
StringUtils.toHex(await GenericWrapperActions.dump(variant))
);
}
}
const mergedCount = await GenericWrapperActions.merge(variant, toMerge);
const needsPush = await GenericWrapperActions.needsPush(variant);
const needsDump = await GenericWrapperActions.needsDump(variant);
@ -354,17 +365,24 @@ async function handleCommunitiesUpdate() {
});
}
// this call can take quite a long time and should not cause issues to not be awaited
void Promise.all(
communitiesToJoinInDB.map(async toJoin => {
window.log.info('joining community with convoId ', toJoin.fullUrlWithPubkey);
return getOpenGroupManager().attemptConnectionV2OneAtATime(
toJoin.baseUrl,
toJoin.roomCasePreserved,
toJoin.pubkeyHex
);
})
);
// this call can take quite a long time but must be awaited (as it is async and creates the entries in the DB, which are then used as a diff)
try {
await Promise.all(
communitiesToJoinInDB.map(async toJoin => {
window.log.info('joining community with convoId ', toJoin.fullUrlWithPubkey);
return getOpenGroupManager().attemptConnectionV2OneAtATime(
toJoin.baseUrl,
toJoin.roomCasePreserved,
toJoin.pubkeyHex
);
})
);
} catch (e) {
window.log.warn(
`joining community with failed with one of ${communitiesToJoinInDB}`,
e.message
);
}
// if the convos already exists, make sure to update the fields if needed
for (let index = 0; index < allCommunitiesInWrapper.length; index++) {

View File

@ -303,7 +303,7 @@ async function getSnodesFromSeedUrl(urlObj: URL): Promise<Array<any>> {
}
return validNodes;
} catch (e) {
window?.log?.error('Invalid json response');
window?.log?.error('Invalid json response. error:', e.message);
throw new Error(`getSnodesFromSeedUrl: cannot parse content as JSON from ${urlObj.href}`);
}
}

View File

@ -18,7 +18,7 @@ export async function doSnodeBatchRequest(
subRequests: Array<SnodeApiSubRequests>,
targetNode: Snode,
timeout: number,
associatedWith?: string,
associatedWith: string | null,
method: 'batch' | 'sequence' = 'batch'
): Promise<NotEmptyArrayOfBatchResults> {
// console.warn(
@ -49,7 +49,7 @@ export async function doSnodeBatchRequest(
await processOnionRequestErrorAtDestination({
statusCode: resultRow.code,
body: JSON.stringify(resultRow.body),
associatedWith,
associatedWith: associatedWith || undefined,
destinationSnodeEd25519: targetNode.pubkey_ed25519,
});
}

View File

@ -18,7 +18,7 @@ function getNetworkTimeSubRequests(): Array<NetworkTimeSubRequest> {
// tslint:disable-next-line: variable-name
const getNetworkTime = async (snode: Snode): Promise<string | number> => {
const subRequests = getNetworkTimeSubRequests();
const result = await doSnodeBatchRequest(subRequests, snode, 4000);
const result = await doSnodeBatchRequest(subRequests, snode, 4000, null);
if (!result || !result.length) {
window?.log?.warn(`getNetworkTime on ${snode.ip}:${snode.port} returned falsish value`, result);
throw new Error('getNetworkTime: Invalid result');

View File

@ -32,7 +32,7 @@ function buildSnodeListRequests(): Array<GetServiceNodesSubRequest> {
*/
async function getSnodePoolFromSnode(targetNode: Snode): Promise<Array<Snode>> {
const requests = buildSnodeListRequests();
const results = await doSnodeBatchRequest(requests, targetNode, 4000);
const results = await doSnodeBatchRequest(requests, targetNode, 4000, null);
const firstResult = results[0];

View File

@ -708,7 +708,7 @@ async function handle421InvalidSwarm({
if (parsedBody?.snodes?.length) {
// the snode gave us the new swarm. Save it for the next retry
window?.log?.warn(
'Wrong swarm, now looking at snodes',
`Wrong swarm, now looking for pk ${ed25519Str(associatedWith)} at snodes: `,
parsedBody.snodes.map((s: any) => ed25519Str(s.pubkey_ed25519))
);

View File

@ -41,7 +41,7 @@ async function getSessionIDForOnsName(onsNameCase: string) {
const promises = range(0, validationCount).map(async () => {
const targetNode = await getRandomSnode();
const results = await doSnodeBatchRequest(onsResolveRequests, targetNode, 4000);
const results = await doSnodeBatchRequest(onsResolveRequests, targetNode, 4000, null);
const firstResult = results[0];
if (!firstResult || firstResult.code !== 200 || !firstResult.body) {
throw new Error('ONSresolve:Failed to resolve ONS');

View File

@ -124,7 +124,12 @@ async function retrieveNextMessages(
// let exceptions bubble up
// no retry for this one as this a call we do every few seconds while polling for messages
const results = await doSnodeBatchRequest(retrieveRequestsParams, targetNode, 4000);
const results = await doSnodeBatchRequest(
retrieveRequestsParams,
targetNode,
4000,
associatedWith
);
if (!results || !results.length) {
window?.log?.warn(

View File

@ -29,7 +29,7 @@ async function doRequest({
url: string;
options: LokiFetchOptions;
targetNode?: Snode;
associatedWith?: string;
associatedWith: string | null;
timeout: number;
}): Promise<undefined | SnodeResponse> {
const method = options.method || 'GET';
@ -52,7 +52,7 @@ async function doRequest({
targetNode,
body: fetchOptions.body,
headers: fetchOptions.headers,
associatedWith,
associatedWith: associatedWith || undefined,
});
if (!fetchResult) {
return undefined;
@ -117,7 +117,7 @@ export async function snodeRpc(
method: string;
params: Record<string, any> | Array<Record<string, any>>;
targetNode: Snode;
associatedWith?: string;
associatedWith: string | null;
timeout?: number;
} // the user pubkey this call is for. If the onion request fails, this is used to handle the error for this user's swarm, for instance
): Promise<undefined | SnodeResponse> {

View File

@ -313,12 +313,27 @@ export async function getSwarmFor(pubkey: string): Promise<Array<Snode>> {
return goodNodes;
}
// Request new node list from the network and save it
return getSwarmFromNetworkAndSave(pubkey);
}
/**
* Force a request to be made to the network to fetch the swarm of the specified pubkey, and cache the result.
* Note: should not be called directly unless you know what you are doing. Use the cached `getSwarmFor()` function instead
* @param pubkey the pubkey to request the swarm for
* @returns the fresh swarm, shuffled
*/
/**
 * Skip the cache and hit the network directly for the swarm of `pubkey`,
 * saving whatever comes back so later cached lookups benefit from it.
 */
export async function getFreshSwarmFor(pubkey: string): Promise<Array<Snode>> {
  const freshSwarm = await getSwarmFromNetworkAndSave(pubkey);
  return freshSwarm;
}
async function getSwarmFromNetworkAndSave(pubkey: string) {
// Request new node list from the network
const swarm = await requestSnodesForPubkeyFromNetwork(pubkey);
const mixedSwarm = shuffle(swarm);
const shuffledSwarm = shuffle(swarm);
const edkeys = mixedSwarm.map((n: Snode) => n.pubkey_ed25519);
const edkeys = shuffledSwarm.map((n: Snode) => n.pubkey_ed25519);
await internalUpdateSwarmFor(pubkey, edkeys);
return mixedSwarm;
return shuffledSwarm;
}

View File

@ -6,27 +6,26 @@ import { PubKey } from '../../types';
import { ERROR_CODE_NO_CONNECT } from './SNodeAPI';
import * as snodePool from './snodePool';
import pRetry from 'p-retry';
import { ConversationModel } from '../../../models/conversation';
import { ConfigMessageHandler } from '../../../receiver/configMessage';
import { decryptEnvelopeWithOurKey } from '../../../receiver/contentMessage';
import { EnvelopePlus } from '../../../receiver/types';
import { updateIsOnline } from '../../../state/ducks/onion';
import { ReleasedFeatures } from '../../../util/releaseFeature';
import {
GenericWrapperActions,
UserGroupsWrapperActions,
} from '../../../webworker/workers/browser/libsession_worker_interface';
import { DURATION, SWARM_POLLING_TIMEOUT } from '../../constants';
import { getConversationController } from '../../conversations';
import { IncomingMessage } from '../../messages/incoming/IncomingMessage';
import { ed25519Str } from '../../onions/onionPath';
import { StringUtils, UserUtils } from '../../utils';
import { perfEnd, perfStart } from '../../utils/Performance';
import { LibSessionUtil } from '../../utils/libsession/libsession_utils';
import { SnodeNamespace, SnodeNamespaces } from './namespaces';
import { SnodeAPIRetrieve } from './retrieveRequest';
import { RetrieveMessageItem, RetrieveMessagesResultsBatched } from './types';
import { ReleasedFeatures } from '../../../util/releaseFeature';
import { LibSessionUtil } from '../../utils/libsession/libsession_utils';
import {
GenericWrapperActions,
UserGroupsWrapperActions,
} from '../../../webworker/workers/browser/libsession_worker_interface';
export function extractWebSocketContent(
message: string,
@ -404,93 +403,80 @@ export class SwarmPolling {
const pkStr = pubkey.key;
try {
return await pRetry(
async () => {
const prevHashes = await Promise.all(
namespaces.map(namespace => this.getLastHash(snodeEdkey, pkStr, namespace))
);
const configHashesToBump: Array<string> = [];
const prevHashes = await Promise.all(
namespaces.map(namespace => this.getLastHash(snodeEdkey, pkStr, namespace))
);
const configHashesToBump: Array<string> = [];
if (await ReleasedFeatures.checkIsUserConfigFeatureReleased()) {
// TODOLATER add the logic to take care of the closed groups too once we have a way to do it with the wrappers
if (isUs) {
for (let index = 0; index < LibSessionUtil.requiredUserVariants.length; index++) {
const variant = LibSessionUtil.requiredUserVariants[index];
try {
const toBump = await GenericWrapperActions.currentHashes(variant);
if (toBump?.length) {
configHashesToBump.push(...toBump);
}
} catch (e) {
window.log.warn(`failed to get currentHashes for user variant ${variant}`);
}
}
window.log.debug(`configHashesToBump: ${configHashesToBump}`);
}
}
let results = await SnodeAPIRetrieve.retrieveNextMessages(
node,
prevHashes,
pkStr,
namespaces,
UserUtils.getOurPubKeyStrFromCache(),
configHashesToBump
);
if (!results.length) {
return [];
}
// when we asked to extend the expiry of the config messages, exclude it from the list of results as we do not want to mess up the last hash tracking logic
if (configHashesToBump.length) {
if (await ReleasedFeatures.checkIsUserConfigFeatureReleased()) {
// TODOLATER add the logic to take care of the closed groups too once we have a way to do it with the wrappers
if (isUs) {
for (let index = 0; index < LibSessionUtil.requiredUserVariants.length; index++) {
const variant = LibSessionUtil.requiredUserVariants[index];
try {
const lastResult = results[results.length - 1];
if (lastResult?.code !== 200) {
// the update expiry of our config messages didn't work.
window.log.warn(
`the update expiry of our tracked config hashes didn't work: ${JSON.stringify(
lastResult
)}`
);
const toBump = await GenericWrapperActions.currentHashes(variant);
if (toBump?.length) {
configHashesToBump.push(...toBump);
}
} catch (e) {
// nothing to do I suppose here.
window.log.warn(`failed to get currentHashes for user variant ${variant}`);
}
results = results.slice(0, results.length - 1);
}
const lastMessages = results.map(r => {
return last(r.messages.messages);
});
await Promise.all(
lastMessages.map(async (lastMessage, index) => {
if (!lastMessage) {
return;
}
return this.updateLastHash({
edkey: snodeEdkey,
pubkey,
namespace: namespaces[index],
hash: lastMessage.hash,
expiration: lastMessage.expiration,
});
})
);
return results;
},
{
minTimeout: 100,
retries: 1,
onFailedAttempt: e => {
window?.log?.warn(
`retrieveNextMessages attempt #${e.attemptNumber} failed. ${e.retriesLeft} retries left... ${e.name}`
);
},
window.log.debug(`configHashesToBump: ${configHashesToBump}`);
}
}
let results = await SnodeAPIRetrieve.retrieveNextMessages(
node,
prevHashes,
pkStr,
namespaces,
UserUtils.getOurPubKeyStrFromCache(),
configHashesToBump
);
if (!results.length) {
return [];
}
// when we asked to extend the expiry of the config messages, exclude it from the list of results as we do not want to mess up the last hash tracking logic
if (configHashesToBump.length) {
try {
const lastResult = results[results.length - 1];
if (lastResult?.code !== 200) {
// the update expiry of our config messages didn't work.
window.log.warn(
`the update expiry of our tracked config hashes didn't work: ${JSON.stringify(
lastResult
)}`
);
}
} catch (e) {
// nothing to do I suppose here.
}
results = results.slice(0, results.length - 1);
}
const lastMessages = results.map(r => {
return last(r.messages.messages);
});
await Promise.all(
lastMessages.map(async (lastMessage, index) => {
if (!lastMessage) {
return;
}
return this.updateLastHash({
edkey: snodeEdkey,
pubkey,
namespace: namespaces[index],
hash: lastMessage.hash,
expiration: lastMessage.expiration,
});
})
);
return results;
} catch (e) {
if (e.message === ERROR_CODE_NO_CONNECT) {
if (window.inboxStore?.getState().onionPaths.isOnline) {

View File

@ -515,6 +515,8 @@ async function leaveClosedGroup(groupId: string, fromSyncMessage: boolean) {
namespace: SnodeNamespaces.ClosedGroupMessage,
pubkey: PubKey.cast(groupId),
});
// TODO even if our leaving message fails to be sent for some specific reason, we still want to delete the group.
// for instance, if we do not have the encryption keypair anymore, we cannot send our "left" message, but we should still delete the group's content
if (wasSent) {
window?.log?.info(
`Leaving message sent ${groupId}. Removing everything related to this group.`

View File

@ -312,7 +312,7 @@ export async function testGuardNode(snode: Snode) {
response = await insecureNodeFetch(url, fetchOptions);
} catch (e) {
if (e.type === 'request-timeout') {
window?.log?.warn('test timeout for node,', ed25519Str(snode.pubkey_ed25519));
window?.log?.warn('test :,', ed25519Str(snode.pubkey_ed25519));
}
if (e.code === 'ENETUNREACH') {
window?.log?.warn('no network on node,', snode);

View File

@ -71,6 +71,13 @@ async function updateProfileOfContact(
avatarChanged = true; // allow changes from strings to null/undefined to trigger an AvatarDownloadJob. If that happens, we want to remove the local attachment file.
}
// if we have a local path to a downloaded avatar, but no corresponding url/key for it, it means that
// the avatar was most likely removed, so let's remove our link to that file.
if ((!profileUrl || !profileKeyHex) && conversation.get('avatarInProfile')) {
conversation.set({ avatarInProfile: undefined });
changes = true;
}
if (changes) {
await conversation.commit();
}

View File

@ -236,7 +236,7 @@ async function sendMessagesDataToSnode(
signedDeleteOldHashesRequest
);
if (snode) {
if (!isEmpty(storeResults)) {
window?.log?.info(
`sendMessagesToSnode - Successfully stored messages to ${ed25519Str(destination)} via ${
snode.ip

View File

@ -223,6 +223,7 @@ class ConfigurationSyncJob extends PersistedJob<ConfigurationSyncPersistedData>
window.log.info(
`ConfigurationSyncJob: unexpected result length: expected ${expectedReplyLength} but got ${result?.length}`
);
// this might be a 421 error (already handled) so let's retry this request a little bit later
return RunJobResult.RetryJobIfPossible;
}